input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
weekly recurrence.
"""
return pulumi.get(self, "weekly_recurrence")
    @weekly_recurrence.setter
    def weekly_recurrence(self, value: Optional[pulumi.Input['WeekDetailsArgs']]):
        # Store (or clear, when None) the weekly recurrence schedule on the resource args.
        pulumi.set(self, "weekly_recurrence", value)
@pulumi.input_type
class SharedPublicIpAddressConfigurationArgs:
    def __init__(__self__, *,
                 inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['InboundNatRuleArgs']]]] = None):
        """
        Properties of a virtual machine that determine how it is connected to a load balancer.

        :param pulumi.Input[Sequence[pulumi.Input['InboundNatRuleArgs']]] inbound_nat_rules: The incoming NAT rules
        """
        # When no rules were supplied the attribute is deliberately left unset.
        if inbound_nat_rules is None:
            return
        pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules)

    @property
    @pulumi.getter(name="inboundNatRules")
    def inbound_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InboundNatRuleArgs']]]]:
        """
        The incoming NAT rules
        """
        return pulumi.get(self, "inbound_nat_rules")

    @inbound_nat_rules.setter
    def inbound_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InboundNatRuleArgs']]]]):
        pulumi.set(self, "inbound_nat_rules", value)
@pulumi.input_type
class SubnetArgs:
    def __init__(__self__, *,
                 allow_public_ip: Optional[pulumi.Input[str]] = None,
                 lab_subnet_name: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        Subnet information.

        :param pulumi.Input[str] allow_public_ip: The permission policy of the subnet for allowing public IP addresses (i.e. Allow, Deny)).
        :param pulumi.Input[str] lab_subnet_name: The name of the subnet as seen in the lab.
        :param pulumi.Input[str] resource_id: The resource ID of the subnet.
        """
        # Record only the arguments that were actually supplied; omitted
        # fields remain unset on the underlying resource args object.
        for field, supplied in (("allow_public_ip", allow_public_ip),
                                ("lab_subnet_name", lab_subnet_name),
                                ("resource_id", resource_id)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="allowPublicIp")
    def allow_public_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The permission policy of the subnet for allowing public IP addresses (i.e. Allow, Deny)).
        """
        return pulumi.get(self, "allow_public_ip")

    @allow_public_ip.setter
    def allow_public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allow_public_ip", value)

    @property
    @pulumi.getter(name="labSubnetName")
    def lab_subnet_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the subnet as seen in the lab.
        """
        return pulumi.get(self, "lab_subnet_name")

    @lab_subnet_name.setter
    def lab_subnet_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lab_subnet_name", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The resource ID of the subnet.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class SubnetOverrideArgs:
    def __init__(__self__, *,
                 lab_subnet_name: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 shared_public_ip_address_configuration: Optional[pulumi.Input['SubnetSharedPublicIpAddressConfigurationArgs']] = None,
                 use_in_vm_creation_permission: Optional[pulumi.Input[str]] = None,
                 use_public_ip_address_permission: Optional[pulumi.Input[str]] = None,
                 virtual_network_pool_name: Optional[pulumi.Input[str]] = None):
        """
        Property overrides on a subnet of a virtual network.

        :param pulumi.Input[str] lab_subnet_name: The name given to the subnet within the lab.
        :param pulumi.Input[str] resource_id: The resource ID of the subnet.
        :param pulumi.Input['SubnetSharedPublicIpAddressConfigurationArgs'] shared_public_ip_address_configuration: Properties that virtual machines on this subnet will share.
        :param pulumi.Input[str] use_in_vm_creation_permission: Indicates whether this subnet can be used during virtual machine creation (i.e. Allow, Deny).
        :param pulumi.Input[str] use_public_ip_address_permission: Indicates whether public IP addresses can be assigned to virtual machines on this subnet (i.e. Allow, Deny).
        :param pulumi.Input[str] virtual_network_pool_name: The virtual network pool associated with this subnet.
        """
        # Record only the arguments that were actually supplied; omitted
        # fields remain unset on the underlying resource args object.
        for field, supplied in (
                ("lab_subnet_name", lab_subnet_name),
                ("resource_id", resource_id),
                ("shared_public_ip_address_configuration", shared_public_ip_address_configuration),
                ("use_in_vm_creation_permission", use_in_vm_creation_permission),
                ("use_public_ip_address_permission", use_public_ip_address_permission),
                ("virtual_network_pool_name", virtual_network_pool_name)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="labSubnetName")
    def lab_subnet_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name given to the subnet within the lab.
        """
        return pulumi.get(self, "lab_subnet_name")

    @lab_subnet_name.setter
    def lab_subnet_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lab_subnet_name", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The resource ID of the subnet.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)

    @property
    @pulumi.getter(name="sharedPublicIpAddressConfiguration")
    def shared_public_ip_address_configuration(self) -> Optional[pulumi.Input['SubnetSharedPublicIpAddressConfigurationArgs']]:
        """
        Properties that virtual machines on this subnet will share.
        """
        return pulumi.get(self, "shared_public_ip_address_configuration")

    @shared_public_ip_address_configuration.setter
    def shared_public_ip_address_configuration(self, value: Optional[pulumi.Input['SubnetSharedPublicIpAddressConfigurationArgs']]):
        pulumi.set(self, "shared_public_ip_address_configuration", value)

    @property
    @pulumi.getter(name="useInVmCreationPermission")
    def use_in_vm_creation_permission(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates whether this subnet can be used during virtual machine creation (i.e. Allow, Deny).
        """
        return pulumi.get(self, "use_in_vm_creation_permission")

    @use_in_vm_creation_permission.setter
    def use_in_vm_creation_permission(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "use_in_vm_creation_permission", value)

    @property
    @pulumi.getter(name="usePublicIpAddressPermission")
    def use_public_ip_address_permission(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates whether public IP addresses can be assigned to virtual machines on this subnet (i.e. Allow, Deny).
        """
        return pulumi.get(self, "use_public_ip_address_permission")

    @use_public_ip_address_permission.setter
    def use_public_ip_address_permission(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "use_public_ip_address_permission", value)

    @property
    @pulumi.getter(name="virtualNetworkPoolName")
    def virtual_network_pool_name(self) -> Optional[pulumi.Input[str]]:
        """
        The virtual network pool associated with this subnet.
        """
        return pulumi.get(self, "virtual_network_pool_name")

    @virtual_network_pool_name.setter
    def virtual_network_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_pool_name", value)
@pulumi.input_type
class SubnetSharedPublicIpAddressConfigurationArgs:
    def __init__(__self__, *,
                 allowed_ports: Optional[pulumi.Input[Sequence[pulumi.Input['PortArgs']]]] = None):
        """
        Configuration for public IP address sharing.

        :param pulumi.Input[Sequence[pulumi.Input['PortArgs']]] allowed_ports: Backend ports that virtual machines on this subnet are allowed to expose
        """
        # When no ports were supplied the attribute is deliberately left unset.
        if allowed_ports is None:
            return
        pulumi.set(__self__, "allowed_ports", allowed_ports)

    @property
    @pulumi.getter(name="allowedPorts")
    def allowed_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PortArgs']]]]:
        """
        Backend ports that virtual machines on this subnet are allowed to expose
        """
        return pulumi.get(self, "allowed_ports")

    @allowed_ports.setter
    def allowed_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PortArgs']]]]):
        pulumi.set(self, "allowed_ports", value)
@pulumi.input_type
class UserIdentityArgs:
    def __init__(__self__, *,
                 app_id: Optional[pulumi.Input[str]] = None,
                 object_id: Optional[pulumi.Input[str]] = None,
                 principal_id: Optional[pulumi.Input[str]] = None,
                 principal_name: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        Identity attributes of a lab user.

        :param pulumi.Input[str] app_id: Set to the app Id of the client JWT making the request.
        :param pulumi.Input[str] object_id: Set to the object Id of the client JWT making the request. Not all users have object Id. For CSP (reseller) scenarios for example, object Id is not available.
        :param pulumi.Input[str] principal_id: Set to the principal Id of the client JWT making the request. Service principal will not have the principal Id.
        :param pulumi.Input[str] principal_name: Set to the principal name / UPN of the client JWT making the request.
        :param pulumi.Input[str] tenant_id: Set to the tenant ID of the client JWT making the request.
        """
        # Record only the arguments that were actually supplied; omitted
        # fields remain unset on the underlying resource args object.
        for field, supplied in (("app_id", app_id),
                                ("object_id", object_id),
                                ("principal_id", principal_id),
                                ("principal_name", principal_name),
                                ("tenant_id", tenant_id)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> Optional[pulumi.Input[str]]:
        """
        Set to the app Id of the client JWT making the request.
        """
        return pulumi.get(self, "app_id")

    @app_id.setter
    def app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "app_id", value)

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[pulumi.Input[str]]:
        """
        Set to the object Id of the client JWT making the request. Not all users have object Id. For CSP (reseller) scenarios for example, object Id is not available.
        """
        return pulumi.get(self, "object_id")

    @object_id.setter
    def object_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "object_id", value)

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[pulumi.Input[str]]:
        """
        Set to the principal Id of the client JWT making the request. Service principal will not have the principal Id.
        """
        return pulumi.get(self, "principal_id")

    @principal_id.setter
    def principal_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "principal_id", value)

    @property
    @pulumi.getter(name="principalName")
    def principal_name(self) -> Optional[pulumi.Input[str]]:
        """
        Set to the principal name / UPN of the client JWT making the request.
        """
        return pulumi.get(self, "principal_name")

    @principal_name.setter
    def principal_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "principal_name", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        Set to the tenant ID of the client JWT making the request.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class UserSecretStoreArgs:
    def __init__(__self__, *,
                 key_vault_id: Optional[pulumi.Input[str]] = None,
                 key_vault_uri: Optional[pulumi.Input[str]] = None):
        """
        Properties of a user's secret store.

        :param pulumi.Input[str] key_vault_id: The ID of the user's Key vault.
        :param pulumi.Input[str] key_vault_uri: The URI of the user's Key vault.
        """
        # Record only the arguments that were actually supplied.
        for field, supplied in (("key_vault_id", key_vault_id),
                                ("key_vault_uri", key_vault_uri)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the user's Key vault.
        """
        return pulumi.get(self, "key_vault_id")

    @key_vault_id.setter
    def key_vault_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_id", value)

    @property
    @pulumi.getter(name="keyVaultUri")
    def key_vault_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The URI of the user's Key vault.
        """
        return pulumi.get(self, "key_vault_uri")

    @key_vault_uri.setter
    def key_vault_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_uri", value)
@pulumi.input_type
class WeekDetailsArgs:
    def __init__(__self__, *,
                 time: Optional[pulumi.Input[str]] = None,
                 weekdays: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Properties of a weekly schedule.

        :param pulumi.Input[str] time: The time of the day the schedule will occur.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The days of the week for which the schedule is set (e.g. Sunday, Monday, Tuesday, etc.).
        """
        # Record only the arguments that were actually supplied.
        for field, supplied in (("time", time), ("weekdays", weekdays)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def time(self) -> Optional[pulumi.Input[str]]:
        """
        The time of the day the schedule will occur.
        """
        return pulumi.get(self, "time")

    @time.setter
    def time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time", value)

    @property
    @pulumi.getter
    def weekdays(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The days of the week for which the schedule is set (e.g. Sunday, Monday, Tuesday, etc.).
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "weekdays", value)
@pulumi.input_type
class WindowsOsInfoArgs:
def __init__(__self__, *,
windows_os_state: Optional[pulumi.Input[str]] = None):
"""
Information about a Windows OS.
:param pulumi.Input[str] windows_os_state: The state of the Windows OS (i.e. NonSysprepped, | |
<gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend, constraints, initializers, layers, models, regularizers
from keras.backend import convert_inputs_if_ragged, maybe_convert_to_ragged
from keras.utils.control_flow_util import smart_cond
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.losses_utils import compute_weighted_loss as _compute_weighted_loss, ReductionV2 as Reduction
from keras.utils.tf_utils import shape_type_conversion
from tensorflow.python.distribute import distribution_strategy_context
def compute_weighted_loss(losses, sample_weight=None, reduction=Reduction.SUM_OVER_BATCH_SIZE):
    """Delegate to Keras' weighted-loss helper, but reject reductions that
    implicitly average over the (per-replica) batch when a distribution
    strategy is active, since that silently miscomputes the global loss."""
    implicit_batch_average = reduction in {Reduction.AUTO, Reduction.SUM_OVER_BATCH_SIZE}
    if implicit_batch_average and distribution_strategy_context.has_strategy():
        raise ValueError(
            'Please use `Reduction.SUM` or `Reduction.NONE` for loss reduction when '
            'losses are used with `tf.distribute.Strategy` outside of the built-in training loops. You can implement '
            '`Reduction.SUM_OVER_BATCH_SIZE` using global batch size like:\n'
            '```\n'
            'with strategy.scope():\n'
            '    loss_obj = losses.CategoricalCrossentropy(reduction=Reduction.NONE)\n'
            '....\n'
            '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size)\n'
            '```\n'
            'Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details.')
    return _compute_weighted_loss(losses, sample_weight=sample_weight, reduction=reduction)
@register_keras_serializable(package='Miss')
class AdaptiveSoftmax(layers.Layer):
    """Adaptive softmax layer.

    Reference https://arxiv.org/pdf/1609.04309.pdf
    "Efficient softmax approximation for GPUs" (Grave et al., 2017)

    The vocabulary is partitioned into a frequent "head" and progressively
    rarer "tail" clusters (delimited by `cutoff`). The head projection also
    predicts one pseudo-class per tail cluster; tail logits are computed with
    smaller (factor-reduced) projections, which is the source of the speedup.

    Args:
        units: Positive integer, dimensionality of the output space (number of classes).
        cutoff: Ordered list of positive integers, numbers for next class-cluster start id's.
        factor: Reduction factor for second level projection matrices.
        dropout: Dropout for second level projections.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to the output of the layer (its "activation").
        kernel_constraint: Constraint function applied to the `kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.

    Returns:
        N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input logits and 1D input targets
        with shapes `(batch_size, input_dim)` and `(batch_size,)`, the output would have shape `(batch_size, units)`.
    """

    def __init__(
            self, units, cutoff, factor=4, dropout=0., use_bias=True, kernel_initializer='glorot_uniform',
            bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None,
            bias_constraint=None, loss_reduction=Reduction.AUTO, **kwargs):
        # Log-probabilities are accumulated in float32 (see the tf.cast calls
        # below), so automatic input casting is disabled.
        kwargs['autocast'] = False
        super(AdaptiveSoftmax, self).__init__(**kwargs)
        self.input_spec = [
            layers.InputSpec(min_ndim=2),  # predictions
            layers.InputSpec(min_ndim=1, dtype='int32'),  # targets
        ]
        self.supports_masking = True
        self._supports_ragged_inputs = True
        if cutoff[-1] > units - 1:
            raise ValueError('Can\'t specify `cutoff` larger than `units` size')
        units = int(units)
        # Raises on unknown reduction modes.
        Reduction.validate(loss_reduction)
        self.cutoff = cutoff
        # Internal cutoff list always ends with `units`; consecutive pairs
        # (self._cutoff[i], self._cutoff[i + 1]) delimit tail cluster i.
        # (The check above guarantees cutoff[-1] < units, so the `else`
        # branch appears unreachable in practice.)
        self._cutoff = cutoff + [units] if units > cutoff[-1] else cutoff
        self.units = units
        self.factor = factor
        self.dropout = dropout
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.loss_reduction = loss_reduction

    @shape_type_conversion
    def build(self, input_shape):
        """Create the head projection and one two-layer tail projection per cluster."""
        dtype = tf.dtypes.as_dtype(self.dtype or backend.floatx())
        if not (dtype.is_floating or dtype.is_complex):
            raise TypeError('Unable to build `AdaptiveSoftmax` layer with non-floating point dtype {}'.format(dtype))
        predictions_shape, targets_shape = input_shape
        predictions_rank = len(predictions_shape)
        # Targets carry one integer class id per prediction vector.
        if len(targets_shape) + 1 != predictions_rank:
            raise ValueError('Targets shape {} rank must be one less than predictions '
                             'shape rank {}'.format(targets_shape, predictions_shape))
        self.input_channels = predictions_shape[-1]
        if self.input_channels is None:
            raise ValueError('Channel dimension of predictions should be defined. Found `None`.')
        self.input_spec = [
            layers.InputSpec(ndim=predictions_rank, axes={-1: self.input_channels}),
            layers.InputSpec(ndim=predictions_rank - 1, dtype=tf.int32)
        ]
        # Head predicts the first `cutoff[0]` (frequent) classes plus one
        # pseudo-class per tail cluster.
        self.head = layers.Dense(
            units=self._cutoff[0] + len(self._cutoff) - 1,
            activation=None,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            kernel_constraint=self.kernel_constraint,
            name='head'
        )
        self.tails = []
        self.tail_channels = []
        prev_dim = None
        for i in range(len(self._cutoff) - 1):
            # Rarer clusters get smaller internal projections; round the
            # width to a multiple of 8 (with a floor of 8).
            dim = self.input_channels / (self.factor ** (i + 1))
            dim = max(1, round(dim / 8)) * 8
            if dim == prev_dim:
                raise ValueError('Some cutoffs have same internal size. '
                                 'Try to shorten `cutoffs` or decrease `factor`')
            prev_dim = dim
            # Two-stage tail: bottleneck projection + dropout, then scale up
            # to the cluster's class count.
            tail = models.Sequential([
                layers.Dense(
                    units=dim,
                    activation=None,
                    use_bias=self.use_bias,
                    kernel_initializer=self.kernel_initializer,
                    bias_initializer=self.bias_initializer,
                    kernel_regularizer=self.kernel_regularizer,
                    bias_regularizer=self.bias_regularizer,
                    kernel_constraint=self.kernel_constraint,
                    bias_constraint=self.bias_constraint,
                    name='tail_proj_{}'.format(i),
                    input_shape=(self.input_channels,)
                ),
                layers.Dropout(self.dropout, name='tail_drop_{}'.format(i)),
                layers.Dense(
                    units=self._cutoff[i + 1] - self._cutoff[i],
                    activation=None,
                    use_bias=self.use_bias,
                    kernel_initializer=self.kernel_initializer,
                    bias_initializer=self.bias_initializer,
                    bias_regularizer=self.bias_regularizer,
                    kernel_regularizer=self.kernel_regularizer,
                    kernel_constraint=self.kernel_constraint,
                    bias_constraint=self.bias_constraint,
                    name='tail_scale_{}'.format(i)
                ),
            ])
            self.tails.append(tail)
            self.tail_channels.append(self._cutoff[i + 1] - self._cutoff[i])
        super(AdaptiveSoftmax, self).build(input_shape)

    def call(self, inputs, training=None, mask=None):
        """Return class probabilities; the cross-entropy loss is attached via add_loss."""
        if training is None:
            training = backend.learning_phase()
        input_logits, input_targets = inputs
        input_logits = tf.cast(input_logits, self.compute_dtype)
        # Normalize ragged inputs to dense + row lengths for internal math.
        input_logits, row_lengths = convert_inputs_if_ragged(input_logits)
        input_targets, _ = convert_inputs_if_ragged(input_targets)
        is_ragged_input = (row_lengths is not None)
        # Loss weights start as all-True and are zeroed for ragged padding
        # positions and masked-out timesteps.
        loss_weights = tf.ones_like(input_targets, dtype=tf.bool)
        loss_weights = maybe_convert_to_ragged(is_ragged_input, loss_weights, row_lengths)
        if is_ragged_input:
            loss_weights = loss_weights.to_tensor(False)
        if mask is not None:
            loss_weights = tf.logical_and(loss_weights, mask)
        loss_weights = tf.cast(loss_weights, self.compute_dtype)
        # Training uses the sparse per-cluster path; evaluation computes the
        # full (exact) softmax over all classes.
        probs, loss = smart_cond(
            training,
            lambda: self._train_probs_loss(input_logits, input_targets, loss_weights),
            lambda: self._eval_probs_loss(input_logits, input_targets, loss_weights)
        )
        self.add_loss(loss, inputs=True)
        probs = maybe_convert_to_ragged(is_ragged_input, probs, row_lengths)
        return probs

    def _train_probs_loss(self, inputs, targets, weights):
        """Sparse training path: each tail cluster is evaluated only on the
        examples whose target falls into that cluster."""
        root_logits = self.head(inputs)
        # Loss math in float32 regardless of compute dtype.
        root_logits = tf.cast(root_logits, 'float32')
        root_logprobs = tf.nn.log_softmax(root_logits)
        head_logprobs = root_logprobs[..., :self._cutoff[0]]
        tail_masks = []
        root_targets = targets
        # Remap tail-class targets onto their cluster's pseudo-class id so the
        # head loss can be computed over head-classes + cluster ids.
        for i in range(len(self._cutoff) - 1):
            tail_masks.append(tf.logical_and(
                tf.greater_equal(targets, self._cutoff[i]),
                tf.less(targets, self._cutoff[i + 1])
            ))
            clust_targets = tf.fill(tf.shape(root_targets), tf.cast(self._cutoff[0] + i, root_targets.dtype))
            root_targets = tf.where(tail_masks[i], clust_targets, root_targets)
        root_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=root_logits, labels=root_targets)
        root_loss = compute_weighted_loss(root_loss, sample_weight=weights, reduction=self.loss_reduction)
        full_loss = [root_loss]
        full_logprobs = [head_logprobs]
        targets_shape = tf.shape(targets)
        for i in range(len(self._cutoff) - 1):
            # Log-probability of the cluster pseudo-class, to be added to the
            # within-cluster log-probabilities (chain rule in log space).
            clust_start = self._cutoff[0] + i
            clust_logprob = root_logprobs[..., clust_start:clust_start + 1]
            tail_targets = targets - self._cutoff[i]
            true_mask = tail_masks[i]
            # Run the tail projection only on examples belonging to this cluster.
            true_inputs = tf.boolean_mask(inputs, true_mask)
            true_logits = self.tails[i](true_inputs, training=True)
            true_logits = tf.cast(true_logits, 'float32')
            true_clust_logprob = tf.boolean_mask(clust_logprob, true_mask)
            true_logprobs = tf.nn.log_softmax(true_logits)
            true_logprobs = tf.math.add(true_logprobs, true_clust_logprob)
            # For examples outside the cluster, approximate the within-cluster
            # distribution as uniform: clust_logprob - log(cluster size).
            false_mask = tf.logical_not(true_mask)
            false_clust_logprob = tf.boolean_mask(clust_logprob, false_mask)
            clust_size = tf.cast(self._cutoff[i + 1] - self._cutoff[i], false_clust_logprob.dtype)
            false_logprobs = false_clust_logprob - tf.math.log(clust_size)
            # NOTE(review): `clust_size` is a float tensor here but is also used
            # as a tile multiple below; tf.tile expects integer multiples —
            # confirm this executes as intended.
            false_logprobs = tf.tile(false_logprobs, [1, clust_size])
            # Stitch the exact (in-cluster) and approximate (out-of-cluster)
            # rows back into the original example order.
            target_indices = tf.range(tf.size(targets))
            target_indices = tf.reshape(target_indices, targets_shape)
            true_indices = tf.boolean_mask(target_indices, true_mask)
            false_indices = tf.boolean_mask(target_indices, false_mask)
            target_logprobs = tf.dynamic_stitch(  # TODO: data_flow_ops.parallel_dynamic_stitch ?
                [true_indices, false_indices],
                [true_logprobs, false_logprobs]
            )
            probs_shape = tf.concat([targets_shape, tf.shape(target_logprobs)[-1:]], axis=-1)
            tail_probs = tf.reshape(target_logprobs, probs_shape)
            full_logprobs.append(tail_probs)
            # Within-cluster loss over the examples that belong to this cluster.
            true_targets = tf.boolean_mask(tail_targets, tail_masks[i])
            true_weights = tf.boolean_mask(weights, tail_masks[i])
            true_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=true_logits, labels=true_targets)
            true_loss = compute_weighted_loss(true_loss, sample_weight=true_weights, reduction=self.loss_reduction)
            full_loss.append(true_loss)
        # Average of head loss and per-cluster tail losses.
        loss = tf.reduce_mean(full_loss)
        full_logprobs = tf.concat(full_logprobs, axis=-1)
        probs = tf.math.exp(full_logprobs)
        return probs, loss

    def _eval_probs_loss(self, inputs, targets, weights):
        """Exact evaluation path: every tail projection is run on all examples."""
        root_logits = self.head(inputs)
        root_logits = tf.cast(root_logits, 'float32')
        root_logprobs = tf.nn.log_softmax(root_logits)
        head_logprobs = root_logprobs[..., :self._cutoff[0]]
        # required to match tails input shape in train branch
        flat_inputs = tf.reshape(inputs, [-1, self.input_channels])
        full_logprobs = [head_logprobs]
        targets_shape = tf.shape(targets)
        for i in range(len(self._cutoff) - 1):
            flat_logits = self.tails[i](flat_inputs, training=False)
            tail_shape = tf.concat([targets_shape, [self.tail_channels[i]]], axis=-1)
            tail_logits = tf.reshape(flat_logits, tail_shape)
            tail_logits = tf.cast(tail_logits, 'float32')
            tail_logprobs = tf.nn.log_softmax(tail_logits)
            # Combine cluster pseudo-class and within-cluster log-probabilities.
            clust_start = self._cutoff[0] + i
            clust_logprob = root_logprobs[..., clust_start:clust_start + 1]
            tail_logprobs = tf.math.add(tail_logprobs, clust_logprob)
            full_logprobs.append(tail_logprobs)
        full_logprobs = tf.concat(full_logprobs, axis=-1)
        # NOTE(review): log-probabilities are fed here where logits are
        # expected; softmax is shift-invariant so the cross-entropy value is
        # unchanged, but confirm this was intentional.
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=full_logprobs, labels=targets)
        loss = compute_weighted_loss(loss, sample_weight=weights, reduction=self.loss_reduction)
        probs = tf.math.exp(full_logprobs)
        return probs, loss

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        """Output shape is the predictions shape with channels replaced by `units`."""
        predictions_shape, _ = input_shape
        return predictions_shape[:-1] + (self.units,)

    def compute_output_signature(self, input_signature):
        # Output is always float32 (see the casts in the loss paths).
        outptut_signature = super().compute_output_signature(input_signature)
        return tf.TensorSpec(dtype='float32', shape=outptut_signature.shape)

    def get_config(self):
        """Serialize constructor arguments for layer round-tripping."""
        config = super(AdaptiveSoftmax, self).get_config()
        config.update({
            'cutoff': self.cutoff,
            'units': self.units,
            'factor': self.factor,
            'dropout': self.dropout,
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint),
            'loss_reduction': self.loss_reduction,
        })
        return config
@register_keras_serializable(package='Miss')
class SampledSofmax(layers.Layer):
"""Sampled softmax layer.
Reference http://arxiv.org/abs/1412.2007.pdf
On Using Very Large Target Vocabulary for Neural Machine Translation
Jean et al. (2014)
Note: The full softmax cross entropy loss calculated for evaluation.
Note: By default this uses a log-uniform (Zipfian) distribution for sampling, so your labels must be sorted in
order of decreasing frequency to achieve good results. For more details, see
`tf.random.log_uniform_candidate_sampler`.
Args:
units: An `int`. The number of possible classes.
negatives: An `int`. The number of negative classes to randomly sample per batch. This single sample of
negative classes is evaluated for each element in the batch.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Returns:
N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input with shape
`(batch_size, input_dim)`, the output would have shape `(batch_size, units)`.
"""
    def __init__(
            self, units, negatives, kernel_initializer='zeros', bias_initializer='zeros', kernel_regularizer=None,
            bias_regularizer=None, kernel_constraint=None, bias_constraint=None,
            loss_reduction=Reduction.AUTO, **kwargs):
        """Initialize the sampled-softmax layer; see the class docstring for argument semantics."""
        # The layer dtype is pinned to float32 and autocasting disabled —
        # presumably so loss computation stays in full precision under
        # mixed-precision policies (TODO confirm).
        kwargs['dtype'] = 'float32'
        kwargs['autocast'] = False
        super(SampledSofmax, self).__init__(**kwargs)
        self.input_spec = [
            layers.InputSpec(min_ndim=2),  # predictions
            layers.InputSpec(min_ndim=1, dtype=tf.int32),  # targets
        ]
        self.supports_masking = True
        self._supports_ragged_inputs = True
        # Raises on unknown reduction modes.
        Reduction.validate(loss_reduction)
        self.units = units
        self.negatives = negatives
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.loss_reduction = loss_reduction
@shape_type_conversion
def build(self, input_shape):
dtype = tf.dtypes.as_dtype(self.dtype or backend.floatx())
if not (dtype.is_floating | |
#!/usr/bin/env python
import math
import sys
import warnings
from collections import Counter

import libsbml
import numpy
import sympy

import misc
from Errors import NoncriticalError, CriticalError
# Mapping from modifier-species role name to its SBO term id.
sbo_type2id = {'activator': 21,
               'inhibitor': 20,
               'enzyme': 14}
# Reserved identifier — presumably substituted for the time symbol in
# generated formulas; confirm against the formula-handling code.
TIME_VARIABLE = 'SBML_MCA_TIME_VARIABLE'
# NOTE(review): size limit for accepted models; the enforcement site is not
# visible in this chunk.
MAX_MODEL_SIZE = 2500
class Model:
def __init__(self, sbml_model):
"""
@type model: libsbml.model or string
@param model: SBML model, or filename
"""
if type(sbml_model) == str:
self._doc = libsbml.readSBML(sbml_model)
self.sbml_model = self._doc.getModel()
else:
self.sbml_model = sbml_model.clone()
self._doc = libsbml.SBMLDocument(
sbml_model.getLevel(), sbml_model.getVersion())
self._doc.setModel(self.sbml_model)
self._N = None
self._N_partitioned = None
self._kinetic_laws = None
self._external_species_concentrations = None
self._rate_rules = None
self._assignment_rules = None
self._replacements = None
self._species_2_position = None
self._species_ids = None
self._parameter_ids = None
self._ode_variables = None
self._enzyme_positions = None
self._not_enzyme_positions = None
self._species_volume_prefactor = None
self._reaction_ids = None
self._check_not_supported()
misc.make_unique_local_parameters(self.sbml_model)
    @property
    def N(self):
        """
        get stoichiometric matrix of the model (species x reactions), not
        including constant / boundary-condition species or species changed by rules
        @return: stoichiometric matrix
        @rtype: numpy.array
        """
        if self._N is None:
            # Rows follow species_2_position ordering, columns follow reaction order.
            self._N = numpy.zeros((len(self.species_ids), self.sbml_model.getNumReactions()))
            # NOTE(review): name says "ids" but get_species elsewhere appears to
            # return species objects; the `s.getId() in ...` membership test
            # below depends on this returning IDs — confirm.
            species_ids_changed_by_rule = self.get_species(species_filter=self.is_species_changed_by_rule)
            for i, r in enumerate(self.sbml_model.getListOfReactions()):
                # Reactants contribute with negative, products with positive sign.
                modes = [(-1, 'Reactants'), (1, 'Products')]
                for sign, direction in modes:
                    for sr in getattr(r, 'getListOf' + direction)():
                        s = self.sbml_model.getSpecies(sr.getSpecies())
                        # Skip species whose amount is not governed by the reaction network.
                        if s.getBoundaryCondition() \
                                or s.getConstant() \
                                or s.getId() in species_ids_changed_by_rule:
                            continue
                        j = self.species_2_position[sr.getSpecies()]
                        # += accumulates when a species appears multiple times in one reaction.
                        self._N[j, i] += sign * sr.getStoichiometry()
            if len(self._N) == 0:
                raise CriticalError('Empty stoichiometric matrix.')
        return self._N
@property
def N_partitioned(self):
"""
get partitioned stoichiometric matrix (N = L*Nr)
@return: [inv(L), L, Nr]
@rtype: list
"""
""" get partitioned stoichiometric matrix """
if self._N_partitioned is None:
# compute reduced row echolon form to get the linear indep. rows
rref, pivot = sympy.Matrix(self.N.T).rref()
Nr = self.N[pivot] # linear independent rows of N
# link matrix is L = N*inv(Nr) [because per definition N = L*Nr]
L = numpy.dot(self.N, numpy.linalg.pinv(Nr))
try:
L_inv = numpy.linalg.inv(L) # inverse of link matrix
except:
L_inv = None
self._N_partitioned = [L_inv, L, Nr]
return self._N_partitioned
    @property
    def kinetic_laws(self):
        """
        get kinetic laws
        @return: list of kinetic laws as compiled formula strings
        @rtype: list
        """
        if self._kinetic_laws is None:
            # Cache the compiled formulas; compilation only happens once.
            self._kinetic_laws = self._get_kinetic_laws(compile_formulas=True)
        return self._kinetic_laws
@property
def species_ids(self):
    """
    @return: list of IDs of all non-constant species
    @rtype: list
    """
    if self._species_ids is None:
        # same selection as filter(self.is_species_not_constant, ...),
        # written as a comprehension
        self._species_ids = [species.getId()
                             for species in self.sbml_model.getListOfSpecies()
                             if self.is_species_not_constant(species)]
    return self._species_ids
@property
def parameter_ids(self):
    """
    @return: list of parameters that are varied in p_elasticities
        (constant species + global parameters + constant local parameters)
    @rtype: list
    @raise CriticalError: if the same parameter ID occurs more than once
    """
    if self._parameter_ids is None:
        # constant species are treated as parameters
        const_species_ids = [s.getId() for s in self.get_species(species_filter=self.is_species_constant)]
        # global parameters, except those governed by a rule
        params_changed_by_rule = [r.getVariable() for r in self.sbml_model.getListOfRules()]
        global_param_ids = []
        for p in self.sbml_model.getListOfParameters():
            if p.getId() not in params_changed_by_rule:
                global_param_ids.append(p.getId())
        # constant local parameters of each kinetic law
        local_param_ids = []
        for r in self.sbml_model.getListOfReactions():
            kl = r.getKineticLaw()
            for p in kl.getListOfParameters():
                if p.getConstant():
                    local_param_ids.append(p.getId())
        self._parameter_ids = const_species_ids + global_param_ids + local_param_ids
        # reject duplicate IDs in a single O(n) pass
        # (previous code called list.count per entry, which is O(n^2))
        seen = set()
        for p_name in self._parameter_ids:
            if p_name in seen:
                raise CriticalError(
                    'Parameter ID %s used multiple times. This is valid but not yet supported.' % p_name)
            seen.add(p_name)
    return self._parameter_ids
@property
def reaction_ids(self):
    """
    @return: list of reaction IDs
    @rtype: list
    """
    if self._reaction_ids is not None:
        return self._reaction_ids
    self._reaction_ids = [reaction.getId()
                          for reaction in self.sbml_model.getListOfReactions()]
    return self._reaction_ids
@property
def ode_variables(self):
    """
    @return: list of species IDs and parameter IDs which are modified by an
        ODE (either take part in a reaction or are changed by a rate rule)
    @rtype: list
    """
    if self._ode_variables is None:
        # use the public rate_rules property, not the private cache:
        # self._rate_rules is still None until the property is first
        # accessed, and iterating None raised a TypeError
        self._ode_variables = self.species_ids \
            + [p_id for p_id in self.rate_rules if p_id not in self.species_ids]
    return self._ode_variables
@property
def external_species_concentrations(self):
    """
    get external species concentrations
    @return: dict mapping constant species IDs to their initial value
        (concentration if set, otherwise amount)
    @rtype: dict
    """
    if self._external_species_concentrations is None:
        concentrations = {}
        for species in self.get_species(species_filter=self.is_species_constant):
            if species.isSetInitialConcentration():
                concentrations[species.getId()] = species.getInitialConcentration()
            elif species.isSetInitialAmount():
                concentrations[species.getId()] = species.getInitialAmount()
        self._external_species_concentrations = concentrations
    return self._external_species_concentrations
@property
def rate_rules(self):
    """
    get rate rules (explicit ODEs)
    @return: dictionary of rate rules, key=variable, value=rate formula
    @rtype: dict
    @raise CriticalError: if the model contains algebraic rules
    """
    if self._rate_rules is None:
        collected = {}
        for rule in self.sbml_model.getListOfRules():
            variable = rule.getVariable()
            # translate the rule's AST to a formula string
            formula = misc.ast_to_string(rule.getMath(),
                                         self.sbml_model,
                                         self.assignment_rules,
                                         self.replacements,
                                         mode='')
            if rule.isRate():
                collected[variable] = formula
            elif rule.isAlgebraic():
                raise CriticalError('Algebraic rules not supported')
        self._rate_rules = self._replace_flux_symbols(collected)
    return self._rate_rules
@property
def assignment_rules(self):
    """
    get assignment rules
    @return: dictionary of assignment rules; maps each rule variable to
        {True: formula with replacements applied,
         False: formula with the original symbol names kept}
    @rtype: dict
    """
    if self._assignment_rules is None:
        is_loop = True
        # NOTE: the cache is initialised *before* the loop so that the
        # recursive accesses to self.assignment_rules below see the
        # partially filled dict instead of recursing forever.
        self._assignment_rules = {}
        while is_loop:  # loop until no assignment rule is dependent on another assignment
            for rule in self.sbml_model.getListOfRules():
                if rule.isAssignment():
                    var = rule.getVariable()
                    # formula with parameter/compartment values substituted
                    formula = misc.ast_to_string(rule.getMath(),
                                                 self.sbml_model,
                                                 self.assignment_rules,
                                                 self.replacements,
                                                 mode='',
                                                 replace=True)
                    # same formula without substitution
                    formula_wo_replace = misc.ast_to_string(rule.getMath(),
                                                            self.sbml_model,
                                                            self.assignment_rules,
                                                            self.replacements,
                                                            mode='',
                                                            replace=False)
                    self._assignment_rules[var] = {True: formula, False: formula_wo_replace}
            # check dependencies: iterate again while any expanded formula
            # still mentions another rule's variable
            # NOTE(review): this is a plain substring test, so a variable name
            # contained in a longer identifier can force extra passes, and a
            # genuinely circular rule set would loop forever -- verify
            # upstream guarantees acyclic assignment rules
            is_loop = False
            for var1 in self._assignment_rules:
                for var2 in self._assignment_rules:
                    if var2 in self._assignment_rules[var1][True]:
                        is_loop = True
    return self._assignment_rules
@property
def replacements(self):
    """
    get dictionary of parameter values and compartment sizes
    @return: replacements (constant parameter values and compartment sizes)
    @rtype: dict
    """
    if self._replacements is None:
        # do not include parameters that are modified by a rule
        self._replacements = {}
        params_changed_by_rule = [r.getVariable() for r in self.sbml_model.getListOfRules()]
        # global parameters first, then the local parameters of each kinetic law
        for (pos, base) in enumerate([self.sbml_model] +
                                     [r.getKineticLaw() for r in self.sbml_model.getListOfReactions()]):
            for p in base.getListOfParameters():
                if not p.getId() in params_changed_by_rule:
                    self._replacements[p.getId()] = p.getValue()
        # compartment sizes are treated as plain replacement values too
        for comp in self.sbml_model.getListOfCompartments():
            if comp.isSetSize():
                s = comp.getSize()
            elif comp.isSetVolume():
                s = comp.getVolume()
            else:
                s = 1.  # default compartment size
            self._replacements[comp.getId()] = s
        # species initial values are needed to evaluate initial assignments
        params_with_species = {}
        for s in self.sbml_model.getListOfSpecies():
            if s.isSetInitialConcentration():
                params_with_species[s.getId()] = s.getInitialConcentration()
            elif s.isSetInitialAmount():
                params_with_species[s.getId()] = s.getInitialAmount()
        # handle initial assignments (they might be dependent on each other,
        # therefore try 50 evaluations); a value stays NaN until its initial
        # assignment can be evaluated
        max_iter = 50
        while any([math.isnan(self._replacements[x]) for x in self._replacements]) and max_iter > 0:
            params_with_species.update(self._replacements)
            ass = self._evaluate_initial_assignments(params_with_species)
            for p in ass:
                # 'in' replaces dict.has_key, which is gone in Python 3
                if p in self._replacements and math.isnan(self._replacements[p]):
                    self._replacements[p] = float(ass[p])
            max_iter -= 1
    return self._replacements
@property
def species_2_position(self):
    """
    get dictionary mapping species ID to its row in the stoichiometric matrix
    @return: mapping species ID -> position in stoichiometric matrix
    @rtype: dict
    """
    if self._species_2_position is None:
        # enumerate replaces zip(ids, range(ids.__len__())): same mapping,
        # idiomatic and without the dunder call
        self._species_2_position = {s_id: pos
                                    for pos, s_id in enumerate(self.species_ids)}
    return self._species_2_position
@property
def enzyme_positions(self):
    """
    @return: list of enzyme positions in the species list
    @rtype: list
    """
    if self._enzyme_positions is None:
        positions = []
        for pos, s_id in enumerate(self.species_ids):
            species = self.sbml_model.getSpecies(s_id)
            # a species counts as an enzyme by SBO term or by ID prefix
            if species.getSBOTerm() == sbo_type2id['enzyme'] \
                    or species.getId().startswith('enzyme'):
                positions.append(pos)
        self._enzyme_positions = positions
    return self._enzyme_positions
@property
def not_enzyme_positions(self):
    """
    @return: list of positions of species which are not enzymes
    @rtype: list
    """
    if self._not_enzyme_positions is None:
        enzymes = set(self.enzyme_positions)
        self._not_enzyme_positions = [pos
                                      for pos in range(len(self.species_ids))
                                      if pos not in enzymes]
    return self._not_enzyme_positions
@property
def species_volume_prefactor(self):
    """
    get a vector of conversion factors (particle number to concentration)
    for each species
    @return: array of volume prefactors
    @rtype: numpy.array
    """
    if self._species_volume_prefactor is None:
        # compartment sizes; an unset size (NaN) counts as 1.
        comp_size = {}
        for comp in self.sbml_model.getListOfCompartments():
            size = comp.getSize()
            comp_size[comp.getId()] = 1. if math.isnan(size) else size
        factors = []
        for s_id in self.species_ids:
            species = self.sbml_model.getSpecies(s_id)
            if species.getHasOnlySubstanceUnits():
                # amounts are used directly, no conversion
                factors.append(1.)
            else:
                factors.append(1. / comp_size[species.getCompartment()])
        self._species_volume_prefactor = numpy.array(factors)
    return self._species_volume_prefactor
def get_species(self, species_filter=None):
    """
    get list of species
    @param species_filter: optional predicate selecting a subset of species
        (e.g. only the constant ones)
    @type species_filter: function
    @return: list of species
    @rtype: list
    """
    all_species = self.sbml_model.getListOfSpecies()
    if species_filter is None:
        return all_species
    return filter(species_filter, all_species)
def get_parameter_values(self, parameter_ids=None):
    """
    Get values for the specified parameters
    @type parameter_ids: list
    @param parameter_ids: list of parameter ids; defaults to self.parameter_ids
    @rtype: numpy.array
    @return: array with parameter values
    """
    ids = self.parameter_ids if parameter_ids is None else parameter_ids
    values = [misc.get_parameter_value(self.sbml_model, p_id) for p_id in ids]
    return numpy.array(values)
def set_parameter_values(self, parameter_names, parameter_values):
    """
    Set list of parameters to new values
    @type parameter_names: list
    @param parameter_names: List of strings with parameter ids
    @type parameter_values: list
    @param parameter_values: List of parameter values
    """
    # constant (external) species are stored locally; everything else is
    # written back into the SBML model
    for name, value in zip(parameter_names, parameter_values):
        if name in self.external_species_concentrations:
            self.external_species_concentrations[name] = value
        else:
            # previous version also set a local 'rebuild' flag here that was
            # never read; the dead variable has been removed
            misc.set_parameter_value(self.sbml_model, name, value)
def get_delta_parameters(self, d_param, parameter_names):
    """
    enter an array of parameter deviations and names and get back the
    corresponding d_param vector for all parameters
    @type d_param: numpy.array
    @param d_param: vector of parameter changes
    @type parameter_names: list of strings
    @param parameter_names: list of parameter names
    @rtype: numpy.array
    @return: vector of all parameter changes
    """
    all_names = self.parameter_ids
    dp = numpy.zeros(len(all_names))
    # scatter the given deviations into the full-length vector
    for name, value in zip(parameter_names, d_param):
        dp[all_names.index(name)] = value
    return dp
def get_initial_conc(self, with_rate_rule_params=False):
"""
get vector of initial concentrations
@type with_rate_rule_params: boolean
@param with_rate_rule_params: indicate whehter to include | |
occurrence base is a single document.
privates = {w for (w, base) in occurrenceBase.items() if len(base) == 1}
len(privates)
# ### Peculiarity of documents
#
# As a final exercise with words, let's make a list of all documents, and show their
#
# * total number of words
# * number of private words
# * the percentage of private words: a measure of the peculiarity of the document
# +
# Collect, per document, the total word count, the private-word count and the
# percentage of private words; documents without words or without private
# words are set aside.
docList = []
empty = set()
ordinary = set()
for d in F.otype.s("document"):
    pNum = T.documentName(d)
    # distinct word symbols occurring in this document
    words = {F.sym.v(w) for w in L.d(d, otype="word")}
    a = len(words)
    if not a:
        empty.add(pNum)
        continue
    # how many of these words occur in no other document
    o = len({w for w in words if w in privates})
    if not o:
        ordinary.add(pNum)
        continue
    p = 100 * o / a
    docList.append((pNum, a, o, p))
# sort by peculiarity (desc), then total words (desc), then document name
docList = sorted(docList, key=lambda e: (-e[3], -e[1], e[0]))
print(f"Found {len(empty):>4} empty documents")
print(f"Found {len(ordinary):>4} ordinary documents (i.e. without private words)")
# +
print(
"{:<20}{:>5}{:>5}{:>5}\n{}".format(
"document",
"#all",
"#own",
"%own",
"-" * 35,
)
)
for x in docList[0:20]:
print("{:<20} {:>4} {:>4} {:>4.1f}%".format(*x))
print("...")
for x in docList[-20:]:
print("{:<20} {:>4} {:>4} {:>4.1f}%".format(*x))
# -
# # Locality API
# We travel upwards and downwards, forwards and backwards through the nodes.
# The Locality-API (`L`) provides functions: `u()` for going up, and `d()` for going down,
# `n()` for going to next nodes and `p()` for going to previous nodes.
#
# These directions are indirect notions: nodes are just numbers, but by means of the
# `oslots` feature they are linked to slots. One node *contains* another node, if the one is linked to a set of slots that contains the set of slots that the other is linked to.
# And one is next or previous to another, if its slots follow or precede the slots of the other one.
#
# `L.u(node)` **Up** is going to nodes that embed `node`.
#
# `L.d(node)` **Down** is the opposite direction, to those that are contained in `node`.
#
# `L.n(node)` **Next** are the next *adjacent* nodes, i.e. nodes whose first slot comes immediately after the last slot of `node`.
#
# `L.p(node)` **Previous** are the previous *adjacent* nodes, i.e. nodes whose last slot comes immediately before the first slot of `node`.
#
# All these functions yield nodes of all possible otypes.
# By passing an optional parameter, you can restrict the results to nodes of that type.
#
# The result are ordered according to the order of things in the text.
#
# The functions return always a tuple, even if there is just one node in the result.
#
# ## Going up
# We go from the first word to the document that contains it.
# Note the `[0]` at the end. You expect one document, yet `L` returns a tuple.
# To get the only element of that tuple, you need to do that `[0]`.
#
# If you are like me, you keep forgetting it, and that will lead to weird error messages later on.
firstDoc = L.u(1, otype="document")[0]
print(firstDoc)
# And let's see all the containing objects of sign 3:
s = 3
for otype in F.otype.all:
if otype == F.otype.slotType:
continue
up = L.u(s, otype=otype)
upNode = "x" if len(up) == 0 else up[0]
print("sign {} is contained in {} {}".format(s, otype, upNode))
# ## Going next
# Let's go to the next nodes of the first document.
afterFirstDoc = L.n(firstDoc)
for n in afterFirstDoc:
print(
"{:>7}: {:<13} first slot={:<6}, last slot={:<6}".format(
n,
F.otype.v(n),
E.oslots.s(n)[0],
E.oslots.s(n)[-1],
)
)
secondDoc = L.n(firstDoc, otype="document")[0]
# ## Going previous
#
# And let's see what is right before the second document.
for n in L.p(secondDoc):
print(
"{:>7}: {:<13} first slot={:<6}, last slot={:<6}".format(
n,
F.otype.v(n),
E.oslots.s(n)[0],
E.oslots.s(n)[-1],
)
)
# ## Going down
# We go to the faces of the first document, and just count them.
faces = L.d(firstDoc, otype="face")
print(len(faces))
# ## The first line
# We pick two nodes and explore what is above and below them:
# the first line and the first word.
# For each start node (the first word and the first line) log every
# containing node (UP) and every contained node (DOWN), with indentation
# levels marking the sections of the output.
for n in [
    F.otype.s("word")[0],
    F.otype.s("line")[0],
]:
    A.indent(level=0)
    A.info("Node {}".format(n), tm=False)
    A.indent(level=1)
    A.info("UP", tm=False)
    A.indent(level=2)
    A.info("\n".join(["{:<15} {}".format(u, F.otype.v(u)) for u in L.u(n)]), tm=False)
    A.indent(level=1)
    A.info("DOWN", tm=False)
    A.indent(level=2)
    A.info("\n".join(["{:<15} {}".format(u, F.otype.v(u)) for u in L.d(n)]), tm=False)
    A.indent(level=0)
    A.info("Done", tm=False)
# # Text API
#
# So far, we have mainly seen nodes and their numbers, and the names of node types.
# You would almost forget that we are dealing with text.
# So let's try to see some text.
#
# In the same way as `F` gives access to feature data,
# `T` gives access to the text.
# That is also feature data, but you can tell Text-Fabric which features are specifically
# carrying the text, and in return Text-Fabric offers you
# a Text API: `T`.
#
# ## Formats
# Cuneiform text can be represented in a number of ways:
#
# * original ATF, with bracketings and flags
# * essential symbols: readings and graphemes, repeats and fractions (of numerals), no flags, no clusterings
# * unicode symbols
#
# If you wonder where the information about text formats is stored:
# not in the program text-fabric, but in the data set.
# It has a feature `otext`, which specifies the formats and which features
# must be used to produce them. `otext` is the third special feature in a TF data set,
# next to `otype` and `oslots`.
# It is an optional feature.
# If it is absent, there will be no `T` API.
#
# Here is a list of all available formats in this data set.
sorted(T.formats)
# ## Using the formats
#
# The ` T.text()` function is central to get text representations of nodes. Its most basic usage is
#
# ```python
# T.text(nodes, fmt=fmt)
# ```
# where `nodes` is a list or iterable of nodes, usually word nodes, and `fmt` is the name of a format.
# If you leave out `fmt`, the default `text-orig-full` is chosen.
#
# The result is the text in that format for all nodes specified:
T.text([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], fmt="text-orig-plain")
# There is also another usage of this function:
#
# ```python
# T.text(node, fmt=fmt)
# ```
#
# where `node` is a single node.
# In this case, the default format is *ntype*`-orig-full` where *ntype* is the type of `node`.
#
# If the format is defined in the corpus, it will be used. Otherwise, the word nodes contained in `node` will be looked up
# and represented with the default format `text-orig-full`.
#
# In this way we can sensibly represent a lot of different nodes, such as documents, faces, lines, clusters, words and signs.
#
# We compose a set of example nodes and run `T.text` on them:
exampleNodes = [
F.otype.s("sign")[0],
F.otype.s("word")[0],
F.otype.s("cluster")[0],
F.otype.s("line")[0],
F.otype.s("face")[0],
F.otype.s("document")[0],
]
exampleNodes
for n in exampleNodes:
print(f"This is {F.otype.v(n)} {n}:")
print(T.text(n))
print("")
# ## Comparing the formats
# Now let's use those formats to print out the first line in this corpus.
#
# Note that only the formats starting with `text-` are usable for this.
#
# For the `layout-` formats, see [display](display.ipynb).
for fmt in sorted(T.formats):
if fmt.startswith("text-"):
print("{}:\n\t{}".format(fmt, T.text(range(1, 12), fmt=fmt)))
# If we do not specify a format, the **default** format is used (`text-orig-full`).
T.text(range(1, 12))
firstLine = F.otype.s("line")[0]
T.text(firstLine)
T.text(firstLine, fmt="text-orig-unicode")
# ## Word dividers
#
# First we grab all word dividers in a list.
ds = F.type.s("wdiv")
len(ds)
# Then we take the first word divider and look up the line in which it occurs
d = ds[0]
ln = L.u(d, otype="line")[0]
A.webLink(ln)
# The ATF source of this line is:
A.getSource(ln)
# We use the text formats to display this line in various forms:
T.text(ln)
T.text(ln, fmt="text-orig-plain")
T.text(ln, fmt="text-orig-rich")
T.text(ln, fmt="text-orig-unicode")
# These characters do not look right, but that is because of the font. We can show the text in the right font with the more advanced functions of Text-Fabric (see also [display](display.ipynb):
A.plain(ln, fmt="text-orig-unicode")
# And now with the word divider highlighted:
A.plain(ln, fmt="text-orig-unicode", highlights=set(ds))
# The important things to remember are:
#
# * you can supply a list of slot nodes and get them represented in all formats
# * you can get non-slot nodes `n` in default format by `T.text(n)`
# * you can get non-slot nodes `n` in other formats by `T.text(n, fmt=fmt, descend=True)`
# ## Whole text in all formats in just 6 seconds
# Part of the pleasure of working with computers is that they can crunch massive amounts of data.
# The text of the Old Assyrian Letters is a piece of cake.
#
# It takes just ten seconds to have that cake and eat | |
index
text_4.tStart = t # local t and not account for scr refresh
text_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_4, 'tStartRefresh') # time at next scr refresh
text_4.setAutoDraw(True)
if text_4.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_4.tStartRefresh + 0.200-frameTolerance:
# keep track of stop time/frame for later
text_4.tStop = t # not accounting for scr refresh
text_4.frameNStop = frameN # exact frame index
win.timeOnFlip(text_4, 'tStopRefresh') # time at next scr refresh
text_4.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in plusComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "plus"-------
for thisComponent in plusComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('text_4.started', text_4.tStartRefresh)
trials.addData('text_4.stopped', text_4.tStopRefresh)
# ------Prepare to start Routine "Experiment1"-------
# the routine runs on a countdown timer: 80 ms total
routineTimer.add(0.080000)
# update component parameters for each repeat
imageGuess.setImage(Image1)
# keep track of which components have finished
Experiment1Components = [imageGuess]
for thisComponent in Experiment1Components:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Experiment1Clock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Experiment1"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = Experiment1Clock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=Experiment1Clock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *imageGuess* updates
    if imageGuess.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        imageGuess.frameNStart = frameN  # exact frame index
        imageGuess.tStart = t  # local t and not account for scr refresh
        imageGuess.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(imageGuess, 'tStartRefresh')  # time at next scr refresh
        imageGuess.setAutoDraw(True)
    if imageGuess.status == STARTED:
        # is it time to stop? (based on global clock, using actual start)
        # the image is shown for 80 ms
        if tThisFlipGlobal > imageGuess.tStartRefresh + 0.08-frameTolerance:
            # keep track of stop time/frame for later
            imageGuess.tStop = t  # not accounting for scr refresh
            imageGuess.frameNStop = frameN  # exact frame index
            win.timeOnFlip(imageGuess, 'tStopRefresh')  # time at next scr refresh
            imageGuess.setAutoDraw(False)
    # check for quit (typically the Esc key)
    if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
        core.quit()
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in Experiment1Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Experiment1"-------
for thisComponent in Experiment1Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# record the measured on/off screen times for this trial
trials.addData('imageGuess.started', imageGuess.tStartRefresh)
trials.addData('imageGuess.stopped', imageGuess.tStopRefresh)
# ------Prepare to start Routine "sound1"-------
# the routine runs on a countdown timer: 100 ms total
routineTimer.add(0.100000)
# update component parameters for each repeat
# keep track of which components have finished
# NOTE(review): despite the routine name, the only component here is an
# image (image_2) -- confirm the sound component was intentionally removed
sound1Components = [image_2]
for thisComponent in sound1Components:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
sound1Clock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "sound1"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = sound1Clock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=sound1Clock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *image_2* updates
    if image_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        image_2.frameNStart = frameN  # exact frame index
        image_2.tStart = t  # local t and not account for scr refresh
        image_2.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(image_2, 'tStartRefresh')  # time at next scr refresh
        image_2.setAutoDraw(True)
    if image_2.status == STARTED:
        # is it time to stop? (based on global clock, using actual start)
        # the image is shown for 100 ms
        if tThisFlipGlobal > image_2.tStartRefresh + 0.1-frameTolerance:
            # keep track of stop time/frame for later
            image_2.tStop = t  # not accounting for scr refresh
            image_2.frameNStop = frameN  # exact frame index
            win.timeOnFlip(image_2, 'tStopRefresh')  # time at next scr refresh
            image_2.setAutoDraw(False)
    # check for quit (typically the Esc key)
    if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
        core.quit()
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in sound1Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "sound1"-------
for thisComponent in sound1Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# record the measured on/off screen times for this trial
trials.addData('image_2.started', image_2.tStartRefresh)
trials.addData('image_2.stopped', image_2.tStopRefresh)
# ------Prepare to start Routine "empty1"-------
routineTimer.add(2.000000)
# update component parameters for each repeat
key_resp_8.keys = []
key_resp_8.rt = []
# keep track of which components have finished
empty1Components = [text_8, key_resp_8]
for thisComponent in empty1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
empty1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "empty1"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = empty1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=empty1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_8* updates
if text_8.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_8.frameNStart = frameN # exact frame index
text_8.tStart = t # local t and not account for scr refresh
text_8.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_8, 'tStartRefresh') # time at next scr refresh
text_8.setAutoDraw(True)
if text_8.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_8.tStartRefresh + 2.0-frameTolerance:
# keep track of stop time/frame for later
text_8.tStop = t # not accounting for scr refresh
text_8.frameNStop = frameN # exact frame index
win.timeOnFlip(text_8, 'tStopRefresh') # time at next scr refresh
text_8.setAutoDraw(False)
# *key_resp_8* updates
waitOnFlip = False
if key_resp_8.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_8.frameNStart = frameN # exact frame index
key_resp_8.tStart = t # local t and not account for scr refresh
key_resp_8.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_8, 'tStartRefresh') # time at next scr refresh
key_resp_8.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_8.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_8.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_8.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp_8.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
key_resp_8.tStop = t # not accounting for scr refresh
key_resp_8.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp_8, 'tStopRefresh') # time at next scr refresh
key_resp_8.status = FINISHED
if key_resp_8.status == STARTED and not waitOnFlip:
theseKeys = key_resp_8.getKeys(keyList=['right', 'left'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # | |
person_plugin_wrapper(*args, **kwargs):
person = person_from_environ(request.environ)
if person is None or person.uname is None:
log(f'person is None')
return page('error', summary = 'authentication failure',
message = f'Unrecognized user identity.')
if 'person' in inspect.getfullargspec(route.callback)[0]:
kwargs['person'] = person
return callback(*args, **kwargs)
return person_plugin_wrapper
class VerifyStaffUser(BottlePluginBase):
    '''Redirect to an error page if the user lacks sufficient privileges.'''

    def apply(self, callback, route):
        '''Wrap *callback* so the route only runs for authenticated staff.'''
        def staff_person_plugin_wrapper(*args, **kwargs):
            person = person_from_environ(request.environ)
            if person is None:
                log('person is None')
                # no placeholders needed, so plain strings instead of the
                # previous pointless f-strings
                return page('error', summary = 'authentication failure',
                            message = 'Unrecognized user identity.')
            if not staff_user(person):
                log(f'{request.path} invoked by non-staff {user(person)}')
                # redirect() raises bottle.HTTPResponse, so the bare return
                # below is never reached; kept for clarity
                redirect(f'{dibs.base_url}/notallowed')
                return
            return callback(*args, **kwargs)
        return staff_person_plugin_wrapper
# Administrative interface endpoints.
# .............................................................................
# A note about authentication: the entire DIBS application is assumed to be
# behind a server that implements authentication, for example using SSO.
# This means we never need to log a person in: they will be authenticated by
# SSO before they can get to DIBS pages. However, once in DIBS, we do need
# to provide a way for them to un-authenticate themselves. This is the
# reason for the asymmetry between /logout and (lack of) login.
@dibs.post('/logout')
def logout():
    '''Handle the logout action from the navbar menu on every page.'''
    # If we are not in debug mode, then whether the user is authenticated or
    # not is determined by the presence of REMOTE_USER.
    if request.environ.get('REMOTE_USER', None) and not debug_mode():
        # hand off to the SSO logout endpoint (plain string: the previous
        # f-string had no placeholders)
        redirect('/Shibboleth.sso/Logout')
    else:
        redirect('/')
@dibs.get('/list', apply = VerifyStaffUser())
def list_items():
    '''Display the list of known items.'''
    # browser_no_cache presumably tells the template to disable client-side
    # caching so staff always see the current list -- confirm in page()
    return page('list', browser_no_cache = True, items = Item.select(),
                manifest_dir = _MANIFEST_DIR, process_dir = _PROCESS_DIR)
@dibs.get('/manage', apply = VerifyStaffUser())
def manage_items():
    '''Manage the list of known items (staff only, via VerifyStaffUser).'''
    return page('manage', browser_no_cache = True, items = Item.select())
@dibs.get('/add', apply = VerifyStaffUser())
def add():
    '''Display the page to add new items.'''
    # reuses the 'edit' template in "add" mode, with no pre-existing item
    return page('edit', action = 'add', item = None,
                thumbnails_dir = _THUMBNAILS_DIR,
                max_size = naturalsize(_MAX_THUMBNAIL_SIZE))
@dibs.get('/edit/<barcode:int>', apply = VerifyStaffUser())
def edit(barcode):
    '''Display the page to edit the existing item with the given barcode.'''
    # NOTE(review): Item.get raises DoesNotExist for an unknown barcode --
    # confirm an error handler catches that upstream
    return page('edit', browser_no_cache = True, action = 'edit',
                thumbnails_dir = _THUMBNAILS_DIR,
                max_size = naturalsize(_MAX_THUMBNAIL_SIZE),
                item = Item.get(Item.barcode == barcode))
@dibs.post('/update/add', apply = VerifyStaffUser())
@dibs.post('/update/edit', apply = VerifyStaffUser())
def update_item():
    '''Handle http post request to add a new item from the add-new-item page.'''
    # This one handler serves both /update/add and /update/edit; the branch
    # on request.path below decides which operation is performed.
    if 'cancel' in request.POST:
        log(f'user clicked Cancel button')
        redirect(f'{dibs.base_url}/list')
        return
    # The HTML form validates the data types, but the POST might come from
    # elsewhere, so we always need to sanity-check the values.
    barcode = request.forms.get('barcode').strip()
    if not barcode.isdigit():
        return page('error', summary = 'invalid barcode',
                    message = f'{barcode} is not a valid barcode')
    duration = request.forms.get('duration').strip()
    if not duration.isdigit() or int(duration) <= 0:
        return page('error', summary = 'invalid duration',
                    message = f'Duration must be a positive number')
    num_copies = request.forms.get('num_copies').strip()
    if not num_copies.isdigit() or int(num_copies) <= 0:
        return page('error', summary = 'invalid copy number',
                    message = f'# of copies must be a positive number')
    notes = request.forms.get('notes').strip()
    thumbnail = request.files.get('thumbnail-image')
    item = Item.get_or_none(Item.barcode == barcode)
    if '/update/add' in request.path:
        # Adding a new item: the barcode must not already exist, and the
        # bibliographic data comes from the LSP (library services platform).
        if item:
            log(f'{barcode} already exists in the database')
            return page('error', summary = 'duplicate entry',
                        message = f'An item with barcode {barcode} already exists.')
        lsp = LSP()
        try:
            rec = lsp.record(barcode = barcode)
        except ValueError as ex:
            # The LSP raised because the record lacks required metadata.
            return page('error', summary = 'Incomplete record in LSP',
                        message = (f'The item with barcode {barcode} lacks one'
                                   + ' or more basic metadata fields (title,'
                                   + ' author, year) in the library catalog.'))
        if not rec:
            log(f'could not find {barcode} in LSP')
            return page('error', summary = 'no such barcode',
                        message = f'Could not find an item with barcode {barcode}.')
        log(f'adding item entry {barcode} for {rec.title}')
        Item.create(barcode = barcode, title = rec.title, author = rec.author,
                    item_id = rec.id, item_page = rec.url, year = rec.year,
                    edition = rec.edition, publisher = rec.publisher,
                    num_copies = num_copies, duration = duration, notes = notes)
    else:  # The operation is /update/edit.
        if not item:
            log(f'there is no item with barcode {barcode}')
            return page('error', summary = 'no such barcode',
                        message = f'There is no item with barcode {barcode}.')
        item.barcode = barcode
        item.duration = duration
        item.num_copies = num_copies
        item.notes = notes
        log(f'saving changes to {barcode}')
        item.save(only = [Item.barcode, Item.num_copies, Item.duration, Item.notes])
        # FIXME if we reduced the number of copies, we need to check loans.
    # Handle replacement thumbnail images if the user chose one.
    if thumbnail and thumbnail.filename:
        # We don't seem to get content-length in the headers, so won't know
        # the size ahead of time. So, check size, & always convert to jpeg.
        try:
            data = b''
            # Stream the upload in 1 KB chunks so an oversized file can be
            # rejected without reading it entirely into memory first.
            while (chunk := thumbnail.file.read(1024)):
                data += chunk
                if len(data) >= _MAX_THUMBNAIL_SIZE:
                    max_size = naturalsize(_MAX_THUMBNAIL_SIZE)
                    log(f'file exceeds {max_size} -- ignoring the file')
                    return page('error', summary = 'cover image is too large',
                                message = ('The chosen image is larger than'
                                           + f' the limit of {max_size}.'))
            dest_file = join(_THUMBNAILS_DIR, barcode + '.jpg')
            log(f'writing {naturalsize(len(data))} image to {dest_file}')
            with open(dest_file, 'wb') as new_file:
                new_file.write(as_jpeg(data))
        except Exception as ex:
            # Best-effort: a failed thumbnail save must not block the update.
            log(f'exception trying to save thumbnail: {str(ex)}')
    else:
        log(f'user did not provide a new thumbnail image file')
    redirect(f'{dibs.base_url}/list')
@dibs.get('/delete-thumbnail/<barcode:int>', apply = VerifyStaffUser())
def delete_thumbnail(barcode):
    '''Delete the current thumbnail image for the item with this barcode.

    Fix: this handler was previously also named "edit", which silently
    rebound the module-level name of the /edit route handler defined above.
    Bottle dispatches by route, so giving the function its own name changes
    no URL but removes the name collision.
    '''
    thumbnail_file = join(_THUMBNAILS_DIR, str(barcode) + '.jpg')
    if exists(thumbnail_file):
        delete_existing(thumbnail_file)
    else:
        # Nothing to delete; log it and fall through to the redirect.
        log(f'there is no {thumbnail_file}')
    # Send the user back to the edit page for this item.
    redirect(f'{dibs.base_url}/edit/{barcode}')
@dibs.post('/start-processing', apply = VerifyStaffUser())
def start_processing():
    '''Handle http post request to start the processing workflow.'''
    barcode = request.POST.barcode.strip()
    if _PROCESS_DIR:
        marker = join(_PROCESS_DIR, f'{barcode}-initiated')
        try:
            log(f'creating {marker}')
            # Touch an empty marker file; the processing workflow picks it up.
            os.close(os.open(marker, os.O_CREAT))
        except Exception as ex:
            log(f'problem creating {marker}: {str(ex)}')
    else:
        # Without a process directory there is nowhere to drop the marker.
        log(f'_PROCESS_DIR not set -- ignoring /start-processing for {barcode}')
    redirect(f'{dibs.base_url}/list')
@dibs.post('/ready', apply = VerifyStaffUser())
def toggle_ready():
    '''Set the ready-to-loan field.'''
    barcode = request.POST.barcode.strip()
    item = Item.get(Item.barcode == barcode)
    # Flip the flag; it is persisted inside the transaction below.
    item.ready = not item.ready
    log(f'locking db to change {barcode} ready to {item.ready}')
    # Immediate-mode transaction so the flag change and any loan cleanup
    # happen atomically with respect to concurrent requests.
    with database.atomic('immediate'):
        item.save(only = [Item.ready])
        # If we are removing readiness, we may have to close outstanding
        # loans. Doesn't matter if these are active or recent loans.
        if not item.ready:
            for loan in Loan.select().where(Loan.item == item):
                # Don't count staff users in loan stats except in debug mode.
                if staff_user(loan.user) and not debug_mode():
                    continue
                # Record the loan in history before it is deleted below.
                History.create(type = 'loan', what = barcode,
                               start_time = loan.start_time,
                               end_time = loan.end_time)
            n = Loan.delete().where(Loan.item == item).execute()
            if n > 0:
                log(f'deleted {n} loans for {barcode}')
    redirect(f'{dibs.base_url}/list')
@dibs.post('/remove', apply = VerifyStaffUser())
def remove_item():
    '''Handle http post request to remove an item from the database.'''
    barcode = request.POST.barcode.strip()
    entry = Item.get(Item.barcode == barcode)
    log(f'locking db to remove {barcode}')
    # One immediate-mode transaction covers the readiness flip, the loan
    # deletions, and the item deletion.
    with database.atomic('immediate'):
        entry.ready = False
        entry.save(only = [Item.ready])
        Loan.delete().where(Loan.item == entry).execute()
        # Note we don't create History for items that will no longer exist.
        Item.delete().where(Item.barcode == barcode).execute()
    redirect(f'{dibs.base_url}/manage')
@dibs.get('/stats', apply = VerifyStaffUser())
@dibs.get('/status', apply = VerifyStaffUser())
def show_stats():
    '''Display the list of known items.'''
    # Collect one (item, active loans, total loans, avg duration, retrievals)
    # tuple per item for the stats template.
    usage_data = []
    for item in Item.select():
        barcode = item.barcode
        active = Loan.select().where(Loan.item == item, Loan.state == 'active').count()
        history = History.select().where(History.what == barcode, History.type == 'loan')
        # _REQUESTS holds per-time-window request counters keyed by barcode.
        last_15min = _REQUESTS['15'].get(barcode, 0)
        last_30min = _REQUESTS['30'].get(barcode, 0)
        last_45min = _REQUESTS['45'].get(barcode, 0)
        last_60min = _REQUESTS['60'].get(barcode, 0)
        # NOTE(review): if the window counters are cumulative, the 45- and
        # 60-minute buckets subtract extra earlier terms (e.g.
        # last_45min - last_30min - last_15min rather than just
        # last_45min - last_30min) -- confirm the intended bucket arithmetic.
        retrievals = [ last_15min ,
                       max(0, last_30min - last_15min),
                       max(0, last_45min - last_30min - last_15min),
                       max(0, last_60min - last_45min - last_30min - last_15min) ]
        durations = [(loan.end_time - loan.start_time) for loan in history]
        if durations:
            # Floor-divide the summed timedelta to get the average duration.
            avg_duration = sum(durations, delta()) // len(durations)
        else:
            avg_duration = delta(seconds = 0)
        usage_data.append((item, active, len(durations), avg_duration, retrievals))
    return page('stats', browser_no_cache = True, usage_data = usage_data)
@dibs.get('/download/<fmt:re:(csv|json)>/<data:re:(item|history)>', apply = VerifyStaffUser())
def download(fmt, data):
    '''Return the named database table as a downloadable attachment.

    fmt is "csv" or "json" and data is "item" or "history" (both constrained
    by the route regexes).  The serialized table is returned as an attachment
    named dibs-<data>.<fmt>.
    '''
    # The values of "data" are limited to known table names by the route, but
    # if data_models.py is ever changed and we forget to update this function,
    # the next safety check prevents db.freeze from creating a blank table.
    if data not in generate_models(database):
        log(f'download route database mismatch: requested {data} does not exist')
        return page('error', summary = f'unable to download {data} data',
                    message = 'The requested data is missing from the database ')
    db = DataSet('sqlite:///' + database.file_path)
    buffer = StringIO()
    db.freeze(db[data].all(), format = fmt, file_obj = buffer)
    buffer.seek(0)
    # Bug fix: the Content-Disposition filename previously omitted the
    # ".{fmt}" extension, even though the log line below (and the user's
    # downloaded file name) expect "dibs-{data}.{fmt}".
    response = LocalResponse(
        body = buffer,
        headers = {
            'Content-Disposition' : f'attachment; filename=dibs-{data}.{fmt}',
            'Content-Type' : f'text/{fmt}',
        })
    log(f'returning file "dibs-{data}.{fmt}" to user')
    return response
# User endpoints.
# .............................................................................
# An operation common to several routes is to test if the user can borrow an
# item.
string + WS + leading text
Shouldn't gobble"""
self.verify(" -- #attr $test = 'blarg' \n$test",
" -- \nblarg")
class DefDirective(OutputTest):
    '''Output tests for the #def directive: multiline and single-line forms,
    with/without argument strings, *args/**KWs, $-prefixed method names, and
    escaped placeholders inside single-line bodies.'''

    def test1(self):
        """#def without argstring"""
        self.verify("#def testMeth\n1234\n#end def\n$testMeth",
                    "1234\n")
        self.verify("#def testMeth ## comment\n1234\n#end def\n$testMeth",
                    "1234\n")
        self.verify("#def testMeth: ## comment\n1234\n#end def\n$testMeth",
                    "1234\n")

    def test2(self):
        """#def without argstring, gobble WS"""
        self.verify("   #def testMeth  \n1234\n    #end def   \n$testMeth",
                    "1234\n")

    def test3(self):
        """#def with argstring, gobble WS"""
        self.verify("  #def testMeth($a=999)   \n1234-$a\n  #end def\n$testMeth",
                    "1234-999\n")

    def test4(self):
        """#def with argstring, gobble WS, string used in call"""
        self.verify("  #def testMeth($a=999)   \n1234-$a\n  #end def\n$testMeth('ABC')",
                    "1234-ABC\n")

    def test5(self):
        """#def with argstring, gobble WS, list used in call"""
        self.verify("  #def testMeth($a=999)   \n1234-$a\n  #end def\n$testMeth([1,2,3])",
                    "1234-[1, 2, 3]\n")

    def test6(self):
        """#def with 2 args, gobble WS, list used in call"""
        self.verify("  #def testMeth($a, $b='default')   \n1234-$a$b\n  #end def\n$testMeth([1,2,3])",
                    "1234-[1, 2, 3]default\n")

    def test7(self):
        """#def with *args, gobble WS"""
        self.verify("  #def testMeth($*args)   \n1234-$args\n  #end def\n$testMeth",
                    "1234-()\n")

    def test8(self):
        """#def with **KWs, gobble WS"""
        self.verify("  #def testMeth($**KWs)   \n1234-$KWs\n  #end def\n$testMeth",
                    "1234-{}\n")

    def test9(self):
        """#def with *args + **KWs, gobble WS"""
        self.verify("  #def testMeth($*args, $**KWs)   \n1234-$args-$KWs\n  #end def\n$testMeth",
                    "1234-()-{}\n")

    def test10(self):
        """#def with *args + **KWs, gobble WS"""
        self.verify(
            "  #def testMeth($*args, $**KWs)   \n1234-$args-$KWs.a\n  #end def\n$testMeth(1,2, a=1)",
            "1234-(1, 2)-1\n")

    def test11(self):
        """single line #def with extra WS"""
        self.verify(
            "#def testMeth: aoeuaoeu\n- $testMeth -",
            "- aoeuaoeu -")

    def test12(self):
        """single line #def with extra WS and nested $placeholders"""
        self.verify(
            "#def testMeth: $anInt $aFunc(1234)\n- $testMeth -",
            "- 1 1234 -")

    def test13(self):
        """single line #def escaped $placeholders"""
        self.verify(
            "#def testMeth: \$aFunc(\$anInt)\n- $testMeth -",
            "- $aFunc($anInt) -")

    def test14(self):
        """single line #def 1 escaped $placeholders"""
        self.verify(
            "#def testMeth: \$aFunc($anInt)\n- $testMeth -",
            "- $aFunc(1) -")

    def test15(self):
        """single line #def 1 escaped $placeholders + more WS"""
        self.verify(
            "#def testMeth  : \$aFunc($anInt)\n- $testMeth -",
            "- $aFunc(1) -")

    def test16(self):
        """multiline #def with $ on methodName"""
        self.verify("#def $testMeth\n1234\n#end def\n$testMeth",
                    "1234\n")

    def test17(self):
        """single line #def with $ on methodName"""
        self.verify("#def $testMeth:1234\n$testMeth",
                    "1234")

    def test18(self):
        """single line #def with an argument"""
        self.verify("#def $testMeth($arg=1234):$arg\n$testMeth",
                    "1234")
class DecoratorDirective(OutputTest):
    '''Tests for applying Python decorators to #def and #block via the #@
    directive, including the error case of stray text after the decorator.'''

    def test1(self):
        """single line #def with decorator"""
        self.verify("#from Cheetah.Tests.SyntaxAndOutput import testdecorator\n"
                    + "#@testdecorator"
                    + "\n#def $testMeth():1234\n$testMeth",
                    "1234")
        self.verify("#from Cheetah.Tests.SyntaxAndOutput import testdecorator\n"
                    + "#@testdecorator"
                    + "\n#block $testMeth():1234",
                    "1234")
        try:
            # Stray text between #@decorator and the #def must be rejected.
            self.verify(
                "#from Cheetah.Tests.SyntaxAndOutput import testdecorator\n"
                + "#@testdecorator\n sdf"
                + "\n#def $testMeth():1234\n$testMeth",
                "1234")
        except ParseError:
            pass
        else:
            self.fail('should raise a ParseError')
# Function decorators require Python >= 2.4; drop the decorator tests on
# older interpreters so the suite still runs there.
if versionTuple < (2,4):
    del DecoratorDirective
class BlockDirective(OutputTest):
    '''Output tests for the #block directive: multiline and single-line
    forms, argument strings with defaults, nesting, and escaped
    placeholders.'''

    def test1(self):
        """#block without argstring"""
        self.verify("#block testBlock\n1234\n#end block",
                    "1234\n")
        self.verify("#block testBlock ##comment\n1234\n#end block",
                    "1234\n")

    def test2(self):
        """#block without argstring, gobble WS"""
        self.verify("  #block testBlock   \n1234\n  #end block  ",
                    "1234\n")

    def test3(self):
        """#block with argstring, gobble WS

        Because blocks can be reused in multiple parts of the template
        arguments (!!with defaults!!) can be given."""
        self.verify("  #block testBlock($a=999)   \n1234-$a\n  #end block  ",
                    "1234-999\n")

    def test4(self):
        """#block with 2 args, gobble WS"""
        self.verify("  #block testBlock($a=999, $b=444)   \n1234-$a$b\n  #end block  ",
                    "1234-999444\n")

    def test5(self):
        """#block with 2 nested blocks

        Blocks can be nested to any depth and the name of the block is
        optional for the #end block part: #end block OR #end block [name]"""
        self.verify("""#block testBlock
this is a test block
#block outerNest
outer
#block innerNest
inner
#end block innerNest
#end block outerNest
---
#end block testBlock
""",
                    "this is a test block\nouter\ninner\n---\n")

    def test6(self):
        """single line #block """
        self.verify(
            "#block testMeth: This is my block",
            "This is my block")

    def test7(self):
        """single line #block with WS"""
        self.verify(
            "#block testMeth: This is my block",
            "This is my block")

    def test8(self):
        """single line #block 1 escaped $placeholders"""
        self.verify(
            "#block testMeth: \$aFunc($anInt)",
            "$aFunc(1)")

    def test9(self):
        """single line #block 1 escaped $placeholders + WS"""
        self.verify(
            "#block testMeth: \$aFunc( $anInt )",
            "$aFunc( 1 )")

    def test10(self):
        """single line #block 1 escaped $placeholders + more WS"""
        self.verify(
            "#block testMeth  : \$aFunc( $anInt )",
            "$aFunc( 1 )")

    def test11(self):
        """multiline #block $ on argstring"""
        self.verify("#block $testBlock\n1234\n#end block",
                    "1234\n")

    def test12(self):
        """single line #block with $ on methodName """
        self.verify(
            "#block $testMeth: This is my block",
            "This is my block")

    def test13(self):
        """single line #block with an arg """
        self.verify(
            "#block $testMeth($arg='This is my block'): $arg",
            "This is my block")

    def test14(self):
        """single line #block with None for content"""
        self.verify(
            """#block $testMeth: $None\ntest $testMeth-""",
            "test -")

    def test15(self):
        """single line #block with nothing for content"""
        self.verify(
            """#block $testMeth: \nfoo\n#end block\ntest $testMeth-""",
            "foo\ntest foo\n-")
class IncludeDirective(OutputTest):
    '''Output tests for the #include directive, both raw and parsed, from a
    source string and from the parseTest.txt fixture file.'''

    def setUp(self):
        # Write the fixture file the #include-from-file tests read.
        # Bug fix: the original ended with "fp.close" (no parentheses), which
        # referenced the bound method without calling it and so never closed
        # the handle.  A context manager guarantees flush-and-close.
        with open('parseTest.txt', 'w') as fp:
            fp.write("$numOne $numTwo")

    def tearDown(self):
        # Remove the fixture so one test's file never leaks into another run.
        if os.path.exists('parseTest.txt'):
            os.remove('parseTest.txt')

    def test1(self):
        """#include raw of source $emptyString"""
        self.verify("#include raw source=$emptyString",
                    "")

    def test2(self):
        """#include raw of source $blockToBeParsed"""
        self.verify("#include raw source=$blockToBeParsed",
                    "$numOne $numTwo")

    def test3(self):
        """#include raw of 'parseTest.txt'"""
        self.verify("#include raw 'parseTest.txt'",
                    "$numOne $numTwo")

    def test4(self):
        """#include raw of $includeFileName"""
        self.verify("#include raw $includeFileName",
                    "$numOne $numTwo")

    def test5(self):
        """#include raw of $includeFileName, with WS"""
        self.verify("  #include raw $includeFileName  ",
                    "$numOne $numTwo")

    def test6(self):
        """#include raw of source= , with WS"""
        self.verify("  #include raw source='This is my $Source '*2  ",
                    "This is my $Source This is my $Source ")

    def test7(self):
        """#include of $blockToBeParsed"""
        self.verify("#include source=$blockToBeParsed",
                    "1 2")

    def test8(self):
        """#include of $blockToBeParsed, with WS"""
        self.verify("  #include source=$blockToBeParsed  ",
                    "1 2")

    def test9(self):
        """#include of 'parseTest.txt', with WS"""
        self.verify("  #include source=$blockToBeParsed  ",
                    "1 2")

    def test10(self):
        """#include of "parseTest.txt", with WS"""
        self.verify("  #include source=$blockToBeParsed  ",
                    "1 2")

    def test11(self):
        """#include of 'parseTest.txt', with WS and surrounding text"""
        self.verify("aoeu\n  #include source=$blockToBeParsed  \naoeu",
                    "aoeu\n1 2aoeu")

    def test12(self):
        """#include of 'parseTest.txt', with WS and explicit closure"""
        self.verify("  #include source=$blockToBeParsed#  ",
                    "  1 2  ")
class SilentDirective(OutputTest):
    '''Tests that #silent evaluates an expression for its side effects while
    suppressing any output from it.'''

    def test1(self):
        """simple #silent"""
        self.verify("#silent $aFunc",
                    "")

    def test2(self):
        """simple #silent"""
        self.verify("#silent $anObj.callIt\n$anObj.callArg",
                    "1234")
        self.verify("#silent $anObj.callIt ##comment\n$anObj.callArg",
                    "1234")

    def test3(self):
        """simple #silent"""
        self.verify("#silent $anObj.callIt(99)\n$anObj.callArg",
                    "99")
class SetDirective(OutputTest):
    '''Output tests for the #set directive: local/global/module scope,
    collection literals, cached placeholders, and tuple-style unpacking
    assignments.'''

    def test1(self):
        """simple #set"""
        self.verify("#set $testVar = 'blarg'\n$testVar",
                    "blarg")
        self.verify("#set testVar = 'blarg'\n$testVar",
                    "blarg")
        self.verify("#set testVar = 'blarg'##comment\n$testVar",
                    "blarg")

    def test2(self):
        """simple #set with no WS between operands"""
        self.verify("#set       $testVar='blarg'",
                    "")

    def test3(self):
        """#set + use of var"""
        self.verify("#set $testVar = 'blarg'\n$testVar",
                    "blarg")

    def test4(self):
        """#set + use in an #include"""
        self.verify("#set global $aSetVar = 1234\n#include source=$includeBlock2",
                    "1 2 1234")

    def test5(self):
        """#set with a dictionary"""
        self.verify("""#set $testDict = {'one':'one1','two':'two2','three':'three3'}
$testDict.one
$testDict.two""",
                    "one1\ntwo2")

    def test6(self):
        """#set with string, then used in #if block"""
        self.verify("""#set $test='a string'\n#if $test#blarg#end if""",
                    "blarg")

    def test7(self):
        """simple #set, gobble WS"""
        self.verify("   #set $testVar = 'blarg'   ",
                    "")

    def test8(self):
        """simple #set, don't gobble WS"""
        self.verify("  #set $testVar = 'blarg'#---",
                    "  ---")

    def test9(self):
        """simple #set with a list"""
        self.verify("   #set $testVar = [1, 2, 3]  \n$testVar",
                    "[1, 2, 3]")

    def test10(self):
        """simple #set global with a list"""
        self.verify("   #set global $testVar = [1, 2, 3]  \n$testVar",
                    "[1, 2, 3]")

    def test11(self):
        """simple #set global with a list and *cache

        Caching only works with global #set vars.  Local vars are not
        accesible to the cache namespace.
        """
        self.verify("   #set global $testVar = [1, 2, 3]  \n$*testVar",
                    "[1, 2, 3]")

    def test12(self):
        """simple #set global with a list and *<int>*cache"""
        self.verify("   #set global $testVar = [1, 2, 3]  \n$*5*testVar",
                    "[1, 2, 3]")

    def test13(self):
        """simple #set with a list and *<float>*cache"""
        self.verify("   #set global $testVar = [1, 2, 3]  \n$*.5*testVar",
                    "[1, 2, 3]")

    def test14(self):
        """simple #set without NameMapper on"""
        self.verify("""#compiler useNameMapper = 0\n#set $testVar = 1 \n$testVar""",
                    "1")

    def test15(self):
        """simple #set without $"""
        self.verify("""#set testVar = 1 \n$testVar""",
                    "1")

    def test16(self):
        """simple #set global without $"""
        self.verify("""#set global testVar = 1 \n$testVar""",
                    "1")

    def test17(self):
        """simple #set module without $"""
        self.verify("""#set module __foo__ = 'bar'\n$__foo__""",
                    "bar")

    def test18(self):
        """#set with i,j=list style assignment"""
        self.verify("""#set i,j = [1,2]\n$i$j""",
                    "12")
        self.verify("""#set $i,$j = [1,2]\n$i$j""",
                    "12")

    def test19(self):
        """#set with (i,j)=list style assignment"""
        self.verify("""#set (i,j) = [1,2]\n$i$j""",
                    "12")
        self.verify("""#set ($i,$j) = [1,2]\n$i$j""",
                    "12")

    def test20(self):
        """#set with i, (j,k)=list style assignment"""
        self.verify("""#set i, (j,k) = [1,(2,3)]\n$i$j$k""",
                    "123")
        self.verify("""#set $i, ($j,$k) = [1,(2,3)]\n$i$j$k""",
                    "123")
class IfDirective(OutputTest):
def test1(self):
"""simple #if block"""
self.verify("#if 1\n$aStr\n#end if\n",
"blarg\n")
self.verify("#if 1:\n$aStr\n#end if\n",
"blarg\n")
self.verify("#if 1: \n$aStr\n#end if\n",
"blarg\n")
self.verify("#if 1: ##comment \n$aStr\n#end if\n",
"blarg\n")
self.verify("#if 1 ##comment \n$aStr\n#end if\n",
"blarg\n")
self.verify("#if 1##for i in range(10)#$i#end for##end if",
'0123456789')
self.verify("#if | |
# gh_stars: 0
#!/usr/bin/env python
"""Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatx=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
"""
from __future__ import print_function
import os
import json
import time
import random
import argparse
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.preprocessing.image import ImageDataGenerator, NumpyArrayIterator
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Dropout, Flatten, Input, merge, Concatenate, concatenate
from keras.layers import Conv2D, MaxPooling2D, Lambda, Merge
from keras import backend as K
from keras.optimizers import SGD,Adam
from keras.regularizers import l2
from keras.utils import plot_model
from sklearn import metrics
from scipy.misc import imsave
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.8
config.gpu_options.polling_inactive_delay_msecs = 10
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
from cnn import eucl_dist_output_shape, contrastive_loss, euclidean_distance
import settings
def euc_dist(x):
    '''Merge function: squared Euclidean distance between the two inputs.

    x is a pair of tensors; the result is reshaped to a (batch, 1) column.
    '''
    left, right = x
    diff = left - right
    sq_dist = (diff ** 2).sum(axis=1)
    # Column-vector shape so downstream layers see an explicit feature axis.
    return K.reshape(sq_dist, (sq_dist.shape[0], 1))
def euc_dist_shape(input_shape):
    '''Merge output shape for euc_dist: (batch_size, 1).

    input_shape is a sequence of the two input shapes; the batch dimension
    of the first input carries through to the output.
    '''
    batch_size = input_shape[0][0]
    return (batch_size, 1)
def plot_model_history(model_history, filename):
    '''Save a two-panel accuracy/loss training-history plot to filename.

    model_history is the History object returned by model.fit*; this reads
    the 'acc', 'val_acc', 'loss' and 'val_loss' series from its .history.
    '''
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    # summarize history for accuracy
    axs[0].plot(range(1, len(model_history.history['acc']) + 1),
                model_history.history['acc'])
    axs[0].plot(range(1, len(model_history.history['val_acc']) + 1),
                model_history.history['val_acc'])
    axs[0].set_title('Model Accuracy')
    axs[0].set_ylabel('Accuracy')
    axs[0].set_xlabel('Epoch')
    # NOTE(review): the second positional argument of set_xticks is "labels";
    # passing len(...)/10 here looks like it was meant to be a tick step --
    # confirm intent (newer Matplotlib rejects a scalar for labels).
    axs[0].set_xticks(np.arange(1,
                                len(model_history.history['acc']) + 1),
                      len(model_history.history['acc']) / 10)
    axs[0].legend(['train', 'val'], loc='best')
    # summarize history for loss
    axs[1].plot(range(1, len(model_history.history['loss']) + 1),
                model_history.history['loss'])
    axs[1].plot(range(1, len(model_history.history['val_loss']) + 1),
                model_history.history['val_loss'])
    axs[1].set_title('Model Loss')
    axs[1].set_ylabel('Loss')
    axs[1].set_xlabel('Epoch')
    axs[1].set_xticks(np.arange(1,
                                len(model_history.history['loss']) + 1),
                      len(model_history.history['loss']) / 10)
    axs[1].legend(['train', 'val'], loc='best')
    fig.savefig(filename)
    # Close the figure to free memory when this is called repeatedly.
    plt.close(fig)
def accuracy(x, y, model, class_names, cm_filename, datagen, words=None, anomaly=False):
    '''Evaluate model on (x, y), save a confusion-matrix figure, and return
    (accuracy_percent, predicted_class, true_class, preprocessed_x).

    x, y        : input images and one-hot labels.
    model       : compiled Keras model to evaluate.
    class_names : display names for the matrix axes (not mutated).
    cm_filename : path the confusion-matrix figure is written to.
    datagen     : fitted ImageDataGenerator used to preprocess x.
    words       : optional word-feature array for two-input models.
    anomaly     : when True, run the anomaly detector and add an extra
                  "anomaly" class to the matrix.
    '''
    num_classes = len(class_names)
    print(len(set(np.argmax(y, axis=1))))
    # Push the inputs through the same preprocessing pipeline as training.
    # Bug fix: ".next()" is the Python-2 iterator protocol and fails for the
    # word_datagen generator on Python 3; next(...) works for both paths.
    if words is not None:
        x, y = next(word_datagen(datagen, x, words, y, len(x)))
    else:
        x, y = next(datagen.flow(x, y, batch_size=len(x), shuffle=False))
    result = model.predict(x)
    anomaly_class = num_classes
    anomalies = np.zeros(len(result))
    if anomaly:
        from dl_inference_service import DlInferenceService
        dlis = DlInferenceService()
        anomalies = dlis.anomaly_model.predict(result)
        # Bug fix: "class_names += [...]" mutated the caller's list on every
        # call; concatenation keeps the extra class local to this function.
        class_names = class_names + ['anomaly']
        num_classes += 1
    predicted_class = np.argmax(result, axis=1)
    predicted_class[anomalies == 1] = anomaly_class
    true_class = np.argmax(y, axis=1)
    if anomaly:
        # Force one known anomaly so the matrix always has the extra row/col.
        predicted_class[0] = anomaly_class
        true_class[0] = anomaly_class
    print(len(set(true_class)))
    print(predicted_class[0:10])
    print(true_class[0:10])
    np.save('small.npy', x[0:10])
    num_correct = np.sum(predicted_class == true_class)
    accuracy = float(num_correct) / result.shape[0]
    # draw the confusion matrix
    confusion_matrix = np.zeros((num_classes, num_classes))
    for idx in range(len(predicted_class)):
        confusion_matrix[true_class[idx]][predicted_class[idx]] += 1
    conf_arr = confusion_matrix
    norm_conf = []
    for row in conf_arr:
        row_total = sum(row)
        tmp_arr = []
        for cell in row:
            try:
                tmp_arr.append(float(cell) / float(row_total))
            except ZeroDivisionError:
                tmp_arr.append(0)
        # the sqrt makes the colors a bit better
        norm_conf.append(np.sqrt(tmp_arr))
    fig = plt.figure(figsize=(40, 40), dpi=150)
    plt.clf()
    ax = fig.add_subplot(111)
    ax.set_aspect(1)
    res = ax.imshow(np.array(norm_conf), cmap=plt.cm.Blues,
                    interpolation='nearest')
    width, height = conf_arr.shape
    # Bug fix: the original used xrange(), which does not exist on Python 3
    # (this script otherwise targets Python-3/TF2-era APIs).
    for xx in range(width):
        for yy in range(height):
            ax.annotate(str(int(conf_arr[xx][yy])), xy=(yy, xx),
                        horizontalalignment='center',
                        verticalalignment='center',
                        fontsize=10)
    # cb = fig.colorbar(res)
    plt.xticks(range(width), class_names, rotation=90)
    plt.yticks(range(height), class_names)
    plt.savefig(cm_filename)
    print(cm_filename)
    return accuracy * 100, predicted_class, true_class, x
def load_data(data_folder):
    '''Load the pre-split train/validation arrays, scale pixels to [0, 1],
    and one-hot encode the labels.

    data_folder is a callable mapping a bare file name to its full path.
    Returns (x_train, x_test, y_train, y_test, num_classes).
    '''
    x_train = np.load(data_folder("training_x.npy"))
    y_train = np.load(data_folder("training_y.npy"))
    x_test = np.load(data_folder("validation_x.npy"))
    y_test = np.load(data_folder("validation_y.npy"))
    # Report dataset dimensions.
    print('x_train shape:', x_train.shape)
    print('y_train shape:', y_train.shape)
    print('x_test shape:', x_test.shape)
    print('y_test shape:', y_test.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # Scale pixel values from [0, 255] down to [0.0, 1.0].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # The number of distinct label values determines the one-hot width.
    num_classes = np.unique(y_train).shape[0]
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    return x_train, x_test, y_train, y_test, num_classes
def load_word_data(data_folder):
    '''Load the word-feature arrays for the training and validation splits.

    data_folder is a callable mapping a bare file name to its full path.
    Returns (training_words, validation_words).
    '''
    training_words = np.load(data_folder("training_x_words.npy"))
    validation_words = np.load(data_folder("validation_x_words.npy"))
    return training_words, validation_words
def create_model(embedding_size, num_classes, model_type='', siamese=False, conv_only=False):
    '''Build the CNN used throughout this script.

    model_type == 'simple' selects the smaller three-stage stack below and
    returns from within that branch; any other value builds the larger stack
    at the bottom of the function.

    embedding_size : width of the penultimate 'embedding' Dense layer.
    num_classes    : width of the final softmax layer.
    siamese        : stop after the embedding layer (simple model only).
    conv_only      : stop after Flatten, i.e. no dense head at all.
    '''
    model = Sequential()
    if model_type == 'simple':
        # Stage 1: five 3x3 conv layers at 32 filters, then pool.
        model.add(Conv2D(32, (3, 3), padding='same', activation='elu', input_shape=(32, 32, 1)))
        model.add(Conv2D(32, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(32, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(32, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(32, (3, 3), padding='same', activation='elu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(fmp)
        model.add(Dropout(0.15))
        # Stage 2: 64 filters.
        model.add(Conv2D(64, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(64, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(64, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(64, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(64, (3, 3), padding='same', activation='elu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        # Stage 3: 128 filters.
        model.add(Conv2D(128, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(128, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(128, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(128, (3, 3), padding='same', activation='elu'))
        model.add(Conv2D(128, (3, 3), padding='same', activation='elu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5, name='dropout'))
        model.add(Flatten(name='flattened'))
        if conv_only:
            return model
        model.add(Dense(embedding_size, activation='elu', name='embedding'))
        if siamese:
            return model
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax'))
        return model
    # Larger default architecture: progressively wider conv stacks with
    # increasing dropout between pooling stages.
    model.add(Conv2D(384, (3, 3), padding='same', activation='elu', input_shape=(32, 32, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(384, (1, 1), padding='same', activation='elu'))
    model.add(Conv2D(384, (2, 2), padding='same', activation='elu'))
    model.add(Conv2D(640, (2, 2), padding='same', activation='elu'))
    model.add(Conv2D(640, (2, 2), padding='same', activation='elu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))
    model.add(Conv2D(640, (1, 1), padding='same', activation='elu'))
    model.add(Conv2D(768, (2, 2), padding='same', activation='elu'))
    model.add(Conv2D(768, (2, 2), padding='same', activation='elu'))
    model.add(Conv2D(768, (2, 2), padding='same', activation='elu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(768, (1, 1), padding='same', activation='elu'))
    model.add(Conv2D(896, (2, 2), padding='same', activation='elu'))
    model.add(Conv2D(896, (2, 2), padding='same', activation='elu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(896, (3, 3), padding='same', activation='elu'))
    model.add(Conv2D(1024, (2, 2), padding='same', activation='elu'))
    model.add(Conv2D(1024, (2, 2), padding='same', activation='elu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))
    model.add(Conv2D(1024, (1, 1), padding='same', activation='elu'))
    model.add(Conv2D(1152, (2, 2), padding='same', activation='elu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    if conv_only:
        return model
    # NOTE: unlike the 'simple' branch, this path ignores the siamese flag.
    model.add(Dense(embedding_size, activation='elu', name='embedding'))
    model.add(Dense(num_classes, activation='softmax', name='classification'))
    return model
def initialize_model(embedding_size, num_classes, model_type=''):
    '''Build and compile a classification CNN.

    embedding_size and num_classes are forwarded to create_model; model_type
    selects the architecture there.  Returns the compiled model.

    Fix: removed the dead locals p_ratio and the fractional-max-pool Lambda
    (fmp) -- they were created but never added to the model, and needlessly
    coupled this function to tf.nn.fractional_max_pool.
    '''
    model = create_model(embedding_size, num_classes, model_type=model_type)
    # initiate RMSprop optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def initialize_word_model(embedding_size, num_classes, model_type=''):
    '''Build and compile a two-input model that combines CNN image features
    with a dense encoding of 835-dimensional word features.

    The image branch is the convolutional trunk from create_model
    (conv_only=True); the word branch is a small dense encoder.  Their
    concatenation feeds a dense head ending in a softmax over num_classes.
    '''
    base_model = create_model(embedding_size, num_classes, model_type=model_type, conv_only=True)
    left_input = Input((32, 32, 1), name='left')
    right_input = Input((835,), name='right')
    # Dense encoder for the word-feature vector.
    word_model = Sequential()
    word_model.add(Dense(1024, activation='elu', input_shape=(835,)))
    word_model.add(Dropout(0.5))
    word_model.add(Dense(512, activation='elu'))
    word_model.add(Dropout(0.5))
    encoded_l = base_model(left_input)
    encoded_r = word_model(right_input)
    # Fuse the two branches and classify.
    both = concatenate([encoded_l, encoded_r])
    out = Dense(4096, activation='elu')(both)
    sigh = Dense(4096, activation='elu')(out)
    sigh = Dropout(0.5)(sigh)
    sigh = Dense(embedding_size, activation='elu')(sigh)
    sigh = Dropout(0.5)(sigh)
    sigh = Dense(num_classes, activation='softmax')(sigh)
    # NOTE(review): Model(input=..., output=...) is the legacy Keras-1 kwarg
    # spelling; Keras 2 expects inputs=/outputs=.  Confirm the pinned version.
    model = Model(input=[left_input, right_input], output=sigh)
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def initialize_siamese_model(embedding_size, num_classes, model_type='',
                             weights_file='/home/ranjitha/code/mobile-embeddings/clustering_with_cnns/saved_models_f_simple/small_cnn_weights_10_512.h5'):
    '''Build and compile a siamese network on top of a pretrained CNN.

    embedding_size : width of the embedding layer added on the shared trunk.
    num_classes    : width of the softmax feeding the distance merge.
    model_type     : retained for signature compatibility (no longer used;
                     the previous create_model call built a model that was
                     immediately discarded).
    weights_file   : path of the pretrained classifier to reuse.  Previously
                     a hard-coded absolute path; now a parameter whose
                     default is the original path, so existing callers are
                     unaffected.
    '''
    left_input = Input((32, 32, 1), name='left')
    right_input = Input((32, 32, 1), name='right')
    # Load the pretrained classifier and reuse everything except its softmax.
    file_model = keras.models.load_model(weights_file)
    print("file layers")
    print(file_model.layers)
    base_model = Sequential()
    for layer in file_model.layers[:-1]:
        # Bug fix: the original set "layer.training = False", which is not a
        # Keras layer attribute; "trainable" is what freezes layer weights.
        layer.trainable = False
        base_model.add(layer)
    # New trainable head on top of the frozen trunk.
    base_model.add(Dense(4096, activation='elu'))
    base_model.add(Dropout(0.2))
    base_model.add(Dense(embedding_size, activation='elu'))
    base_model.add(Dense(num_classes, activation='softmax'))
    # The two branches share the same weights (same base_model instance).
    encoded_l = base_model(left_input)
    encoded_r = base_model(right_input)
    print(type(encoded_l))
    opt = keras.optimizers.rmsprop()
    both = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([encoded_l, encoded_r])
    model = Model(input=[left_input, right_input], output=both)
    # train
    model.compile(loss=contrastive_loss, optimizer=opt)
    print(model.layers[-2].get_output_at(0))
    return model
def convert_to_normal(model, num_classes):
    '''Extract layers [1:3] of a (siamese) model into a frozen Sequential
    network, optionally appending a new trainable Dense head, and compile it
    as an ordinary classifier.'''
    # new_model = Model(inputs=model.get_input_at(0), outputs=model.layers[-2].get_output_at(0))
    print("model layers")
    print(model.layers)
    new_model = Sequential()
    # Copy only layers [1:3] of the source model (skips the input layer).
    for layer in model.layers[1:3]:
        new_model.add(layer)
    # Freeze everything copied; only the new head below remains trainable.
    for layer in new_model.layers:
        layer.trainable = False
    if num_classes:
        # NOTE(review): output_dim= is the Keras-1 spelling of units=, and an
        # 'elu' activation here differs from the usual softmax classifier
        # head -- confirm both are intentional.
        new_model.add(Dense(output_dim=num_classes, activation='elu'))
    model = new_model
    opt = keras.optimizers.rmsprop(lr=0.00001, decay=1e-4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def initialize_datagen(x_train):
    """Create a Keras ImageDataGenerator and fit its statistics on x_train.

    Feature-wise centering/std normalisation and ZCA whitening are enabled,
    plus small random shifts; rotations and flips are disabled.

    :param x_train: training images used to compute the normalisation
        statistics (mean/std and ZCA components)
    :return datagen: the fitted generator
    """
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=True,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=True,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=True,  # apply ZCA whitening
        rotation_range=0,
        # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,
        # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images
        # preprocessing_function=lambda t: random.choice([t, 1 - t]), # randomly invert images
        vertical_flip=False)  # randomly flip images
    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    return datagen
def word_datagen(datagen, x, x_words, y, batch_size):
    """Generator yielding ([image_batch, word_batch], label_batch) tuples.

    Images flow through the fitted ``datagen`` (normalisation/augmentation)
    while word vectors pass through an identity generator; both flows are
    unshuffled so corresponding samples stay paired.

    :param datagen: fitted ImageDataGenerator for the images
    :param x: image array
    :param x_words: per-sample word-feature matrix
    :param y: labels
    :param batch_size: yielded batch size
    """
    word_generator = ImageDataGenerator()
    image_flow = datagen.flow(x, y, shuffle=False, batch_size=batch_size)
    original_shape = x_words.shape
    # Words are pushed through the generator as fake 1x1 "images".
    word_flow = word_generator.flow(x_words.reshape(original_shape + (1, 1)), shuffle=False, batch_size=batch_size)
    while True:
        # PY3 FIX: use the next() builtin instead of the py2-only .next()
        images, labels = next(image_flow)
        words = next(word_flow)
        if words.shape[0] != batch_size:
            # NOTE(review): `image_flow` has already advanced here, so skipping
            # this partial word batch desynchronises images and words until
            # both flows wrap around — confirm this is intended.
            print(words.shape)
            continue
        yield [images, words.reshape((batch_size, original_shape[1]))], labels
def train_model(model, datagen, x_train, y_train, x_test, y_test, batch_size, epochs, train_words=None, test_words=None):
    """Train ``model`` via fit_generator with the augmentation generator.

    When both word-feature arrays are supplied, the paired word generator is
    used and validation data is a single full-size batch drawn from it;
    otherwise a plain image flow is used.

    :return model_info: the Keras History object from fit_generator
    """
    if train_words is not None and test_words is not None:
        model_info = model.fit_generator(
            word_datagen(datagen, x_train, train_words, y_train, batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epochs,
            # PY3 FIX: builtin next() instead of the py2-only .next()
            validation_data=next(word_datagen(datagen, x_test, test_words, y_test, len(x_test))),
            workers=1)
        return model_info
    model_info = model.fit_generator(
        datagen.flow(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=x_train.shape[0] // batch_size,
        epochs=epochs,
        # Validation set delivered as one batch covering the whole test set.
        validation_data=next(datagen.flow(x_test, y_test, batch_size=len(x_test))),
        workers=4)
    return model_info
def make_buckets(x, y):
ys = np.unique(y)
print(ys)
y = y.flatten()
buckets = {int(c): x[y == c] for c in ys}
for bucket | |
import os
from itertools import chain
from statistics import mean
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset
class ThymioState:
    """
    Plain attribute container for agent state.

    Every key/value pair of ``state_dict`` becomes an attribute of the
    instance.
    :param state_dict
    """
    def __init__(self, state_dict):
        self.__dict__.update(state_dict)
def check_dir(directory):
    """
    Ensure *directory* exists, creating intermediate directories as needed.
    :param directory: path to the directory
    """
    if not os.path.isdir(directory):
        os.makedirs(directory, exist_ok=True)
def directory_for_dataset(dataset, controller):
    """
    Compose (and create) the output directories for a simulation run.

    :param dataset: name of the dataset
    :param controller: name of the controller
    :return run_dir, run_img_dir, run_video_dir: output directories for the simulations
    """
    run_dir = os.path.join(dataset, controller)
    sub_dirs = {name: os.path.join(run_dir, name) for name in ('images', 'videos')}
    for path in sub_dirs.values():
        check_dir(path)
    return run_dir, sub_dirs['images'], sub_dirs['videos']
def directory_for_model(args):
    """
    Compose (and create) the output directories and metrics path for a model.

    :param args: command line arguments (models_folder, task, model_type, model)
    :return model_dir, model_img_dir, model_video_dir, metrics_path: output directories for the models
    """
    model_dir = os.path.join(args.models_folder, args.task, args.model_type, args.model)
    model_img_dir = os.path.join(model_dir, 'images')
    model_video_dir = os.path.join(model_dir, 'videos')
    for path in (model_img_dir, model_video_dir):
        check_dir(path)
    metrics_path = os.path.join(model_dir, 'metrics.pkl')
    return model_dir, model_img_dir, model_video_dir, metrics_path
def cartesian_product(*arrays):
    """
    Compute the cartesian product of the given 1-D arrays.

    :param arrays: arrays used to compute the cartesian product
    :return: 2-D array with one product tuple per row (first array varies slowest)
    """
    grids = np.meshgrid(*arrays, indexing='ij')
    stacked = np.stack(grids, axis=-1)
    return stacked.reshape(-1, len(arrays))
def signed_distance(state):
    """
    Signed distance between the current and the goal position, measured along
    the direction given by the agent's current angle.

    :param state: object with position, goal_position and angle attributes
    :return: projection of (goal - current) onto the heading direction
    """
    cos_a = np.cos(state.angle)
    sin_a = np.sin(state.angle)
    along_current = state.position[0] * cos_a + state.position[1] * sin_a
    along_goal = state.goal_position[0] * cos_a + state.goal_position[1] * sin_a
    return along_goal - along_current
def load_dataset(runs_dir, dataset):
    """
    Read a pickled, doubly-nested list of runs and flatten it (two levels)
    into a DataFrame.

    :param runs_dir: directory containing the simulation runs
    :param dataset: name of the dataset
    :return dataframe: resulting dataframe, one row per flattened record
    """
    pickle_path = os.path.join(runs_dir, dataset)
    nested_runs = pd.read_pickle(pickle_path)
    flat_records = list(chain.from_iterable(chain.from_iterable(nested_runs)))
    return pd.DataFrame(flat_records)
def get_prox_comm(myt):
    """
    Create a dictionary containing all the senders as key and the
    corresponding intensities as value.

    The sender index is inferred from intensities[5] and intensities[6]
    (presumably the rear sensors): a non-zero mean attributes the message to
    ``myt.index``, otherwise to ``myt.index + 2``.

    :param myt: agent
    :return prox_comm: prox_comm sensing
    """
    prox_comm = {}
    for event in myt.prox_comm_events.copy():
        intensities = event.intensities
        if mean([intensities[5], intensities[6]]) != 0:
            sender_idx = myt.index
        else:
            sender_idx = myt.index + 2
        prox_comm['myt%d' % sender_idx] = {'intensities': intensities}
    return prox_comm
def get_received_communication(myt, goal='distribute'):
    """
    Create a list containing the messages received from the back (slot 0)
    and front (slot 1).

    For the 'distribute' goal the raw message is rescaled by 2**10.

    :param myt: agent
    :param goal: goal of the task, by default distribute
    :return communication: the communication received from left to right
    """
    communication = [0, 0]
    for event in myt.prox_comm_events.copy():
        message = event.rx
        if goal == 'distribute':
            message = float(message / (2 ** 10))
        # Non-zero mean of intensities 5/6 -> message arrived at the back.
        slot = 0 if mean([event.intensities[5], event.intensities[6]]) != 0 else 1
        communication[slot] = message
    return communication.copy()
def get_transmitted_communication(myt):
    """
    Return the value transmitted during the communication.

    :param myt: agent
    :return: the communication to be transmitted
    """
    return myt.prox_comm_tx
def parse_prox_comm(prox_comm):
    """
    Flatten a prox_comm dictionary into a 7-element intensity list.

    An empty dict maps to all zeros; a single sender contributes its
    intensities directly; multiple senders are combined element-wise with
    the maximum.

    :param prox_comm: prox_comm dictionary
    :return prox_comm: parsed prox_comm list
    """
    if not prox_comm:
        return [0, 0, 0, 0, 0, 0, 0]
    _, values = get_key_value_of_nested_dict(prox_comm)
    if len(prox_comm) == 1:
        return values[0]
    return np.max(np.array(values), axis=0).tolist()
def get_all_sensors(prox_values, prox_comm):
    """
    Concatenate the prox_values reading with the parsed prox_comm reading.

    :param prox_values: prox_values reading
    :param prox_comm: prox_comm reading (dict, parsed to a 7-element list)
    :return all_sensors: combination of the two sensor readings
    """
    return prox_values + parse_prox_comm(prox_comm)
def dataset_split(file_name, num_run=1000):
    """
    Generate and save a random permutation of the run indices.

    :param file_name: path to the file where to save the splits of the dataset
    :param num_run: number of simulations, by default 1000
    """
    np.save(file_name, np.random.permutation(num_run))
def get_input_sensing(in_label, myt, normalise=True):
    """
    Extract the sensing vector used as network input for one agent.

    :param in_label: input of the net between prox_values, prox_comm or all_sensors
    :param myt: agent (dict snapshot, ThymioState, or live DistributedThymio2)
    :param normalise: states if normalise the input sensing (default: True)
    :return sensing: sensing perceived by the agent
    :raises ValueError: if in_label is not one of the three supported inputs
    """
    # Function-level import — presumably to avoid a circular import with the
    # thymio module; confirm before moving it to module scope.
    from thymio import DistributedThymio2
    if isinstance(myt, dict):
        myt = ThymioState(myt)
    elif isinstance(myt, DistributedThymio2):
        # Live agent: build a minimal state snapshot first.
        if len(myt.prox_comm_events) == 0:
            prox_comm = {'sender': {'intensities': [0, 0, 0, 0, 0, 0, 0]}}
        else:
            prox_comm = get_prox_comm(myt)
        state_dict = {'initial_position': myt.initial_position, 'goal_position': myt.goal_position,
                      'prox_values': myt.prox_values, 'prox_comm': prox_comm}
        myt = ThymioState(state_dict)
    if in_label == 'prox_values':
        prox_values = getattr(myt, 'prox_values').copy()
        sensing = prox_values
    elif in_label == 'prox_comm':
        prox_comm = getattr(myt, 'prox_comm').copy()
        prox_comm = parse_prox_comm(prox_comm)
        sensing = prox_comm
    elif in_label == 'all_sensors':
        prox_values = getattr(myt, 'prox_values').copy()
        prox_comm = getattr(myt, 'prox_comm').copy()
        sensing = get_all_sensors(prox_values, prox_comm)
    else:
        raise ValueError("Invalid value for net_input")
    if normalise:
        # Scale raw sensor units down by a factor of 1000.
        sensing = np.divide(np.array(sensing), 1000).tolist()
    return sensing
def get_key_value_of_nested_dict(nested_dict):
    """
    Access a nested dictionary and return a list of tuples (rv) and values.
    Used to return the list of intensities given a prox_comm dictionary
    containing multiple senders.

    :param nested_dict: nested dictionary, usually containing prox_comm_events
    :return rv, values: rv is a list of tuples where, in each of these, the
        first elements are the chain of keys and the last is the final value.
        values is the list of inner values.
    """
    rv, values = [], []
    for key, val in nested_dict.items():
        try:
            inner_tuples, _ = get_key_value_of_nested_dict(val)
        except AttributeError:
            # val is a leaf (no .items) — record the pair directly.
            rv.append((key, val))
            values.append(val)
        else:
            for tup in inner_tuples:
                rv.append((key,) + tup)
                values.append(tup[1])
    return rv, values
def prepare_dataset(run_dir, split, num_run, n_train=600, n_validation=800):
    """
    Load (optionally regenerating) the dataset split file and slice it into
    train/validation/test index arrays.

    :param run_dir: directory containing the simulation runs
    :param split: states if generate or load the split file
    :param num_run: number of runs used in the simulation
    :param n_train: end index of the training split (default 600, as before)
    :param n_validation: end index of the validation split (default 800, as before)
    :return file, indices: file containing the splits and the splits indices
    """
    file = os.path.join(run_dir, 'dataset_split.npy')
    # Regenerate the split file only on request; otherwise reuse the saved one.
    if split:
        dataset_split(file, num_run)
    # Load the indices
    dataset = np.load(file)
    train_indices = dataset[:n_train]
    validation_indices = dataset[n_train:n_validation]
    test_indices = dataset[n_validation:]
    indices = [train_indices, validation_indices, test_indices]
    return file, indices
def from_indices_to_dataset(runs_dir, train_indices, validation_indices, test_indices, net_input, communication=False, task='distribute'):
    """
    :param runs_dir: directory containing the simulations
    :param train_indices: indices of the sample belonging to the training set
    :param validation_indices: indices of the sample belonging to the validation set
    :param test_indices: indices of the sample belonging to the testing set
    :param net_input: input of the net between prox_values, prox_comm or all_sensors
    :param communication: states if the communication is used by the network
    :param task: task to perform (can be distribute or colour)
    :return: (train_sample, valid_sample, test_sample), train_target, valid_target, test_target, train_quantities, valid_quantities, test_quantities:
             all the train, validation and test samples, targets and masks
    """
    runs = load_dataset(runs_dir, 'simulation.pkl')
    # N: maximum recorded agent count minus 2 (falls back to 5 agents when
    # the quantity column is absent in older datasets).
    if 'myt_quantity' in runs.columns:
        N = runs.myt_quantity.unique().max() - 2
    else:
        runs['myt_quantity'] = 5
        N = 5 - 2
    # Per-run agent-count mask (same "minus 2" convention as N).
    myt_quantities = np.array(runs[['run', 'myt_quantity']].drop_duplicates().myt_quantity) - 2
    # For old datasets
    # N = 3
    # myt_quantities = np.full(shape=(1000,), fill_value=N, dtype='float32')
    if not 'goal_colour' in runs.columns:
        # Derive goal_colour from the agent index when missing: indices past
        # the midpoint get 0, and on an even total the midpoint itself gets 0.
        runs['goal_colour'] = 1
        runs.loc[runs['index'] > ((N + 2) // 2), 'goal_colour'] = 0
        if (N + 2) % 2 == 0:
            runs.loc[runs['index'] == ((N + 2) // 2), 'goal_colour'] = 0
    # Column selection differs only in 'name' (communication) vs 'myt_quantity'.
    if communication:
        runs_sub = runs[['timestep', 'name', 'run', 'motor_left_target', 'goal_colour', 'prox_values', 'prox_comm', 'all_sensors']]
    else:
        runs_sub = runs[['timestep', 'myt_quantity', 'run', 'motor_left_target', 'goal_colour', 'prox_values', 'prox_comm', 'all_sensors']]
    train_runs = runs_sub[runs_sub['run'].isin(train_indices)].reset_index()
    valid_runs = runs_sub[runs_sub['run'].isin(validation_indices)].reset_index()
    test_runs = runs_sub[runs_sub['run'].isin(test_indices)].reset_index()
    train_sample, train_target, train_quantities, _, _ = extract_input_output(train_runs, net_input, N=N,
                                                                              communication=communication,
                                                                              input_combination=False,
                                                                              myt_quantities=myt_quantities,
                                                                              task=task)
    valid_sample, valid_target, valid_quantities, _, _ = extract_input_output(valid_runs, net_input, N=N,
                                                                              communication=communication,
                                                                              input_combination=False,
                                                                              myt_quantities=myt_quantities,
                                                                              task=task)
    test_sample, test_target, test_quantities, _, _ = extract_input_output(test_runs, net_input, N=N,
                                                                           communication=communication,
                                                                           input_combination=False,
                                                                           myt_quantities=myt_quantities,
                                                                           task=task)
    return train_sample, valid_sample, test_sample, train_target, valid_target, test_target, train_quantities, valid_quantities, test_quantities
def from_dataset_to_tensors(train_sample, train_target, valid_sample, valid_target, test_sample, test_target, q_train, q_valid, q_test):
"""
:param train_sample: training set samples
:param train_target: training set targets
:param valid_sample: validation set samples
:param valid_target: validation set targets
:param test_sample: testing set samples
:param test_target: testing set targets
:param q_train: mask containing the number of agents for each sample of the training set
:param q_valid: mask containing the number of agents for each sample of the validation set
:param q_test: mask containing the number of agents for each sample of the testing set
:return test, train, valid: test, train and valid TensorDataset
"""
x_train_tensor = torch.tensor(train_sample, dtype=torch.float32)
x_valid_tensor = torch.tensor(valid_sample, dtype=torch.float32)
x_test_tensor = torch.tensor(test_sample, dtype=torch.float32)
y_train_tensor = torch.tensor(train_target, dtype=torch.float32)
y_valid_tensor = torch.tensor(valid_target, dtype=torch.float32)
y_test_tensor = torch.tensor(test_target, dtype=torch.float32)
q_train_tensor = torch.tensor(q_train, dtype=torch.float32)
q_valid_tensor = torch.tensor(q_valid, dtype=torch.float32)
q_test_tensor = torch.tensor(q_test, dtype=torch.float32)
train = TensorDataset(x_train_tensor, y_train_tensor, q_train_tensor)
valid | |
ordinary function/method templated function/method
# go through child elements, collecting ordinary args and possibly template params
template_params_list = [] # list of tuple, e.g. [('typename', 'T', ''), ('void (*)()', 'F', ''), ('int', 'N', '0')]
template_params_repr_list = [] # list of str, e.g. ['typename T', 'void (*F)()', 'int N = 0']
args_list = [] # list of tuple, e.g. [('char', '', ''), ('void (*)()', 'f', ''), ('int[]', 'a, ''), ('int', 'n', '0')]
args_repr_list = [] # list of str, e.g. ['char', 'void (*f)()', 'int a[]', 'int n = 0']
# specifiers
is_final = False
is_override = False
is_pure_virtual = False
is_no_throw_bool_or_None = False # True, False, None (for reason, see below)
for c in cursor.get_children():
if c.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER:
template_param_text = _format_arg_tuple_str_spelling(_get_text_range(c.extent).replace("class ", "typename "))
template_params_repr_list.append(template_param_text)
template_params_list.append({
"type": _collect_type_info(c.type, context_hierarchy, c), # tuple
"arg_spelling": c.spelling, # str
"default_expr": _get_default_expr(template_param_text) # str or NoneType
})
elif c.kind == cindex.CursorKind.TEMPLATE_NON_TYPE_PARAMETER:
template_param_text = _format_arg_tuple_str_spelling(_get_text_range(c.extent))
template_params_repr_list.append(template_param_text)
template_params_list.append({
"type": _collect_type_info(c.type, context_hierarchy, c), # tuple
"arg_spelling": c.spelling, # str
"default_expr": _get_default_expr(template_param_text) # str or NoneType
})
elif c.kind == cindex.CursorKind.PARM_DECL: # the args in the parenthesis
# if the prototype doesn't name the argument, then c.spelling is ""
args_text = _format_arg_tuple_str_spelling(_get_text_range(c.extent))
args_repr_list.append(args_text)
args_list.append({
"type": _collect_type_info(c.type, context_hierarchy, c), # tuple
"arg_spelling": c.spelling, # str
"default_expr": _get_default_expr(args_text) # str or NoneType
})
elif c.kind == cindex.CursorKind.CXX_FINAL_ATTR:
is_final = True
elif c.kind == cindex.CursorKind.CXX_OVERRIDE_ATTR:
is_override = True
# NOTE is_pure_virtual and is_no_throw_bool_or_None are not checked by inspecting c.kind
# 1. possibly function template header
template_header = ""
if template_params_list:
template_header = "template <%s>" % ", ".join(template_params_repr_list)
# 2. return type
if cursor.kind in no_return_funcs_CursorKindCursorKind:
return_type = None
else:
return_type = _collect_type_info(cursor.result_type) # dict { spelling: str, type_info: dict }
# 3. function name
func_name = str(cursor.displayname).split('(')[0]
# 4. for methods: cv-qualifier, "= 0", "final", "override"
postfix_str_list = []
if cursor.is_const_method():
postfix_str_list.append("const")
# if cursor.is_volatile_method(): # defect in clang.index: this method not provided
# postfix_str_list.append("volatile")
if is_final:
postfix_str_list.append("final")
if is_override:
postfix_str_list.append("override")
if cursor.is_pure_virtual_method():
is_pure_virtual = True
postfix_str_list.append("= 0")
exception_spec = cursor.exception_specification_kind
if exception_spec in noexcept_ExceptionSpecificationKind:
is_no_throw_bool_or_None = True
postfix_str_list.append("noexcept")
elif exception_spec == cindex.ExceptionSpecificationKind.UNEVALUATED:
# not knowing if it's True or False, this is because per C++11, some functions
# are non-throwing even if they are not marked with "noexcept" or "throw()" -
# rule is very complicated: https://en.cppreference.com/w/cpp/language/noexcept_spec
is_no_throw_bool_or_None = None # not True or False
postfix_str = ' '.join(postfix_str_list)
# build prototype string, without template header
if return_type and cursor.kind != cindex.CursorKind.CONVERSION_FUNCTION:
accumulate_proto_str = "%s %s" % (return_type["spelling"], func_name)
else:
accumulate_proto_str = func_name
if cursor.is_virtual_method():
accumulate_proto_str = "virtual %s" % accumulate_proto_str
proto_str = "%s(%s) %s" % (accumulate_proto_str,
', '.join(args_repr_list),
postfix_str)
proto_str_pretty = proto_str
if len(proto_str) > 75:
proto_str_pretty = "%s(\n%s\n) %s" % (accumulate_proto_str,
",\n".join(["\t%s" % arg for arg in args_repr_list]),
postfix_str)
# add template header
proto_str = proto_str if not template_header else template_header + "\n" + proto_str
proto_str_pretty = proto_str_pretty if not template_header else template_header + "\n" + proto_str_pretty
# strip redundant whitespaces at both ends
proto_str = proto_str.strip()
proto_str_pretty = proto_str_pretty.strip()
return (
(proto_str, proto_str_pretty),
template_params_list, args_list, return_type,
(is_final, is_override, is_pure_virtual, is_no_throw_bool_or_None)
)
# C++ access specifiers that may appear in a base-class specifier.
inheritance_access_specifiers = [ "public", "protected", "private" ]
def _format_class_proto(cursor, context_hierarchy=[]):
    """Collect prototype information for a class/struct/class-template cursor.

    :param cursor: clang cursor of the class-like declaration
    :param context_hierarchy: enclosing context, forwarded to _collect_type_info
    :return: ((class_name_str, class_name_str_pretty), template_params_list,
              is_final, base_list)

    NOTE(review): the mutable default ``context_hierarchy=[]`` is shared
    across calls; it appears to be only read here — verify before mutating.
    """
    template_params_list = [] # list of tuple, e.g. [('int', 'N', ''), ('void (*)()', 'F', ''), ('typename', 'T', 'int')]
    template_params_repr_list = [] # list of str, e.g. ['int N', 'void (*F)()', 'typename T = int']
    base_list = []
    is_final = False
    for c in cursor.get_children():
        if c.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER:
            # normalise "class T" to "typename T" in the parameter spelling
            template_param_text = _format_arg_tuple_str_spelling(_get_text_range(c.extent).replace("class ", "typename "))
            template_params_repr_list.append(template_param_text)
            template_params_list.append({
                "type": _collect_type_info(c.type, context_hierarchy, c), # tuple
                "arg_spelling": c.spelling, # str
                "default_expr": _get_default_expr(template_param_text) # str or NoneType
            })
        elif c.kind == cindex.CursorKind.TEMPLATE_NON_TYPE_PARAMETER:
            template_param_text = _format_arg_tuple_str_spelling(_get_text_range(c.extent))
            template_params_repr_list.append(template_param_text)
            template_params_list.append({
                "type": _collect_type_info(c.type, context_hierarchy, c), # tuple
                "arg_spelling": c.spelling, # str
                "default_expr": _get_default_expr(template_param_text) # str or NoneType
            })
        elif c.kind == cindex.CursorKind.CXX_FINAL_ATTR:
            is_final = True
        elif c.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
            # if the base is a class template instatiation, base_spelling includes the "<..>" part
            base_spelling = _format_type_spelling(c.spelling).replace("class ", "").replace("struct ", "")
            base_spelling = _format_type_spelling(base_spelling)
            inheritance_access_specifier = "public" # the default
            is_virtual_inheritance = False # the default
            # defect in clang.cindex:
            # no way to check inheritance access and virtual-ness from cindex.Cursor's method,
            # so I have to go through the tokens
            for t in c.get_tokens():
                if t.kind == cindex.TokenKind.KEYWORD:
                    if t.spelling in inheritance_access_specifiers:
                        inheritance_access_specifier = t.spelling
                    if t.spelling == "virtual":
                        is_virtual_inheritance = True
            base_def = c.get_definition() # cindex.Cursor object to the base class/template definition
            base_list.append({
                "access": inheritance_access_specifier, # str
                "virtual_inheritance": is_virtual_inheritance, # bool
                # str, has the "<..>" part for template instantiation
                "spelling": base_spelling,
                # str, where the base class/template is defined
                "definition_location": _format_location(base_def.location)
            })
    # Build "template <...>" header only when template parameters were found.
    template_header = ""
    if template_params_list:
        template_header = "template <%s>" % ", ".join(template_params_repr_list)
    class_name_str_raw = "class %s" % _format_type_spelling(cursor.spelling)
    class_name_str_raw = class_name_str_raw if not is_final else ("%s final" % class_name_str_raw)
    class_name_str = class_name_str_raw if not template_header else "%s %s" % (
        template_header, class_name_str_raw)
    class_name_str_pretty = class_name_str_raw if not template_header else "%s\n%s" % (
        template_header, class_name_str_raw)
    return (
        (class_name_str.strip(), class_name_str_pretty.strip()),
        template_params_list,
        is_final,
        base_list
    )
def is_deleted_method(cursor):
    """Return True if the method is marked with "= delete".

    clang.cindex exposes no Cursor API for this, so the token stream is
    scanned for the `delete` keyword instead.
    """
    return any(tok.kind == cindex.TokenKind.KEYWORD and tok.spelling == "delete"
               for tok in cursor.get_tokens())
def _format_sizeof_type(type_obj):
sizeof_type_raw = type_obj.get_size()
sizeof_type = sizeof_type_raw if sizeof_type_raw > 0 else None # int or NoneType (e.g. type param)
return sizeof_type
"""
Index visiting
"""
func_like_CursorKind = [ # function-like
cindex.CursorKind.FUNCTION_DECL,
cindex.CursorKind.FUNCTION_TEMPLATE,
cindex.CursorKind.CONVERSION_FUNCTION,
cindex.CursorKind.CONSTRUCTOR,
cindex.CursorKind.DESTRUCTOR,
cindex.CursorKind.CXX_METHOD,
]
method_like_CursorKind = [ # method-like
cindex.CursorKind.CXX_METHOD,
cindex.CursorKind.CONVERSION_FUNCTION, # only valid for class, e.g. MyClass::operator int();
cindex.CursorKind.CONSTRUCTOR,
cindex.CursorKind.DESTRUCTOR,
# FUNCTION_TEMPLATE -- needs to check semantic_parent
]
class_like_CursorKind = [ # class-like
cindex.CursorKind.CLASS_DECL,
cindex.CursorKind.STRUCT_DECL,
cindex.CursorKind.CLASS_TEMPLATE,
]
val_like_CursorKind = [ # value-like
cindex.CursorKind.VAR_DECL,
cindex.CursorKind.FIELD_DECL,
cindex.CursorKind.ENUM_CONSTANT_DECL,
]
array_TypeKind = [
# 1) int arr[5]; int arr[] = {..}; int arr[expr] where expr is an Integral Constant Expression
cindex.TypeKind.CONSTANTARRAY,
# 2) int arr[], as a function formal arg
cindex.TypeKind.INCOMPLETEARRAY,
# 3) int arr[expr]; where expr is not an Integral Constant Expression
cindex.TypeKind.VARIABLEARRAY,
# 4) size unknown until template instantiation, then it becomes either 1) or 3)
cindex.TypeKind.DEPENDENTSIZEDARRAY,
]
pointer_TypeKind = [
cindex.TypeKind.POINTER, # 1) int *p = &n; Class *p = &objClass; int (*p)(int) = &func;
cindex.TypeKind.MEMBERPOINTER, # 2) int Class::* p = &Class::member; int (Class::* p)(int) = &Class::method;
]
# C++ has a very complicated type system
# this function is potentially called recursively
def _collect_type_info(c_type, context_hierarchy=[], c=None): # return a tuple (spelling str, dict)
type_kind = c_type.kind
type_spelling = _format_type(c_type)
sizeof_type = _format_sizeof_type(c_type) # int or NoneType (e.g. type param)
if type_kind in [ cindex.TypeKind.TYPEDEF, cindex.TypeKind.ELABORATED ]:
# if the canonical type (real type under all the layers of typedef) is not a type param, then it is the same
# as type_alias_chain[-1].spelling, i.e. completely resoluted;
# if it is a type param, then it is "(type_parameter)"
canonical_type = c_type.get_canonical()
canonical_type_kind = canonical_type.kind
canonical_type_spelling = _format_type(canonical_type) # str
res = (
type_spelling, {
"type_size": sizeof_type, # int or NoneType
# though this type itself is not a type param, yet as a
# type alias, its underlying type may be a type param
"is_type_alias": True, # bool
"is_type_param": False, # bool
"is_array": False, # bool
"is_pointer": False, # bool
"is_function": False, # bool
# real type, alias resoluted one step only
"type_alias_underlying_type": _format_type(
c_type.get_declaration().underlying_typedef_type), # str
# type alias chain, this type first, completely resoluted last
"type_alias_chain": _format_type_alias_chain(c_type), # str or NoneType
# real type under all the layers of typedef
"canonical_type": _collect_type_info(
canonical_type, context_hierarchy, c), # tuple of (str, dict)
}
) # tuple of (str, dict)
elif type_kind == cindex.TypeKind.UNEXPOSED:
if type_spelling.endswith(")"): # this is a function type, e.g. "int (int, int)"
res = (
type_spelling, {
"type_size": None, # NoneType, function does not have a sizeof result
"is_type_alias": False, # bool
"is_type_param": False, # bool
"is_array": False, # bool
"is_pointer": False, # bool
"is_function": True, # bool
# type_kind is TypeKind.UNEXPOSED, so we cannot extract function return
# type | |
many problems
# if (only_scope is not None):
new_scope = phil_object.get(only_scope)
scope_master = self.master_phil.get(only_scope)
fetched_scope = scope_master.fetch(source=new_scope)
#fetched_scope.show()
find_and_replace_scope(
current_phil=self.working_phil,
new_scope=fetched_scope,
scope_name=only_scope)
new_phil = self.working_phil
else :
new_phil = self.master_phil.fetch(sources=[old_phil, phil_object])
if (new_phil is not None):
self.working_phil = new_phil
if rebuild_index :
self.log2("rebuilding index")
self.rebuild_index(only_scope=only_scope)
else :
self.log("*** ERROR: new phil object is empty")
self._phil_has_changed = True
self.params = None
def erase_scope(self, phil_scope):
    """Delete the named scope from the working phil object in place.

    :param phil_scope: full path of the scope to remove
    """
    delete_phil_objects(self.working_phil, [phil_scope])
# Safe wrapper of merge_phil for loading parameter files from GUI
def merge_param_file(self, file_name):
    """Parse a parameter file and merge it into the working phil.

    Wraps parsing and master-phil validation in Sorry-raising error handling
    so the GUI shows a friendly message instead of a traceback.

    :param file_name: path to the phil parameter file
    :raises Sorry: if the path is not a file, cannot be parsed, or contains
        parameters unknown to the master phil
    """
    if not os.path.isfile(file_name):
        raise Sorry("The path %s does not exist or is not a file." % file_name)
    try :
        phil_object = self.parse(file_name=file_name)
    except KeyboardInterrupt :
        raise
    except Exception as e :
        self.log(e)
        raise Sorry("This parameter file could not be parsed correctly.")
    try :
        # Fetch is done only to validate against the master phil; the actual
        # merge below re-uses the parsed object.
        new_phil = self.master_phil.fetch(source=phil_object)
    except KeyboardInterrupt :
        raise
    except Exception as e :
        self.log(e)
        self.log(open(file_name).read())
        raise Sorry("This file contains invalid parameters for this program. "+
            "Check the manual for a list of allowed parameters "+
            "for each module.")
    self.merge_phil(phil_object=phil_object)
# Safe wrapper of merge_phil for phil strings
def update(self, phil_string, only_scope=None, raise_sorry=True):
    """Parse a phil string and merge it into the working phil.

    :param phil_string: phil-formatted parameter string
    :param only_scope: restrict the merge to this scope (passed through to
        merge_phil)
    :param raise_sorry: if True, parsing failures raise a user-facing Sorry;
        otherwise the original exception is re-raised
    """
    try :
        phil_object = self.parse(phil_string)
        # Validate against the master phil before merging.
        new_phil = self.master_phil.fetch(source=phil_object)
    except KeyboardInterrupt :
        raise
    except Exception as e :
        print(str(e))
        print("bad string:")
        print(str(phil_string))
        if (raise_sorry):
            raise Sorry("An unknown error occurred parsing internal parameters. "+
                "This is probably a bug; if the program was launched with "+
                "the argument --debug, further information will be printed "+
                "to the console.")
        else :
            raise
    self.merge_phil(phil_object=phil_object, only_scope=only_scope)
def adopt_phil(self, phil_object=None, phil_string=None, phil_file=None):
    """Graft an additional scope onto the master phil and refresh state.

    Exactly one of the three arguments must be provided; afterwards the
    working phil, the index, and the extracted params are all rebuilt.

    :param phil_object: already-parsed phil object to adopt
    :param phil_string: phil string to parse and adopt
    :param phil_file: path of a phil file to parse and adopt
    """
    # Exactly one input source: the other two must remain None.
    assert [phil_object, phil_string, phil_file].count(None) == 2
    if phil_string:
        phil_object = self.parse(phil_string)
    elif phil_file:
        phil_object = libtbx.phil.parse(file_name=phil_file)
    self.master_phil.adopt_scope(phil_object)
    self.working_phil = self.master_phil.fetch(sources=[self.working_phil])
    self.rebuild_index()
    self.params = self.working_phil.extract()
#---------------------------------------------------------------------
# DEBUG/TEST METHODS
def check_scopes(self, phil_names):
    """Return the subset of phil_names for which no scope can be found."""
    return [name for name in phil_names
            if self.get_scope_by_name(name) is None]
def log(self, message):
    """Append *message* plus a newline to the internal log stream."""
    line = message + "\n"
    self._log.write(line)
def log2(self, message):
    """Log *message* prefixed with the caller's function, file and line."""
    caller = sys._getframe(1)
    short_name = os.path.basename(caller.f_code.co_filename)
    prefix = "%s (%s:%d)" % (caller.f_code.co_name, short_name, caller.f_lineno)
    self._log.write("%s: %s\n" % (prefix, str(message).strip()))
#---------------------------------------------------------------------
# GUI style handling
def parse_styles(self):
    """Reset all GUI style caches and rebuild them from the working phil."""
    self.style = {}
    self._event_handlers = {}
    self._update_handlers = {}
    self._renderers = {}
    self._file_type_mappings = {}
    self._menu_tree = gui_objects.menu_hierarchy("settings")
    # Walk the phil tree, repopulating the caches initialised above.
    self.generate_gui_components(self.working_phil)
def create_style(self, style_string):
    """Parse a raw phil style string into a gui_objects.style object."""
    return gui_objects.style(style_string)
def generate_gui_components(self, phil_scope, in_submenu=False,
    current_menu=None):
    """Walk the phil tree, collecting GUI styles, handlers and menu layout.

    Recursively visits every object in *phil_scope*, populating
    ``self.style``, the event/update handler maps, the renderer map, the
    hidden-parameter list and the settings menu hierarchy.

    :param phil_scope: phil scope to process
    :param in_submenu: submenu flag propagated through the recursion
    :param current_menu: menu node to attach items to (defaults to the root
        menu tree)
    """
    use_submenu = in_submenu
    if not current_menu :
        current_menu = self._menu_tree
    # Skip template scopes entirely.
    if phil_scope.is_template < 0 :
        return
    for object in phil_scope.objects :
        next_menu = None
        full_object_path = object.full_path()
        if object.style is not None and phil_scope.is_template != -1 :
            style = self.create_style(object.style)
            if (style.selection) and (object.type.phil_type == "str"):
                print("WARNING: deprecated 'str' type with 'selection' style")
                print(" name: %s" % full_object_path)
            self.style[full_object_path] = style
            if style.hidden :
                self._hidden.append(full_object_path)
            if (style.output_dir):
                self._output_dir_path = full_object_path
            # OnUpdate handlers are deprecated in favour of OnChange.
            if style.OnUpdate is not None :
                print("OnUpdate is deprecated (%s)" % full_object_path)
                self._update_handlers[full_object_path] = style.OnUpdate
            elif style.process_hkl :
                self._event_handlers[full_object_path] = "auto_extract_hkl_params"
            if style.OnChange is not None :
                self._event_handlers[full_object_path] = style.OnChange
            if style.renderer is not None :
                self._renderers[full_object_path] = style.renderer
            # Menu placement: a plain menu item, or a submenu to recurse into.
            if style.menu_item :
                if phil_scope.multiple and phil_scope.is_template == 0 :
                    pass
                elif style.parent_submenu :
                    current_menu.add_submenu(style.parent_submenu)
                    current_menu.get_submenu(style.parent_submenu).add_menu_item(
                        full_object_path)
                else :
                    current_menu.add_menu_item(full_object_path)
            elif style.submenu :
                if phil_scope.multiple and phil_scope.is_template == 0 :
                    pass
                elif style.parent_submenu :
                    current_menu.add_submenu(style.parent_submenu)
                    parent_submenu = current_menu.get_submenu(style.parent_submenu)
                    parent_submenu.add_submenu(full_object_path)
                    next_menu = parent_submenu.get_submenu(full_object_path)
                else :
                    current_menu.add_submenu(full_object_path)
                    next_menu = current_menu.get_submenu(full_object_path)
        else :
            # No explicit style: register an empty default style object.
            self.style[full_object_path] = gui_objects.style()
        if not object.is_definition :
            # Recurse into nested scopes.
            self.generate_gui_components(object, use_submenu, next_menu)
            use_submenu = False
def get_scope_style(self, scope_name=None):
    """Return the cached style for *scope_name*, or a fresh default style."""
    try:
        return self.style[scope_name]
    except KeyError:
        return gui_objects.style()
def get_menu_db(self):
    """Return the root settings menu hierarchy built by parse_styles()."""
    return self._menu_tree
def get_file_type_map(self, file_type, default_label=None,
      exclude_params=()):
    """Build (and memoize) a map of parameters that accept files of *file_type*.

    :param file_type: file-type tag matched against each definition's style
    :param default_label: label used when no definition is flagged as the
        default for this file type
    :param exclude_params: parameter paths to leave out of the map
    :returns: a gui_objects.file_type_map covering all matching parameters
    """
    # Return the cached map if this file type was processed before.
    if (file_type in self._file_type_mappings):
        return self._file_type_mappings[file_type]
    param_info = []
    for path_name, def_style in self.style.items():
        def_types = []
        if (def_style.file_type is not None):
            def_types = def_style.get_list("file_type")
        if (file_type in def_types):
            # Skip explicitly excluded params and those styled as
            # not-mappable (no_map) or as output files (new_file).
            if ((def_style.no_map) or (def_style.new_file) or
                    (path_name in exclude_params)):
                continue
            phil_object = self.get_scope_by_name(path_name)
            if isinstance(phil_object, list):
                # 'multiple' paths resolve to a list of copies; use the first.
                phil_object = phil_object[0]
            label = get_standard_phil_label(phil_object)
            parent_scope = phil_object.primary_parent_scope
            # A parameter can take any number of files when it (or its parent
            # scope) is 'multiple', or when it is a strings-type definition.
            if ((phil_object.multiple) or (parent_scope.multiple) or
                    (phil_object.type.phil_type=="strings")):
                count = None
            else :
                count = 1
            if def_style.file_type_default :
                default_label = label
            param_info.append((phil_object.full_path(), label, count))
    type_map = gui_objects.file_type_map(param_info, default_label)
    self._file_type_mappings[file_type] = type_map
    return type_map
def get_seq_file_def_name(self):
    """Return the unique parameter path whose style sets the 'seq_file' flag.

    Returns None when no definition carries the flag; raises RuntimeError
    when the flag appears on more than one definition.
    """
    flagged = [path for path, def_style in self.style.items()
               if def_style.seq_file]
    if len(flagged) > 1:
        raise RuntimeError("Multiple seq_file definitions: %s" % " ".join(flagged))
    if flagged:
        return flagged[0]
    return None
########################################################################
#--- STANDALONE FUNCTIONS
def delete_phil_objects(current_phil, phil_path_list, only_scope=None):
    """Recursively remove objects whose full path is in *phil_path_list*.

    Edits current_phil.objects in place.  Template objects are never deleted.

    :param current_phil: PHIL scope whose object list is pruned
    :param phil_path_list: list of full path names to delete
    :param only_scope: when given, only walk paths that are the scope itself,
        one of its ancestors, or one of its descendants
    """
    assert isinstance(phil_path_list, list)
    i = 0
    # Manual index loop because entries may be deleted during iteration.
    while i < len(current_phil.objects):
        full_path = current_phil.objects[i].full_path()
        if (only_scope is not None):
            # Skip objects unrelated to only_scope.
            if not ((only_scope == full_path) or
                    (only_scope.startswith(full_path + ".")) or
                    (full_path.startswith(only_scope + "."))):
                i += 1
                continue
        if current_phil.objects[i].is_template != 0 :
            # Template objects are preserved unconditionally.
            i += 1
        elif full_path in phil_path_list :
            # No i += 1 here: the next object shifts into slot i.
            del current_phil.objects[i]
        else :
            # XXX: is this always true?
            if hasattr(current_phil.objects[i], "objects"):
                # Recurse into sub-scopes that may contain a targeted path.
                # NOTE(review): this recurses once per matching path_name,
                # which is redundant but harmless -- confirm before changing.
                for path_name in phil_path_list :
                    if path_name.startswith(full_path):
                        delete_phil_objects(current_phil=current_phil.objects[i],
                            phil_path_list=phil_path_list,
                            only_scope=only_scope)
            i += 1
def find_and_replace_scope(current_phil, new_scope, scope_name):
    """Replace all objects named *scope_name* under *current_phil* with the
    objects of *new_scope*, spliced in at the position of the first match.

    Edits current_phil.objects in place; recurses into scopes that are
    ancestors of scope_name.
    """
    i = 0
    while (i < len(current_phil.objects)):
        full_path = current_phil.objects[i].full_path()
        if (full_path == scope_name):
            #assert (not current_phil.objects[i].multiple)
            # Re-parent the replacement objects before splicing them in.
            new_scope.change_primary_parent_scope(current_phil)
            # Delete every existing object with this path (there may be
            # several when the scope is declared 'multiple').
            j = i
            while (j < len(current_phil.objects)):
                if (current_phil.objects[j].full_path() == scope_name):
                    del current_phil.objects[j]
                else :
                    j += 1
            # Splice the new objects in where the first match was found.
            current_phil.objects[i:i] = new_scope.objects
            break
        elif (scope_name.startswith(full_path + ".")):
            # Target lives deeper inside this scope; recurse.
            find_and_replace_scope(
                current_phil=current_phil.objects[i],
                new_scope=new_scope,
                scope_name=scope_name)
        i += 1
def collect_redundant_paths(master_phil, new_phil, multiple_only=True):
    """Return the unique paths on which *new_phil* differs from *master_phil*.

    By default only 'multiple' objects are reported; pass multiple_only=False
    to also include plain definitions.
    """
    return _collect_unique_paths(master_phil.fetch_diff(source=new_phil),
                                 multiple_only)
def _collect_unique_paths(phil_object, multiple_only=True):
paths = []
if phil_object.multiple :
paths.append(phil_object.full_path())
elif phil_object.is_scope :
for object in phil_object.objects :
if not object.full_path() in paths :
paths.extend(_collect_unique_paths(object, multiple_only))
elif not multiple_only and phil_object.is_definition :
paths.append(phil_object.full_path())
return paths
def get_all_path_names(phil_object, paths=None):
    """Collect the full path names of *phil_object* and everything nested in it.

    BUG FIX: the accumulated list was previously never returned, so a
    top-level call (paths=None) always yielded None.  The list supplied by a
    caller is still mutated in place, so existing callers relying on in-place
    accumulation are unaffected.

    :param phil_object: PHIL scope or definition to walk
    :param paths: optional list to append to (created when None)
    :returns: the list of unique full path names, in pre-order
    """
    if paths is None :
        paths = []
    full_path = phil_object.full_path()
    if full_path not in paths :
        paths.append(full_path)
    if phil_object.is_scope :
        for child in phil_object.objects :
            get_all_path_names(child, paths)
    return paths
def index_phil_objects(phil_object,
      path_index,
      text_index,
      template_index,
      multiple_scopes=None,
      multiple_defs=None,
      collect_multiple=True,
      in_template=False,
      expert_levels=None,
      input_files=None):
    """Recursively index a PHIL tree into several lookup dictionaries.

    :param phil_object: scope or definition to index (recurses into scopes)
    :param path_index: dict of full path -> object, or -> list of objects for
        'multiple' paths when collect_multiple is True
    :param text_index: dict of full path -> (label, caption, help,
        is_definition) for text search; None disables text indexing
    :param template_index: dict collecting template objects and their children
    :param multiple_scopes: optional dict flagging 'multiple' scopes
    :param multiple_defs: optional dict flagging 'multiple' definitions
    :param collect_multiple: accumulate every copy of a 'multiple' path
    :param in_template: True while recursing below a template object
    :param expert_levels: optional dict of full path -> expert level,
        inherited from the parent scope when unset on the object
    :param input_files: optional list collecting paths of definitions styled
        as input files
    :raises RuntimeError: if a definition has no declared type
    """
    full_path = phil_object.full_path()
    if expert_levels is not None :
        if phil_object.expert_level is not None :
            expert_levels[full_path] = phil_object.expert_level
        else :
            # Inherit the parent scope's expert level (default 0).
            parent_scope = ".".join(full_path.split(".")[:-1])
            expert_levels[full_path] = expert_levels.get(parent_scope, 0)
    if phil_object.is_template != 0 :
        template_index[full_path] = phil_object
        if phil_object.is_template == -1 :
            # Unused template copy: do not index its contents.
            return
        else :
            in_template = True
    elif in_template :
        template_index[full_path] = phil_object
    if (phil_object.multiple == True):
        if collect_multiple :
            if (phil_object.is_scope) and (multiple_scopes is not None):
                multiple_scopes[full_path] = True
            elif multiple_defs is not None :
                multiple_defs[full_path] = True
            # All copies of a 'multiple' path share one list in path_index.
            if full_path in path_index :
                path_index[full_path].append(phil_object)
            else :
                path_index[full_path] = [phil_object]
        else :
            path_index[full_path] = phil_object
    else :
        path_index[full_path] = phil_object
    if phil_object.is_definition and phil_object.type is None :
        raise RuntimeError("Type required for parameter '%s'." % full_path)
    text_index_for_child_objects = None
    # Only index text for objects at expert level <= 3 (or unset); children
    # of a skipped object are skipped as well (text index is not forwarded).
    if ((text_index is not None) and
            ((phil_object.expert_level is None) or (phil_object.expert_level <=3 ))):
        label = get_standard_phil_label(phil_object)
        text_fields = (label, str(phil_object.caption), str(phil_object.help),
            phil_object.is_definition)
        text_index[full_path] = text_fields
        text_index_for_child_objects = text_index
    if phil_object.is_scope :
        for object in phil_object.objects :
            index_phil_objects(phil_object=object,
                path_index=path_index,
                text_index=text_index_for_child_objects,
                template_index=template_index,
                multiple_scopes=multiple_scopes,
                multiple_defs=multiple_defs,
                collect_multiple=collect_multiple,
                in_template=in_template,
                expert_levels=expert_levels,
                input_files=input_files)
    elif (input_files is not None):
        # Collect path/strings definitions styled with the 'input_file' word.
        if (phil_object.type.phil_type in ["path", "strings"]):
            style = phil_object.style
            if (style is not None):
                style_words = style.split()
                if ("input_file" in style_words):
                    input_files.append(full_path)
def reindex_phil_objects(phil_object, path_index, only_scope=None):
    """Rebuild *path_index* (full path -> object, or list for 'multiple'
    paths) by walking the tree rooted at *phil_object*.

    NOTE: only_scope is accepted but currently unused, and the recursion
    does not forward it.
    """
    # Unused template copies are not indexed.
    if phil_object.is_template < 0:
        return
    full_path = phil_object.full_path()
    if phil_object.multiple == True:
        path_index.setdefault(full_path, []).append(phil_object)
    else:
        path_index[full_path] = phil_object
    if phil_object.is_scope:
        for child in phil_object.objects:
            reindex_phil_objects(child, path_index)
# Matches any character that is not legal in a substitution variable name.
non_alnum = re.compile("[^A-Za-z0-9_]")
def substitute_directory_name(phil_object, path_name, sub_name,
      treat_name_as_var_name=True):
    """Replace occurrences of *path_name* in every path-type parameter value
    below *phil_object* with a substitution variable named *sub_name*.

    :param phil_object: PHIL scope rewritten in place
    :param path_name: directory prefix to replace (trailing slash stripped)
    :param sub_name: variable name; must contain only [A-Za-z0-9_]
    :param treat_name_as_var_name: when True substitute "$(sub_name)",
        otherwise substitute sub_name literally
    :raises RuntimeError: on untyped parameters or non-string path values
    """
    assert (not non_alnum.search(sub_name))
    if (treat_name_as_var_name):
        sub_var = "$(" + sub_name + ")"
    else :
        sub_var = sub_name
    # Normalize away a trailing path separator so substitution is uniform.
    if path_name.endswith("/") or path_name.endswith("\\"):
        path_name = path_name[:-1]
    for object in phil_object.objects :
        if object.is_definition :
            if (object.type is None):
                raise RuntimeError("Missing type for PHIL parameter %s" %
                    object.full_path())
            if (object.type.phil_type == "path"):
                py_object = object.extract()
                if (py_object is None) or (py_object is Auto) : continue
                if (not isinstance(py_object, str)):
                    # NOTE(review): 'unicode' only exists on Python 2; on
                    # Python 3 this branch would raise NameError -- confirm
                    # the file still targets Python 2 before relying on it.
                    if isinstance(py_object, unicode) : # FIXME need to prevent this!
                        py_object = str(py_object)
                    else :
                        raise RuntimeError("Disallowed type '%s' for path parameter '%s'." %
                            (type(py_object), object.full_path()))
                py_object = py_object.replace(path_name, sub_var)
                # Re-format the value and keep the original object identity.
                new_object = object.format(python_object=py_object)
                object.words = new_object.words
        else :
            # NOTE(review): the recursion drops treat_name_as_var_name (it
            # reverts to the default True) -- confirm this is intended.
            substitute_directory_name(object, path_name, sub_name)
def update_phil_file_paths(master_phil, file_name, old_path, new_path,
use_iotbx_parser=False):
if (use_iotbx_parser):
import iotbx.phil
parse = iotbx.phil.parse
else :
parse = libtbx.phil.parse
phil_in = open(file_name).read()
new_format = False
out_lines = []
for line in phil_in.splitlines():
if line.startswith("LIBTBX_BASE_DIR"):
line = line.replace(old_path, new_path)
new_format = True
out_lines.append(line)
else :
out_lines.append(line)
if (new_format):
open(file_name, "w").write("\n".join(out_lines))
else :
file_phil = parse(file_name=file_name)
working_phil = master_phil.fetch(source=file_phil,
skip_incompatible_objects=True)
substitute_directory_name(
phil_object=working_phil,
path_name=old_path,
sub_name="LIBTBX_BASE_DIR")
f = open(file_name, "w")
| |
JOIN
(SELECT
SVR_IP,
SVR_PORT,
VALUE1,
VALUE2,
GMT_CREATE
FROM
OCEANBASE.__ALL_SERVER_EVENT_HISTORY
WHERE
EVENT = 'minor merge finish'
AND (EFFECTIVE_TENANT_ID() = 1 OR VALUE1 = EFFECTIVE_TENANT_ID())) B
ON
A.SVR_IP = B.SVR_IP AND A.SVR_PORT = B.SVR_PORT AND A.VALUE1 = B.VALUE1 AND A.VALUE2 = B.VALUE2
ORDER BY
SVR_IP, SVR_PORT, TENANT_ID, FREEZE_SNAPSHOT
""".replace("\n", " ")
)
# 21070 for v$minor_merge_info is obsoleted from 2_2_3_release
# def_table_schema(
# table_name = 'v$minor_merge_info',
# table_id = '21070',
# table_type = 'SYSTEM_VIEW',
# rowkey_columns = [],
# normal_columns = [],
# gm_columns = [],
# in_tenant_space = True,
# view_definition = """
# SELECT *
# FROM OCEANBASE.gv$minor_merge_info
# WHERE SVR_IP = HOST_IP() AND SVR_PORT = RPC_PORT()
# """.replace("\n", " ")
# )
def_table_schema(
table_name = 'gv$tenant_px_worker_stat',
table_id = '21071',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
session_id,
tenant_id,
svr_ip,
svr_port,
trace_id,
qc_id,
sqc_id,
worker_id,
dfo_id,
start_time
from oceanbase.__all_virtual_px_worker_stat
where effective_tenant_id() = 1 OR tenant_id = effective_tenant_id()
order by session_id, svr_ip, svr_port
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$tenant_px_worker_stat',
table_id = '21072',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
session_id,
tenant_id,
svr_ip,
svr_port,
trace_id,
qc_id,
sqc_id,
worker_id,
dfo_id,
start_time
from oceanbase.gv$tenant_px_worker_stat
where svr_ip = host_ip() AND svr_port = rpc_port()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$partition_audit',
table_id = '21073',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT *
FROM oceanbase.__all_virtual_partition_audit
WHERE effective_tenant_id() = 1 OR tenant_id = effective_tenant_id()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$partition_audit',
table_id = '21074',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT *
FROM oceanbase.gv$partition_audit
WHERE svr_ip = host_ip() AND svr_port = rpc_port()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$ob_cluster',
table_id = '21075',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = False,
view_definition = """
SELECT cluster_id,
cluster_name,
created,
cluster_role,
cluster_status,
`switchover#`,
switchover_status,
switchover_info,
current_scn,
standby_became_primary_scn,
primary_cluster_id,
protection_mode,
protection_level,
redo_transport_options
FROM oceanbase.__all_virtual_cluster
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$ps_stat',
table_id = '21079',
table_type = 'SYSTEM_VIEW',
gm_columns = [],
in_tenant_space = True,
rowkey_columns = [],
view_definition = """
SELECT tenant_id, svr_ip, svr_port, stmt_count,
hit_count, access_count, mem_hold
FROM oceanbase.__all_virtual_ps_stat
WHERE is_serving_tenant(svr_ip, svr_port, effective_tenant_id()) and
(tenant_id = effective_tenant_id() or effective_tenant_id() = 1)
""".replace("\n", " "),
normal_columns = [
],
)
def_table_schema(
table_name = 'v$ps_stat',
table_id = '21080',
table_type = 'SYSTEM_VIEW',
gm_columns = [],
in_tenant_space = True,
rowkey_columns = [],
view_definition = """
SELECT tenant_id, svr_ip, svr_port, stmt_count,
hit_count, access_count, mem_hold
FROM oceanbase.gv$ps_stat
WHERE svr_ip=HOST_IP() AND svr_port=RPC_PORT()
""".replace("\n", " "),
normal_columns = [
],
)
def_table_schema(
table_name = 'gv$ps_item_info',
table_id = '21081',
table_type = 'SYSTEM_VIEW',
gm_columns = [],
in_tenant_space = True,
rowkey_columns = [],
view_definition = """
SELECT tenant_id, svr_ip, svr_port, stmt_id,
db_id, ps_sql, param_count, stmt_item_ref_count,
stmt_info_ref_count, mem_hold, stmt_type, checksum, expired
FROM oceanbase.__all_virtual_ps_item_info
WHERE is_serving_tenant(svr_ip, svr_port, effective_tenant_id()) and
(tenant_id = effective_tenant_id() or effective_tenant_id() = 1)
""".replace("\n", " "),
normal_columns = [
],
)
def_table_schema(
table_name = 'v$ps_item_info',
table_id = '21082',
table_type = 'SYSTEM_VIEW',
gm_columns = [],
in_tenant_space = True,
rowkey_columns = [],
view_definition = """
SELECT tenant_id, svr_ip, svr_port, stmt_id,
db_id, ps_sql, param_count, stmt_item_ref_count,
stmt_info_ref_count, mem_hold, stmt_type, checksum, expired
FROM oceanbase.gv$ps_item_info
WHERE svr_ip=HOST_IP() AND svr_port=RPC_PORT()
""".replace("\n", " "),
normal_columns = [
],
)
def_table_schema(
table_name = 'gv$sql_workarea',
table_id = '21083',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
cast(null as binary(8)) as address,
cast(null as signed) as hash_value,
sql_id,
cast(null as signed) as child_number,
cast(null as binary(8)) as workarea_address,
operation_type,
operation_id,
policy,
estimated_optimal_size,
estimated_onepass_size,
last_memory_used,
last_execution,
last_degree,
total_executions,
optimal_executions,
onepass_executions,
multipasses_executions,
active_time,
max_tempseg_size,
last_tempseg_size,
tenant_id as con_id
from oceanbase.__all_virtual_sql_workarea_history_stat
where effective_tenant_id() = 1 OR tenant_id = effective_tenant_id()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$sql_workarea',
table_id = '21084',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
cast(null as binary(8)) as address,
cast(null as signed) as hash_value,
sql_id,
cast(null as signed) as child_number,
cast(null as binary(8)) as workarea_address,
operation_type,
operation_id,
policy,
estimated_optimal_size,
estimated_onepass_size,
last_memory_used,
last_execution,
last_degree,
total_executions,
optimal_executions,
onepass_executions,
multipasses_executions,
active_time,
max_tempseg_size,
last_tempseg_size,
tenant_id as con_id
from oceanbase.__all_virtual_sql_workarea_history_stat
where (effective_tenant_id() = 1 OR tenant_id = effective_tenant_id())
and svr_ip = host_ip() AND svr_port = rpc_port()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$sql_workarea_active',
table_id = '21085',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
cast(null as signed) as sql_hash_value,
sql_id,
cast(null as date) as sql_exec_start,
sql_exec_id,
cast(null as binary(8)) as workarea_address,
operation_type,
operation_id,
policy,
sid,
cast(null as signed) as qcinst_id,
cast(null as signed) as qcsid,
active_time,
work_area_size,
expect_size,
actual_mem_used,
max_mem_used,
number_passes,
tempseg_size,
cast(null as char(20)) as tablespace,
cast(null as signed) as `segrfno#`,
cast(null as signed) as `segblk#`,
tenant_id as con_id
from oceanbase.__all_virtual_sql_workarea_active
where effective_tenant_id() = 1 OR tenant_id = effective_tenant_id()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$sql_workarea_active',
table_id = '21086',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
cast(null as signed) as sql_hash_value,
sql_id,
cast(null as date) as sql_exec_start,
sql_exec_id,
cast(null as binary(8)) as workarea_address,
operation_type,
operation_id,
policy,
sid,
cast(null as signed) as qcinst_id,
cast(null as signed) as qcsid,
active_time,
work_area_size,
expect_size,
actual_mem_used,
max_mem_used,
number_passes,
tempseg_size,
cast(null as char(20)) as tablespace,
cast(null as signed) as `segrfno#`,
cast(null as signed) as `segblk#`,
tenant_id as con_id
from oceanbase.__all_virtual_sql_workarea_active
where (effective_tenant_id() = 1 OR tenant_id = effective_tenant_id())
and svr_ip = host_ip() AND svr_port = rpc_port()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$sql_workarea_histogram',
table_id = '21087',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
low_optimal_size,
high_optimal_size,
optimal_executions,
onepass_executions,
multipasses_executions,
total_executions,
tenant_id as con_id
from oceanbase.__all_virtual_sql_workarea_histogram
where effective_tenant_id() = 1 OR tenant_id = effective_tenant_id()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$sql_workarea_histogram',
table_id = '21088',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
low_optimal_size,
high_optimal_size,
optimal_executions,
onepass_executions,
multipasses_executions,
total_executions,
tenant_id as con_id
from oceanbase.__all_virtual_sql_workarea_histogram
where (effective_tenant_id() = 1 OR tenant_id = effective_tenant_id())
and svr_ip = host_ip() AND svr_port = rpc_port()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$ob_sql_workarea_memory_info',
table_id = '21089',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
max_workarea_size,
workarea_hold_size,
max_auto_workarea_size,
mem_target,
total_mem_used,
global_mem_bound,
drift_size,
workarea_count,
manual_calc_count
from oceanbase.__all_virtual_sql_workarea_memory_info
where effective_tenant_id() = 1 OR tenant_id = effective_tenant_id()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$ob_sql_workarea_memory_info',
table_id = '21090',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
select
max_workarea_size,
workarea_hold_size,
max_auto_workarea_size,
mem_target,
total_mem_used,
global_mem_bound,
drift_size,
workarea_count,
manual_calc_count
from oceanbase.__all_virtual_sql_workarea_memory_info
where (effective_tenant_id() = 1 OR tenant_id = effective_tenant_id())
and svr_ip = host_ip() AND svr_port = rpc_port()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$plan_cache_reference_info',
table_id = '21097',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT SVR_IP,
SVR_PORT,
TENANT_ID,
PC_REF_PLAN_LOCAL,
PC_REF_PLAN_REMOTE,
PC_REF_PLAN_DIST,
PC_REF_PLAN_ARR,
PC_REF_PL,
PC_REF_PL_STAT,
PLAN_GEN,
CLI_QUERY,
OUTLINE_EXEC,
PLAN_EXPLAIN,
ASYN_BASELINE,
LOAD_BASELINE,
PS_EXEC,
GV_SQL,
PL_ANON,
PL_ROUTINE,
PACKAGE_VAR,
PACKAGE_TYPE,
PACKAGE_SPEC,
PACKAGE_BODY,
PACKAGE_RESV,
GET_PKG,
INDEX_BUILDER,
PCV_SET,
PCV_RD,
PCV_WR,
PCV_GET_PLAN_KEY,
PCV_GET_PL_KEY,
PCV_EXPIRE_BY_USED,
PCV_EXPIRE_BY_MEM
FROM oceanbase.__all_virtual_plan_cache_stat WHERE
IS_SERVING_TENANT(SVR_IP, SVR_PORT, EFFECTIVE_TENANT_ID())
AND (TENANT_ID = EFFECTIVE_TENANT_ID() OR EFFECTIVE_TENANT_ID() = 1)
""".replace("\n", " ")
)
# v$plan_cache_reference_info: per-server slice of the gv$ view.
# FIX: every other view_definition in this file collapses the SQL onto one
# line with .replace("\n", " "); this one was missing it, leaving embedded
# newlines in the stored view definition.
def_table_schema(
  table_name = 'v$plan_cache_reference_info',
  table_id = '21098',
  table_type = 'SYSTEM_VIEW',
  rowkey_columns = [],
  normal_columns = [],
  gm_columns = [],
  in_tenant_space = True,
  view_definition = """
    SELECT * FROM oceanbase.gv$plan_cache_reference_info
    WHERE SVR_IP=HOST_IP() AND SVR_PORT=RPC_PORT()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$ob_timestamp_service',
table_id = '21099',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT tenant_id as tenant_id,
case when ts_type=0 then 'Local'
when ts_type=1 then 'Global'
when ts_type=2 then 'HA Global'
ELSE NULL END as ts_type,
ts_value as ts_value
FROM oceanbase.__all_virtual_timestamp_service where (effective_tenant_id() = 1 OR tenant_id = effective_tenant_id())
""".replace("\n", " ")
)
def_table_schema(
table_name = 'gv$sstable',
table_id = '21100',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
M.SVR_IP,
M.SVR_PORT,
M.TABLE_TYPE,
M.TABLE_ID,
T.TABLE_NAME,
T.TENANT_ID,
M.PARTITION_ID,
M.INDEX_ID,
M.BASE_VERSION,
M.MULTI_VERSION_START,
M.SNAPSHOT_VERSION,
M.START_LOG_TS,
M.END_LOG_TS,
M.MAX_LOG_TS,
M.VERSION,
M.LOGICAL_DATA_VERSION,
M.SIZE,
M.IS_ACTIVE,
M.REF,
M.WRITE_REF,
M.TRX_COUNT,
M.PENDING_LOG_PERSISTING_ROW_CNT,
M.UPPER_TRANS_VERSION,
M.CONTAIN_UNCOMMITTED_ROW
FROM
oceanbase.__all_virtual_table_mgr M JOIN oceanbase.__all_virtual_table T ON M.TABLE_ID = T.TABLE_ID
WHERE
effective_tenant_id() = 1 OR T.tenant_id = effective_tenant_id()
""".replace("\n", " ")
)
def_table_schema(
table_name = 'v$sstable',
table_id = '21101',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
M.TABLE_TYPE,
M.TABLE_ID,
T.TABLE_NAME,
T.TENANT_ID,
M.PARTITION_ID,
M.INDEX_ID,
M.BASE_VERSION,
M.MULTI_VERSION_START,
M.SNAPSHOT_VERSION,
M.START_LOG_TS,
M.END_LOG_TS,
M.MAX_LOG_TS,
M.VERSION,
M.LOGICAL_DATA_VERSION,
M.SIZE,
| |
#!/usr/bin/env python
#pylint: disable=C0103
"""
This module provides business object class to interact with File.
"""
from __future__ import print_function
from WMCore.DAOFactory import DAOFactory
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
from sqlalchemy.exc import IntegrityError as SQLAlchemyIntegrityError
from dbs.utils.dbsUtils import dbsUtils
class DBSFile:
"""
File business object class
"""
    def __init__(self, logger, dbi, owner):
        """Create all DAO objects used by the file business methods.

        :param logger: logger shared with the DAOs
        :param dbi: database interface (connection factory)
        :param owner: schema owner passed to the DAO factory
        """
        daofactory = DAOFactory(package='dbs.dao', logger=logger,
                                dbinterface=dbi, owner=owner)
        self.logger = logger
        self.dbi = dbi
        # File listing DAOs.
        self.filelist = daofactory(classname="File.List")
        self.filebrieflist = daofactory(classname="File.BriefList")
        self.filesummarylist = daofactory(classname="File.SummaryList")
        # Insertion and sequence DAOs.
        self.sm = daofactory(classname = "SequenceManager")
        self.filein = daofactory(classname = "File.Insert")
        self.flumiin = daofactory(classname = "FileLumi.Insert")
        self.fparentin = daofactory(classname = "FileParent.Insert")
        # ID lookup DAOs.
        self.fileid = daofactory(classname = "File.GetID")
        self.datasetid = daofactory(classname = "Dataset.GetID")
        self.blockid = daofactory(classname = "Block.GetID")
        self.blocklist = daofactory(classname = "Block.List")
        self.ftypeid = daofactory(classname = "FileType.GetID")
        # Parentage DAOs.
        self.fpbdlist = daofactory(classname = "FileParentBlock.List")
        self.blkparentin = daofactory(classname = "BlockParent.Insert2")
        self.dsparentin = daofactory(classname = "DatasetParent.Insert2")
        self.fparentin2 = daofactory(classname = "FileParent.Insert2")
        self.blkparentin3 = daofactory(classname = "BlockParent.Insert3")
        # Block statistics DAOs.
        self.blkstats = daofactory(classname = "Block.ListStats")
        self.blkstatsin = daofactory(classname = "Block.UpdateStats")
        # Output-module configuration and status DAOs.
        self.outconfigid = daofactory(classname='OutputModuleConfig.GetID')
        self.fconfigin = daofactory(classname='FileOutputMod_config.Insert')
        self.updatestatus = daofactory(classname='File.UpdateStatus')
        self.dsconfigids = daofactory(
            classname='DatasetOutputMod_config.GetDSConfigs')
        # Parent / child / lumi listing DAOs.
        self.fileparentlist = daofactory(classname="FileParent.List")
        self.fileparentbylumi = daofactory(classname="FileParent.ListFileParentageByLumi")
        self.filechildlist = daofactory(classname="FileParent.ListChild")
        self.filelumilist = daofactory(classname="FileLumi.List")
        self.filebufin = daofactory(classname = "FileBuffer.Insert")
def listFileLumis(self, logical_file_name="", block_name="", run_num=-1, validFileOnly=0, input_body=-1):
"""
optional parameter: logical_file_name, block_name, validFileOnly
returns: logical_file_name, file_lumi_id, run_num, lumi_section_num
"""
if((logical_file_name=='' or '*'in logical_file_name or '%' in logical_file_name) \
and (block_name=='' or '*' in block_name or '%' in block_name) and input_body==-1 ):
dbsExceptionHandler('dbsException-invalid-input', \
"Fully specified logical_file_name or block_name is required if GET is called. No wildcards are allowed.",
self.logger.exception, "Fully specified logical_file_name or block_name is required if GET is called. No wildcards are allowed.")
elif input_body != -1 :
try:
logical_file_name = input_body["logical_file_name"]
run_num = input_body.get("run_num", -1)
validFileOnly = input_body.get("validFileOnly", 0)
block_name = ""
except cjson.DecodeError as de:
msg = "business/listFileLumis requires at least a list of logical_file_name. %s" % de
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, msg)
elif input_body != -1 and (logical_file_name is not None or block_name is not None):
dbsExceptionHandler('dbsException-invalid-input', "listFileLumis may have input in the command or in the payload, not mixed.", self.logger.exception, "listFileLumis may have input in the command or in the payload, not mixed.")
with self.dbi.connection() as conn:
for item in self.filelumilist.execute(conn, logical_file_name, block_name, run_num, validFileOnly=validFileOnly):
yield item
def listFileSummary(self, block_name="", dataset="", run_num=-1, validFileOnly=0, sumOverLumi=0):
"""
required parameter: full block_name or dataset name. No wildcards allowed. run_num is optional.
"""
if not block_name and not dataset:
msg = "Block_name or dataset is required for listFileSummary API"
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
if '%' in block_name or '*' in block_name or '%' in dataset or '*' in dataset:
msg = "No wildcard is allowed in block_name or dataset for filesummaries API"
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
#
with self.dbi.connection() as conn:
for item in self.filesummarylist.execute(conn, block_name, dataset, run_num,
validFileOnly=validFileOnly, sumOverLumi=sumOverLumi):
if item['num_file']==0 and item['num_block']==0 \
and item['num_event']==0 and item['file_size']==0:
pass
else:
yield item
def listFileParents(self, logical_file_name="", block_id=0, block_name=""):
"""
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
"""
#self.logger.debug("lfn %s, block_name %s, block_id :%s" % (logical_file_name, block_name, block_id))
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input', \
"Logical_file_name, block_id or block_name is required for fileparents api", self.logger.exception )
with self.dbi.connection() as conn:
sqlresult = self.fileparentlist.execute(conn, logical_file_name, block_id, block_name)
d = {}
#self.logger.debug(sqlresult)
for i in sqlresult:
k = i['this_logical_file_name']
v = i['parent_logical_file_name']
d.setdefault(k, []).append(v)
for k, v in d.iteritems():
yield {'logical_file_name':k, 'parent_logical_file_name': v}
del d
    def listFileParentsByLumi(self, block_name='', logical_file_name=[]):
        """
        required parameter: block_name
        returns: [{child_parent_id_list: [(cid1, pid1), (cid2, pid2), ... (cidn, pidn)]}]
        """
        # NOTE: the mutable default for logical_file_name is safe here because
        # the argument is only read, never mutated.
        #self.logger.debug("lfn %s, block_name %s" % (logical_file_name, block_name))
        if not block_name:
            dbsExceptionHandler('dbsException-invalid-input', \
                "Child block_name is required for fileparents/listFileParentsByLumi api", self.logger.exception )
        with self.dbi.connection() as conn:
            sqlresult = self.fileparentbylumi.execute(conn, block_name, logical_file_name)
            return [{"child_parent_id_list":sqlresult}]
def listFileChildren(self, logical_file_name='', block_name='', block_id=0):
"""
required parameter: logical_file_name or block_name or block_id
returns: logical_file_name, child_logical_file_name, parent_file_id
"""
conn = self.dbi.connection()
try:
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input',\
"Logical_file_name, block_id or block_name is required for listFileChildren api")
sqlresult = self.filechildlist.execute(conn, logical_file_name, block_name, block_id)
d = {}
result = []
for i in range(len(sqlresult)):
k = sqlresult[i]['logical_file_name']
v = sqlresult[i]['child_logical_file_name']
if k in d:
d[k].append(v)
else:
d[k] = [v]
for k, v in d.iteritems():
r = {'logical_file_name':k, 'child_logical_file_name': v}
result.append(r)
return result
finally:
if conn:
conn.close()
    def updateStatus(self, logical_file_name, is_file_valid, lost, dataset):
        """
        Used to toggle the status of a file from is_file_valid=1 (valid) to is_file_valid=0 (invalid)
        """
        conn = self.dbi.connection()
        trans = conn.begin()
        try :
            self.updatestatus.execute(conn, logical_file_name, is_file_valid, lost, dataset, trans)
            trans.commit()
            # Mark the transaction finished so the finally block does not
            # roll it back.
            trans = None
        except Exception as ex:
            if trans:
                trans.rollback()
                trans = None
            raise ex
        finally:
            # Safety net: roll back if neither commit nor rollback ran.
            if trans:
                trans.rollback()
            if conn:
                conn.close()
def listFiles(self, dataset="", block_name="", logical_file_name="",
release_version="", pset_hash="", app_name="",
output_module_label="", run_num=-1,
origin_site_name="", lumi_list=[], detail=False, validFileOnly=0, sumOverLumi=0, input_body=-1):
"""
One of below parameter groups must be present:
non-patterned dataset, non-patterned block, non-patterned dataset with lfn, non-patterned block with lfn,
non-patterned lfn
non-patterned lfn list
"""
if input_body != -1 :
try:
logical_file_name = input_body.get("logical_file_name", "")
run_num = input_body.get("run_num", -1)
validFileOnly = input_body.get("validFileOnly", 0)
sumOverLumi = input_body.get("sumOverLumi", 0)
detail = input_body.get("detail", False)
block_name = input_body.get("block_name", "")
dataset = input_body.get("dataset", "")
release_version = input_body.get("release_version", "")
pset_hash = input_body.get("pset_hash", "")
app_name = input_body.get("app_name", "")
output_module_label = input_body.get("output_module_label", "")
origin_site_name = input_body.get("origin_site_name", "")
lumi_list = input_body.get("lumi_list", [])
except cjson.DecodeError as de:
msg = "business/listFilss POST call requires at least dataset, block_name, or a list of logical_file_name %s" % de
dbsExceptionHandler('dbsException-invalid-input', "Invalid input", self.logger.exception, msg)
if ('%' in block_name):
dbsExceptionHandler('dbsException-invalid-input', "You must specify exact block name not a pattern", self.logger.exception)
elif ('%' in dataset):
print("***** in dataset name")
dbsExceptionHandler('dbsException-invalid-input', " You must specify exact dataset name not a pattern", self.logger.exception)
elif (not dataset and not block_name and (not logical_file_name or '%'in logical_file_name) ):
dbsExceptionHandler('dbsException-invalid-input', """You must specify one of the parameter groups: \
non-pattern dataset, \
non-pattern block , non-pattern dataset with lfn ,\
non-pattern block with lfn or no-pattern lfn, \
non-patterned lfn list .""", self.logger.exception)
elif (lumi_list and len(lumi_list) != 0):
if run_num==-1:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number, \
use run_num=123", self.logger.exception)
elif isinstance(run_num, basestring):
try:
run_num = int(run_num)
except:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number,\
use run_num=123", self.logger.exception)
elif isinstance(run_num, list):
if len(run_num) == 1:
try:
run_num = int(run_num[0])
except:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number,\
use run_num=123", self.logger.exception)
else:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number,\
use run_num=123", self.logger.exception)
else:
pass
with self.dbi.connection() as conn:
dao = (self.filebrieflist, self.filelist)[detail]
for item in dao.execute(conn, dataset, block_name, logical_file_name, release_version, pset_hash, app_name,
output_module_label, run_num, origin_site_name, lumi_list, validFileOnly, sumOverLumi):
yield item # we need to yield while connection is open
def insertFile(self, businput, qInserts=False):
"""
This method supports bulk insert of files
performing other operations such as setting Block and Dataset parentages,
setting mapping between OutputConfigModules and File(s) etc.
:param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
:type qInserts: bool
:param logical_file_name (required) : string
:param is_file_valid: (optional, default = 1): 1/0
:param block, required: /a/b/c#d
:param dataset, required: /a/b/c
:param file_type (optional, default = EDM): one of the predefined types,
:param check_sum (optional): string
:param event_count (optional, default = -1): int
:param file_size (optional, default = -1.): float
:param adler32 (optional): string
:param md5 (optional): string
:param auto_cross_section (optional, default = -1.): float
:param file_lumi_list (optional, default = []): [{'run_num': 123, 'lumi_section_num': 12},{}....]
:param file_parent_list(optional, default = []) :[{'file_parent_lfn': 'mylfn'},{}....]
:param file_assoc_list(optional, default = []) :[{'file_parent_lfn': 'mylfn'},{}....]
:param file_output_config_list(optional, default = []) :
[{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
"""
# We do not want to go be beyond 10 files at a time
# If user wants to insert over 10 files in one shot, we run into risks of locking the database
# tables for longer time, and in case of error, it will be hard to see where error occured
if len(businput) > 10:
dbsExceptionHandler('dbsException-input-too-large', "DBS cannot insert \
more than 10 files in one bulk call")
return
conn = self.dbi.connection()
tran = conn.begin()
try:
| |
"""The WaveBlocks Project
Compute some observables like norm, kinetic and potential energy
of Hagedorn wavepackets. This class implements the mixed case
where the bra does not equal the ket.
@author: <NAME>
@copyright: Copyright (C) 2014, 2016 <NAME>
@license: Modified BSD License
"""
from functools import partial
from numpy import squeeze, sum
from WaveBlocksND.Observables import Observables
__all__ = ["ObservablesMixedHAWP"]
class ObservablesMixedHAWP(Observables):
r"""This class implements the mixed case observable computation
:math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle` for Hagedorn
wavepackets :math:`\Psi` where the bra :math:`\Psi` does not equal
the ket :math:`\Psi^{\prime}`.
"""
def __init__(self, *, innerproduct=None, gradient=None):
    r"""Initialize a new :py:class:`ObservablesMixedHAWP` instance for observable
    computation of Hagedorn wavepackets.

    :param innerproduct: An inner product for computing the integrals. It is used
                         for all brakets :math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle`.
                         May also be set later via :py:meth:`set_innerproduct`.
    :type innerproduct: A :py:class:`InnerProduct` subclass instance or ``None``.
    :param gradient: A gradient operator, used only for the kinetic energy
                     computation. May also be set later via :py:meth:`set_gradient`.
    :type gradient: A :py:class:`Gradient` subclass instance or ``None``.
    """
    # Bug fix: the keyword arguments were previously ignored and both
    # attributes were unconditionally reset to None, silently discarding
    # values passed to the constructor.
    self._innerproduct = innerproduct
    self._gradient = gradient
def set_innerproduct(self, innerproduct):
    r"""Attach the inner product used to evaluate all brakets
    :math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle`.

    :param innerproduct: The inner product instance used for the integrals.
    :type innerproduct: A :py:class:`InnerProduct` subclass instance.

    .. note:: Make sure to use an inhomogeneous inner product here.
    """
    self._innerproduct = innerproduct
def set_gradient(self, gradient):
    r"""Attach the gradient operator. It is needed only when computing the
    kinetic energy :math:`\langle \Psi | T | \Psi^{\prime} \rangle`.

    :param gradient: The gradient operator instance.
    :type gradient: A :py:class:`Gradient` subclass instance.
    """
    self._gradient = gradient
def overlap(self, pacbra, packet, *, component=None, summed=False):
    r"""Evaluate the overlap :math:`\langle \Psi | \Psi^{\prime} \rangle` between
    the bra wavepacket :math:`\Psi` and the ket wavepacket :math:`\Psi^{\prime}`.

    :param pacbra: The bra wavepacket :math:`\Psi`.
    :type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param packet: The ket wavepacket :math:`\Psi^{\prime}`.
    :type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param component: Index :math:`i` restricting the computation to the single
                      pair :math:`\langle \Phi_i | \Phi_i^{\prime} \rangle`;
                      ``None`` computes the overlaps of all :math:`N` components.
    :type component: Integer or ``None``.
    :param summed: Whether to sum up the overlaps of the individual components.
    :type summed: Boolean, default is ``False``.
    :return: A scalar or a list of :math:`N` overlaps, depending on the flags.
    """
    # The configured (inhomogeneous) inner product does all the work.
    return self._innerproduct.quadrature(pacbra, packet,
                                         diag_component=component,
                                         diagonal=True,
                                         summed=summed)
def norm(self, wavepacket, *, component=None, summed=False):
    r"""Evaluate the :math:`L^2` norm :math:`\langle \Psi | \Psi \rangle` of the
    wavepacket :math:`\Psi`.

    :param wavepacket: The wavepacket :math:`\Psi` whose norm is computed.
    :type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param component: Index :math:`i` restricting the computation to
                      :math:`\Phi_i`; ``None`` computes all :math:`N` norms.
    :type component: int or ``None``.
    :param summed: Whether to sum the norms of the individual components.
    :type summed: Boolean, default is ``False``.
    :return: A scalar or a list of :math:`N` norms, depending on the flags.

    .. note:: Simply forwards to :py:meth:`HagedornWavepacketBase.norm`.
    """
    return wavepacket.norm(component=component, summed=summed)
def kinetic_overlap_energy(self, pacbra, packet, *, component=None, summed=False):
    r"""Compute the kinetic energy overlap :math:`\langle \Psi | T | \Psi^{\prime} \rangle`
    between the components :math:`\Phi_i` of the bra :math:`\Psi` and
    :math:`\Phi_i^{\prime}` of the ket :math:`\Psi^{\prime}`.

    :param pacbra: The bra wavepacket :math:`\Psi`.
    :type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param packet: The ket wavepacket :math:`\Psi^{\prime}`.
    :type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param component: Index :math:`i` restricting the computation to a single
                      component pair; ``None`` computes all :math:`N` overlaps.
    :type component: Integer or ``None``.
    :param summed: Whether to sum the kinetic energies of the components.
    :type summed: Boolean, default is ``False``.
    :return: A scalar or a list of kinetic energy overlap integrals,
             depending on the flags.
    """
    n_bra = pacbra.get_number_components()
    n_ket = packet.get_number_components()
    if n_bra != n_ket:
        # TODO: Drop this requirement, should be easy when zip(...) exhausts
        raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (n_bra, n_ket))

    indices = [component] if component is not None else range(n_bra)

    apply_gradient = self._gradient.apply_gradient
    quadrature = self._innerproduct.quadrature

    energies = []
    for n in indices:
        # T = -1/2 Laplace; integrate gradient against gradient per dimension.
        grads_bra = apply_gradient(pacbra, component=n)
        grads_ket = apply_gradient(packet, component=n)
        parts = [quadrature(gb, gk, diag_component=n)
                 for gb, gk in zip(grads_bra, grads_ket)]
        energies.append(0.5 * sum(parts))

    if summed is True:
        return sum(energies)
    if component is not None:
        # A single requested component is returned as a scalar, not a list.
        return energies[0]
    return energies
def kinetic_energy(self, wavepacket, *, component=None, summed=False):
    r"""Compute the kinetic energy :math:`E_{\text{kin}} := \langle \Psi | T | \Psi \rangle`
    of the components :math:`\Phi_i` of the wavepacket :math:`\Psi`.

    :param wavepacket: The wavepacket :math:`\Psi`.
    :type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param component: Index :math:`i` restricting the computation to a single
                      component; ``None`` computes all :math:`N` energies.
    :type component: Integer or ``None``.
    :param summed: Whether to sum the kinetic energies of the components.
    :type summed: Boolean, default is ``False``.
    :return: A scalar or a list of kinetic energies, depending on the flags.

    .. note:: This just expands to :py:meth:`ObservablesMixedHAWP.kinetic_overlap_energy`
              with bra equal to ket. Better use :py:meth:`ObservablesHAWP.kinetic_energy`.
    """
    # The homogeneous case is the mixed case with identical bra and ket.
    return self.kinetic_overlap_energy(wavepacket, wavepacket,
                                       component=component, summed=summed)
def potential_overlap_energy(self, pacbra, packet, potential, *, component=None, summed=False):
    r"""Compute the potential energy overlap :math:`\langle \Psi | V(x) | \Psi^{\prime} \rangle`
    between the components :math:`\Phi_i` of the bra :math:`\Psi` and
    :math:`\Phi_i^{\prime}` of the ket :math:`\Psi^{\prime}`.

    :param pacbra: The bra wavepacket :math:`\Psi`.
    :type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param packet: The ket wavepacket :math:`\Psi^{\prime}`.
    :type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
    :param potential: The potential :math:`V(x)`. (Actually, not the potential
                      object itself but one of its ``V.evaluate_*`` methods.)
    :param component: Index :math:`i` restricting the computation to a single
                      component pair; ``None`` computes all :math:`N` overlaps.
    :type component: Integer or ``None``.
    :param summed: Whether to sum the potential energies of the components.
    :type summed: Boolean, default is ``False``.
    :return: A scalar or a list of potential energy overlap integrals,
             depending on the flags.
    """
    n_bra = pacbra.get_number_components()
    n_ket = packet.get_number_components()
    if n_bra != n_ket:
        # TODO: Drop this requirement, should be easy when zip(...) exhausts
        raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (n_bra, n_ket))

    # TODO: Better take 'V' instead of 'V.evaluate_at' as argument?
    # f = partial(potential.evaluate_at, as_matrix=True)
    op = partial(potential, as_matrix=True)

    # Compute the brakets for each component
    if component is None:
        brakets = self._innerproduct.quadrature(pacbra, packet, operator=op, eval_at_once=True)
        brakets = [squeeze(braket) for braket in brakets]
    else:
        braket = self._innerproduct.quadrature(pacbra, packet, operator=op,
                                               diag_component=component, eval_at_once=True)
        brakets = [squeeze(braket)]

    # Row-wise summation finishes the matrix multiplication of 'operator' and 'ket'.
    # TODO: Should this go inside the innerproduct?
    epot = [sum(brakets[i * n_ket:(i + 1) * n_ket]) for i in range(n_bra)]

    if summed is True:
        return sum(epot)
    if component is not None:
        # Do not return a list for specific single components
        return epot[0]
    return epot
def potential_energy(self, wavepacket, potential, *, component=None, summed=False):
r"""Compute the potential energy :math:`E_{\text{pot}} := \langle \Psi | V(x) | \Psi \rangle`
of the different components :math:`\Phi_i` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the potential energy.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param | |
import copy
import gc
import pickle
import sys
import unittest
import warnings
import weakref
import inspect
import types
from test import support
# The C-extension test helper is required below (raise_SIGINT_then_send_None);
# importing it through test.support skips this module gracefully when the
# extension is unavailable on this build.
_testcapi = support.import_module('_testcapi')
# This tests to make sure that if a SIGINT arrives just before we send into a
# yield from chain, the KeyboardInterrupt is raised in the innermost
# generator (see bpo-30039).
class SignalAndYieldFromTest(unittest.TestCase):
    """Regression test for bpo-30039: SIGINT delivery into a yield-from chain."""

    def generator1(self):
        # Outer generator: simply forwards into generator2 and reports
        # its return value.
        return (yield from self.generator2())

    def generator2(self):
        # Innermost generator: the KeyboardInterrupt must surface here.
        try:
            yield
        except KeyboardInterrupt:
            result = "PASSED"
        else:
            result = "FAILED"
        return result

    def test_raise_and_yield_from(self):
        g = self.generator1()
        g.send(None)
        try:
            _testcapi.raise_SIGINT_then_send_None(g)
        except BaseException as raised:
            caught = raised
        # The chain finishes normally, so the interrupt is converted into
        # the generator's StopIteration return value.
        self.assertIs(type(caught), StopIteration)
        self.assertEqual(caught.value, "PASSED")
class FinalizationTest(unittest.TestCase):
    """Checks around generator finalization, frame lifetime and refcycles."""

    def test_frame_resurrect(self):
        # A generator frame can be resurrected by a generator's finalization.
        def gen():
            nonlocal frame
            try:
                yield
            finally:
                # Runs during finalization; stores the (about to die) frame
                # in the enclosing scope, resurrecting it.
                frame = sys._getframe()

        g = gen()
        wr = weakref.ref(g)
        next(g)
        del g
        support.gc_collect()
        # The generator itself is gone...
        self.assertIs(wr(), None)
        # ...but its frame survived through the `frame` reference.
        self.assertTrue(frame)
        del frame
        support.gc_collect()

    def test_refcycle(self):
        # A generator caught in a refcycle gets finalized anyway.
        old_garbage = gc.garbage[:]
        finalized = False
        def gen():
            nonlocal finalized
            try:
                # Receive a reference to ourselves, creating the cycle.
                g = yield
                yield 1
            finally:
                finalized = True

        g = gen()
        next(g)
        g.send(g)
        # More than the two expected references: the cycle is in place.
        self.assertGreater(sys.getrefcount(g), 2)
        self.assertFalse(finalized)
        del g
        support.gc_collect()
        # The cyclic GC must still run the generator's finalization...
        self.assertTrue(finalized)
        # ...without leaving anything uncollectable behind.
        self.assertEqual(gc.garbage, old_garbage)

    def test_lambda_generator(self):
        # Issue #23192: Test that a lambda returning a generator behaves
        # like the equivalent function
        f = lambda: (yield 1)
        def g(): return (yield 1)

        # test 'yield from'
        f2 = lambda: (yield from g())
        def g2(): return (yield from g())
        f3 = lambda: (yield from f())
        def g3(): return (yield from f())

        for gen_fun in (f, g, f2, g2, f3, g3):
            gen = gen_fun()
            self.assertEqual(next(gen), 1)
            with self.assertRaises(StopIteration) as cm:
                gen.send(2)
            # The sent value becomes the generator's return value.
            self.assertEqual(cm.exception.value, 2)
class GeneratorTest(unittest.TestCase):
    """Behavioral checks for generator object attributes and protocols."""

    def test_name(self):
        def func():
            yield 1

        # A fresh generator inherits name/qualname from its function.
        g = func()
        self.assertEqual(g.__name__, "func")
        self.assertEqual(g.__qualname__,
                         "GeneratorTest.test_name.<locals>.func")

        # Both attributes are writable on the generator object itself.
        g.__name__ = "name"
        g.__qualname__ = "qualname"
        self.assertEqual(g.__name__, "name")
        self.assertEqual(g.__qualname__, "qualname")

        # They must be strings and cannot be deleted.
        for attr in ('__name__', '__qualname__'):
            with self.assertRaises(TypeError):
                setattr(g, attr, 123)
            with self.assertRaises(TypeError):
                delattr(g, attr)

        # Renaming the function only affects generators created afterwards.
        func.__qualname__ = "func_qualname"
        func.__name__ = "func_name"
        g = func()
        self.assertEqual(g.__name__, "func_name")
        self.assertEqual(g.__qualname__, "func_qualname")

        # A generator expression gets the conventional placeholder names.
        g = (x for x in range(10))
        self.assertEqual(g.__name__,
                         "<genexpr>")
        self.assertEqual(g.__qualname__,
                         "GeneratorTest.test_name.<locals>.<genexpr>")

    def test_copy(self):
        def func():
            yield 1
        # Generators do not support the copy protocol.
        with self.assertRaises(TypeError):
            copy.copy(func())

    def test_pickle(self):
        def func():
            yield 1
        g = func()
        # Generators cannot be pickled under any protocol version.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises((TypeError, pickle.PicklingError)):
                pickle.dumps(g, proto)
class ExceptionTest(unittest.TestCase):
    # Tests for the issue #23353: check that the currently handled exception
    # is correctly saved/restored in PyEval_EvalFrameEx().

    def test_except_throw(self):
        # throw() into a generator while an exception is being handled must
        # not lose or corrupt the exception state saved in the generator frame.
        def store_raise_exc_generator():
            try:
                self.assertEqual(sys.exc_info()[0], None)
                yield
            except Exception as exc:
                # exception raised by gen.throw(exc)
                self.assertEqual(sys.exc_info()[0], ValueError)
                self.assertIsNone(exc.__context__)
                yield

                # ensure that the exception is not lost
                self.assertEqual(sys.exc_info()[0], ValueError)
                yield

                # we should be able to raise back the ValueError
                raise

        make = store_raise_exc_generator()
        next(make)

        try:
            raise ValueError()
        except Exception as exc:
            try:
                make.throw(exc)
            except Exception:
                pass

        next(make)
        with self.assertRaises(ValueError) as cm:
            next(make)
        self.assertIsNone(cm.exception.__context__)

        # After the generator is done, the caller's exception state is clean.
        self.assertEqual(sys.exc_info(), (None, None, None))

    def test_except_next(self):
        # Resuming a generator from inside an except block must expose the
        # caller's active exception via sys.exc_info().
        def gen():
            self.assertEqual(sys.exc_info()[0], ValueError)
            yield "done"

        g = gen()
        try:
            raise ValueError
        except Exception:
            self.assertEqual(next(g), "done")
        self.assertEqual(sys.exc_info(), (None, None, None))

    def test_except_gen_except(self):
        # An exception raised inside a generator that was resumed from an
        # except block must chain (__context__) to the caller's exception.
        def gen():
            try:
                self.assertEqual(sys.exc_info()[0], None)
                yield
                # we are called from "except ValueError:", TypeError must
                # inherit ValueError in its context
                raise TypeError()
            except TypeError as exc:
                self.assertEqual(sys.exc_info()[0], TypeError)
                self.assertEqual(type(exc.__context__), ValueError)
                # here we are still called from the "except ValueError:"
                self.assertEqual(sys.exc_info()[0], ValueError)
                yield
            self.assertIsNone(sys.exc_info()[0])
            yield "done"

        g = gen()
        next(g)
        try:
            raise ValueError
        except Exception:
            next(g)

        self.assertEqual(next(g), "done")
        self.assertEqual(sys.exc_info(), (None, None, None))

    def test_except_throw_exception_context(self):
        # An exception delivered via throw() becomes the __context__ of
        # whatever the generator raises while handling it.
        def gen():
            try:
                try:
                    self.assertEqual(sys.exc_info()[0], None)
                    yield
                except ValueError:
                    # we are called from "except ValueError:"
                    self.assertEqual(sys.exc_info()[0], ValueError)
                    raise TypeError()
            except Exception as exc:
                self.assertEqual(sys.exc_info()[0], TypeError)
                self.assertEqual(type(exc.__context__), ValueError)
                # we are still called from "except ValueError:"
                self.assertEqual(sys.exc_info()[0], ValueError)
                yield
            self.assertIsNone(sys.exc_info()[0])
            yield "done"

        g = gen()
        next(g)
        try:
            raise ValueError
        except Exception as exc:
            g.throw(exc)

        self.assertEqual(next(g), "done")
        self.assertEqual(sys.exc_info(), (None, None, None))

    def test_stopiteration_warning(self):
        # See also PEP 479.
        # NOTE(review): this expects the transitional behavior where a
        # StopIteration escaping a generator body only triggers a
        # DeprecationWarning; later versions raise RuntimeError instead.
        def gen():
            raise StopIteration
            yield

        with self.assertRaises(StopIteration), \
             self.assertWarnsRegex(DeprecationWarning, "StopIteration"):
            next(gen())

        with self.assertRaisesRegex(DeprecationWarning,
                                    "generator .* raised StopIteration"), \
             warnings.catch_warnings():
            warnings.simplefilter('error')
            next(gen())

    def test_tutorial_stopiteration(self):
        # Raise StopIteration" stops the generator too:
        def f():
            yield 1
            raise StopIteration
            yield 2  # never reached

        g = f()
        self.assertEqual(next(g), 1)

        with self.assertWarnsRegex(DeprecationWarning, "StopIteration"):
            with self.assertRaises(StopIteration):
                next(g)

        with self.assertRaises(StopIteration):
            # This time StopIteration isn't raised from the generator's body,
            # hence no warning.
            next(g)

    def test_return_tuple(self):
        # A returned tuple must not be mistaken for StopIteration argument
        # packing.
        def g():
            return (yield 1)

        gen = g()
        self.assertEqual(next(gen), 1)
        with self.assertRaises(StopIteration) as cm:
            gen.send((2,))
        self.assertEqual(cm.exception.value, (2,))

    def test_return_stopiteration(self):
        # Returning a StopIteration instance must wrap it, not merge it.
        def g():
            return (yield 1)

        gen = g()
        self.assertEqual(next(gen), 1)
        with self.assertRaises(StopIteration) as cm:
            gen.send(StopIteration(2))
        self.assertIsInstance(cm.exception.value, StopIteration)
        self.assertEqual(cm.exception.value.value, 2)
class YieldFromTests(unittest.TestCase):
    """Checks for the ``gi_yieldfrom`` introspection attribute."""

    def test_generator_gi_yieldfrom(self):
        def expect(state):
            # Helper: the outer generator must be in *state* right now.
            self.assertEqual(inspect.getgeneratorstate(outer_gen), state)

        def a():
            # While the delegate runs, the delegating generator counts as
            # RUNNING, and the delegate's own gi_yieldfrom is unset.
            expect(inspect.GEN_RUNNING)
            self.assertIsNone(outer_gen.gi_yieldfrom)
            yield
            expect(inspect.GEN_RUNNING)
            self.assertIsNone(outer_gen.gi_yieldfrom)

        def b():
            # gi_yieldfrom is only set while actually suspended in a
            # 'yield from', never while running.
            self.assertIsNone(outer_gen.gi_yieldfrom)
            yield from a()
            self.assertIsNone(outer_gen.gi_yieldfrom)
            yield
            self.assertIsNone(outer_gen.gi_yieldfrom)

        outer_gen = b()
        expect(inspect.GEN_CREATED)
        self.assertIsNone(outer_gen.gi_yieldfrom)

        # Suspended inside 'yield from a()': the delegate is exposed.
        outer_gen.send(None)
        expect(inspect.GEN_SUSPENDED)
        self.assertEqual(outer_gen.gi_yieldfrom.gi_code.co_name, 'a')

        # Suspended at the plain 'yield': no delegate anymore.
        outer_gen.send(None)
        expect(inspect.GEN_SUSPENDED)
        self.assertIsNone(outer_gen.gi_yieldfrom)

        list(outer_gen)  # exhaust the generator; no further values come out
        expect(inspect.GEN_CLOSED)
        self.assertIsNone(outer_gen.gi_yieldfrom)
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, "return" and StopIteration are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield | |
frame must be None'
# Check sizing
total_title_height = self._frame_title.get_height(apply_padding=False)
assert button.get_height() <= total_title_height, \
f'{button.get_class_id()} height ({button.get_height()}) must be lower' \
f' than frame title height ({total_title_height})'
# Add frame to button kwargs
if 'frame' in button._kwargs.keys():
raise ValueError(f'{button.get_class_id()} already has "frame" kwargs option')
button._kwargs['frame'] = self
if 'button' in button._kwargs.keys():
raise ValueError(f'{button.get_class_id()} already has "button" kwargs option')
button._kwargs['button'] = button
# Pack
align = self._frame_title.get_attribute('buttons_alignment')
button.set_attribute('align', align)
button.set_attribute('margin', margin)
self._frame_title.pack(button, align=align, margin=margin)
self._frame_title.update_position()
return self
def add_title_button(
        self,
        style: FrameTitleButtonType,
        callback: CallbackType,
        background_color: ColorInputType = (150, 150, 150),
        cursor: CursorInputType = CURSOR_HAND,
        margin: Vector2NumberType = (4, 0),
        symbol_color: ColorInputType = (0, 0, 0),
        symbol_height: NumberType = 0.75,
        symbol_margin: int = 4
) -> 'Button':
    """
    Add predefined button to title. The button kwargs receive the ``button``
    reference and the Frame reference in ``frame`` argument, such as:

    .. code-block:: python

        onreturn_button_callback(*args, frame=Frame, button=Button, **kwargs)

    :param style: Style of the button (changes the symbol)
    :param callback: Callback of the button if pressed
    :param cursor: Button cursor
    :param background_color: Button background color
    :param margin: Pack margin on x-axis and y-axis (x, y) in px
    :param symbol_color: Color of the symbol
    :param symbol_height: Symbol height factor, if ``1.0`` uses 100% of the button height
    :param symbol_margin: Symbol margin in px
    :return: Added button
    :raises ValueError: If ``style`` is not one of the known title button styles
    """
    if not self._accepts_title:
        raise _FrameDoNotAcceptTitle(f'{self.get_class_id()} does not accept a title')
    assert self._has_title, \
        f'{self.get_class_id()} does not have any title, call set_title(...) beforehand'
    assert isinstance(symbol_height, NumberInstance) and 0 <= symbol_height <= 1
    assert isinstance(symbol_margin, int) and 0 <= symbol_margin

    # h: side of the square symbol surface; dh: leftover title height not
    # covered by the symbol, used below to vertically center the button.
    h = self._frame_title.get_height(apply_padding=False) * symbol_height
    dh = self._frame_title.get_height(apply_padding=False) * (1 - symbol_height)
    assert symbol_margin < h / 2
    if dh > 0:
        dh += 1  # NOTE(review): extra 1px on the leftover — presumably to avoid clipping; confirm

    # Create button
    # Empty-label button: the padding (h/2) alone defines its square size.
    btn = Button('', onreturn=callback,
                 button_id=self._frame_title._id + '+button-' + uuid4(short=True))
    btn.set_padding(h / 2)
    btn.translate(0, dh / 2)  # center vertically within the title bar
    btn.set_cursor(cursor)
    btn.set_background_color(background_color)
    btn.configured = True
    btn._update__repr___(self)

    # Create style decoration
    btn_rect = btn.get_rect()
    btn_rect.x = 0
    btn_rect.y = 0
    t = symbol_margin
    border = 1  # polygon outline width passed to pygame.draw.polygon; 0 = filled
    if style == FRAME_TITLE_BUTTON_CLOSE:
        # "X" symbol: the path visits each corner via the center.
        style_pos = (
            (btn_rect.left + t, btn_rect.top + t),
            (btn_rect.centerx, btn_rect.centery),
            (btn_rect.right - t, btn_rect.top + t),
            (btn_rect.centerx, btn_rect.centery),
            (btn_rect.right - t, btn_rect.bottom - t),
            (btn_rect.centerx, btn_rect.centery),
            (btn_rect.left + t, btn_rect.bottom - t),
            (btn_rect.centerx, btn_rect.centery),
            (btn_rect.left + t, btn_rect.top + t)
        )
        border = 0  # drawn filled so the cross strokes are solid
    elif style == FRAME_TITLE_BUTTON_MAXIMIZE:
        # Hollow square (1px outline).
        style_pos = (
            (btn_rect.left + t, btn_rect.bottom - t),
            (btn_rect.right - t, btn_rect.bottom - t),
            (btn_rect.right - t, btn_rect.top + t),
            (btn_rect.left + t, btn_rect.top + t)
        )
    elif style == FRAME_TITLE_BUTTON_MINIMIZE:
        # Horizontal bar slightly below center.
        style_pos = (
            (btn_rect.left + t, btn_rect.centery + border),
            (btn_rect.right - t, btn_rect.centery + border)
        )
    else:
        raise ValueError(f'unknown button style "{style}"')

    # Draw style
    style_surface = make_surface(h, h, alpha=True)
    # noinspection PyArgumentList
    pygame.draw.polygon(style_surface, symbol_color, style_pos, border)
    btn.get_decorator().add_surface(0, 0, surface=style_surface, centered=True)

    self.add_title_generic_button(btn, margin)
    return btn
def get_title(self) -> str:
    """
    Return the frame title, or an empty string if no title has been set.

    :return: Title string (``''`` when untitled)
    """
    # Untitled frames report '' instead of raising.
    return self._title if self._has_title else ''
def get_inner_size(self) -> Tuple2IntType:
    """
    Return the inner dimensions ``(width, height)`` of the Frame in px,
    excluding any scrollarea chrome.

    :return: Size tuple in px
    """
    return self._width, self._height
def _get_menu_update_frames(self) -> List['pygame_menu.widgets.Frame']:
"""
Return the menu update frames list.
.. warning::
Use with caution.
:return: Frame update list if the menu reference is not ``None``, else, return an empty list
"""
if self._menu is not None:
return self._menu._update_frames
return []
def _sort_menu_update_frames(self) -> None:
"""
Sort the menu update frames (frames which receive updates).
:return: None
"""
if self._menu is not None:
self._menu._sort_update_frames()
def _append_menu_update_frame(self, frame: 'Frame') -> None:
    """
    Add *frame* to the menu update-frame list (if not already present) and
    keep the list sorted.

    :param frame: Frame to append
    :return: None
    """
    assert isinstance(frame, Frame)
    frames = self._get_menu_update_frames()
    if frame not in frames:
        frames.append(frame)
    self._sort_menu_update_frames()
def _remove_menu_update_frame(self, frame: 'Frame') -> None:
    """
    Drop *frame* from the menu update-frame list if present.

    :param frame: Frame to remove
    :return: None
    """
    assert isinstance(frame, Frame)
    frames = self._get_menu_update_frames()
    if frame in frames:
        frames.remove(frame)
def on_remove_from_menu(self) -> 'Frame':
    """
    Unpack every direct child widget and refresh the indices when the Frame
    is removed from its menu.

    :return: Self reference
    """
    # Only direct children: subframes manage their own contents.
    for widget in self.get_widgets(unpack_subframes=False):
        self.unpack(widget)
    self.update_indices()
    return self
def set_menu(self, menu: Optional['pygame_menu.Menu']) -> 'Frame':
    """
    Set the Frame menu, first detaching the frame from the previous menu's
    update-frame list and re-attaching it afterwards when scrollable.

    :param menu: Menu reference (``None`` unlinks the frame)
    :return: Self reference
    """
    # Drop self from the previous menu's update list before switching.
    self._remove_menu_update_frame(self)

    # Update menu
    super(Frame, self).set_menu(menu)

    # Scrollable frames must keep receiving menu update events.
    if self.is_scrollable:
        self._append_menu_update_frame(self)
    return self
def relax(self, relax: bool = True) -> 'Frame':
    """
    Set the relax status; a relaxed Frame ignores sizing checks.

    :param relax: Relax status
    :return: Self reference
    """
    assert isinstance(relax, bool)
    self._relax = relax
    return self
def get_max_size(self) -> Tuple2IntType:
    """
    Return the maximum size of the frame in px. When the frame owns a
    scrollarea this is the scrollarea's inner size, otherwise the frame size.

    :return: Max (width, height) in px
    """
    scrollarea = self._frame_scrollarea
    if scrollarea is None:
        return self.get_size()
    return scrollarea.get_size(inner=True)
def make_scrollarea(
        self,
        max_width: Optional[NumberType],
        max_height: Optional[NumberType],
        scrollarea_color: Optional[Union[ColorInputType, 'pygame_menu.BaseImage']],
        scrollbar_color: ColorInputType,
        scrollbar_cursor: CursorInputType,
        scrollbar_shadow: bool,
        scrollbar_shadow_color: ColorInputType,
        scrollbar_shadow_offset: int,
        scrollbar_shadow_position: str,
        scrollbar_slider_color: ColorInputType,
        scrollbar_slider_hover_color: ColorInputType,
        scrollbar_slider_pad: NumberType,
        scrollbar_thick: NumberType,
        scrollbars: Union[str, Tuple[str, ...]]
) -> 'Frame':
    """
    Make the scrollarea of the frame. Must be called on an empty, already
    configured frame; if the frame content fits within the given maximums
    no scrollarea is created at all.

    :param max_width: Maximum width of the scrollarea in px
    :param max_height: Maximum height of the scrollarea in px
    :param scrollarea_color: Scroll area color or image. If ``None`` area is transparent
    :param scrollbar_color: Scrollbar color
    :param scrollbar_cursor: Scrollbar cursor
    :param scrollbar_shadow: Indicate if a shadow is drawn on each scrollbar
    :param scrollbar_shadow_color: Color of the shadow of each scrollbar
    :param scrollbar_shadow_offset: Offset of the scrollbar shadow in px
    :param scrollbar_shadow_position: Position of the scrollbar shadow. See :py:mod:`pygame_menu.locals`
    :param scrollbar_slider_color: Color of the sliders
    :param scrollbar_slider_hover_color: Color of the slider if hovered or clicked
    :param scrollbar_slider_pad: Space between slider and scrollbars borders in px
    :param scrollbar_thick: Scrollbar thickness in px
    :param scrollbars: Positions of the scrollbars. See :py:mod:`pygame_menu.locals`
    :return: Self reference
    """
    if not self._accepts_scrollarea:
        raise _FrameDoNotAcceptScrollarea(f'{self.get_class_id()} does not accept a scrollarea')
    assert len(self._widgets.keys()) == 0, 'frame widgets must be empty if creating the scrollarea'
    assert self.configured, 'frame must be configured before adding the scrollarea'
    # Missing bounds default to the full frame size (no scrolling on that axis)
    if max_width is None:
        max_width = self._width
    if max_height is None:
        max_height = self._height
    assert isinstance(max_width, NumberInstance)
    assert isinstance(max_height, NumberInstance)
    assert 0 < max_width <= self._width, \
        f'scroll area width ({max_width}) cannot exceed frame width ({self._width})'
    assert 0 < max_height <= self._height, \
        f'scroll area height ({max_height}) cannot exceed frame height ({self._height})'
    # if not self._relax:
    #     pass
    # else:
    #     max_height = min(max_height, self._height)
    #     max_width = min(max_width, self._width)
    # sx/sy: scrollbar thickness reserved on each axis; zero when the frame
    # already fits that axis (no bar needed there)
    sx = 0 if self._width == max_width else scrollbar_thick
    sy = 0 if self._height == max_height else scrollbar_thick
    if self._width > max_width or self._height > max_height:
        self.is_scrollable = True
        self.set_padding(0)
    else:
        # Content fits: no scrollarea is required
        # Configure size
        self._frame_size = (self._width, self._height)
        self._frame_scrollarea = None
        # If in previous scrollable frames, unregister from menu updates
        if self.is_scrollable:
            self._remove_menu_update_frame(self)
        self.is_scrollable = False
        return self
    # Create area object; the opposite-axis bar thickness (sx/sy) is added so
    # the visible inner region still matches the requested maximum size
    self._frame_scrollarea = pygame_menu._scrollarea.ScrollArea(
        area_color=scrollarea_color,
        area_height=max_height + sx,
        area_width=max_width + sy,
        controls_joystick=self._joystick_enabled,
        controls_keyboard=self._keyboard_enabled,
        controls_mouse=self._mouse_enabled,
        controls_touchscreen=self._touchscreen_enabled,
        parent_scrollarea=self._scrollarea,
        scrollbar_color=scrollbar_color,
        scrollbar_cursor=scrollbar_cursor,
        scrollbar_slider_color=scrollbar_slider_color,
        scrollbar_slider_hover_color=scrollbar_slider_hover_color,
        scrollbar_slider_pad=scrollbar_slider_pad,
        scrollbar_thick=scrollbar_thick,
        scrollbars=scrollbars,
        shadow=scrollbar_shadow,
        shadow_color=scrollbar_shadow_color,
        shadow_offset=scrollbar_shadow_offset,
        shadow_position=scrollbar_shadow_position
    )
    # Store constructor data
    self._frame_scrollarea.set_attribute(
        'constructor',
        {
            'scrollarea_color': scrollarea_color,
            'scrollbar_color': scrollbar_color,
            'scrollbar_cursor': scrollbar_cursor,
            'scrollbar_shadow': scrollbar_shadow,
            'scrollbar_shadow_color': scrollbar_shadow_color,
            'scrollbar_shadow_offset': scrollbar_shadow_offset,
            'scrollbar_shadow_position': scrollbar_shadow_position,
            'scrollbar_slider_color': scrollbar_slider_color,
            'scrollbar_slider_hover_color': scrollbar_slider_hover_color,
            'scrollbar_slider_pad': scrollbar_slider_pad,
            'scrollbar_thick': scrollbar_thick,
            'scrollbars': scrollbars
        }
    )
    # Hide the scrollbar on any axis that does not actually scroll
    if self._width == max_width:
        self._frame_scrollarea.hide_scrollbars(ORIENTATION_HORIZONTAL)
    if self._height == max_height:
        self._frame_scrollarea.hide_scrollbars(ORIENTATION_VERTICAL)
    # Create surface the children are drawn on (the scrollable "world")
    self._surface = make_surface(self._width, self._height, alpha=True)
    # Configure area
    self._frame_scrollarea.set_world(self._surface)
    # Configure size
    self._frame_size = (max_width + sy, max_height + sx)
    # Set menu
    self._frame_scrollarea.set_menu(self._menu)
    # If has title, drop it: the title is not preserved across this call
    if self._has_title:
        warn(f'previous {self.get_class_id()} title has been removed')
        self.remove_title()
    return self
def get_indices(self) -> Tuple[int, int]:
    """
    Return the (first, last) selectable widget indices.

    :return: First, Last widget selectable indices
    """
    return self.first_index, self.last_index
def get_total_packed(self) -> int:
    """
    Return the total number of packed widgets.

    :return: Number of packed widgets
    """
    # len() of the dict directly; materializing .values() was unnecessary
    return len(self._widgets)
def select(self, *args, **kwargs) -> 'Frame':
    """
    No-op override: all arguments are ignored and the frame is returned
    unchanged.

    :return: Self reference
    """
    return self
def set_selection_effect(self, *args, **kwargs) -> 'Frame':
    """
    No-op override: the given selection effect is discarded.

    .. note:: NOTE(review): annotated as returning ``'Frame'`` but the body
        returns ``None`` -- confirm whether ``return self`` was intended.
    """
    pass
def _apply_font(self) -> None:
    """
    No-op: nothing font-related to apply for this widget.
    """
    pass
def _title_height(self) -> int:
"""
Return the title height.
:return: Title height in px
"""
if not self._has_title or self._frame_title is None:
return 0
h = self._frame_title.get_height()
h += self._frame_title.get_translate()[1]
h += self._frame_title.get_attribute('pbottom') # Bottom padding
return h
def _render(self) -> None:
    # Sync the widget rect with the current frame size; the height also
    # accounts for the title bar (0 when there is no title).
    self._rect.height = self._frame_size[1] + self._title_height()
    self._rect.width = self._frame_size[0]
def | |
column + '_months'
days = column + '_days'
# Date time to unix timestamp
for feature in include_columns:
df_copy[feature] = pd.to_datetime(
df_copy[feature], infer_datetime_format=True, errors='coerce')
try:
df_copy[feature] = df_copy[feature].dt.tz_localize(None)
except:
pass
# calculate diffs
timeDiffs = df_copy[include_columns[0]] - df_copy[include_columns[1]]
# df_copy[years] = timeDiffs /np.timedelta64(1,'Y')
# df_copy[months] = timeDiffs /np.timedelta64(1,'M')
df_copy[column] = timeDiffs / np.timedelta64(1, 'D')
# fill na values
# df_copy = self._execute_na_strategy(df_copy, years, na_strategy)
# df_copy = self._execute_na_strategy(df_copy, months, na_strategy)
df_copy = self._execute_na_strategy(df_copy, column, na_strategy)
# self._add_column_to_data_frame(df_copy, years)
# self._add_column_to_data_frame(df_copy, months)
self._add_column_to_data_frame(df_copy, column)
def _add_dependencies(self, dependencies=[]):
for dep in dependencies:
self.dependencies.append(dep)
def _transform_binary_variables(self, column, na_strategy='set:0'):
    """Map ``column`` to a 0/1 indicator and add it to the feature frame.

    Any value other than the string ``'0'`` becomes 1; NAs are filled first
    according to ``na_strategy``.
    """
    logger.debug("Transform binary variable for column {}".format(column))
    # Copy Dataframe -- fixed: the previous code aliased self.df (despite the
    # comment) and therefore mutated the source dataframe in place; every
    # sibling transform works on a real copy.
    df_copy = self.df.copy()
    # Fill NAs
    df_copy = self._execute_na_strategy(df_copy, column, na_strategy)
    # Everything that is not the literal string '0' counts as 1
    df_copy.loc[df_copy[column] != '0', column] = 1
    df_copy[column] = df_copy[column].astype(int)
    self._add_column_to_data_frame(df_copy, column)
def _transform_categorical_variables_label_encoded(self, column, na_strategy='set:NAN'):
    """Label-encode ``column`` and add the encoded column to the features."""
    logger.debug(
        "Transform categorical variable to label encoding for column {}".format(column))
    encoded_name = "labels_" + column
    self.label_dict[column] = encoded_name
    # Work on a copy and fill missing values first
    working = self.df.copy()
    working = self._execute_na_strategy(working, column, na_strategy)
    # Encode and attach the label column under the recorded name
    encoded_values = self._label_encode_categorical_feature(working, column)
    working = working.assign(**{encoded_name: encoded_values})
    self._add_column_to_data_frame(working, encoded_name)
def _transform_categorical_variables_one_hot_encoded(self, column, na_strategy='set:NAN'):
    """One-hot encode ``column`` and merge the result into the features."""
    logger.debug(
        "Transform categorical variable to one hot encoded for column {}".format(column))
    # Copy Dataframe
    df_copy = self.df.copy()
    # Fill NAs
    df_copy = self._execute_na_strategy(df_copy, column, na_strategy)
    # Transform one hot encoded (the unused 'label_name' local was removed)
    df_ohe_id = self._one_hote_encoder(df_copy, column)
    self._add_df_to_feature_df(df_ohe_id)
def _transform_categorical_skip_encoded(self, column, na_strategy='set:NAN'):
    """Add ``column`` to the features unchanged (no encoding), NA-filled."""
    # Fixed copy-pasted log text: this transform skips encoding entirely,
    # the old message claimed "one hot encoded".
    logger.debug(
        "Transform categorical variable without encoding for column {}".format(column))
    # Copy Dataframe
    df_copy = self.df.copy()
    # Fill NAs
    df_copy = self._execute_na_strategy(df_copy, column, na_strategy)
    self._add_column_to_data_frame(df_copy, column)
def _rename_column(self, column, rename):
    """Rename a feature column: copy it under the new name, drop the old one."""
    renamed = self.df_features.copy()
    renamed[rename] = renamed[column]
    self._remove_column_from_data_frame(column)
    self._add_column_to_data_frame(renamed, rename)
def _transform_link_binary(self, column):
    """Load a precomputed binary feature from ``data/external/<column>.csv``
    and add it to the features; skipped with a warning when loading fails."""
    try:
        df_link_feature = pd.read_csv('data/external/' + column + '.csv')
        self._add_column_to_data_frame(df_link_feature, column)
    # Explicit exception class instead of a bare `except:` (which would also
    # swallow SystemExit/KeyboardInterrupt); logger.warn() is the deprecated
    # alias of logger.warning().
    except Exception:
        logger.warning(
            'could not add link binary feature {}, csv file was not found'.format(column))
def get_X_y(self):
    """This function returns X_train, y_train and X_test.
    These are not the splits for training! This is just for preprocessing both datasets.

    Returns
    -------
    DataFrames
        X_train, y_train, X_test
    """
    features = self.df_features
    train_mask = features.success != 'TEST'
    df_train = features.loc[train_mask]
    df_test = features.loc[~train_mask]
    self.X_train = df_train.drop(['success', 'OBS_ID'], axis=1)
    self.y_train = df_train['success'].values.astype(int)
    # NOTE(review): X_test deliberately keeps OBS_ID (only 'success' is
    # dropped) -- confirm this asymmetry is intended by the caller.
    self.X_test = df_test.drop('success', axis=1)
    logger.debug("X_train shape: {}".format(self.X_train.shape))
    logger.debug("y_train shape: {}".format(self.y_train.shape))
    logger.debug("X_test shape: {}".format(self.X_test.shape))
    return self.X_train, self.y_train, self.X_test
def get_all_kw_cols(self):
    """
    Get all columns which are associated to KW (calendar weeks).

    returns: naturally-sorted list with kw columns
    """
    kw_cols = [name for name in self.df.columns if 'kw' in name.lower()]
    kw_cols.sort(key=self.natural_keys)
    return kw_cols
def remove_kw_from_column(self, kws):
    """
    Strip the literal 'KW' substring from every entry of ``kws``.

    returns: list of the names without 'KW'
    """
    return [name.replace('KW', '') for name in kws]
def atof(self, text):
    """Convert ``text`` to float when possible, else return it unchanged."""
    try:
        return float(text)
    except ValueError:
        return text
def natural_keys(self, text):
    """Sort key implementing natural (human) ordering: numeric chunks of
    ``text`` compare as numbers, the remaining chunks as strings."""
    chunks = re.split(r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text)
    return [self.atof(chunk) for chunk in chunks]
def calc_ext_difference(self, amt_weeks: int, df_ext, col_name):
    """
    Function to calculate the differences of calendar weeks and bitcoin price.
    amt_weeks: Number of weeks to go back from last week available
    df_ext: external price dataframe with 'calendar_week' and 'High' columns
    col_name: prefix for the generated difference columns
    returns: DataFrame with OBS_ID plus one difference column per selected week
    """
    # All calendar-week (KW) columns plus the observation id
    kws = self.get_all_kw_cols()
    kws.append('OBS_ID')
    df_kws = self.df.loc[:, kws]
    # Week columns only, in natural (numeric) order
    kws_wo_id = set(kws) - set(['OBS_ID'])
    kws_wo_id = list(kws_wo_id)
    kws_wo_id.sort(key=self.natural_keys)
    # Keep only the last `amt_weeks` weeks
    kws_slice = kws_wo_id[-amt_weeks:]
    # Mean external 'High' price per calendar week
    grouped_prices_kws = df_ext.groupby(
        'calendar_week').mean()['High']
    new_df = pd.DataFrame(df_kws.OBS_ID)
    for week in kws_slice:
        new_col = col_name + '_' + week
        # Calendar-week number: the first number found in the column name.
        # NOTE(review): assumes `col_name` itself contains no digits -- confirm.
        btc_col = int(re.findall(
            r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', new_col)[0])
        btc_price = grouped_prices_kws[btc_col]
        # ICO price minus external price for that week
        difference = df_kws.loc[:, week] - btc_price
        new_df.loc[:, new_col] = difference
    return new_df
def _build_bitcoin_difference(self, amt_weeks: int):
    """
    Build the per-week difference between bitcoin price and ico price.
    amt_weeks: Number of weeks to go back from last week
    """
    logger.info("Create bitcoin difference feature")
    differences = self.calc_ext_difference(
        amt_weeks, self.df_gem_btc_usd, 'btc_difference')
    self._add_df_to_feature_df(differences)
def _build_eth_difference(self, amt_weeks: int):
    """
    Build the per-week difference between ethereum price and ico price.
    amt_weeks: Number of weeks to go back from last week
    """
    logger.info("Create eth difference feature")
    differences = self.calc_ext_difference(
        amt_weeks, self.df_gem_eth_usd, 'eth_difference')
    self._add_df_to_feature_df(differences)
def _build_ltc_difference(self, amt_weeks: int):
    """
    Build the per-week difference between litecoin price and ico price.
    amt_weeks: Number of weeks to go back from last week
    """
    logger.info("Create ltc difference feature")
    differences = self.calc_ext_difference(
        amt_weeks, self.df_gem_ltc_usd, 'ltc_difference')
    self._add_df_to_feature_df(differences)
def _build_bitcoin_avg_difference(self):
    """Add the mean btc/ico price difference over all 39 weeks per ICO."""
    logger.info("Build average difference over all weeks")
    df_differences = self.calc_ext_difference(
        39, self.df_gem_btc_usd, 'btc_difference')
    # Bug fix: `set('OBS_ID')` built a set of single characters
    # {'O','B','S','_','I','D'}, so the OBS_ID column was never excluded and
    # its id values leaked into the mean. An explicit column filter also
    # avoids indexing .loc with a set, which newer pandas rejects.
    cols = [c for c in df_differences.columns if c != 'OBS_ID']
    df_differences_wo_id = df_differences.loc[:, cols]
    mean_per_ico = df_differences_wo_id.mean(axis=1)
    df_differences['mean_difference_btc'] = mean_per_ico
    self._add_column_to_data_frame(df_differences, 'mean_difference_btc')
def _build_eth_avg_difference(self):
    """Add the mean eth/ico price difference over all 39 weeks per ICO."""
    logger.info("Build average difference over all weeks for eth")
    df_differences = self.calc_ext_difference(
        39, self.df_gem_eth_usd, 'eth_difference')
    # Bug fix: `set('OBS_ID')` built a set of single characters, so OBS_ID was
    # never excluded and leaked into the mean (see _build_bitcoin_avg_difference).
    cols = [c for c in df_differences.columns if c != 'OBS_ID']
    df_differences_wo_id = df_differences.loc[:, cols]
    mean_per_ico = df_differences_wo_id.mean(axis=1)
    df_differences['mean_difference_eth'] = mean_per_ico
    self._add_column_to_data_frame(df_differences, 'mean_difference_eth')
def _build_ltc_avg_difference(self):
    """Add the mean ltc/ico price difference over all 39 weeks per ICO."""
    logger.info("Build average difference over all weeks for ltc")
    df_differences = self.calc_ext_difference(
        39, self.df_gem_ltc_usd, 'ltc_difference')
    # Bug fix: `set('OBS_ID')` built a set of single characters, so OBS_ID was
    # never excluded and leaked into the mean (see _build_bitcoin_avg_difference).
    cols = [c for c in df_differences.columns if c != 'OBS_ID']
    df_differences_wo_id = df_differences.loc[:, cols]
    mean_per_ico = df_differences_wo_id.mean(axis=1)
    df_differences['mean_difference_ltc'] = mean_per_ico
    self._add_column_to_data_frame(df_differences, 'mean_difference_ltc')
def calc_coeff_kw(self, df_external, col_name):
    """
    Compute, per ICO, the Pearson correlation between its weekly (KW) prices
    and the external asset's mean weekly 'High' price.

    df_external: dataframe with 'calendar_week' and 'High' columns
    col_name: name of the correlation column to create
    returns: DataFrame with columns ['OBS_ID', col_name]
    """
    kws = self.get_all_kw_cols()
    kws.append('OBS_ID')
    df_kws = self.df.loc[:, kws]
    # Week columns only, naturally sorted
    kws_wo_id = set(kws) - set(['OBS_ID'])
    kws_wo_id = list(kws_wo_id)
    kws_wo_id.sort(key=self.natural_keys)
    grouped_prices_kws = df_external.groupby(
        'calendar_week').mean()['High']
    # First 39 weeks of the external series.
    # NOTE(review): assumes each ICO row also provides exactly 39 KW values
    # (pearsonr requires equal-length inputs) -- confirm.
    ext_price = grouped_prices_kws[:39].values
    logger.info("Calculate pearson coefficient of {}".format(col_name))
    # Row-wise correlation, written back by OBS_ID
    for index, row in df_kws.iterrows():
        ico_price = row[kws_wo_id].values
        correlation = pearsonr(ico_price, ext_price)[0]
        df_kws.loc[df_kws.OBS_ID == row.OBS_ID, col_name] = correlation
    return df_kws[['OBS_ID', col_name]]
def _build_btc_coeff(self):
df_kws = self.calc_coeff_kw(self.df_gem_btc_usd, 'corr_btc')
self._add_column_to_data_frame(df_kws, 'corr_btc')
def _build_eth_coeff(self):
df_kws = self.calc_coeff_kw(self.df_gem_eth_usd, 'corr_eth')
self._add_column_to_data_frame(df_kws, 'corr_eth')
def _build_ltc_coeff(self):
df_kws = self.calc_coeff_kw(self.df_gem_ltc_usd, 'corr_ltc')
self._add_column_to_data_frame(df_kws, 'corr_ltc')
def _build_exist_icobench(self):
    """Create the binary feature 'exist_on_icobench': 1 when the ICO's
    (lower-cased, stripped) company name matches an icobench id, else 0."""
    col = 'company_name'
    # .copy() fixes the SettingWithCopy hazard: the slice is mutated below.
    df_ids = self.df[[col, 'OBS_ID']].copy()
    df_ids[col] = df_ids[col].str.lower()
    df_ids[col] = df_ids[col].str.strip()
    count_exist = 0
    tqdm.write("Creating exist on icobench feature")
    for index, row in tqdm(df_ids.iterrows(), total=df_ids.shape[0]):
        try:
            if self.df_icobench.id.str.contains(row[col]).any():
                count_exist += 1
                df_ids.loc[df_ids[col] == row[col],
                           'exist_on_icobench'] = 1
            else:
                df_ids.loc[df_ids[col] == row[col],
                           'exist_on_icobench'] = 0
        except Exception as e:
            # e.g. NaN company names make str.contains raise
            logger.warning("Exception: {}".format(e))
    self._add_column_to_data_frame(df_ids, 'exist_on_icobench')
    # Typo fixed in the log message ('thos' -> 'those')
    logger.info(
        "{} icos were matched with those on icobench".format(count_exist))
def _check_meta_information(self, feature, feature_name):
assert (
'na_strategy' in feature), "No na_strategy for difference {} provided".format(
feature_name)
strategy = feature["na_strategy"]
assert (
'columns' in feature), "No columns for difference in feature {} provided".format(feature_name)
columns = feature["columns"]
assert (
len(columns) > 1), "Please provide at least 2 columns for difference {} provided".format(
feature_name)
return strategy, columns
def construct_feature_set(self, features):
"""This function is the pipeline for adding all features to the dataset
"""
# Iterate through features beforehand for deleting nas
for feature in features:
if 'meta' in feature:
continue
assert ('column' in feature), "No column key provided"
feature_name = feature["column"]
if 'na_strategy' in feature and feature['na_strategy'] == "delete":
self._delete_na_values(feature_name)
self._init_df_features()
# Check dependencies
for feature in features:
if 'meta' in feature:
continue
if 'dependsOn' in feature:
feature_name = feature["column"]
dependencies = feature["dependsOn"]
assert (
len(dependencies) > 0), "Please provide at least 1 dependency for {} ".format(
feature_name)
self._add_dependencies(dependencies)
# rearange based on dependencies
features_copy = features.copy()
for feature in features:
if 'meta' in feature:
continue
feature_name = feature["column"]
if feature_name in self.dependencies:
features_copy.remove(feature)
features_copy.insert(0, feature)
# Iterating through features and construct feature set
for feature in features:
logger.debug("Feature: {}".format(feature))
if 'meta' in feature:
feature.pop('meta')
continue
if feature['column'] == 'bitcoin_difference':
amt_weeks = int(feature['amt_weeks'])
self._build_bitcoin_difference(amt_weeks)
continue
elif feature['column'] == 'eth_difference':
amt_weeks = int(feature['amt_weeks'])
self._build_eth_difference(amt_weeks)
continue
elif feature['column'] == 'ltc_difference':
amt_weeks = int(feature['amt_weeks'])
self._build_ltc_difference(amt_weeks)
continue
elif feature['column'] == 'bitcoin_avg_difference':
self._build_bitcoin_avg_difference()
continue
elif feature['column'] == 'eth_avg_difference':
self._build_eth_avg_difference()
continue
elif feature['column'] == 'ltc_avg_difference':
self._build_ltc_avg_difference()
continue
elif feature['column'] == 'btc_coeff':
self._build_btc_coeff()
continue
elif feature['column'] == 'eth_coeff':
self._build_eth_coeff()
continue
elif feature['column'] == 'ltc_coeff':
self._build_ltc_coeff()
continue
elif feature['column'] == 'exist_on_icobench':
self._build_exist_icobench()
continue
assert (
'column' in feature), "No column key provided in feature " + feature
assert ('type' in feature), "No column type provided"
feature_type = feature["type"]
feature_name = feature["column"]
if feature_type == "categorical":
assert (
'encoder' in feature), "No encoder for categorical feauter {} provided".format(feature_name)
feauter_encoder = feature["encoder"]
assert (
'na_strategy' in feature), "No na_strategy for numerical feauter {} provided".format(
feature_name)
strategy = feature["na_strategy"]
if feauter_encoder == "label":
self._transform_categorical_variables_label_encoded(
feature_name, strategy)
elif feauter_encoder == "one_hot":
self._transform_categorical_variables_one_hot_encoded(
feature_name, strategy)
elif feauter_encoder == "skip":
self._transform_categorical_skip_encoded(
feature_name, strategy)
else:
raise ValueError("Feauter encoder not recognized")
elif feature_type == "numerical":
assert (
'na_strategy' in feature), "No na_strategy for categorical feauter {} provided".format(
feature_name)
strategy = feature["na_strategy"]
self._transform_numerical_variables(feature_name, strategy)
elif feature_type == "average":
strategy, columns = self._check_meta_information(
feature, feature_name)
self._transform_average_feature(
feature_name, | |
# <gh_stars>1-10
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import ivi
# Exceptions
# Raised when a marker operation is attempted while the marker is not enabled
# (inferred from the name -- confirm against the IviSpecAn specification).
class MarkerNotEnabledException(ivi.IviException): pass
# Raised when a delta-marker operation is applied to a non-delta marker
# (inferred from the name -- confirm against the IviSpecAn specification).
class NotDeltaMarkerException(ivi.IviException): pass
# Parameter Values
# Accepted values for level.amplitude_units
AmplitudeUnits = set(['dBm', 'dBmV', 'dBuV', 'volt', 'watt'])
# Accepted values for acquisition.detector_type
DetectorType = set(['auto_peak', 'average', 'maximum_peak', 'minimum_peak', 'sample', 'rms'])
# Accepted values for traces[].type
TraceType = set(['clear_write', 'maximum_hold', 'minimum_hold', 'video_average', 'view', 'store'])
# Accepted values for acquisition.vertical_scale
VerticalScale = set(['linear', 'logarithmic'])
# Possible results of acquisition.status()
AcquisitionStatus = set(['complete', 'in_progress', 'unknown'])
class Base(object):
"Base IVI methods for all spectrum analyzers"
def __init__(self, *args, **kwargs):
super(Base, self).__init__( *args, **kwargs)
cls = 'IviSpecAn'
grp = 'Base'
ivi.add_group_capability(self, cls+grp)
self._trace_count = 1
self._level_amplitude_units = 'dBm'
self._level_attenuation = 0.0
self._level_attenuation_auto = False
self._acquisition_detector_type = 'sample'
self._acquisition_detector_type_auto = False
self._frequency_start = 1e3
self._frequency_stop = 1e9
self._frequency_offset = 0.0
self._level_input_impedance = 50
self._acquisition_number_of_sweeps = 1
self._level_reference = 0.0
self._level_reference_offset = 0.0
self._sweep_coupling_resolution_bandwidth = 1e2
self._sweep_coupling_resolution_bandwidth_auto = False
self._acquisition_sweep_mode_continuous = True
self._sweep_coupling_sweep_time = 1e-1
self._sweep_coupling_sweep_time_auto = False
self._trace_name = list()
self._trace_type = list()
self._acquisition_vertical_scale = 'logarithmic'
self._sweep_coupling_video_bandwidth = 1e2
self._sweep_coupling_video_bandwidth_auto = False
ivi.add_property(self, 'level.amplitude_units',
self._get_level_amplitude_units,
self._set_level_amplitude_units,
None,
"""
Specifies the amplitude units for input, output and display amplitude.
""")
ivi.add_property(self, 'level.attenuation',
self._get_level_attenuation,
self._set_level_attenuation,
None,
"""
Specifies the input attenuation (in positive dB).
""")
ivi.add_property(self, 'level.attenuation_auto',
self._get_level_attenuation_auto,
self._set_level_attenuation_auto,
None,
"""
If set to True, attenuation is automatically selected. If set to False,
attenuation is manually selected.
""")
ivi.add_property(self, 'acquisition.detector_type',
self._get_acquisition_detector_type,
self._set_acquisition_detector_type,
None,
"""
Specifies the detection method used to capture and process the signal.
This governs the data acquisition for a particular sweep, but does not
have any control over how multiple sweeps are processed.
""")
ivi.add_property(self, 'acquisition.detector_type_auto',
self._get_acquisition_detector_type_auto,
self._set_acquisition_detector_type_auto,
None,
"""
If set to True, the detector type is automatically selected. The
relationship between Trace Type and Detector Type is not defined by the
specification when the Detector Type Auto is set to True. If set to False,
the detector type is manually selected.
""")
ivi.add_property(self, 'frequency.start',
self._get_frequency_start,
self._set_frequency_start,
None,
"""
Specifies the left edge of the frequency domain in Hertz. This is used in
conjunction with the Frequency Stop attribute to define the frequency
domain. If the Frequency Start attribute value is equal to the Frequency
Stop attribute value then the spectrum analyzer's horizontal attributes
are in time-domain.
""")
ivi.add_property(self, 'frequency.stop',
self._get_frequency_stop,
self._set_frequency_stop,
None,
"""
Specifies the right edge of the frequency domain in Hertz. This is used in
conjunction with the Frequency Start attribute to define the frequency
domain. If the Frequency Start attribute value is equal to the Frequency
Stop attribute value then the spectrum analyzer's horizontal attributes are
in time-domain.
""")
ivi.add_property(self, 'frequency.offset',
self._get_frequency_offset,
self._set_frequency_offset,
None,
"""
Specifies an offset value, in Hertz, that is added to the frequency
readout. The offset is used to compensate for external frequency
conversion. This changes the driver's Frequency Start and Frequency Stop
attributes.
The equations relating the affected values are:
Frequency Start = Actual Start Frequency + Frequency Offset
Frequency Stop = Actual Stop Frequency + Frequency Offset
Marker Position = Actual Marker Frequency + Frequency Offset
""")
ivi.add_property(self, 'level.input_impedance',
self._get_level_input_impedance,
self._set_level_input_impedance,
None,
"""
Specifies the value of input impedance, in ohms, expected at the active
input port. This is typically 50 ohms or 75 ohms.
""")
ivi.add_property(self, 'acquisition.number_of_sweeps',
self._get_acquisition_number_of_sweeps,
self._set_acquisition_number_of_sweeps,
None,
"""
This attribute defines the number of sweeps. This attribute value has no
effect if the Trace Type attribute is set to the value Clear Write.
""")
ivi.add_property(self, 'level.reference',
self._get_level_reference,
self._set_level_reference,
None,
"""
The calibrated vertical position of the captured data used as a reference
for amplitude measurements. This is typically set to a value slightly
higher than the highest expected signal level. The units are determined by
the Amplitude Units attribute.
""")
ivi.add_property(self, 'level.reference_offset',
self._get_level_reference_offset,
self._set_level_reference_offset,
None,
"""
Specifies an offset for the Reference Level attribute. This value is used
to adjust the reference level for external signal gain or loss. A
positive value corresponds to a gain while a negative number corresponds
to a loss. The value is in dB.
""")
ivi.add_property(self, 'sweep_coupling.resolution_bandwidth',
self._get_sweep_coupling_resolution_bandwidth,
self._set_sweep_coupling_resolution_bandwidth,
None,
"""
Specifies the width of the IF filter in Hertz. For more information see
Section 4.1.1, Sweep Coupling Overview.
""")
ivi.add_property(self, 'sweep_coupling.resolution_bandwidth_auto',
self._get_sweep_coupling_resolution_bandwidth_auto,
self._set_sweep_coupling_resolution_bandwidth_auto,
None,
"""
If set to True, the resolution bandwidth is automatically selected. If set
to False, the resolution bandwidth is manually selected.
""")
ivi.add_property(self, 'acquisition.sweep_mode_continuous',
self._get_acquisition_sweep_mode_continuous,
self._set_acquisition_sweep_mode_continuous,
None,
"""
If set to True, the sweep mode is continuous If set to False, the sweep
mode is not continuous.
""")
ivi.add_property(self, 'sweep_coupling.sweep_time',
self._get_sweep_coupling_sweep_time,
self._set_sweep_coupling_sweep_time,
None,
"""
Specifies the length of time to sweep from the left edge to the right edge
of the current domain. The units are seconds.
""")
ivi.add_property(self, 'sweep_coupling.sweep_time_auto',
self._get_sweep_coupling_sweep_time_auto,
self._set_sweep_coupling_sweep_time_auto,
None,
"""
If set to True, the sweep time is automatically selected If set to False,
the sweep time is manually selected.
""")
ivi.add_property(self, 'traces[].name',
self._get_trace_name,
None,
None,
"""
Returns the physical repeated capability identifier defined by the
specific driver for the trace that corresponds to the index that the user
specifies. If the driver defines a qualified trace name, this property
returns the qualified name.
""")
ivi.add_property(self, 'traces[].type',
self._get_trace_type,
self._set_trace_type,
None,
"""
Specifies the representation of the acquired data.
""")
ivi.add_property(self, 'acquisition.vertical_scale',
self._get_acquisition_vertical_scale,
self._set_acquisition_vertical_scale,
None,
"""
Specifies the vertical scale of the measurement hardware (use of log
amplifiers versus linear amplifiers).
""")
ivi.add_property(self, 'sweep_coupling.video_bandwidth',
self._get_sweep_coupling_video_bandwidth,
self._set_sweep_coupling_video_bandwidth,
None,
"""
Specifies the video bandwidth of the post-detection filter in Hertz.
""")
ivi.add_property(self, 'sweep_coupling.video_bandwidth_auto',
self._get_sweep_coupling_video_bandwidth_auto,
self._set_sweep_coupling_video_bandwidth_auto,
None,
"""
If set to True, the video bandwidth is automatically selected. If set to
False, the video bandwidth is manually selected.
""")
ivi.add_method(self, 'acquisition.abort',
self._acquisition_abort,
"""
This function aborts a previously initiated measurement and returns the
spectrum analyzer to the idle state. This function does not check
instrument status.
""")
ivi.add_method(self, 'acquisition.status',
self._acquisition_status,
"""
This function determines and returns the status of an acquisition.
""")
ivi.add_method(self, 'acquisition.configure',
self._acquisition_configure,
"""
This function configures the acquisition attributes of the spectrum
analyzer.
""")
ivi.add_method(self, 'frequency.configure_center_span',
self._frequency_configure_center_span,
"""
This function configures the frequency range defining the center frequency
and the frequency span. If the span corresponds to zero Hertz, then the
spectrum analyzer operates in time-domain mode. Otherwise, the spectrum
analyzer operates in frequency-domain mode.
This function modifies the Frequency Start and Frequency Stop attributes as
follows:
Frequency Start = CenterFrequency - Span / 2
Frequency Stop = CenterFrequency + Span / 2
""")
ivi.add_method(self, 'frequency.configure_start_stop',
self._frequency_configure_start_stop,
"""
This function configures the frequency range defining its start frequency
and its stop frequency. If the start frequency is equal to the stop
frequency, then the spectrum analyzer operates in time-domain mode.
Otherwise, the spectrum analyzer operates in frequency-domain mode.
""")
ivi.add_method(self, 'level.configure',
self._level_configure,
"""
This function configures the vertical attributes of the spectrum analyzer.
This corresponds to the Amplitude Units, Input Attenuation, Input
Impedance, Reference Level, and Reference Level Offset attributes.
""")
ivi.add_method(self, 'sweep_coupling.configure',
self._sweep_coupling_configure,
"""
This function configures the coupling and sweeping attributes. For
additional sweep coupling information refer to Section 4.1.1, Sweep
Coupling Overview.
""")
ivi.add_method(self, 'traces[].fetch_y',
self._trace_fetch_y,
"""
This function returns the trace the spectrum analyzer acquires. | |
obj_dict.items():
piv_pos += cmds.xform(vtx, q=True, t=True, ws=True)
piv_pos = self.get_piv_pos(piv_pos)
#print 'Pivot COG :', piv_pos
if sid == 0 or sid ==4:
cmds.scale(add_scale[0], add_scale[1], add_scale[2], r=True, ws=True, p=piv_pos, smn=sym)
if sid == 1 or sid == 2 or sid == 5:
cmds.scale(add_scale[0], add_scale[1], add_scale[2], r=True, ls=True, p=piv_pos, smn=sym)
if sid == 3:
cmds.scale(add_scale[0], add_scale[1], add_scale[2], r=True, os=True, p=piv_pos, smn=sym)
else:#それぞれのメッシュの中心ピボット
for mesh, vtx in obj_dict.items():
if cmds.nodeType(mesh) == 'mesh':
mesh = cmds.listRelatives(mesh, p=True, f=True)[0]
#print 'comp_mode pre scale :', pre_scale
if sym:
base_pos = piv_pos
else:
base_pos = cmds.xform(mesh, q=True, t=True, ws=True)
#print 'comp_mode base scale position :', base_pos
if sid == 3:#オブジェクトモードの時だけそれぞれの角度にスケール
#print 'object mode :'
#cmds.xform(vtx, s=add_scale, r=True, os=True)
cmds.scale(add_scale[0], add_scale[1], add_scale[2], r=True, os=True, smn=sym)
else:#それ以外の場合はグローバル座標それぞれの位置にスケール
#print 'add_mode :'
#SIだとコンポーネントスケールはワールドもローカルも手打ちでは同じ動きをする。分けられるけど、どうしよう。
#cmds.scale(add_scale[0], add_scale[1], add_scale[2], vtx, r=True, ws=True, p=base_pos)
#分けたバージョンは以下
if sid == 0 or sid ==4:
cmds.scale(add_scale[0], add_scale[1], add_scale[2], r=True, ws=True, p=base_pos, smn=sym)
if sid == 1 or sid == 2 or sid == 5:
cmds.scale(add_scale[0], add_scale[1], add_scale[2], r=True, ls=True, p=base_pos, smn=sym)
sisidebar_sub.get_matrix()
#self.out_focus()
if focus:
global input_line_id#フォーカス外すラインを限定する
global input_srt_id#フォーカス外すラインを限定する
input_srt_id = 0
input_line_id = axis
create_focus_job()
# Apply the rotation typed into the side-bar to the selected objects
def rotation(self, text='', axis=0, focus=True):
    """Apply a rotation value typed into the side-bar to the selection.

    :param text: raw text from the rotate input field; may be a plain
        number or carry an operator (e.g. '+=', '90*').
    :param axis: 0/1/2 for X/Y/Z.
    :param focus: when True, schedule the focus-clearing job afterwards.

    NOTE(review): this is legacy Python-2 style code; the ``exec`` calls
    below mutate locals and will not work under Python 3 — confirm the
    target Maya version before porting.
    """
    global world_str_mode
    global world_str_axis
    world_str_mode=1
    world_str_axis=axis
    global pre_rot
    if text == str(pre_rot[axis]):
        # Value unchanged since the last apply -> nothing to do.
        return
    sid = space_group.checkedId()
    space = self.space_list[sid]  # NOTE(review): looks unused here — confirm
    value, sign = self.text2num(text)
    if value is None:
        # Unparsable input (or a handled formula): just refresh the UI.
        sisidebar_sub.get_matrix()
        return
    # Optional preserve-child-position flag, spliced into the exec'd call.
    if self.child_comp_but.isChecked():
        pcp = ', pcp=True'
    else:
        pcp = ''
    transforms = cmds.ls(sl=True, l=True, tr=True)
    for sel in transforms:
        if sid == 1 or sid == 2:  # local space / view
            rot = cmds.xform(sel, q=True, ro=True)
        else:  # global
            rot = cmds.xform(sel, q=True, ro=True, ws=True)
        if sign:
            # Apply the typed operator, e.g. 'rot[axis] += value'.
            exec('rot[axis] '+sign+'= value')
        else:
            rot[axis]=value
        # Execute the rotation.
        if sid == 1 or sid == 2:  # local space / view
            exec('cmds.rotate(rot[0], rot[1], rot[2], sel'+pcp+', os=True)')
        else:  # global
            exec('cmds.rotate(rot[0], rot[1], rot[2], sel'+pcp+', ws=True)')
        # Push the resulting angle back into the matching UI line edit.
        exec('trans'+self.axis_list[axis]+'.setText(str(rot[axis]))')
    if pre_rot[axis] != value:
        # Component (vertex/CV) rotation path.
        sel_comps = cmds.ls(sl=True, type='float3')
        # Keep curve CVs as well.
        cv_selection = cmds.ls(sl=True, type='double3', fl=True)
        components = cmds.polyListComponentConversion(sel_comps, tv=True)
        if components:
            components = cmds.filterExpand(components, sm=31)+cv_selection
        else:
            components = cv_selection
        if components:
            # Group the selected components per owning object.
            obj_list = list(set([vtx.split('.')[0] for vtx in components]))
            obj_dict = {obj:[] for obj in obj_list}
            [obj_dict[vtx.split('.')[0]].append(vtx) for vtx in components]
            add_rot = [0.0, 0.0, 0.0]
            current_rot = [0.0, 0.0, 0.0]  # NOTE(review): unused — confirm
            if sign:
                if sign == '+':
                    add_value = value
                elif sign == '-':
                    add_value = -1*value
                else:
                    # '*' and '/' are meaningless as relative rotations.
                    add_value = 0.0
            else:
                add_value = value
            add_rot[axis] = add_value
            sym = cmds.symmetricModelling(q=True, symmetry=True)
            if sym:
                smn = self.get_snm_flag()
            else:
                smn = False
            if self.cog_but.isChecked():
                # COG: pivot at the center of all selected components
                # (global rotation + COG handling).
                piv_pos = []
                if self.cog_but.text() == 'COP' and cmds.manipPivot(q=True, pv=True):
                    # COP mode: use the manipulator's custom pivot.
                    piv_pos = cmds.manipPivot(p=True, q=True)[0]
                else:
                    for mesh, vtx in obj_dict.items():
                        piv_pos += cmds.xform(vtx, q=True, t=True, ws=True)
                    piv_pos = self.get_piv_pos(piv_pos)
                if sid == 0 or sid == 4:  # world space
                    cmds.rotate(add_rot[0], add_rot[1], add_rot[2], r=True, ws=True, p=piv_pos, smn=smn)
                if sid == 3:  # gimbal
                    cmds.rotate(add_rot[0], add_rot[1], add_rot[2], r=True, eu=True, p=piv_pos, smn=smn)
                if sid == 1 or sid == 2 or sid == 5:  # object
                    cmds.rotate(add_rot[0], add_rot[1], add_rot[2], r=True, os=True, p=piv_pos, smn=smn)
            else:
                # Non-COG handling.
                if sid == 0 or sid == 4:  # world space
                    cmds.rotate(add_rot[0], add_rot[1], add_rot[2], r=True, ws=True, smn=smn)
                if sid == 3:  # gimbal
                    cmds.rotate(add_rot[0], add_rot[1], add_rot[2], r=True, eu=True, smn=smn)
                if sid == 1 or sid == 2 or sid == 5:  # object
                    cmds.rotate(add_rot[0], add_rot[1], add_rot[2], r=True, os=True, smn=smn)
    sisidebar_sub.get_matrix()
    if focus:
        global input_line_id  # restrict which line loses focus
        global input_srt_id  # restrict which SRT row loses focus
        input_srt_id = 1
        input_line_id = axis
        create_focus_job()
# Apply the translation typed into the side-bar to the selected objects
#os_trans_flag = False
def translation(self, text='', axis=0, focus=True):
    """Apply a translate value typed into the side-bar to the selection.

    :param text: raw text from the translate input field; may be a plain
        number or carry an operator (e.g. '+=', '2*').
    :param axis: 0/1/2 for X/Y/Z.
    :param focus: when True, schedule the focus-clearing job afterwards.

    NOTE(review): legacy Python-2 style; the ``exec`` calls mutate locals
    and will not work under Python 3 — confirm the target Maya version.
    """
    global world_str_mode
    global world_str_axis
    world_str_mode=2
    world_str_axis=axis
    global pre_trans
    if text == self.focus_text:
        # Field still shows the focused text -> no user edit happened.
        return
    # Typing the same number twice would otherwise be ignored, so also
    # compare against the line content from two updates ago.
    if text == str(pre_trans[axis]) and text == self.pre_pre_lines_text[2][axis]:
        return
    space = self.space_list[space_group.checkedId()]
    value, sign = self.text2num(text)
    if value is None:
        # Unparsable input (or a handled formula): just refresh the UI.
        sisidebar_sub.get_matrix()
        return
    sid = space_group.checkedId()
    # Optional preserve-child-position flag, spliced into the exec'd call.
    if self.child_comp_but.isChecked():
        pcp = ', pcp=True'
    else:
        pcp = ''
    transforms = cmds.ls(sl=True, l=True, tr=True)
    for sel in transforms:
        if sid == 0 or sid == 4:  # world space
            pos = cmds.xform(sel, q=True, t=True, ws=True)
        elif sid == 3 or sid == 2 or sid == 5:  # local space
            pos = cmds.xform(sel, q=True, t=True)
        elif sid == 1:  # object space
            pos = cmds.xform(sel, q=True, t=True, os=True)
        if sign:
            # Apply the typed operator, e.g. 'pos[axis] += value'.
            exec('pos[axis] '+sign+'= value')
        else:
            pos[axis]=value
        # Execute the move.
        if sid == 0 or sid == 4:  # world space
            exec('cmds.move(pos[0], pos[1], pos[2], sel, ws=True'+pcp+')')
        elif sid == 3 or sid == 2 or sid == 5:  # local space
            exec('cmds.move(pos[0], pos[1], pos[2], sel'+pcp+',ls=True)')
        elif sid == 1:  # object space
            exec('cmds.move(pos[0], pos[1], pos[2], sel, os=True'+pcp+')')
        # Push the resulting position back into the matching UI line edit.
        exec('trans'+self.axis_list[axis]+'.setText(str(pos[axis]))')
    if text != self.focus_text:
        if pre_trans[axis] != value or text != self.pre_pre_lines_text[2][axis]:
            # Component (vertex/CV) translation path.
            sel_comps = cmds.ls(sl=True, type='float3')
            # Keep curve CVs as well.
            cv_selection = cmds.ls(sl=True, type='double3', fl=True)
            components = cmds.polyListComponentConversion(sel_comps, tv=True)
            if components:
                components = cmds.filterExpand(components, sm=31)+cv_selection
            else:
                components = cv_selection
            if components:
                # Group the selected components per owning object.
                obj_list = list(set([vtx.split('.')[0] for vtx in components]))
                obj_dict = {obj:[] for obj in obj_list}
                [obj_dict[vtx.split('.')[0]].append(vtx) for vtx in components]
                for mesh, vtx in obj_dict.items():
                    if cmds.nodeType(mesh) == 'mesh':
                        # Work with the transform, not the shape node.
                        mesh = cmds.listRelatives(mesh, p=True, f=True)[0]
                    add_trans = [0.0, 0.0, 0.0]
                    if sid == 0 or sid == 4:  # world space
                        base_trans = cmds.xform(mesh, q=True, t=True, ws=True)
                    else:  # local space
                        base_trans = cmds.xform(mesh, q=True, t=True, os=True)
                    # NOTE(review): base_trans appears unused below — confirm.
                    if sign:
                        if sign == '+':
                            add_value = value
                        elif sign == '-':
                            add_value = -1*value
                        else:
                            # '*' / '/': delta relative to the previous value.
                            exec('add_value = pre_trans[axis] '+sign+' value-pre_trans[axis]')
                    else:
                        if cp_abs_flag:
                            # Absolute-input mode: only act when one of the
                            # translate fields actually has focus.
                            for line_obj in self.t_xyz_list:
                                if line_obj.hasFocus():
                                    break
                            else:
                                # Skip translation while in scale/rot mode.
                                return
                            # Run component absolute positioning.
                            self.scaling(text='0.0', axis=axis, focus=True)
                        add_value = value - pre_trans[axis]
                    add_trans[axis] = add_value
                    sym = cmds.symmetricModelling(q=True, symmetry=True)
                    # With symmetry enabled the smn (mirror) flag must be set
                    # according to which side of the axis we are on.
                    if sym:
                        smn = self.get_snm_flag()
                        if sid == 0 or sid == 4:  # world space
                            cmds.move(add_trans[0], add_trans[1], add_trans[2], r=True, smn=smn)
                        elif sid == 3 or sid == 2 or sid == 5:  # local space
                            cmds.move(add_trans[0], add_trans[1], add_trans[2], r=True, ls=True, smn=smn)
                        elif sid == 1:  # object space
                            cmds.move(add_trans[0], add_trans[1], add_trans[2], r=True, os=True, wd=sym, smn=smn)
                    else:
                        if sid == 0 or sid == 4:  # world space
                            cmds.move(add_trans[0], add_trans[1], add_trans[2], r=True, ws=True)
                        elif sid == 3 or sid == 2 or sid == 5:  # local space
                            cmds.move(add_trans[0], add_trans[1], add_trans[2], r=True, ls=True)
                        elif sid == 1:  # object space
                            cmds.move(add_trans[0], add_trans[1], add_trans[2], r=True, os=True)
    sisidebar_sub.get_matrix()
    if focus:
        global input_line_id  # restrict which line loses focus
        global input_srt_id
        input_srt_id = 2
        input_line_id = axis
        create_focus_job()
# Determine the move-direction mirror flag (smn) from the current symmetry-axis coordinate
def get_snm_flag(self):
    """Return the ``smn`` (mirror-direction) flag for symmetric modelling.

    Looks up which axis symmetric modelling is active on and returns True
    when the previously entered translate value on that axis is on the
    positive side (>= 0), False otherwise.

    :return: bool suitable for the ``smn`` flag of cmds.move/rotate/scale.
    """
    axis_list = ['x', 'y', 'z']
    sym_axis = cmds.symmetricModelling(q=True, ax=True)
    axis_id = axis_list.index(sym_axis)
    # Direct boolean expression instead of if/else returning True/False.
    return pre_trans[axis_id] >= 0
# Return a list of positions with one axis replaced by an absolute value
def exchange_abs_val(self, components, axis, abs):
    """Return world-space positions with one axis forced to a fixed value.

    :param components: component names accepted by cmds.xform.
    :param axis: index (0/1/2) of the axis to overwrite.
    :param abs: the value to place on that axis.  NOTE: the name shadows
        the builtin ``abs``; kept for backward compatibility with callers
        that pass it by keyword.
    :return: list of [x, y, z] lists.
    """
    pos_list = [cmds.xform(con, q=True, t=True, ws=True) for con in components]
    # List comprehension instead of map() so the result is a list of lists
    # under both Python 2 and Python 3 (py3 map() is lazy).
    return [[abs if a == axis else pos[a] for a in range(3)]
            for pos in pos_list]
def get_piv_pos(self, piv_pos):
    """Average a flat ``[x, y, z, x, y, z, ...]`` list into one centroid.

    :param piv_pos: flat list of world-space coordinates, length 3*n, n > 0.
    :return: [x, y, z] centroid (a numpy array when np_flag is set).
    """
    start = dt.datetime.now()
    # Integer point count; ``//`` keeps this correct under Python 3, where
    # ``/`` on ints yields a float (which reshape() would reject).
    count = len(piv_pos) // 3
    if np_flag:
        piv_pos = np.average(np.array(piv_pos).reshape(count, 3), axis=0)
    else:
        srt_list = [0, 0, 0]
        for i in range(0, len(piv_pos), 3):
            srt_list[0] += piv_pos[i + 0]
            srt_list[1] += piv_pos[i + 1]
            srt_list[2] += piv_pos[i + 2]
        # List comprehension instead of map() so the result is a real list
        # under both Python 2 and Python 3.
        piv_pos = [total / count for total in srt_list]
    # Report how long the averaging took (numpy vs pure-python timing).
    elapsed = dt.datetime.now() - start
    view_np_time(culc_time=elapsed)
    return piv_pos
# Break the input text into a numeric value and an operator symbol
def text2num(self, text):
    """Parse an input-field string into ``(value, sign)``.

    :param text: raw text, e.g. '5', '+=3', '2*', or a formula.
    :return: tuple of (float value or None, operator char or None).
        ``(None, None)`` means "nothing to apply" (bad input, division by
        zero, or a list formula that was dispatched elsewhere).
    """
    # If the formula analyzer produced a result, use it directly;
    # otherwise fall back to the arithmetic-operator parsing below.
    value = self.formula_analyze(text)
    # NOTE(review): a falsy result (0.0, empty list) falls through to the
    # plain-float branch — confirm formula_analyze never returns 0.0 for
    # a valid formula.
    if value:
        # A list result gets special handling.
        if isinstance(value, list):
            # Execute while consolidating the undo history.
            qt.Callback(self.linear_sort_selection(value))
            return None, None
        else:
            return value, None
    else:
        if text == ' ':
            return 0.0, None
        signs = ['+', '-', '*', '/']
        try:
            return float(text), None
        except:
            try:
                # Try each operator: accept both '<op>=N' and 'N<op>' forms.
                for s in signs:
                    if text.startswith(s+'='):
                        text = text[2:]
                        sign = s
                    if text.endswith(s):
                        text = text[:-1]
                        sign = s
                    # NOTE(review): ``sign`` is assigned but the return uses
                    # the loop variable ``s`` — confirm this is intentional.
                    try:
                        # Guard against division by zero before returning.
                        if float(text)==0.0 and s=='/':
                            return None, None
                        return float(text), s
                    except:
                        pass
            except:
                pass
            return None, None
# Parse a formula expression and return the result
def formula_analyze(self, text):
#print 'input formula :', text
text = text.upper()
text | |
try:
deployment = client.deployments.create(
blueprint_id,
deployment_id,
inputs=inputs,
visibility=visibility,
skip_plugins_validation=skip_plugins_validation,
site_name=site_name,
runtime_only_evaluation=runtime_only_evaluation,
labels=labels,
display_name=display_name
)
except (MissingRequiredDeploymentInputError,
UnknownDeploymentInputError) as e:
logger.error('Unable to create deployment: {0}'.format(e))
raise SuppressedCloudifyCliError(str(e))
except DeploymentPluginNotFound as e:
logger.info("Unable to create deployment. Not all "
"deployment plugins are installed on the Manager.{}"
"* Use 'cfy plugins upload' to upload the missing plugins"
" to the Manager, or use 'cfy deployments create' with "
"the '--skip-plugins-validation' flag "
" to skip this validation.".format(os.linesep))
raise CloudifyCliError(str(e))
except (UnknownDeploymentSecretError,
UnsupportedDeploymentGetSecretError) as e:
logger.info('Unable to create deployment due to invalid secret')
raise CloudifyCliError(str(e))
logger.info("Deployment `{0}` created. The deployment's id is "
"{1}".format(deployment.display_name, deployment.id))
@cfy.command(name='delete',
             short_help='Delete a deployment [manager only]')
@cfy.argument('deployment-id')
@cfy.options.force(help=helptexts.FORCE_DELETE_DEPLOYMENT)
@cfy.options.common_options
@cfy.options.with_logs
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_delete(deployment_id, force, with_logs, logger, client,
                   tenant_name):
    """Delete a deployment from the manager

    `DEPLOYMENT_ID` is the id of the deployment to delete.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Trying to delete deployment {0}...'.format(deployment_id))
    client.deployments.delete(deployment_id, force,
                              with_logs=with_logs)
    try:
        # Deleting spawns a delete-deployment-environment execution;
        # follow it so we only report success once it actually finishes.
        execution = get_deployment_environment_execution(
            client, deployment_id, DELETE_DEP)
        if execution:
            execution_events_fetcher.wait_for_execution(
                client, execution, logger=logger)
    except ExecutionTimeoutError:
        raise CloudifyCliError(
            'Timed out waiting for deployment `{0}` to be deleted. Please '
            'execute `cfy deployments list` to check whether the '
            'deployment has been deleted.'.format(deployment_id))
    except CloudifyClientError as e:
        # ignore 404 errors for the execution or deployment - it was already
        # deleted before we were able to follow it
        if 'not found' not in str(e):
            raise
    except RuntimeError as e:
        # ignore the failure to get the execution - it was already deleted
        # before we were able to follow it
        if 'Failed to get' not in str(e):
            raise
    logger.info("Deployment deleted")
@cfy.command(name='outputs',
             short_help='Show deployment outputs [manager only]')
@cfy.argument('deployment-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_outputs(deployment_id, logger, client, tenant_name):
    """Retrieve outputs for a specific deployment

    `DEPLOYMENT_ID` is the id of the deployment to print outputs for.
    """
    # Shared implementation with `cfy deployments capabilities`.
    _present_outputs_or_capabilities(
        'outputs',
        deployment_id,
        tenant_name,
        logger,
        client
    )
@cfy.command(name='capabilities',
             short_help='Show deployment capabilities [manager only]')
@cfy.argument('deployment-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_capabilities(deployment_id, logger, client, tenant_name):
    """Retrieve capabilities for a specific deployment

    `DEPLOYMENT_ID` is the id of the deployment to print capabilities for.
    """
    # Shared implementation with `cfy deployments outputs`.
    _present_outputs_or_capabilities(
        'capabilities',
        deployment_id,
        tenant_name,
        logger,
        client
    )
def _present_outputs_or_capabilities(
    resource, deployment_id, tenant_name, logger, client
):
    """Fetch and print a deployment's outputs or capabilities.

    :param resource: either "outputs" or "capabilities".
    :param deployment_id: id of the deployment to inspect.
    :param tenant_name: optional explicit tenant (informational message only).
    :param logger: CLI logger.
    :param client: manager REST client.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info(
        'Retrieving {0} for deployment {1}...'.format(resource, deployment_id)
    )
    # Definitions (with descriptions) come from the deployment object;
    # the evaluated values come from the dedicated outputs/capabilities API.
    dep = client.deployments.get(deployment_id, _include=[resource])
    definitions = getattr(dep, resource)
    client_api = getattr(client.deployments, resource)
    response = client_api.get(deployment_id)
    values_dict = getattr(response, resource)
    if get_global_json_output():
        # Guard with .get() so a value without a matching definition
        # cannot raise a KeyError.
        values = {out: {
            'value': val,
            'description': definitions.get(out, {}).get('description')
        } for out, val in values_dict.items()}
        print_details(values, 'Deployment {0}:'.format(resource))
    else:
        values = StringIO()
        for elem_name, elem in values_dict.items():
            values.write(' - "{0}":{1}'.format(elem_name, os.linesep))
            description = definitions.get(elem_name, {}).get('description', '')
            values.write(' Description: {0}{1}'.format(description,
                                                       os.linesep))
            values.write(
                ' Value: {0}{1}'.format(elem, os.linesep))
        logger.info(values.getvalue())
@cfy.command(name='inputs',
             short_help='Show deployment inputs [manager only]')
@cfy.argument('deployment-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_inputs(deployment_id, logger, client, tenant_name):
    """Retrieve inputs for a specific deployment

    `DEPLOYMENT_ID` is the id of the deployment to print inputs for.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Retrieving inputs for deployment {0}...'.format(
        deployment_id))
    dep = client.deployments.get(deployment_id, _include=['inputs'])
    if get_global_json_output():
        print_details(dep.inputs, 'Deployment inputs:')
    else:
        inputs_ = StringIO()
        # Loop variable renamed from `input`, which shadowed the builtin.
        for input_name, input_value in dep.inputs.items():
            inputs_.write(' - "{0}":{1}'.format(input_name, os.linesep))
            inputs_.write(' Value: {0}{1}'.format(input_value, os.linesep))
        logger.info(inputs_.getvalue())
@cfy.command(name='set-visibility',
             short_help="Set the deployment's visibility [manager only]")
@cfy.argument('deployment-id')
@cfy.options.visibility(required=True, valid_values=VISIBILITY_EXCEPT_PRIVATE)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def manager_set_visibility(deployment_id, visibility, logger, client):
    """Set the deployment's visibility to tenant

    `DEPLOYMENT_ID` is the id of the deployment to update
    """
    # Validate client-side first so we fail fast on bad values.
    validate_visibility(visibility, valid_values=VISIBILITY_EXCEPT_PRIVATE)
    status_codes = [400, 403, 404]
    # prettify_client_error maps these REST errors to friendly CLI errors.
    with prettify_client_error(status_codes, logger):
        client.deployments.set_visibility(deployment_id, visibility)
        logger.info('Deployment `{0}` was set to {1}'.format(deployment_id,
                                                             visibility))
@cfy.command(name='inputs', short_help='Show deployment inputs [locally]')
@cfy.options.common_options
@cfy.options.blueprint_id(required=True)
@cfy.pass_logger
def local_inputs(blueprint_id, logger):
    """Display inputs for the execution
    """
    # Load the locally-initialized environment and pretty-print its inputs.
    environment = load_env(blueprint_id)
    plan_inputs = environment.plan['inputs'] or {}
    logger.info(json.dumps(plan_inputs, sort_keys=True, indent=2))
@cfy.command(name='outputs', short_help='Show deployment outputs [locally]')
@cfy.options.common_options
@cfy.options.blueprint_id(required=True)
@cfy.pass_logger
def local_outputs(blueprint_id, logger):
    """Display outputs for the execution
    """
    # Load the locally-initialized environment and pretty-print its outputs.
    environment = load_env(blueprint_id)
    evaluated_outputs = environment.outputs() or {}
    logger.info(json.dumps(evaluated_outputs, sort_keys=True, indent=2))
@deployments.command(name='summary',
                     short_help='Retrieve summary of deployment details '
                                '[manager only]')
@cfy.argument('target_field', type=click.Choice(DEPLOYMENTS_SUMMARY_FIELDS))
@cfy.argument('sub_field', type=click.Choice(DEPLOYMENTS_SUMMARY_FIELDS),
              default=None, required=False)
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='summary')
@cfy.options.group_id_filter
@cfy.options.all_tenants
@cfy.pass_logger
@cfy.pass_client()
def summary(target_field, sub_field, group_id, logger, client, tenant_name,
            all_tenants):
    """Retrieve summary of deployments, e.g. a count of each deployment with
    the same blueprint ID.

    `TARGET_FIELD` is the field to summarise deployments on.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Retrieving summary of deployments on field {field}'.format(
        field=target_field))
    # Local renamed from `summary` so it no longer shadows this function.
    summary_response = client.summary.deployments.get(
        _target_field=target_field,
        _sub_field=sub_field,
        _all_tenants=all_tenants,
        deployment_group_id=group_id,
    )
    columns, items = structure_summary_results(
        summary_response.items,
        target_field,
        sub_field,
        'deployments',
    )
    print_data(
        columns,
        items,
        'Deployment summary by {field}'.format(field=target_field),
    )
@cfy.command(name='set-site',
             short_help="Set the deployment's site [manager only]")
@cfy.argument('deployment-id')
@cfy.options.site_name
@cfy.options.detach_site
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def manager_set_site(deployment_id, site_name, detach_site, client, logger):
    """Set the deployment's site

    `DEPLOYMENT_ID` is the id of the deployment to update
    """
    # At least one of --site-name / --detach-site must be supplied.
    if not (site_name or detach_site):
        raise CloudifyCliError(
            'Must provide either a `--site-name` of a valid site or '
            '`--detach-site` (for detaching the current site of '
            'the given deployment)'
        )
    client.deployments.set_site(deployment_id,
                                site_name=site_name,
                                detach_site=detach_site)
    if detach_site:
        logger.info('The site of `{0}` was detached'.format(deployment_id))
    else:
        logger.info('The site of `{0}` was set to {1}'.format(deployment_id,
                                                              site_name))
@deployments.group(name='labels',
                   short_help="Handle a deployment's labels")
@cfy.options.common_options
def labels():
    """Command group for deployment label operations."""
    # Group-level guard: all label subcommands need an initialized profile.
    if not env.is_initialized():
        env.raise_uninitialized()
@labels.command(name='list',
                short_help="List the labels of a specific deployment")
@cfy.argument('deployment-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list_deployment_labels(deployment_id,
                           logger,
                           client,
                           tenant_name):
    """List all labels attached to the given deployment."""
    # Delegates to the shared labels helper with the deployments API.
    list_labels(deployment_id, 'deployment', client.deployments,
                logger, tenant_name)
@labels.command(name='add',
                short_help="Add labels to a specific deployment")
@cfy.argument('labels-list',
              callback=cfy.parse_and_validate_labels)
@cfy.argument('deployment-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def add_deployment_labels(labels_list,
                          deployment_id,
                          logger,
                          client,
                          tenant_name):
    """
    LABELS_LIST: <key>:<value>,<key>:<value>.
    Any comma and colon in <value> must be escaped with '\\'.
    """
    # Delegates to the shared labels helper with the deployments API.
    add_labels(deployment_id, 'deployment', client.deployments, labels_list,
               logger, tenant_name)
@labels.command(name='delete',
                short_help="Delete labels from a specific deployment")
@cfy.argument('label', callback=cfy.parse_and_validate_label_to_delete)
@cfy.argument('deployment-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete_deployment_labels(label,
                             deployment_id,
                             logger,
                             client,
                             tenant_name):
    """
    LABEL: A mixed list of labels and keys, i.e.
    <key>:<value>,<key>,<key>:<value>. If <key> is provided,
    all labels associated with this key will be deleted from the deployment.
    Any comma and colon in <value> must be escaped with `\\`
    """
    # Delegates to the shared labels helper with the deployments API.
    delete_labels(deployment_id, 'deployment', client.deployments, label,
                  logger, tenant_name)
@deployments.group(name='modifications',
                   short_help="Handle the deployments' modifications")
@cfy.options.common_options
def modifications():
    """Command group for deployment-modification operations."""
    # Group-level guard: subcommands need an initialized profile.
    if not env.is_initialized():
        env.raise_uninitialized()
@modifications.command(name='list',
                       short_help="List the deployments' modifications")
@cfy.argument('deployment-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='deployment')
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list_modifications(deployment_id,
                       pagination_offset,
                       pagination_size,
                       logger,
                       client,
                       tenant_name):
    """List the modifications of a deployment, one page at a time."""
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Listing modifications of the deployment %s...', deployment_id)
    deployment_modifications = client.deployment_modifications.list(
        deployment_id,
        _offset=pagination_offset,
        _size=pagination_size,
    )
    # Flatten each modification's nested `context` dict into top-level
    # keys so the table printer can show them as columns.
    flattened = [dict(dm, **dm.context) if dm.get('context') else dm
                 for dm in deployment_modifications]
    total = deployment_modifications.metadata.pagination.total
    print_data(DEPLOYMENT_MODIFICATION_COLUMNS, flattened,
               'Deployment modifications:')
    logger.info('Showing %d of %d deployment modifications',
                len(deployment_modifications), total)
@modifications.command(name='get',
                       short_help="Retrieve information for a deployment's "
                                  "modification")
@cfy.argument('deployment-modification-id')
@cfy.options.tenant_name(required=False,
                         resource_name_for_help='deployment modification')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get_modification(deployment_modification_id,
                     logger,
                     client,
                     tenant_name):
    """Show the details of a single deployment modification."""
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Retrieving deployment modification %s...',
                deployment_modification_id)
    deployment_modification = client.deployment_modifications.get(
        deployment_modification_id)
    _print_deployment_modification(deployment_modification)
@modifications.command(name='rollback',
                       short_help="Rollback a deployment's modification")
@cfy.argument('deployment-modification-id')
@cfy.options.tenant_name(required=False,
                         resource_name_for_help='deployment modification')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def rollback_modification(deployment_modification_id,
                          logger,
                          client,
                          tenant_name):
    """Roll back a deployment modification and show its resulting state."""
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Rolling back a deployment modification %s...',
                deployment_modification_id)
    deployment_modification = client.deployment_modifications.rollback(
        deployment_modification_id)
    _print_deployment_modification(deployment_modification)
def _print_deployment_modification(deployment_modification):
    """Pretty-print a deployment modification, including node-instance
    summaries in the non-JSON output mode.

    :param deployment_modification: a DeploymentModification response object.
    """
    def print_node_instance(genre, title, modified_only=False):
        # Print one category ("genre") of node instances, optionally
        # filtered to instances that were actually modified.
        if genre not in deployment_modification['node_instances'] or \
                not deployment_modification['node_instances'].get(genre):
            return
        print_list(
            [
                '{0} ({1})'.format(ni.get('id'), ni.get('node_id'))
                for ni in deployment_modification['node_instances'].get(genre)
                if not modified_only or ni.get('modification')
            ],
            title
        )
    columns = DEPLOYMENT_MODIFICATION_COLUMNS
    if get_global_json_output():
        # Concatenate instead of `+=` so the module-level column list is
        # never mutated in place across calls.
        columns = columns + MACHINE_READABLE_MODIFICATION_COLUMNS
    # Flatten the nested `context` dict into top-level keys for display.
    dm = (dict(deployment_modification, **deployment_modification.context)
          if deployment_modification.context else deployment_modification)
    print_single(columns, dm, 'Deployment Modification:')
    if not get_global_json_output():
        if 'modified_nodes' in dm and dm['modified_nodes']:
            print_list(dm['modified_nodes'].keys(), 'Modified nodes:')
        if 'node_instances' in dm and dm['node_instances']:
            print_node_instance('before_modification',
                                '\nNode instances before modifications:')
            print_node_instance('before_rollback',
                                '\nNode instances before rollback:')
            print_node_instance('added_and_related',
                                '\nAdded node instances:',
                                modified_only=True)
            print_node_instance('removed_and_related',
                                '\nRemoved node instances:',
                                modified_only=True)
@deployments.group('groups')
def groups():
    """Manage deployment groups"""
    # Click group stub: subcommands are registered below.
def _format_group(g):
"""Format a restclient deployment group for display"""
return {
'id': g['id'],
'description': g['description'],
'default_blueprint_id': g['default_blueprint_id'],
'deployments': str(len(g['deployment_ids']))
}
@groups.command('list', short_help='List all deployment groups')
@cfy.pass_client()
@cfy.pass_logger
def groups_list(client, logger):
    """Print a table of all deployment groups."""
    # Flatten each group into the display columns before printing.
    groups = [_format_group(g) for g in client.deployment_groups.list()]
    print_data(DEP_GROUP_COLUMNS, groups, 'Deployment groups:')
@groups.command('create', short_help='Create a new deployment group')
@click.argument('deployment-group-name')
@cfy.options.inputs
@cfy.options.group_default_blueprint
@cfy.options.group_description
@cfy.pass_client()
@cfy.pass_logger
def groups_create(deployment_group_name, inputs, default_blueprint,
                  description, client, logger):
    """Create a deployment group with optional default inputs/blueprint."""
    # PUT semantics: same call as `groups update` (create-or-replace).
    client.deployment_groups.put(
        deployment_group_name,
        default_inputs=inputs,
        blueprint_id=default_blueprint,
        description=description
    )
    logger.info('Group %s created', deployment_group_name)
@groups.command('delete', short_help='Delete a deployment group')
@click.argument('deployment-group-name')
@cfy.options.delete_deployments
@cfy.options.with_logs
@cfy.options.force(help=helptexts.FORCE_DELETE_DEPLOYMENT)
@cfy.pass_client()
@cfy.pass_logger
def groups_delete(deployment_group_name, delete_deployments, force, with_logs,
                  client, logger):
    """Delete a deployment group, optionally deleting its deployments too."""
    client.deployment_groups.delete(
        deployment_group_name,
        delete_deployments=delete_deployments,
        force=force,
        with_logs=with_logs,
    )
    logger.info('Group %s deleted', deployment_group_name)
@groups.command('update', short_help='Update a deployment group')
@click.argument('deployment-group-name')
@cfy.options.inputs
@cfy.options.group_default_blueprint
@cfy.options.group_description
@cfy.pass_client()
@cfy.pass_logger
def groups_update(deployment_group_name, inputs, default_blueprint,
                  description, client, logger):
    """Update a deployment group's default inputs/blueprint/description."""
    # PUT semantics: same call as `groups create` (create-or-replace).
    client.deployment_groups.put(
        deployment_group_name,
        default_inputs=inputs,
        blueprint_id=default_blueprint,
        description=description
    )
    logger.info('Group %s updated', deployment_group_name)
@groups.command('extend', short_help='Add deployments to a group')
@click.argument('deployment-group-name')
@cfy.options.group_deployment_id
@cfy.options.group_count
@cfy.options.deployment_group_filter_id
@cfy.options.deployment_filter_rules
@cfy.options.deployment_group_deployments_from_group
@cfy.options.into_environments_group
@cfy.pass_client()
@cfy.pass_logger
def groups_extend(deployment_group_name, deployment_id, count, filter_id,
                  filter_rules, from_group, environments_group,
                  client, logger):
    """Add deployments to a group by id, filter, source group, or by
    creating new child deployments under an environments group."""
    new_deployments = []
    if environments_group:
        # For every environment in the source group, request one new child
        # deployment. '{uuid}' / '{blueprint_id}' are placeholder patterns
        # (presumably expanded server-side — confirm against the manager API).
        for deployment in client.deployments.list(
                deployment_group_id=environments_group):
            if deployment.is_environment():
                new_deployments.append({
                    'id': '{uuid}',
                    'display_name': '{blueprint_id}-{uuid}',
                    'labels': [{'csys-obj-parent': deployment.id}],
                })
    group = client.deployment_groups.add_deployments(
        deployment_group_name,
        filter_id=filter_id,
        filter_rules=filter_rules,
        count=count,
        deployment_ids=deployment_id or None,
        deployments_from_group=from_group,
        new_deployments=new_deployments or None,
    )
    logger.info(
        'Group %s updated. It now contains %d deployments',
        deployment_group_name, len(group.deployment_ids)
    )
@groups.command('shrink', short_help='Remove deployments from a group')
@click.argument('deployment-group-name')
@cfy.options.group_deployment_id
@cfy.options.deployment_group_filter_id
@cfy.options.deployment_filter_rules
@cfy.options.deployment_group_deployments_from_group
@cfy.pass_client()
@cfy.pass_logger
def groups_shrink(deployment_group_name, deployment_id, filter_id,
                  filter_rules, from_group, client, logger):
    """Remove deployments from a group by id, filter, or source group."""
    group = client.deployment_groups.remove_deployments(
        deployment_group_name,
        deployment_id,
        filter_id=filter_id,
        filter_rules=filter_rules,
        deployments_from_group=from_group,
    )
    # Describe which removal criteria were used in the final log line.
    removed_what_message = []
    if deployment_id:
        removed_what_message.append(', '.join(deployment_id))
    if filter_id:
        removed_what_message.append('given by filter {0}'.format(filter_id))
    if from_group:
        removed_what_message.append(
            'belonging to the group {0}'.format(from_group))
    logger.info(
        'Unlinked deployments %s. Group %s now has %d deployments',
        '; '.join(removed_what_message), deployment_group_name,
        len(group.deployment_ids)
    )
@groups.group(name='labels', short_help="Handle a group's labels")
@cfy.options.common_options
def group_labels():
    """Command group for deployment-group label operations."""
    # Group-level guard: subcommands need an initialized profile.
    if not env.is_initialized():
        env.raise_uninitialized()
@group_labels.command(name='list', short_help="List the labels of a group")
@click.argument('deployment-group-name')
@cfy.options.tenant_name(required=False, resource_name_for_help='group')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list_group_labels(deployment_group_name, logger, client, tenant_name):
    """List all labels attached to the given deployment group."""
    # Delegates to the shared labels helper with the deployment-groups API.
    list_labels(deployment_group_name, 'deployment group',
                client.deployment_groups, logger, tenant_name)
@group_labels.command(name='add', short_help="Add labels to a | |
des):
lines = self.lines
handled = True
# Create map to es2 function objects
es2funcs = {}
for f in des.es2.group:
cname = f.shortname
es2funcs[cname] = f
if des.name == 'uniform':
for t in ('float', 'int'):
for i in (1,2,3,4):
args = ', '.join(['v%i'%j for j in range(1,i+1)])
cname = 'uniform%i%s' % (i, t[0])
sig = '%s(location, %s)' % (apiname(cname), args)
self._add_group_function(des, sig, es2funcs[cname])
for t in ('float', 'int'):
for i in (1,2,3,4):
cname = 'uniform%i%sv' % (i, t[0])
sig = '%s(location, count, values)' % apiname(cname)
self._add_group_function(des, sig, es2funcs[cname])
elif des.name == 'uniformMatrix':
for i in (2,3,4):
cname = 'uniformMatrix%ifv' % i
sig = '%s(location, count, transpose, values)' % apiname(cname)
self._add_group_function(des, sig, es2funcs[cname])
elif des.name == 'vertexAttrib':
for i in (1,2,3,4):
args = ', '.join(['v%i'%j for j in range(1,i+1)])
cname = 'vertexAttrib%if' % i
sig = '%s(index, %s)' % (apiname(cname), args)
self._add_group_function(des, sig, es2funcs[cname])
elif des.name == 'texParameter':
for t in ('float', 'int'):
cname = 'texParameter%s' % t[0]
sig = '%s(target, pname, param)' % apiname(cname)
self._add_group_function(des, sig, es2funcs[cname])
else:
handled = False
if handled:
functions_auto.add(des.name)
else:
functions_todo.add(des.name)
lines.append('# todo: Dont know group %s' % des.name)
def _add_function(self, des):
    """Emit code for a single GL function description.

    Must be overloaded in a subclass.
    """
    raise NotImplementedError()
def _add_group_function(self, des, sig, es2func):
    """Emit code for one signature of a grouped GL function.

    Must be overloaded in a subclass.
    """
    raise NotImplementedError()
class ProxyApiGenerator(ApiGenerator):
    """ Generator for the general proxy class that will be loaded into gloo.gl.
    """

    filename = os.path.join(GLDIR, '_proxy.py')

    DESCRIPTION = 'Base proxy API for GL ES 2.0.'
    PREAMBLE = '''
class BaseGLProxy(object):
    """ Base proxy class for the GL ES 2.0 API. Subclasses should
    implement __call__ to process the API calls.
    """

    def __call__(self, funcname, returns, *args):
        raise NotImplementedError()
'''

    def _returns(self, des):
        """Return True when the GL call yields a value, judged by its
        name prefix (get/is/check/create/read)."""
        # Fixed local name: was misspelled ``shortame``.
        shortname = des.name
        for prefix in ("get", "is", "check", "create", "read"):
            if shortname.startswith(prefix):
                return True
        return False

    def _add_function(self, des):
        """Emit a proxy method that forwards the call through __call__."""
        ret = self._returns(des)
        prefix = 'return ' if ret else ''
        argstr = ', '.join(des.args)
        self.lines.append('    def %s(self, %s):' % (des.apiname, argstr))
        self.lines.append('        %sself("%s", %r, %s)' %
                          (prefix, apiname(des.name), ret, argstr))

    def _add_group_function(self, des, sig, es2func):
        """Emit a proxy method for one signature of a function group."""
        ret = self._returns(des)
        prefix = 'return ' if ret else ''
        funcname = apiname(sig.split('(')[0])
        args = sig.split('(', 1)[1].split(')')[0]
        self.lines.append('    def %s(self, %s):' % (funcname, args))
        self.lines.append('        %sself("%s", %r, %s)' %
                          (prefix, funcname, ret, args))
class Gl2ApiGenerator(ApiGenerator):
    """ Generator for the gl2 (desktop) backend.

    Emits Python wrappers that lazily resolve each GL function via
    ``_get_gl_func`` on first call and cache it on the wrapper itself.
    """

    filename = os.path.join(GLDIR, '_gl2.py')
    write_c_sig = True                      # emit the C signature as a comment
    define_argtypes_in_module = False       # gl2 resolves functions lazily instead

    DESCRIPTION = "Subset of desktop GL API compatible with GL ES 2.0"
    PREAMBLE = """
import ctypes
from .gl2 import _lib, _get_gl_func
"""

    def _get_argtype_str(self, es2func):
        """Return (restype, argtypes) as source-code strings for ctypes."""
        # NOTE(review): ce_arg_types is computed but not used in this method.
        ce_arg_types = [arg.ctype for arg in es2func.args[1:]]
        ct_arg_types = [KNOWN_TYPES.get(arg.ctype, None) for arg in es2func.args]
        # Set argument types on ctypes function
        if None in ct_arg_types:
            argstr = 'UNKNOWN_ARGTYPES'
        elif es2func.group:
            argstr = 'UNKNOWN_ARGTYPES'
        else:
            argstr = ', '.join(['ctypes.%s' % t[1] for t in ct_arg_types[1:]])
            argstr = '()' if not argstr else '(%s,)' % argstr
        # Set output arg (if available); element 0 is the return type.
        if ct_arg_types[0][0] != type(None):
            resstr = 'ctypes.%s' % ct_arg_types[0][1]
        else:
            resstr = 'None'
        return resstr, argstr

    def _write_argtypes(self, es2func):
        """Emit module-level ``_lib.X.argtypes``/``restype`` assignments."""
        lines = self.lines
        # NOTE(review): ce_arg_types is computed but not used in this method.
        ce_arg_types = [arg.ctype for arg in es2func.args[1:]]
        ct_arg_types = [KNOWN_TYPES.get(arg.ctype, None) for arg in es2func.args]
        # Set argument types on ctypes function
        if None in ct_arg_types:
            lines.append('# todo: unknown argtypes')
        elif es2func.group:
            lines.append('# todo: oops, dont set argtypes for group!')
        else:
            if ct_arg_types[1:]:
                argstr = ', '.join(['ctypes.%s' % t[1] for t in ct_arg_types[1:]])
                lines.append('_lib.%s.argtypes = %s,' % (es2func.glname, argstr))
            else:
                lines.append('_lib.%s.argtypes = ()' % es2func.glname)
        # Set output arg (if available)
        if ct_arg_types[0][0] != type(None):
            lines.append('_lib.%s.restype = ctypes.%s' % (es2func.glname, ct_arg_types[0][1]))

    def _native_call_line(self, name, es2func, cargstr=None, prefix='', indent=4):
        """Build the try/except snippet that resolves the native function
        once, caches it as ``<apiname>._native``, then calls it."""
        #'_lib.%s(%s)' % (des.es2.glname, cargstr)
        resstr, argstr = self._get_argtype_str(es2func)
        if cargstr is None:
            cargs = [arg.name for arg in es2func.args[1:]]
            cargstr = ', '.join(cargs)
        lines = 'try:\n'
        lines += '    nativefunc = %s._native\n' % apiname(name)
        lines += 'except AttributeError:\n'
        lines += '    nativefunc = %s._native = _get_gl_func("%s", %s, %s)\n' % (
            apiname(name), es2func.glname, resstr, argstr)
        lines += '%snativefunc(%s)\n' % (prefix, cargstr)
        #lines += 'check_error("%s")' % name
        # Re-indent the generated snippet to sit inside the wrapper body.
        lines = [' '*indent + line for line in lines.splitlines()]
        return '\n'.join(lines)

    def _add_function(self, des):
        """Emit the full Python wrapper for one non-group GL function."""
        lines = self.lines
        es2func = des.es2
        # Write arg types
        if self.define_argtypes_in_module:
            self._write_argtypes(es2func)
        # Get names and types of C-API
        ce_arg_types = [arg.ctype for arg in es2func.args[1:]]
        ce_arg_names = [arg.name for arg in es2func.args[1:]]
        ct_arg_types = [KNOWN_TYPES.get(arg.ctype, None) for arg in es2func.args]
        ct_arg_types_easy = [EASY_TYPES.get(arg.ctype, None) for arg in es2func.args]
        # Write C function signature, for debugging and development
        if self.write_c_sig:
            argnamesstr = ', '.join([c_type+' '+c_name for c_type, c_name in zip(ce_arg_types, ce_arg_names)])
            lines.append('# %s = %s(%s)' % (es2func.args[0].ctype, es2func.oname, argnamesstr))
        # Write Python function def
        lines.append('def %s(%s):' % (des.apiname, ', '.join(des.args)))
        # Construct C function call
        cargs = [arg.name for arg in des.es2.args[1:]]
        cargstr = ', '.join(cargs)
        #callline = '_lib.%s(%s)' % (des.es2.glname, cargstr)
        # Now write the body of the function ...
        if des.ann:
            prefix = 'res = '
            # Annotation available: it supplies the body around the raw call.
            functions_anno.add(des.name)
            callline = self._native_call_line(des.name, es2func, prefix=prefix)
            lines.extend( des.ann.get_lines(callline, 'gl') )
        elif es2func.group:
            # Group? Should have gone through _add_group_function instead.
            functions_todo.add(des.name)
            lines.append('    pass # todo: Oops. this is a group!')
        elif None in ct_arg_types_easy:
            functions_todo.add(des.name)
            lines.append('    pass # todo: Not all easy types!')
        elif des.args != [arg.name for arg in des.wgl.args[1:]]:
            functions_todo.add(des.name)
            lines.append('    pass # todo: ES 2.0 and WebGL args do not match!')
        else:
            # This one is easy!
            functions_auto.add(des.name)
            # Get prefix
            prefix = ''
            if ct_arg_types[0][0] != type(None):
                prefix = 'return '
            elif des.es2.shortname.startswith('get'):
                raise RuntimeError('Get func returns void?')
            # Set string
            callline = self._native_call_line(des.name, des.es2, prefix=prefix)
            lines.append(callline)
        if 'gl2' in self.__class__.__name__.lower():
            # Post-fix special cases for gl2. See discussion in #201
            # glDepthRangef and glClearDepthf are not always available,
            # and sometimes they do not work if they are
            if es2func.oname in ('glDepthRangef', 'glClearDepthf'):
                # Rewrite the just-emitted call lines (scan back until a
                # blank/comment line) to the double-precision variants.
                for i in range(1,10):
                    line = lines[-i]
                    if not line.strip() or line.startswith('#'):
                        break
                    line = line.replace('c_float', 'c_double')
                    line = line.replace('glDepthRangef', 'glDepthRange')
                    line = line.replace('glClearDepthf', 'glClearDepth')
                    lines[-i] = line

    def _add_group_function(self, des, sig, es2func):
        """Emit the wrapper for one expanded member of a function group."""
        lines = self.lines
        handled = True
        call_line = self._native_call_line
        if self.define_argtypes_in_module:
            self._write_argtypes(es2func)
        funcname = sig.split('(', 1)[0]
        args = sig.split('(', 1)[1].split(')')[0]
        # NOTE(review): cfuncname is computed but not used below.
        cfuncname = 'gl' + funcname[0].upper() + funcname[1:]
        if des.name == 'uniform':
            if funcname[-1] != 'v':
                lines.append('def %s:' % sig)
                lines.append(call_line(funcname, es2func, args))
            else:
                # Vector variant: marshal the Python sequence into a ctypes array.
                t = {'f':'float', 'i':'int'}[funcname[-2]]
                lines.append('def %s:' % sig)
                lines.append('    values = [%s(val) for val in values]' % t)
                lines.append('    values = (ctypes.c_%s*len(values))(*values)' % t)
                lines.append(call_line(funcname, es2func, 'location, count, values'))
        elif des.name == 'uniformMatrix':
            # Matrices are passed as a contiguous float32 numpy array pointer.
            lines.append('def %s:' % sig)
            lines.append('    if not values.flags["C_CONTIGUOUS"]:')
            lines.append('        values = values.copy()')
            lines.append('    assert values.dtype.name == "float32"')
            lines.append('    values_ = values')
            lines.append('    values = values_.ctypes.data_as(ctypes.POINTER(ctypes.c_float))')
            lines.append(call_line(funcname, es2func, 'location, count, transpose, values'))
        elif des.name == 'vertexAttrib':
            lines.append('def %s:' % sig)
            lines.append(call_line(funcname, es2func, args))
        elif des.name == 'texParameter':
            lines.append('def %s:' % sig)
            lines.append(call_line(funcname, es2func, args))
        else:
            raise ValueError('unknown group func')
class Es2ApiGenrator(Gl2ApiGenerator):
    """ Generator for the es2 backend (i.e. Angle on Windows). Very
    similar to the gl2 API, but we do not need that deferred loading
    of GL functions here.

    NOTE(review): class name looks like a typo for "Es2ApiGenerator",
    but renaming would break external references - left as-is.
    """

    filename = os.path.join(GLDIR, '_es2.py')
    write_c_sig = True
    define_argtypes_in_module = True   # argtypes are set eagerly at module level

    DESCRIPTION = "GL ES 2.0 API (via Angle/DirectX on Windows)"
    PREAMBLE = """
import ctypes
from .es2 import _lib
"""

    def _native_call_line(self, name, es2func, cargstr=None, prefix='', indent=4):
        """Emit a single direct ``_lib.<glname>(...)`` call line (no lazy
        lookup, unlike the gl2 base class)."""
        # NOTE(review): the result of _get_argtype_str is unused here -
        # presumably kept so malformed descriptors still fail early; confirm.
        resstr, argstr = self._get_argtype_str(es2func)
        if cargstr is None:
            cargs = [arg.name for arg in es2func.args[1:]]
            cargstr = ', '.join(cargs)
        return ' '*indent + '%s_lib.%s(%s)' % (prefix, es2func.glname, cargstr)
class PyOpenGL2ApiGenrator(ApiGenerator):
""" Generator for a fallback pyopengl backend.
"""
filename = os.path.join(GLDIR, '_pyopengl2.py')
DESCRIPTION = 'Proxy API for GL ES 2.0 subset, via the PyOpenGL library.'
PREAMBLE = """
import ctypes
from OpenGL import GL
import OpenGL.GL.framebufferobjects as FBO
"""
def __init__(self):
ApiGenerator.__init__(self)
| |
<gh_stars>1-10
#!python
# set PYSPARK_DRIVER_PYTHON=python
# set PYSPARK_DRIVER_PYTHON_OPTS=
# spark-submit --master local[7] --deploy-mode client SectionPerfTest.py
import gc
import scipy.stats, numpy
import time
import random
from LinearRegression import linear_regression
from pyspark.sql import SparkSession
from pyspark.storagelevel import StorageLevel
spark = None
sc = None
log = None
def createSparkContext():
    """Create (or fetch) the SparkSession tuned for this perf test.

    Side effect: rebinds the module-level ``spark`` global.

    Returns:
        SparkSession: the configured session.
    """
    global spark
    spark = SparkSession \
        .builder \
        .appName("SectionPerfTest") \
        .config("spark.sql.shuffle.partitions", 16) \
        .config("spark.ui.enabled", "false") \
        .config("spark.rdd.compress", "false") \
        .config("spark.worker.cleanup.enabled", "true") \
        .config("spark.default.parallelism", 7) \
        .config("spark.driver.memory", "2g") \
        .config("spark.executor.memory", "3g") \
        .config("spark.executor.memoryOverhead", "1g") \
        .config("spark.python.worker.reuse", "true") \
        .config("spark.port.maxRetries","1") \
        .config("spark.rpc.retry.wait","10s") \
        .config("spark.reducer.maxReqsInFlight","1") \
        .config("spark.network.timeout","30s") \
        .config("spark.shuffle.io.maxRetries","10") \
        .config("spark.shuffle.io.retryWait","60s") \
        .config("spark.sql.execution.arrow.enabled", "true") \
        .enableHiveSupport() \
        .getOrCreate()
    return spark
def setupSparkContext(in_spark):
    """Wire the module-level globals from an existing SparkSession.

    Binds ``spark``, ``sc`` and the JVM-side log4j ``log`` globals and
    sets the checkpoint directory used by the aggregation strategies.

    Returns:
        tuple: (SparkContext, log4j logger).
    """
    global spark, sc, log
    spark = in_spark
    sc = spark.sparkContext
    sc.setLogLevel("WARN")
    # Reach into the JVM for log4j so our messages land in Spark's logs.
    log4jLogger = sc._jvm.org.apache.log4j
    log = log4jLogger.LogManager.getLogger(__name__)
    log.info("script initialized")
    sc.setCheckpointDir("SectionAggCheckpoint")
    return sc, log
# http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
def detectCPUs():
    """
    Detects the number of CPUs on a system. Cribbed from pp.

    Returns:
        int: the detected logical CPU count, or 1 when it cannot be
        determined.
    """
    # Linux, Unix and MacOS:
    if hasattr(os, "sysconf"):
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
            # Linux & Unix:
            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(ncpus, int) and ncpus > 0:
                return ncpus
        else:  # OSX:
            # os.popen2 was removed in Python 3; os.popen exposes stdout.
            return int(os.popen("sysctl -n hw.ncpu").read())
    # Windows:
    if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
        if ncpus > 0:
            return ncpus
    return 1  # Default
# Tuning constants for the perf-test harness.
NumExecutors = 7                         # matches local[7] in the spark-submit line above
MaximumProcessableSegment = pow(10, 5)   # largest segment one pass should buffer

# NOTE(review): these imports appear mid-file in the original script; kept
# in place so module side-effect order is unchanged.
import datetime as dt
import random
import re
import collections
import math
import os
import pyspark.sql.functions as func
import pyspark.sql.types as DataTypes
from pyspark.sql.window import Window
from pyspark.sql import Row

#region GenData
# One typed record per line kind in the generated student data file
# (tags S / TH / C / TF in parseLineToTypes).
StudentHeader = collections.namedtuple("StudentHeader",
    ["StudentId", "StudentName"])
TrimesterHeader = collections.namedtuple("TrimesterHeader",
    ["Date", "WasAbroad"])
ClassLine = collections.namedtuple("ClassLine",
    ["Dept", "Credits", "Grade"])
TrimesterFooter = collections.namedtuple("TrimesterFooter",
    ["Major", "GPA", "Credits"])
# Final per-student aggregation result.
StudentSummary = collections.namedtuple("StudentSummary",
    ["StudentId", "StudentName", "SourceLines", "GPA", "Major", "MajorGPA"])
StudentSummaryStruct = DataTypes.StructType([
    DataTypes.StructField("StudentId", DataTypes.IntegerType(), True),
    DataTypes.StructField("StudentName", DataTypes.StringType(), True),
    DataTypes.StructField("SourceLines", DataTypes.IntegerType(), True),
    DataTypes.StructField("GPA", DataTypes.DoubleType(), True),
    DataTypes.StructField("Major", DataTypes.StringType(), True),
    DataTypes.StructField("MajorGPA", DataTypes.DoubleType(), True),
])
# Wide "sparse" schema: every line type mapped onto one row layout with
# the columns that do not apply left as None (see parseLineToRow).
SparseLineSchema = DataTypes.StructType([
    DataTypes.StructField("Type", DataTypes.StringType(), True),
    DataTypes.StructField("StudentId", DataTypes.IntegerType(), True),
    DataTypes.StructField("StudentName", DataTypes.StringType(), True),
    DataTypes.StructField("Date", DataTypes.StringType(), True),
    DataTypes.StructField("WasAbroad", DataTypes.BooleanType(), True),
    DataTypes.StructField("Dept", DataTypes.IntegerType(), True),
    DataTypes.StructField("ClassCredits", DataTypes.IntegerType(), True),
    DataTypes.StructField("ClassGrade", DataTypes.IntegerType(), True),
    DataTypes.StructField("Major", DataTypes.IntegerType(), True),
    DataTypes.StructField("TriGPA", DataTypes.DoubleType(), True),
    DataTypes.StructField("TriCredits", DataTypes.IntegerType(), True),
])
# (Index, Value) pair from zipWithIndex - preserves source-line order.
LabeledTypedRow = collections.namedtuple("LabeledTypedRow",
    ["Index", "Value"])
NumDepts = 4  # Credits/WeightedGradeTotal lists are indexed by dept, length NumDepts
#endregion
# Registry entry describing one benchmark strategy.
TestMethod = collections.namedtuple("TestMethod",
    ["name", "interface", "scale", "delegate"])
test_method_list = []
def count_iter(iterator):
    """Consume *iterator* and return the number of items it yielded."""
    return sum(1 for _ in iterator)
#region parsers
def parseLineToTypes(line):
    """Parse one CSV line into its typed namedtuple record.

    The first field is a tag: S (student header), TH (trimester header),
    C (class line) or TF (trimester footer).
    """
    fields = line.split(',')
    tag = fields[0]
    if tag == 'S':
        return StudentHeader(StudentId=int(fields[1]), StudentName=fields[2])
    if tag == 'TH':
        return TrimesterHeader(Date=fields[1], WasAbroad=(fields[2] == 'True'))
    if tag == 'C':
        return ClassLine(Dept=int(fields[1]), Credits=int(fields[2]), Grade=int(fields[3]))
    if tag == 'TF':
        return TrimesterFooter(Major=int(fields[1]), GPA=float(fields[2]), Credits=int(fields[3]))
    raise Exception("Malformed data " + line)
def parseLineToRow(line):
    """Parse one CSV line into a sparse Row matching SparseLineSchema.

    All columns start as None; only the fields relevant to the line's
    tag are filled in.
    """
    fields = line.split(',')
    tag = fields[0]
    # Field order here must match SparseLineSchema / the original layout.
    values = dict(StudentId=None, StudentName=None,
                  Date=None, WasAbroad=None,
                  Dept=None, ClassCredits=None, ClassGrade=None,
                  Major=None, TriGPA=None, TriCredits=None)
    if tag == 'S':
        values.update(StudentId=int(fields[1]), StudentName=fields[2])
    elif tag == 'TH':
        values.update(Date=fields[1], WasAbroad=(fields[2] == 'True'))
    elif tag == 'C':
        values.update(Dept=int(fields[1]), ClassCredits=int(fields[2]),
                      ClassGrade=int(fields[3]))
    elif tag == 'TF':
        values.update(Major=int(fields[1]), TriGPA=float(fields[2]),
                      TriCredits=int(fields[3]))
    else:
        raise Exception("Malformed data " + line)
    return Row(Type=tag, **values)
def dfSparseRowsFactory(filename, numPartitions=None):
    """Read *filename* and return a DataFrame of sparse rows.

    Args:
        filename: path of the generated student data file.
        numPartitions: minimum partitions for the text read (default 1).
    """
    partitions = numPartitions or 1
    rows = sc.textFile(filename, minPartitions=partitions).map(parseLineToRow)
    return spark.createDataFrame(rows, SparseLineSchema)
def rddTypedWithIndexFactory(filename, numPartitions=None):
    """Read *filename* into an RDD of LabeledTypedRow(Index, Value).

    The index is the global source-line number from zipWithIndex, which
    downstream snippet-merging relies on for contiguity checks.
    """
    raw = sc.textFile(filename, minPartitions=(numPartitions or 1))
    indexed = raw.map(parseLineToTypes).zipWithIndex()
    return indexed.map(
        lambda pair: LabeledTypedRow(Index=pair[1], Value=pair[0]))
#endregion
#region Mutable
class MutableTrimester:
    """Mutable accumulator for one trimester's class lines."""

    def __init__(self, date, wasAbroad):
        # NOTE(review): date/wasAbroad are accepted but never stored -
        # confirm this is intentional.
        self.SourceLines = 1
        self.Credits = [0] * NumDepts
        self.WeightedGradeTotal = [0] * NumDepts
        self.Major = None

    def addClass(self, dept, credits, grade):
        """Fold one class line into the per-department totals."""
        self.SourceLines += 1
        self.Credits[dept] += credits
        self.WeightedGradeTotal[dept] += credits * grade

    def addFooter(self, major, gpa, credits):
        """Record the trimester footer; gpa/credits come from the file but
        totals are derived from the individual class lines instead."""
        self.SourceLines += 1
        self.Major = major

    def _asdict(self):
        return {"Credits": list(self.Credits),
                "WGrade": list(self.WeightedGradeTotal),
                "Major": self.Major}
class MutableStudent:
    """Mutable accumulator for one student's records across trimesters."""

    def __init__(self, studentId, studentName):
        self.SourceLines = 1
        self.StudentId = studentId
        self.StudentName = studentName
        self.LastMajor = None
        self.Credits = [0] * NumDepts
        self.WeightedGradeTotal = [0] * NumDepts

    def addTrimester(self, trimester):
        """Fold a completed MutableTrimester into this student's totals."""
        self.SourceLines += trimester.SourceLines
        self.LastMajor = trimester.Major
        for dept in range(NumDepts):
            self.Credits[dept] += trimester.Credits[dept]
            self.WeightedGradeTotal[dept] += trimester.WeightedGradeTotal[dept]

    def gradeSummary(self):
        """Return an immutable StudentSummary snapshot of this student."""
        overall_gpa = sum(self.WeightedGradeTotal) / max(1, sum(self.Credits))
        if self.LastMajor is not None:
            major_gpa = (self.WeightedGradeTotal[self.LastMajor]
                         / max(1, self.Credits[self.LastMajor]))
        else:
            major_gpa = None
        return StudentSummary(
            StudentId=self.StudentId,
            StudentName=self.StudentName,
            SourceLines=self.SourceLines,
            Major=self.LastMajor,
            GPA=overall_gpa,
            MajorGPA=major_gpa)

    def gradeSummaryRow(self):
        # NOTE(review): ** unpacking requires a mapping; a namedtuple is
        # not one - confirm this path is actually exercised.
        return Row(**self.gradeSummary())

    def _asdict(self):
        return {"StudentId": self.StudentId, "LastMajor": self.LastMajor,
                "SourceLines": self.SourceLines, "Credits": list(self.Credits),
                "WGrade": list(self.WeightedGradeTotal)}
#endregion
#region aggregators
def aggregateTypedRowsToGrades(iterator):
    """Reduce an ordered stream of typed records to StudentSummary values.

    Assumes records arrive in file order: a StudentHeader opens a student,
    trimester header/class/footer records fill it in, and the next header
    (or end of stream) closes it out.
    """
    student = None
    trimester = None
    for lineno, rec in enumerate(iterator):
        kind = type(rec).__name__
        if kind == 'StudentHeader':
            # A new student begins; emit the previous one, if any.
            if student is not None:
                yield student.gradeSummary()
            student = MutableStudent(rec.StudentId, rec.StudentName)
        elif kind == 'TrimesterHeader':
            trimester = MutableTrimester(rec.Date, rec.WasAbroad)
        elif kind == 'ClassLine':
            trimester.addClass(rec.Dept, rec.Credits, rec.Grade)
        elif kind == 'TrimesterFooter':
            trimester.addFooter(rec.Major, rec.GPA, rec.Credits)
            student.addTrimester(trimester)
            trimester = None
        else:
            raise Exception(f"Unknown parsed row type {rec.__class__.__name__} on line {lineno}")
    # Flush the final student.
    if student is not None:
        yield student.gradeSummary()
#
def rowToStudentSummary(x):
    """Convert a DataFrame Row back into a StudentSummary namedtuple."""
    return StudentSummary(StudentId=x.StudentId,
                          StudentName=x.StudentName,
                          SourceLines=x.SourceLines,
                          Major=x.Major,
                          GPA=x.GPA,
                          MajorGPA=x.MajorGPA)
#
#endregion
#region Snippets
# A mergeable partial aggregation of consecutive source lines.  A snippet
# may begin mid-student (StudentId is None until a header line is seen);
# FirstLineIndex/LastLineIndex track the contiguous line range covered.
StudentSnippet = collections.namedtuple("StudentSnippet",
    ["StudentId", "StudentName",
     "FirstTrimester", "LastTrimester", "LastMajor", "Credits", "WeightedGradeTotal",
     "FirstLineIndex", "LastLineIndex"])
# A snippet known to span one complete student record, ready for
# StudentSnippetBuilder.gradeSummary.
CompletedStudent = collections.namedtuple("CompletedStudent",
    ["StudentId", "StudentName", "SourceLines", "LastMajor", "Credits", "WeightedGradeTotal",
     "FirstLineIndex", "LastLineIndex"])
class StudentSnippetBuilder:
    """Static helpers for building and merging StudentSnippet runs.

    Snippets are per-line partial aggregations; adjacent snippets are
    merged (order-sensitively) until a whole student record is covered,
    at which point it can be finalized as a CompletedStudent.
    """

    @staticmethod
    def studentSnippetFromTypedRow(lineIndex, rec):
        """Wrap a single parsed record in a one-line StudentSnippet."""
        credits = [0 for x in range(0, NumDepts)]
        weightedGradeTotal = [0 for x in range(0, NumDepts)]
        if rec.__class__.__name__ == 'StudentHeader':
            return StudentSnippet(
                StudentId=rec.StudentId, StudentName=rec.StudentName,
                FirstTrimester=None, LastTrimester=None,
                LastMajor=None,
                Credits=credits, WeightedGradeTotal=weightedGradeTotal,
                FirstLineIndex=lineIndex, LastLineIndex=lineIndex)
        elif rec.__class__.__name__ == 'TrimesterHeader':
            return StudentSnippet(
                StudentId=None, StudentName=None,
                FirstTrimester=rec.Date, LastTrimester=rec.Date,
                LastMajor=None,
                Credits=credits, WeightedGradeTotal=weightedGradeTotal,
                FirstLineIndex=lineIndex, LastLineIndex=lineIndex)
        elif rec.__class__.__name__ == 'ClassLine':
            # Fold the class line's credits straight into this snippet.
            credits[rec.Dept] += rec.Credits
            weightedGradeTotal[rec.Dept] += rec.Credits * rec.Grade
            return StudentSnippet(
                StudentId=None, StudentName=None,
                FirstTrimester=None, LastTrimester=None,
                LastMajor=None,
                Credits=credits, WeightedGradeTotal=weightedGradeTotal,
                FirstLineIndex=lineIndex, LastLineIndex=lineIndex)
        elif rec.__class__.__name__ == 'TrimesterFooter':
            return StudentSnippet(
                StudentId=None, StudentName=None,
                FirstTrimester=None, LastTrimester=None,
                LastMajor=rec.Major,
                Credits=credits, WeightedGradeTotal=weightedGradeTotal,
                FirstLineIndex=lineIndex, LastLineIndex=lineIndex)
        else:
            raise Exception("Unknown parsed row type")

    @staticmethod
    def completedFromSnippet(lhs):
        """Finalize a snippet that covers one whole student record."""
        assert lhs.StudentId is not None and lhs.LastMajor is not None
        return CompletedStudent(
            StudentId = lhs.StudentId,
            StudentName = lhs.StudentName,
            SourceLines = lhs.LastLineIndex - lhs.FirstLineIndex + 1,
            LastMajor = lhs.LastMajor,
            Credits = lhs.Credits,
            WeightedGradeTotal = lhs.WeightedGradeTotal,
            FirstLineIndex = lhs.FirstLineIndex,
            LastLineIndex = lhs.LastLineIndex)

    @staticmethod
    def addSnippets(lhgroup, rhgroup):
        """Merge rhgroup into lhgroup, completing students as they close.

        Mutates and returns lhgroup; consumes rhgroup from the front.
        """
        assert len(rhgroup) > 0
        if len(lhgroup) == 0:
            return rhgroup
        while len(rhgroup) > 0:
            lhs = lhgroup[-1]
            rhs = rhgroup[0]
            # print("Trying snip ending at %d against %d"%(lhs.LastLineIndex, rhs.FirstLineIndex))
            if rhs.StudentId is not None:
                # rhs starts a new student: the previous student (if lhs
                # was one) is now complete.
                if lhs.StudentId is not None:
                    lhgroup[-1] = StudentSnippetBuilder.completedFromSnippet(lhs)
                    # print("Found student %d from lgroup"%(lhs.StudentId))
                lhgroup.append(rhs)
            else:
                # rhs continues lhs: line ranges must be contiguous.
                assert lhs.LastLineIndex + 1 == rhs.FirstLineIndex
                credits = [0 for x in range(0, NumDepts)]
                weightedGradeTotal = [0 for x in range(0, NumDepts)]
                for dept in range(0, NumDepts):
                    credits[dept] = lhs.Credits[dept] + rhs.Credits[dept]
                    weightedGradeTotal[dept] = lhs.WeightedGradeTotal[dept] + rhs.WeightedGradeTotal[dept]
                lhgroup[-1] = StudentSnippet(
                    StudentId = lhs.StudentId,
                    StudentName = lhs.StudentName,
                    FirstTrimester = lhs.FirstTrimester if lhs.FirstTrimester is not None else rhs.FirstTrimester,
                    LastTrimester = rhs.LastTrimester,
                    LastMajor = rhs.LastMajor,
                    Credits = credits,
                    WeightedGradeTotal = weightedGradeTotal,
                    FirstLineIndex = lhs.FirstLineIndex,
                    LastLineIndex = rhs.LastLineIndex)
            rhgroup.pop(0)
        return lhgroup

    @staticmethod
    def addSnippetsWOCompleting(lhgroup, rhgroup):
        """Merge rhgroup into lhgroup WITHOUT finalizing completed
        students (used by strategies that finalize in a later pass).

        Mutates and returns lhgroup; rhgroup is only read.
        """
        assert len(rhgroup) > 0
        if len(lhgroup) == 0:
            return rhgroup
        for rhs in rhgroup:
            lhs = lhgroup[-1]
            # print("Trying snip ending at %d against %d"%(lhs.LastLineIndex, rhs.FirstLineIndex))
            if rhs.StudentId is not None:
                lhgroup.append(rhs)
            else:
                if lhs.LastLineIndex + 1 != rhs.FirstLineIndex:
                    print('about to assert ', lhs.LastLineIndex, rhs.FirstLineIndex)
                assert lhs.LastLineIndex + 1 == rhs.FirstLineIndex
                credits = [0 for x in range(0, NumDepts)]
                weightedGradeTotal = [0 for x in range(0, NumDepts)]
                for dept in range(0, NumDepts):
                    credits[dept] = lhs.Credits[dept] + rhs.Credits[dept]
                    weightedGradeTotal[dept] = lhs.WeightedGradeTotal[dept] + rhs.WeightedGradeTotal[dept]
                lhgroup[-1] = StudentSnippet(
                    StudentId = lhs.StudentId,
                    StudentName = lhs.StudentName,
                    FirstTrimester = lhs.FirstTrimester if lhs.FirstTrimester is not None else rhs.FirstTrimester,
                    LastTrimester = rhs.LastTrimester,
                    LastMajor = rhs.LastMajor,
                    Credits = credits,
                    WeightedGradeTotal = weightedGradeTotal,
                    FirstLineIndex = lhs.FirstLineIndex,
                    LastLineIndex = rhs.LastLineIndex)
        return lhgroup

    @staticmethod
    def gradeSummary(x):
        """Convert a CompletedStudent into the final StudentSummary."""
        assert x.LastMajor is not None
        return StudentSummary(
            StudentId=x.StudentId,
            StudentName=x.StudentName,
            SourceLines=x.SourceLines,
            Major=x.LastMajor,
            GPA=sum(x.WeightedGradeTotal)/max(1,sum(x.Credits)),
            MajorGPA=x.WeightedGradeTotal[x.LastMajor]/max(1,x.Credits[x.LastMajor])
        )
#endregion
#region Preprocessor
def identifySectionUsingIntermediateFile(srcFilename,
                                         destFilename="e:/temp/sparkperftesting/temp.csv"):
    """Prefix every line of *srcFilename* with a 0-based section id and
    write the result to *destFilename*.

    A new section starts at every student-header line (lines beginning
    with ``S,``).  The destination path was previously hard-coded; it is
    now a parameter whose default preserves the old behavior.

    Args:
        srcFilename: path of the generated student data file.
        destFilename: path of the intermediate file to (re)create.

    Returns:
        str: destFilename, for convenient chaining.
    """
    if os.path.exists(destFilename):
        os.unlink(destFilename)
    reExtraType = re.compile("^S,")
    sectionId = -1
    with open(destFilename, "w") as outf, open(srcFilename, "r") as inf:
        for line in inf:
            if reExtraType.match(line):
                sectionId += 1
            # The file must begin with a student header, otherwise the
            # leading lines would have no section.
            assert sectionId >= 0
            outf.write(f"{sectionId},{line}")
    return destFilename
#endregion
#region nospark
def method_nospark_single_threaded(dataSize, filename, sectionMaximum):
    """Baseline strategy: aggregate the whole file in-process, no Spark.

    Returns (number of student summaries, None) - the None slot is where
    RDD-based strategies return their RDD.
    """
    with open(filename, "r") as fh:
        typed_rows = map(parseLineToTypes, fh)
        summary_count = sum(1 for _ in aggregateTypedRowsToGrades(typed_rows))
    return summary_count, None


test_method_list.append(TestMethod(
    name='method_nospark_single_threaded',
    interface='python',
    scale='singleline',
    delegate=method_nospark_single_threaded))
#endregion
#region mapPartitions
def method_mappart_single_threaded(dataSize, filename, sectionMaximum):
    """Spark strategy: run the whole aggregation inside mapPartitions on
    a single partition, then count the summaries on the driver.

    Returns (summary count, the summaries RDD).
    """
    typed = sc.textFile(filename, minPartitions=1).map(parseLineToTypes)
    grades = typed.mapPartitions(aggregateTypedRowsToGrades)
    return count_iter(grades.toLocalIterator()), grades


test_method_list.append(TestMethod(
    name='method_mappart_single_threaded',
    interface='rdd',
    scale='wholefile',
    delegate=method_mappart_single_threaded))
#
def method_mappart_odd_even(dataSize, filename, sectionMaximum):
SegmentOffset = sectionMaximum - 1
SegmentExtra = 2 * sectionMaximum
SegmentSize | |
"""
Link: https://github.com/honglianghe/CDNet/blob/f436555539e140ff8bafa3c9f54cbc2550b7cebd/my_transforms.py
Author: <NAME>
"""
import torch
import random
from PIL import Image, ImageOps, ImageEnhance, ImageFilter
import numpy as np
import numbers
import collections
from skimage import morphology
import SimpleITK as sitk
import time
import copy
from skimage import io
import albumentations as albu
import warnings
warnings.filterwarnings("ignore")
class Compose(object):
    """Compose several transforms into a single callable.

    Args:
        transforms (list of ``Transform`` objects): applied in order;
            each receives the output of the previous one.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, imgs):
        for transform in self.transforms:
            imgs = transform(imgs)
        return imgs
class Scale(object):
    """Rescale the input PIL images to the given size.

    Args:
        size (int or sequence of length 2): target size.  An int scales
            the shorter side to ``size`` preserving aspect ratio; a pair
            is used directly as (width, height).
        interpolation: PIL resampling filter (default ``Image.BILINEAR``).
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # ``collections.Iterable`` was removed in Python 3.10; the ABC
        # lives in collections.abc.
        from collections.abc import Iterable
        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, imgs):
        """Resize every image in *imgs*; returns a tuple of PIL images."""
        pics = []
        for img in imgs:
            if isinstance(self.size, int):
                w, h = img.size
                # Shorter side already at target size: keep unchanged.
                if (w <= h and w == self.size) or (h <= w and h == self.size):
                    pics.append(img)
                    continue
                if w < h:
                    ow = self.size
                    oh = int(self.size * h / w)
                    pics.append(img.resize((ow, oh), self.interpolation))
                    continue
                else:
                    oh = self.size
                    ow = int(self.size * w / h)
                    pics.append(img.resize((ow, oh), self.interpolation))
            else:
                pics.append(img.resize(self.size, self.interpolation))
        return tuple(pics)
import cv2
class RandomResize(object):
    """Randomly Resize the input PIL Image using a scale of lb~ub.
    Args:
        lb (float): lower bound of the scale
        ub (float): upper bound of the scale
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``
    """

    def __init__(self, lb=0.8, ub=1.3, interpolation=Image.BILINEAR):
        self.lb = lb
        self.ub = ub
        self.interpolation = interpolation

    def __call__(self, imgs):
        """
        Args:
            imgs (PIL Images): Images to be scaled.
        Returns:
            PIL Images: Rescaled images.
        """
        # First pass only validates the input types.
        for img in imgs:
            if not isinstance(img, Image.Image):
                raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        # One scale factor shared by all images in the group.
        scale = random.uniform(self.lb, self.ub)
        # print scale
        w, h = imgs[0].size
        ow = int(w * scale)
        oh = int(h * scale)

        do_albu = 0  # TODO: albumentations path disabled by hard-coded flag
        if(do_albu == 1):
            # Albumentations path: stack image / weightmap / label on the
            # channel axis so all three get the identical resize.
            transf = albu.Resize(always_apply=False, p=1.0, height=oh, width=ow, interpolation=0)
            image = np.array(imgs[0])
            weightmap = np.expand_dims(imgs[1], axis=2)
            label = np.array(imgs[2])  # np.expand_dims(imgs[2], axis=2)
            if (len(label.shape) == 2):
                label = label.reshape(label.shape[0], label.shape[1], 1)
            if(len(image.shape)==2):
                image = image.reshape(image.shape[0], image.shape[1], 1)
            concat_map = np.concatenate((image, weightmap, label), axis=2)
            concat_map_transf = transf(image=np.array(concat_map))['image']
            image_channel = image.shape[-1]
            image_transf = concat_map_transf[:, :, :image_channel]
            image_transf = np.squeeze(image_transf)
            weightmap_transf = concat_map_transf[:, :, image_channel]
            if (label.shape[2] == 1):
                #label = label.reshape(label.shape[0], label.shape[1], 1)
                label_transf = concat_map_transf[:, :, -1:]
                label_transf = label_transf.reshape(label_transf.shape[0], label_transf.shape[1])
            else:
                label_transf = concat_map_transf[:, :, -3:]
            image_PIL = Image.fromarray(image_transf.astype(np.uint8))
            weightmap_PIL = Image.fromarray(weightmap_transf.astype(np.uint8))
            label_PIL = Image.fromarray(label_transf.astype(np.uint8))
            pics = []
            pics.append(image_PIL)
            pics.append(weightmap_PIL)
            pics.append(label_PIL)
        else:
            # When shrinking, pad back to the original size with a
            # reflected border so the output keeps (w, h).
            if scale < 1:
                padding_l = (w - ow)//2
                padding_t = (h - oh)//2
                padding_r = w - ow - padding_l
                padding_b = h - oh - padding_t
                padding = (padding_l, padding_t, padding_r, padding_b)
            pics = []
            for i in range(len(imgs)):
                img = imgs[i]
                img = img.resize((ow, oh), self.interpolation)
                if scale < 1:
                    # img = np.array(img)
                    img = cv2.copyMakeBorder(np.array(img),padding_t,padding_b,padding_l,padding_r,cv2.BORDER_REFLECT)
                    # print(img.shape)
                    img = Image.fromarray(img)
                    # print("img: ",img.size)
                    # img = ImageOps.expand(img, border=padding , fill=0)
                pics.append(img)
            # print(pics[0].size)
        return tuple(pics)
class RandomColor(object):
    """Jitter color, brightness, contrast and sharpness of the first image.

    Each enhancement factor is drawn uniformly from [0.5, 1.5); the
    remaining images (e.g. weight map, label) pass through untouched.
    """

    def __init__(self, randomMin = 1, randomMax = 2):
        # NOTE(review): randomMin/randomMax are stored but never read by
        # __call__ - confirm whether the factors should honour them.
        self.randomMin = randomMin
        self.randomMax = randomMax

    def __call__(self, imgs):
        out_imgs = list(imgs)
        enhanced = out_imgs[0]
        # Apply the four enhancements in sequence, drawing a fresh random
        # factor for each (same RNG consumption order as before).
        for enhancer in (ImageEnhance.Color, ImageEnhance.Brightness,
                         ImageEnhance.Contrast, ImageEnhance.Sharpness):
            factor = 1 + (np.random.rand() - 0.5)
            enhanced = enhancer(enhanced).enhance(factor)
        out_imgs[0] = enhanced
        return tuple(out_imgs)
class RandomAffine(object):
    """ Transform the input PIL Image using a random affine transformation
    The parameters of an affine transformation [a, b, c=0
    d, e, f=0]
    are generated randomly according to the bound, and there is no translation
    (c=f=0)
    Args:
        bound: the largest possible deviation of random parameters
    """

    def __init__(self, bound):
        # The message previously claimed the range was [0, 0.5) although
        # the check accepts 0.5; message now matches the actual check.
        if bound < 0 or bound > 0.5:
            raise ValueError("Bound is invalid, should be in range [0, 0.5]")
        self.bound = bound

    def __call__(self, imgs):
        """Apply one shared random affine transform to all images.

        Returns:
            tuple of transformed PIL images.
        """
        img = imgs[0]
        x, y = img.size

        # Random matrix entries within +/- bound of the identity.
        a = 1 + 2 * self.bound * (random.random() - 0.5)
        b = 2 * self.bound * (random.random() - 0.5)
        d = 2 * self.bound * (random.random() - 0.5)
        e = 1 + 2 * self.bound * (random.random() - 0.5)

        # correct the transformation center to image center
        c = -a * x / 2 - b * y / 2 + x / 2
        f = -d * x / 2 - e * y / 2 + y / 2

        trans_matrix = [a, b, c, d, e, f]
        pics = []
        for img in imgs:
            pics.append(img.transform((x, y), Image.AFFINE, trans_matrix))
        return tuple(pics)
class RandomHorizontalFlip(object):
    """Mirror all given PIL.Images left-right together with probability 0.5."""

    def __call__(self, imgs):
        """
        Args:
            imgs: sequence of PIL.Image objects to (maybe) flip.
        Returns:
            tuple of flipped images, or the original *imgs* unchanged.
        """
        # One coin toss decides the fate of the whole group.
        if random.random() >= 0.5:
            return imgs
        return tuple(img.transpose(Image.FLIP_LEFT_RIGHT) for img in imgs)
class RandomVerticalFlip(object):
    """Mirror all given PIL.Images top-bottom together with probability 0.5."""

    def __call__(self, imgs):
        """
        Args:
            imgs: sequence of PIL.Image objects to (maybe) flip.
        Returns:
            tuple of flipped images, or the original *imgs* unchanged.
        """
        # One coin toss decides the fate of the whole group.
        if random.random() >= 0.5:
            return imgs
        return tuple(img.transpose(Image.FLIP_TOP_BOTTOM) for img in imgs)
class RandomElasticDeform(object):
    """ Elastic deformation of the input PIL Image using random displacement vectors
    drawm from a gaussian distribution
    Args:
        sigma: the largest possible deviation of random parameters
    """

    def __init__(self, num_pts=4, sigma=20):
        # num_pts: control points per dimension of the B-spline grid.
        # sigma: std-dev of the random control-point displacements
        # (used only by the SimpleITK path below).
        self.num_pts = num_pts
        self.sigma = sigma

    def __call__(self, imgs):
        pics = []

        do_albu = 1
        if (do_albu == 1):
            # Albumentations path (active): stack image / weightmap / label
            # on the channel axis so all three receive the identical
            # displacement field.
            image = np.array(imgs[0])
            weightmap = np.expand_dims(imgs[1], axis=2)
            label = np.array(imgs[2])  # np.expand_dims(imgs[2], axis=2)
            if(len(label.shape)==2):
                label = label.reshape(label.shape[0], label.shape[1], 1)
            if(len(image.shape)==2):
                image = image.reshape(image.shape[0], image.shape[1], 1)
            concat_map = np.concatenate((image, weightmap, label), axis=2)
            transf = albu.ElasticTransform(always_apply=False, p=1.0, alpha=1.0, sigma=50, alpha_affine=50,
                                           interpolation=0, border_mode=0,
                                           value=(0, 0, 0),
                                           mask_value=None, approximate=False)  # border_mode selects the interpolation/padding scheme
            concat_map_transf = transf(image=concat_map)['image']
            image_channel = image.shape[-1]
            image_transf = concat_map_transf[:, :, :image_channel]
            image_transf = np.squeeze(image_transf)
            weightmap_transf = concat_map_transf[:, :, image_channel]
            if (label.shape[2] == 1):
                label_transf = concat_map_transf[:, :, -1:]
                label_transf = label_transf.reshape(label_transf.shape[0], label_transf.shape[1])
            else:
                label_transf = concat_map_transf[:, :, -3:]
            image_PIL = Image.fromarray(image_transf.astype(np.uint8))
            weightmap_PIL = Image.fromarray(weightmap_transf.astype(np.uint8))
            label_PIL = Image.fromarray(label_transf.astype(np.uint8))
            pics.append(image_PIL)
            pics.append(weightmap_PIL)
            pics.append(label_PIL)
        else:
            # SimpleITK path: build one random B-spline transform from the
            # first image, then resample every image with it so they all
            # deform identically.
            img = np.array(imgs[0])
            if len(img.shape) == 3:
                img = img[:,:,0]
            sitkImage = sitk.GetImageFromArray(img, isVector=False)
            mesh_size = [self.num_pts]*sitkImage.GetDimension()
            tx = sitk.BSplineTransformInitializer(sitkImage, mesh_size)
            params = tx.GetParameters()
            paramsNp = np.asarray(params, dtype=float)
            paramsNp = paramsNp + np.random.randn(paramsNp.shape[0]) * self.sigma
            paramsNp[0:int(len(params)/3)] = 0  # remove z deformations! The resolution in z is too bad
            params = tuple(paramsNp)
            tx.SetParameters(params)
            resampler = sitk.ResampleImageFilter()
            resampler.SetReferenceImage(sitkImage)
            resampler.SetInterpolator(sitk.sitkLinear)
            resampler.SetDefaultPixelValue(0)
            resampler.SetTransform(tx)
            resampler.SetDefaultPixelValue(0)
            for img in imgs:
                is_expand = False
                if not isinstance(img, np.ndarray):
                    img = np.array(img)
                if len(img.shape) == 2:
                    # Promote grayscale to HxWx1 so the channel loop below works.
                    img = np.expand_dims(img, axis=2)
                    is_expand = True
                img_deformed = np.zeros(img.shape, dtype=img.dtype)
                # Resample channel by channel with the shared transform.
                for i in range(img.shape[2]):
                    sitkImage = sitk.GetImageFromArray(img[:,:,i], isVector=False)
                    outimgsitk = resampler.Execute(sitkImage)
                    img_deformed[:,:,i] = sitk.GetArrayFromImage(outimgsitk)
                if is_expand:
                    img_deformed = img_deformed[:,:,0]
                # print img_deformed.dtype
                pics.append(Image.fromarray(img_deformed))
        return tuple(pics)
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=Image.BILINEAR, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand | |
# NOTE(review): ANTLR-generated listener stubs for CSharp4Parser.  Every
# handler below is deliberately a no-op; override the relevant method in a
# subclass to attach behavior — do not edit this generated code by hand.
def enterUnbound_type121_name(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#unbound_type121_name.
def exitUnbound_type121_name(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#generic_dimension_specifier.
def enterGeneric_dimension_specifier(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#generic_dimension_specifier.
def exitGeneric_dimension_specifier(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#commas.
def enterCommas(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#commas.
def exitCommas(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#checked_expression.
def enterChecked_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#checked_expression.
def exitChecked_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#unchecked_expression.
def enterUnchecked_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#unchecked_expression.
def exitUnchecked_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#default_value_expression.
def enterDefault_value_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#default_value_expression.
def exitDefault_value_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#unary_expression.
def enterUnary_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#unary_expression.
def exitUnary_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#scan_for_cast_generic_precedence.
def enterScan_for_cast_generic_precedence(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#scan_for_cast_generic_precedence.
def exitScan_for_cast_generic_precedence(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#cast_disambiguation_token.
def enterCast_disambiguation_token(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#cast_disambiguation_token.
def exitCast_disambiguation_token(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#pre_increment_expression.
def enterPre_increment_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#pre_increment_expression.
def exitPre_increment_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#pre_decrement_expression.
def enterPre_decrement_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#pre_decrement_expression.
def exitPre_decrement_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#cast_expression.
def enterCast_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#cast_expression.
def exitCast_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#multiplicative_expression.
def enterMultiplicative_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#multiplicative_expression.
def exitMultiplicative_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#additive_expression.
def enterAdditive_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#additive_expression.
def exitAdditive_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#shift_expression.
def enterShift_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#shift_expression.
def exitShift_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#relational_expression.
def enterRelational_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#relational_expression.
def exitRelational_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#scan_for_shift_generic_precedence.
def enterScan_for_shift_generic_precedence(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#scan_for_shift_generic_precedence.
def exitScan_for_shift_generic_precedence(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#shift_disambiguation_token.
def enterShift_disambiguation_token(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#shift_disambiguation_token.
def exitShift_disambiguation_token(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#istype121.
def enterIstype121(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#istype121.
def exitIstype121(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#is_disambiguation_token.
def enterIs_disambiguation_token(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#is_disambiguation_token.
def exitIs_disambiguation_token(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#equality_expression.
def enterEquality_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#equality_expression.
def exitEquality_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#and_expression.
def enterAnd_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#and_expression.
def exitAnd_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#exclusive_or_expression.
def enterExclusive_or_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#exclusive_or_expression.
def exitExclusive_or_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#inclusive_or_expression.
def enterInclusive_or_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#inclusive_or_expression.
def exitInclusive_or_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#conditional_and_expression.
def enterConditional_and_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#conditional_and_expression.
def exitConditional_and_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#conditional_or_expression.
def enterConditional_or_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#conditional_or_expression.
def exitConditional_or_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#null_coalescing_expression.
def enterNull_coalescing_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#null_coalescing_expression.
def exitNull_coalescing_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#conditional_expression.
def enterConditional_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#conditional_expression.
def exitConditional_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#lambda_expression.
def enterLambda_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#lambda_expression.
def exitLambda_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#anonymous_method_expression.
def enterAnonymous_method_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#anonymous_method_expression.
def exitAnonymous_method_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#anonymous_function_signature.
def enterAnonymous_function_signature(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#anonymous_function_signature.
def exitAnonymous_function_signature(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#explicit_anonymous_function_signature.
def enterExplicit_anonymous_function_signature(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#explicit_anonymous_function_signature.
def exitExplicit_anonymous_function_signature(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#explicit_anonymous_function_parameter_list.
def enterExplicit_anonymous_function_parameter_list(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#explicit_anonymous_function_parameter_list.
def exitExplicit_anonymous_function_parameter_list(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#explicit_anonymous_function_parameter.
def enterExplicit_anonymous_function_parameter(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#explicit_anonymous_function_parameter.
def exitExplicit_anonymous_function_parameter(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#anonymous_function_parameter_modifier.
def enterAnonymous_function_parameter_modifier(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#anonymous_function_parameter_modifier.
def exitAnonymous_function_parameter_modifier(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#implicit_anonymous_function_signature.
def enterImplicit_anonymous_function_signature(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#implicit_anonymous_function_signature.
def exitImplicit_anonymous_function_signature(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#implicit_anonymous_function_parameter_list.
def enterImplicit_anonymous_function_parameter_list(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#implicit_anonymous_function_parameter_list.
def exitImplicit_anonymous_function_parameter_list(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#implicit_anonymous_function_parameter.
def enterImplicit_anonymous_function_parameter(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#implicit_anonymous_function_parameter.
def exitImplicit_anonymous_function_parameter(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#anonymous_function_body.
def enterAnonymous_function_body(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#anonymous_function_body.
def exitAnonymous_function_body(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#query_expression.
def enterQuery_expression(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#query_expression.
def exitQuery_expression(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#from_clause.
def enterFrom_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#from_clause.
def exitFrom_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#query_body.
def enterQuery_body(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#query_body.
def exitQuery_body(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#query_body_clauses.
def enterQuery_body_clauses(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#query_body_clauses.
def exitQuery_body_clauses(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#query_body_clause.
def enterQuery_body_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#query_body_clause.
def exitQuery_body_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#let_clause.
def enterLet_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#let_clause.
def exitLet_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#where_clause.
def enterWhere_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#where_clause.
def exitWhere_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#join_clause.
def enterJoin_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#join_clause.
def exitJoin_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#join_into_clause.
def enterJoin_into_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#join_into_clause.
def exitJoin_into_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#combined_join_clause.
def enterCombined_join_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#combined_join_clause.
def exitCombined_join_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#orderby_clause.
def enterOrderby_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#orderby_clause.
def exitOrderby_clause(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#orderings.
def enterOrderings(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#orderings.
def exitOrderings(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#ordering.
def enterOrdering(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#ordering.
def exitOrdering(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#ordering_direction.
def enterOrdering_direction(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#ordering_direction.
def exitOrdering_direction(self, ctx):
    pass
# Enter a parse tree produced by CSharp4Parser#select_or_group_clause.
def enterSelect_or_group_clause(self, ctx):
    pass
# Exit a parse tree produced by CSharp4Parser#select_or_group_clause.
Z[zn] = fsxn.to_standard(Z_xr[zn])
if dzndc != -1:
P_deriv = P.deriv()
deriv_scale = dx_xr[0] / kc[0]
Z[dzndc] = fsxn.to_standard(
P_deriv.__call__(c_scaled) * deriv_scale
)
if (dzndz != -1):
Z[dzndz] = 1.
return numba_init_impl
# Defines iterate via a function factory - jitted implementation
def numba_iterate(
    M_divergence_sq, max_iter, reason_max_iter, reason_M_divergence,
    epsilon_stationnary_sq, interior_detect_activated, reason_stationnary,
    SA_activated, xr_detect_activated, BLA_activated,
    calc_dzndc,
    zn, dzndz, dzndc,
    p_iter_zn, p_iter_dzndz, p_iter_dzndc
):
    """
    Function factory: returns the jitted per-point perturbation iteration.

    Parameters (captured as closure constants by the jitted inner function):
    - M_divergence_sq, max_iter, epsilon_stationnary_sq: numeric thresholds
      for divergence, iteration budget and interior (stationnary) detection
    - reason_max_iter, reason_M_divergence, reason_stationnary: integer
      codes written to ``stop[0]`` to report why the iteration ended
    - interior_detect_activated, xr_detect_activated, BLA_activated,
      calc_dzndc: feature switches selecting which branches below run
    - zn, dzndz, dzndc: row indices of the tracked fields inside the state
      vector ``Z`` (presumably -1 when a field is not tracked — the -1
      convention is visible in the sibling initializers)
    - p_iter_zn, p_iter_dzndz, p_iter_dzndc: formula-specific elementary
      iteration callbacks, applied in place
    - SA_activated is accepted for interface compatibility but not used
      below (series approximation is skipped in this implementation).
    """
    @numba.njit
    def numba_impl(
        c, c_xr, Z, Z_xr, Z_xr_trigger, U, stop, n_iter,
        Zn_path, dZndc_path, has_xr, ref_index_xr, ref_xr, ref_div_iter, ref_order,
        refpath_ptr, out_is_xr, out_xr, M_bla, r_bla, bla_len, stages_bla
    ):
        """
        Iterate one pixel to completion; returns the final iteration count.

        Parameters
        ----------
        c, c_xr: c and its "Xrange" counterparts
        Z, Z_xr: idem for result vector Z
        Z_xr_trigger : boolean, activated when Z_xr needs to be used
        """
        # SA skipped - wrapped iteration if we reach the cycle order
        w_iter = n_iter
        if w_iter >= ref_order:
            w_iter = w_iter % ref_order
        # We know that :
        # ref_orbit_len = max_iter + 1 >= ref_div_iter
        # if order is not None:
        #     ref_orbit_len = min(order, ref_orbit_len)
        ref_orbit_len = Zn_path.shape[0]
        first_invalid_index = min(ref_orbit_len, ref_div_iter, ref_order)
        M_out = np.empty((2,), dtype=np.complex128)
        while True:
            #==========================================================
            # Try a BLA_step
            # (STG_SKIP_MASK and ref_BLA_get are module-level helpers)
            if BLA_activated and (w_iter & STG_SKIP_MASK) == 0:
                #     [ A 0 0]
                # M = [ 0 A B]
                #     [ 0 0 1]
                #
                #      [dzndc]
                # Zn = [   zn]
                #      [    c]
                #
                # Z_(n+1) = M * Zn
                #
                step = ref_BLA_get(
                    M_bla, r_bla, bla_len, stages_bla, Z[zn], w_iter,
                    first_invalid_index, M_out, True
                )
                if step != 0:
                    n_iter += step
                    w_iter = (w_iter + step) % ref_order
                    if xr_detect_activated:
                        Z_xr[zn] = M_out[0] * Z_xr[zn] + M_out[1] * c_xr
                        # /!\ keep this, needed for next BLA step
                        Z[zn] = fsxn.to_standard(Z_xr[zn])
                        if calc_dzndc:
                            Z_xr[dzndc] = M_out[0] * Z_xr[dzndc]
                    else:
                        # just the usual BLA step
                        Z[zn] = M_out[0] * Z[zn] + M_out[1] * c
                        if calc_dzndc:
                            Z[dzndc] = M_out[0] * Z[dzndc]
                    continue
            #==================================================================
            # BLA failed, launching a full perturbation iteration
            n_iter += 1  # the indice we are going to compute now
            # Load reference point value @ w_iter
            # refpath_ptr = [prev_idx, curr_xr]
            if xr_detect_activated:
                ref_zn = ref_path_get(
                    Zn_path, w_iter,
                    has_xr, ref_index_xr, ref_xr, refpath_ptr,
                    out_is_xr, out_xr, 0
                )
                ref_zn_xr = ensure_xr(ref_zn, out_xr[0], out_is_xr[0])
            else:
                ref_zn = Zn_path[w_iter]
            #==================================================================
            # Pertubation iter block
            #------------------------------------------------------------------
            # dzndc subblock
            if calc_dzndc:
                ref_dzndc = dZndc_path[w_iter]  # This may be Xrange
                if xr_detect_activated:
                    p_iter_dzndc(Z_xr, ref_zn_xr, ref_dzndc)
                else:
                    p_iter_dzndc(Z, ref_zn, ref_dzndc)
            #------------------------------------------------------------------
            # Interior detection - Used only at low zoom level
            if interior_detect_activated and (n_iter > 1):
                p_iter_dzndz(Z)  # 2. * (Z[zn] * Z[dzndz])
            #------------------------------------------------------------------
            # zn subblok
            if xr_detect_activated:
                p_iter_zn(Z_xr, ref_zn_xr, c_xr)  # in place mod
                # std is used for div condition
                Z[zn] = fsxn.to_standard(Z_xr[zn])
            else:
                p_iter_zn(Z, ref_zn, c)
            #==================================================================
            # Stopping condition: maximum iter reached
            if n_iter >= max_iter:
                stop[0] = reason_max_iter
                break
            #==================================================================
            # Stopping condition: Interior points detection
            if interior_detect_activated:
                bool_stationnary = (
                    Z[dzndz].real ** 2 + Z[dzndz].imag ** 2
                    < epsilon_stationnary_sq)
                if bool_stationnary:
                    stop[0] = reason_stationnary
                    break
            #==================================================================
            # Stopping condition: divergence
            # ZZ = "Total" z + dz
            w_iter += 1
            if w_iter >= ref_order:
                w_iter = w_iter % ref_order
            if xr_detect_activated:
                ref_zn_next = fs.perturbation.ref_path_get(
                    Zn_path, w_iter,
                    has_xr, ref_index_xr, ref_xr, refpath_ptr,
                    out_is_xr, out_xr, 0
                )
            else:
                ref_zn_next = Zn_path[w_iter]
            # div condition computation with std only
            ZZ = Z[zn] + ref_zn_next
            full_sq_norm = ZZ.real ** 2 + ZZ.imag ** 2
            # Flagged as 'diverging'
            bool_infty = (full_sq_norm > M_divergence_sq)
            if bool_infty:
                stop[0] = reason_M_divergence
                break
            #==================================================================
            # Glitch correction - reference point diverging
            if (w_iter >= ref_div_iter - 1):
                # Rebasing - we are already big no underflow risk
                Z[zn] = ZZ
                if xr_detect_activated:
                    Z_xr[zn] = fsxn.to_Xrange_scalar(ZZ)
                    if calc_dzndc:
                        Z_xr[dzndc] = Z_xr[dzndc] + dZndc_path[w_iter]
                else:
                    if calc_dzndc:
                        # not a cycle, dZndc_path[0] == 0
                        Z[dzndc] = Z[dzndc] + dZndc_path[w_iter]
                w_iter = 0
                continue
            #==================================================================
            # Glitch correction - "dynamic glitch"
            bool_dyn_rebase = (
                (abs(ZZ.real) <= abs(Z[zn].real))
                and (abs(ZZ.imag) <= abs(Z[zn].imag))
            )
            if bool_dyn_rebase:
                if xr_detect_activated:
                    # Can we *really* rebase ??
                    # Note: if Z[zn] underflows we might miss a rebase
                    # So we cast everything to xr
                    Z_xrn = Z_xr[zn]
                    if out_is_xr[0]:
                        # Reference underflows, use available xr ref
                        ZZ_xr = Z_xrn + out_xr[0]
                    else:
                        ZZ_xr = Z_xrn + ref_zn_next
                    bool_dyn_rebase_xr = (
                        fsxn.extended_abs2(ZZ_xr)
                        <= fsxn.extended_abs2(Z_xrn)
                    )
                    if bool_dyn_rebase_xr:
                        Z_xr[zn] = ZZ_xr
                        # /!\ keep this, needed for next BLA step - TODO: for BS
                        Z[zn] = fsxn.to_standard(ZZ_xr)
                        if calc_dzndc:
                            Z_xr[dzndc] = (
                                Z_xr[dzndc] + dZndc_path[w_iter]
                                - dZndc_path[0]
                            )
                        w_iter = 0
                        continue
                else:
                    # No risk of underflow - safe to rebase
                    Z[zn] = ZZ
                    if calc_dzndc:
                        # Here we need to substract the first item (as it could
                        # possibly be a cycle)
                        Z[dzndc] += dZndc_path[w_iter] - dZndc_path[0]
                    w_iter = 0
                    continue
        # End of iterations for this point: report the wrapped ref-orbit
        # index, then fold the reference orbit back into the deltas so Z
        # holds the "total" values on exit.
        U[0] = w_iter
        if xr_detect_activated:
            Z[zn] = fsxn.to_standard(Z_xr[zn]) + Zn_path[w_iter]
            Z[dzndc] = fsxn.to_standard(Z_xr[dzndc] + dZndc_path[w_iter])
        else:
            Z[zn] += Zn_path[w_iter]
            Z[dzndc] += dZndc_path[w_iter]
        return n_iter
    return numba_impl
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Non-holomorphic perturbation iterations
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@numba.njit(nogil=True)
def numba_cycles_perturb_BS(
    c_pix, Z, U, stop_reason, stop_iter,
    initialize, iterate,
    Zn_path, dXnda_path, dXndb_path, dYnda_path, dYndb_path,
    has_xr, ref_index_xr, refx_xr, refy_xr, ref_div_iter, ref_order,
    driftx_xr, drifty_xr, dx_xr,
    P, kc, n_iter_init, M_bla, r_bla, bla_len, stages_bla,
    _interrupted
):
    """
    Driver loop for the non-holomorphic ("BS") perturbation computation.

    For each of the ``npts`` points (columns of ``Z``): build the point's
    (a, b) coordinates from its pixel offset, run ``initialize`` then
    ``iterate`` on the column views, and record the final iteration count
    and stop reason.  Returns USER_INTERRUPTED as soon as the shared
    ``_interrupted`` flag is set, 0 otherwise.

    NOTE(review): ``P`` and ``kc`` are accepted but not used here —
    presumably kept for interface parity with the holomorphic driver;
    confirm against the callers.
    """
    nz, npts = Z.shape
    # Xrange mirrors of the per-point state (shared across points, reset by
    # `initialize` each pass)
    Z_xr = Xr_float_template.repeat(nz)
    Z_xr_trigger = np.ones((nz,), dtype=np.bool_)
    for ipt in range(npts):
        # refpath_ptr = [prev_idx, curr_xr] bookkeeping for ref-path lookups
        refpath_ptr = np.zeros((2,), dtype=np.int32)
        out_is_xr = np.zeros((2,), dtype=numba.bool_)
        out_xr = Xr_float_template.repeat(4)
        Zpt = Z[:, ipt]
        Upt = U[:, ipt]
        apt, bpt, a_xr, b_xr = ref_path_c_from_pix_BS(
            c_pix[ipt], dx_xr, driftx_xr, drifty_xr
        )
        stop_pt = stop_reason[:, ipt]
        initialize(Zpt, Z_xr)
        n_iter = iterate(
            apt, bpt, a_xr, b_xr, Zpt, Z_xr, Z_xr_trigger,
            Upt, stop_pt, n_iter_init,
            Zn_path, dXnda_path, dXndb_path, dYnda_path, dYndb_path,
            has_xr, ref_index_xr, refx_xr, refy_xr, ref_div_iter, ref_order,
            refpath_ptr, out_is_xr, out_xr, M_bla, r_bla, bla_len, stages_bla
        )
        stop_iter[0, ipt] = n_iter
        stop_reason[0, ipt] = stop_pt[0]
        if _interrupted[0]:
            return USER_INTERRUPTED
    return 0
def numba_initialize_BS(xn, yn, dxnda, dxndb, dynda, dyndb):
    """
    Function factory: builds the jitted state initializer for the
    non-holomorphic ("BS") computation.

    ``xn``/``yn`` and the four hessian entries are row indices into the
    state vector ``Z``; a value of -1 marks a field that is not tracked.
    """
    field_indices = (xn, yn, dxnda, dxndb, dynda, dyndb)

    @numba.njit
    def numba_init_impl(Z, Z_xr):
        """
        Only : initialize the Xrange (no SA here)
        """
        for idx in field_indices:
            if idx == -1:
                # field not tracked for this computation
                continue
            Z_xr[idx] = fsxn.to_Xrange_scalar(Z[idx])
    return numba_init_impl
# Defines iterate for non-holomorphic function via a function factory
# jitted implementation
def numba_iterate_BS(
M_divergence_sq, max_iter, reason_max_iter, reason_M_divergence,
xr_detect_activated, BLA_activated,
calc_hessian,
xn, yn, dxnda, dxndb, dynda, dyndb,
p_iter_zn, p_iter_hessian
):
@numba.njit
def numba_impl(
a, b, a_xr, b_xr, Z, Z_xr, Z_xr_trigger,
U, stop, n_iter,
Zn_path, dXnda_path, dXndb_path, dYnda_path, dYndb_path,
has_xr, ref_index_xr, refx_xr, refy_xr, ref_div_iter, ref_order,
refpath_ptr, out_is_xr, out_xr, M_bla, r_bla, bla_len, stages_bla
):
"""
Parameters
----------
c, c_xr: c and it "Xrange" counterparts
Z, Z_xr: idem for result vector Z
Z_xr_trigger : bolean, activated when Z_xr need to be used
"""
# print("in numba impl")
# SA skipped - wrapped iteration if we reach the cycle order
w_iter = n_iter
if w_iter >= ref_order:
w_iter = w_iter % ref_order
# We know that :
# ref_orbit_len = max_iter + 1 >= ref_div_iter
# if order is not None:
# ref_orbit_len = min(order, ref_orbit_len)
ref_orbit_len = Zn_path.shape[0]
first_invalid_index = min(ref_orbit_len, ref_div_iter, ref_order)
M_out = np.empty((8,), dtype=np.float64)
while True:
#==========================================================
# Try a BLA_step
if BLA_activated and (w_iter & STG_SKIP_MASK) == 0: # and False:
Zn = Z[xn] + 1j * Z[yn]
step = ref_BLA_get(
M_bla, r_bla, bla_len, stages_bla, Zn, w_iter,
first_invalid_index, M_out, False
)
if step != 0:
n_iter += step
w_iter = (w_iter + step) % ref_order
if xr_detect_activated:
apply_BLA_BS(M_out, Z_xr, a_xr, b_xr, xn, yn)
# /!\ keep this, needed for next BLA step
Z[xn] = fsxn.to_standard(Z_xr[xn])
Z[yn] = fsxn.to_standard(Z_xr[yn])
if calc_hessian:
apply_BLA_deriv_BS(M_out, Z_xr, a_xr, b_xr,
dxnda, dxndb, dynda, dyndb)
else:
# just the usual BLA step
apply_BLA_BS(M_out, Z, a, b, xn, yn)
if calc_hessian:
apply_BLA_deriv_BS(M_out, Z, a, b,
dxnda, dxndb, dynda, dyndb)
continue
#==================================================================
# BLA failed, launching a full perturbation iteration
n_iter += 1 # the indice we are going to | |
import numpy as np
import ast
import sys
import json
from auxiliary_functions import SampleListToArray
import matplotlib
from matplotlib import rc
# Global matplotlib configuration for publication-quality figures.
# NOTE(review): usetex=True requires a working LaTeX installation on the
# machine running the plots.
rc('text', usetex=True)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
matplotlib.rc('xtick', labelsize=30)
matplotlib.rc('ytick', labelsize=30)
import matplotlib.pyplot as plt
from file_operations_in import ReadFromFile, AverageCostsFromFile
from file_operations_out import MakeTrialNameFile, MakeDirectory
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
'''
This file produces the various plots provided in 'The Born Supremacy: Quantum Advantage and Training of an Ising Born Machine'
'''
def CompareCostFunctions(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs, comparison, legend = True):
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
if all(x.lower() == 'mmd' for x in cost_func) is True:
#If all cost functions to be compared are the mmd,
plot_colour = ['r*-', 'r+-', 'ro-', 'b*-', 'b+-', 'bo-']
else:
plot_colour = ['green', 'darkorange', 'c', 'blue', 'red', 'm']
N_trials = len(N_epochs)
if comparison.lower() == 'probs':
fig, axs = plt.subplots()
data_plot_colour = 'k'
axs.clear()
x = np.arange(len(data_probs_final[0]))
bar_plot_colour = ['green', 'blue', 'red', 'm']
#Plot MMD
axs.bar(x, data_probs_final[0].values(), width=0.1, color= '%s' %data_plot_colour, align='center')
axs.bar(x-(0.2*(0+0.5)), born_final_probs[-5].values(), width=0.1, color='%s' %(bar_plot_colour[-4]), align='center')
axs.bar(x-(0.2*(0+1)), born_final_probs[-3].values(), width=0.1, color='%s' %(bar_plot_colour[-3]), align='center')
axs.bar(x-(0.2*(0+1.5)), born_final_probs[-2].values(), width=0.1, color='%s' %(bar_plot_colour[-2]), align='center')
axs.bar(x-(0.2*(0+2)), born_final_probs[-1].values(), width=0.1, color='%s' %(bar_plot_colour[-1]), align='center')
axs.legend(('Data',r'\textsf{MMD}', r'Sinkhorn', r'Exact Stein', r'Spectral Stein' ), fontsize = 20)
axs.set_xticks(range(len(data_probs_final[0])))
axs.set_xticklabels(list(data_probs_final[0].keys()),rotation=70)
elif comparison.lower() == 'tv':
fig, ax = plt.subplots()
if qc[0][0].lower() == '3':
axins = zoomed_inset_axes(ax, 5, loc='center')
x1, x2, y1, y2 = 190, 200, 0.00, 0.021 # specify the limits
elif qc[0][0].lower() == '4':
axins = zoomed_inset_axes(ax, 2.5, loc='center right')
x1, x2, y1, y2 = 180, 200, 0.02, 0.06 # specify the limits
elif qc[0][0].lower() == '5':
axins = zoomed_inset_axes(ax, 2.5, loc='upper left')
x1, x2, y1, y2 = 0,1 , 0.24, 0.25 # specify the limits
axins.set_xlim(x1, x2) # apply the x-limits
axins.set_ylim(y1, y2) # apply the y-limits
plt.xticks(visible=False)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec="0.5")
for trial in range(N_trials):
#Compute Average losses and errors, over a certain number of runs
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
cost_error = np.vstack((lower_error['TV'], upper_error['TV'])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
pass
x = np.arange(0, N_epochs[trial]-1, 1)
if cost_func[trial].lower() == 'mmd':
ax.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %( kernel_type[trial][0], learning_rate[trial]))
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
axins.plot(x, average_loss['TV'], color ='%s' % plot_colour[trial] , label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %( kernel_type[trial][0], learning_rate[trial]))
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
elif cost_func[trial].lower() == 'stein':
if score[trial].lower() == 'exact':
# plot_colour = 'r'
ax.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'Stein using Exact score for $\eta_{init}$ = %.3f.'% learning_rate[trial])
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
axins.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'Stein using Exact score for $\eta_{init}$ = %.3f.'% learning_rate[trial])
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
elif score[trial].lower() == 'spectral':
# plot_colour = 'm'
ax.plot(loss[trial][('TV')], '-', color ='%s' % plot_colour[trial], label =r'Stein using Spectral score for $\eta_{init}$ = %.3f.' \
% learning_rate[trial])
axins.plot(loss[trial][('TV')], color ='%s' % plot_colour[trial] , label =r'Stein using Spectral score for $\eta_{init}$ = %.3f.' \
%learning_rate[trial])
elif cost_func[trial].lower() == 'sinkhorn':
ax.plot(x, average_loss['TV'],'-', color ='%s' % plot_colour[trial] , label =r'Sinkhorn using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
axins.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'Sinkhorn using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
ax.legend(loc='best', prop={'size': 20})
elif comparison.lower() == 'cost':
for trial in range(N_trials):
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
except:
print('Average Not found')
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial], runs[trial])
if cost_func[trial].lower() == 'mmd':
plot_colour = ['c', 'y', 'g']
x = np.arange(0, len(average_loss['MMD', 'Train']))
plt.plot(x, average_loss['MMD', 'Train'],'%so-' % plot_colour[trial],\
label =r'$\mathsf{MMD}$ on training set using $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], facecolor= plot_colour[trial], alpha=0.3)
plt.plot(x, average_loss['MMD', 'Test'],'%s-' % plot_colour[trial],\
label =r'$\mathsf{MMD}$ on test set using $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.3, facecolor= plot_colour[trial], interpolate=True)
elif cost_func[trial].lower() == 'stein':
if score[trial].lower() == 'exact':
plot_colour = 'r'
x = np.arange(0, len(average_loss['Stein', 'Train']))
plt.plot(x, average_loss['Stein', 'Train'],'%so-' % plot_colour,\
label =r'Stein using Exact score, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['Stein', 'Train'] - lower_error['Stein', 'Train'],\
average_loss['Stein', 'Train'] + upper_error['Stein', 'Train'], alpha=0.3, facecolor=plot_colour)
elif score[trial].lower() == 'spectral':
plot_colour = 'm'
plt.plot(loss[('Stein', 'Train')], '%so-' % plot_colour, \
label =r'Stein on training set using Spectral score, $\eta_{init}$ = %.3f.' %(learning_rate[trial] ))
plt.plot(loss[('Stein', 'Test')], '%s-' % plot_colour,\
label =r'Stein on test set using Spectral score, $\eta_{init}$ = %.3f.' %( learning_rate[trial]))
elif cost_func[trial].lower() == 'sinkhorn':
plot_colour = 'b'
x = np.arange(0, len(average_loss['Sinkhorn', 'Train']))
plt.plot(x, average_loss['Sinkhorn', 'Train'],'%so-' % plot_colour,\
label =r'Sinkhorn on training set using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['Sinkhorn', 'Train'] - lower_error['Sinkhorn', 'Train'],\
average_loss['Sinkhorn', 'Train'] + upper_error['Sinkhorn', 'Train'], alpha=0.3)
plt.plot(x, average_loss['Sinkhorn', 'Test'],'%s-' % plot_colour,\
label =r'Sinkhorn on test set using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['Sinkhorn', 'Test'] - lower_error['Sinkhorn', 'Test'],\
average_loss['Sinkhorn', 'Test'] + upper_error['Sinkhorn', 'Test'], alpha=0.3, facecolor=plot_colour, interpolate=True)
plt.legend(loc='best', prop={'size': 20})
plt.show()
return
# One fresh, independent accumulator per hyperparameter; trial settings for
# each comparison run are appended to these lists below.
(N_epochs, learning_rate, data_type, data_circuit, N_born_samples,
 N_data_samples, N_kernel_samples, batch_size, kernel_type, cost_func,
 qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs) = ([] for _ in range(16))
'''THREE QUBITS'''
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.01)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.1)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.08)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('3q-qvm')
# score.append('Exact')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(40)
# N_data_samples.append(40)
# N_kernel_samples.append(2000)
# batch_size.append(20)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('3q-qvm')
# score.append('Spectral')
# stein_eigvecs.append(4)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
'''################################'''
'''FOUR QUBITS'''
'''################################'''
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.1)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('4q-qvm')
# score.append('Exact')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(50)
# N_data_samples.append(50)
# N_kernel_samples.append(2000)
# batch_size.append(25)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('4q-qvm')
# score.append('Spectral')
# stein_eigvecs.append(6)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# CompareCostFunctions(N_epochs, learning_rate, data_type, data_circuit,
# N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score,
# stein_eigvecs, stein_eta, sinkhorn_eps, runs, 'probs', legend =True)
###################################################################################################################
# #Compute MMD Averages and error bars over certain number of runs
###################################################################################################################
def AverageCost(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs):
'''
This function reads in a number of runs, each run has the same parameters: computes average losses, and error and prints to a new file
'''
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
N_runs = len(runs)
TV_loss_total = np.zeros_like(loss[0]['TV'])
CostTrain_loss_total = np.zeros_like(loss[0][('%s' %cost_func[0], 'Train')])
CostTest_loss_total = np.zeros_like(loss[0][('%s' %cost_func[0], 'Test')])
[average_loss, error_upper, error_lower] = [{} for _ | |
import random
import numpy
from matplotlib import pyplot as plt
'''
INPUT VARIABLES
n: the number of active devices in the network
id_length: if given, all the binary id strings will be in certain length.
'''
def initialiseIDs():
    '''
    Generate n unique random binary ID strings, each of length id_length,
    one per active device in the network.

    Reads the module-level globals n and id_length, and appends the integer
    IDs to the global id_list_int and their zero-padded binary string forms
    to the global ID_list.
    '''
    id_max = 2**id_length - 1
    for _ in range(n):
        # Redraw until the ID is unique among those generated so far
        # (the previous version had a redundant membership test before
        # this loop, and shadowed the builtin name `id`).
        new_id = random.randint(0, id_max)
        while new_id in id_list_int:
            new_id = random.randint(0, id_max)
        id_list_int.append(new_id)
    for value in id_list_int:
        # Fixed-width, zero-padded binary string, e.g. 5 -> '0101' for
        # id_length == 4; a prefix could be prepended here if needed.
        ID_list.append(format(value, '0{}b'.format(id_length)))
def responseToquery(query):
    '''
    Simulate the devices' answer to a gateway query: a device responds when
    the query string is a prefix of its binary ID (IDs come from the
    module-level ID_list).

    Returns:
        0       when no device responded
        2       when two or more devices responded (collision)
        the ID  when exactly one device responded successfully
    '''
    responders = [device_id for device_id in ID_list
                  if device_id.startswith(query)]
    if len(responders) > 1:
        return 2  # collision: more than one device transmitted at once
    if len(responders) == 1:
        return responders[0]  # the single ID was received successfully
    return 0  # silence: no device matched the query
def queryTree():
    '''
    Run the query-tree (QT) collision-resolution algorithm over the
    module-level ID_list and return the number of slots used. A summary
    line is printed to the module-level file handle f.
    '''
    slot = 1
    pending = ['0', '1']   # queries still to be sent (Q in the algorithm)
    resolved = []          # IDs decoded by the gateway (M in the algorithm)
    while pending:
        slot += 1
        # The gateway sends the first pending query at the start of the slot.
        query = pending.pop(0)
        outcome = responseToquery(query)
        if outcome == 2:
            # Collision: split the query into its two one-bit extensions.
            pending.append(query + '0')
            pending.append(query + '1')
        elif outcome != 0:
            # Exactly one responder: its ID was received successfully.
            resolved.append(outcome)
        # outcome == 0 (no response): the query is simply discarded.
    print("QT: %d slots" % slot, file=f)
    return slot
def rdm(p):
    '''
    Bernoulli draw used by the SICTA/SICQTA counters: return 0 with
    probability p, and 1 with probability 1 - p.
    '''
    return 0 if random.random() < p else 1
def calcuK(reception):
    '''
    For SICTA and SICQTA, compute the feedback k: how many slots (packets or
    empty slots) can be decoded by successive interference cancellation (SIC)
    once a single transmission has succeeded.

    reception is the list of received slots (each a list of ID strings),
    ordered oldest to newest; the last entry is the slot that was just
    decoded. Walks backwards, subtracting each decoded slot from its parent;
    any extra ID recovered along the way is appended to the module-level
    memory_list. Assumes reception holds at least two slots — TODO confirm
    against the callers.
    '''
    k = 1 #pre-condition: one transmission already succeeded, so k starts at 1
    i=0
    while(1):
        flag = 0
        buff=[]
        a = reception[-2-i].copy() #parent of the successfully transmitted slot
        if (2+i) == len(reception): #parent is the first slot: cancellation cannot go further back
            flag = 1
        b = reception[-1-i].copy() #the successfully transmitted slot
        for ele in b:
            a.remove(ele) # interference cancellation: subtract decoded IDs from the parent
        buff = a
        if len(buff) > 1: # still a collision after cancellation: no new ID decoded, stop
            break
        else: #after cancellation at most one ID remains, so this slot is decoded too
            k = k + 1
            i += 1
            if len(buff)==1:
                memory_list.append(buff[0]) # record the extra ID recovered via SIC
            if flag == 1:
                break
    #print("k is %d"%k)
    return k
def SICTA():
    '''
    Simulate the SICTA (tree algorithm with successive interference
    cancellation) over the module-level ID_list and return the number of
    slots needed to resolve all devices. Decoded IDs are appended to the
    module-level memory_list; diagnostics are printed to the module-level
    file handle f; the split probability is the module-level p (via rdm).
    '''
    slot = 0
    end_con = 0
    sicta_id = [] # [ID, local counter] pair for each device
    gateway= [] # IDs received by the gateway in each time slot
    buffer = []
    for i in ID_list: #initialise all local counters to 0
        sicta_id.append([i,0])
    while (end_con!= 1):
        slot += 1
        buffer = []
        for i in sicta_id: # a device transmits its ID when its counter is 0
            if i[1] == 0:
                buffer.append(i[0])
        response = len(buffer) #number of simultaneous transmitters (>1 means collision)
        gateway.append(buffer)
        if response > 1: #collision: waiting devices back off, colliders split via rdm(p)
            for i in sicta_id:
                if i[1] > 0:
                    i[1] += 1
                if i[1]==0:
                    i[1]=rdm(p)
        if response == 0: #idle slot: devices with counter 1 re-split via rdm(p)
            #slot = slot - 1 #MTA! saves one slot if empty slot(doesn't work by rdm method)
            for i in sicta_id:
                if i[1]>1:
                    pass
                if i[1]==1:
                    i[1] = rdm(p)
        if response == 1: #success: decode the ID, then run interference cancellation
            memory_list.append(buffer[0])
            tmp=[]
            for emp in gateway: # drop empty slots (NULL transmissions or already-decoded slots)
                if emp != []:
                    tmp.append(emp)
            gateway=tmp
            k=calcuK(gateway) # feedback k: how many slots SIC managed to decode
            for c in range(k):
                gateway.pop() # pop the k slots that SIC has now resolved
            sicta_id_copy = sicta_id.copy()
            for i in sicta_id:
                i[1] = i[1]-(k-1)
                if i[1]<= 0:
                    #i[1]=-100 #to enhance the ID-Quit Condition, i set all decoded to -100
                    for j in gateway:
                        if i[0] in j:
                            j.remove(i[0]) #remove the decoded IDs from each remaining slot
                    sicta_id_copy.remove(i)
                    # end_con += 1 # each time an ID is decoded, end_con + 1
                elif i[1] == 1:
                    i[1]=rdm(p)
            sicta_id = sicta_id_copy.copy()
        if len(sicta_id)==0:
            end_con = 1
    # sanity check: every ID should have been decoded into memory_list
    for check in ID_list:
        if check not in memory_list:
            print(check,file=f),
            print("not decoded",file=f)
    print ("SICTA: %d slots"%slot,file=f)
    return slot
def feedbackToSICQT(query):
    '''
    Return all IDs from the module-level ID_list that would transmit in
    response to `query`, i.e. every ID having `query` as a prefix. SICQTA
    keeps these slot contents for later interference cancellation.
    '''
    return [device_id for device_id in ID_list
            if device_id.startswith(query)]
def SICQT(): #SICQTA: query tree with successive interference cancellation and shortcutting
    '''
    Simulate the SICQTA algorithm over the module-level ID_list and return
    the number of slots needed to resolve all devices. Decoded IDs are
    appended to the module-level memory_list; diagnostics are printed to the
    module-level file handle f.
    '''
    slot = 1 # all the IDs transmitted already in the first slot!
    query_brother = [] # sibling query of each slot, so we know which query-slot to skip later
    received = [] # every received time slot is kept here for interference cancellation
    re_id_tmp = ID_list.copy()
    received.append(re_id_tmp) # the first slot contains every ID (all devices collide)
    q = '0' # the first query is '0'
    end_con = 0
    while(end_con!=1):
        buffer = []
        q_b = q[:-1] +'1' #the sibling of the current query
        slot += 1
        buffer = feedbackToSICQT(q)
        if len(buffer) == 0:
            q = q_b+'0' # no response: jump into the sibling's subtree without recording q_b
        elif len(buffer) > 1:
            query_brother.append(q_b) #collision: remember the sibling for later
            q = q + '0'
            received.append(buffer)
        elif len(buffer) == 1 :
            query_brother.append(q_b)
            memory_list.append(buffer[0]) # decoded ID; memory_list must equal ID_list in the end
            received.append(buffer)
            k=calcuK(received)
            if k > len(query_brother): #k exceeds the stored slots: SIC reached the first slot, done
                end_con = 1
                break
            pos_in_qbrother = -1 - (k-1) # index of the pending sibling query we can shortcut to
            q = query_brother[pos_in_qbrother] + '0' # the next query extends that sibling with '0'
            query_brother = query_brother[:pos_in_qbrother] #drop the skipped queries
            for a in range(k):
                received.pop()
            for j in received: # remove already-decoded IDs from the stored slots
                for suc in memory_list:
                    if suc in j:
                        j.remove(suc)
    # sanity check: every ID should have been decoded into memory_list
    for check in ID_list:
        if check not in memory_list:
            print(check,file=f),
            print("not decoded",file=f)
    print("SICQT: %d slots"%slot,file=f)
    return slot
if __name__ == '__main__':
'''
Main function: let all 3 algorithms to resolve a same group of IDs, and repeat for 50000 times for the average performance
afterwards, repeat for different group of IDs (with different active users).
In the end, there will be text files recording the performance and plots correspondingly
'''
id_length = ??? # give parameter
for active in range(2,2**id_length+1,2): #change when length changed
result_SICQA = []
result_SICTA = []
result_QT = []
for test in range(50000):
id_length = 4
global n
n = active
file_string="test_4bits_"+str(active)+"active.txt" #change when length changed
graph_string="test_4bits_"+str(active)+"active.png"
f = open(file_string,"a")
p = 0.5 # parameter, can be changed
id_list_int = []
ID_list = []#include all the device IDs
initialiseIDs()
| |
from __future__ import division
import os
from collections import OrderedDict
from future.utils import iteritems
import numpy as np
import scipy.stats
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d
from scipy.special import erf, erfinv
# Keep import bilby statement, it is necessary for some eval() statements
import bilby # noqa
from .utils import logger, infer_args_from_method, check_directory_exists_and_if_not_mkdir
class PriorDict(OrderedDict):

    def __init__(self, dictionary=None, filename=None):
        """ A set of priors

        Parameters
        ----------
        dictionary: dict, None
            If given, a dictionary to generate the prior set.
        filename: str, None
            If given, a file containing the prior to generate the prior set.
        """
        OrderedDict.__init__(self)
        if isinstance(dictionary, dict):
            self.from_dictionary(dictionary)
        elif type(dictionary) is str:
            # Tolerate a file name passed via the first positional argument.
            logger.debug('Argument "dictionary" is a string.' +
                         ' Assuming it is intended as a file name.')
            self.from_file(dictionary)
        elif type(filename) is str:
            self.from_file(filename)
        elif dictionary is not None:
            raise ValueError("PriorDict input dictionary not understood")
        self.convert_floats_to_delta_functions()

    def to_file(self, outdir, label):
        """ Write the prior distribution to file.

        Parameters
        ----------
        outdir: str
            output directory name
        label: str
            Output file naming scheme
        """
        check_directory_exists_and_if_not_mkdir(outdir)
        prior_file = os.path.join(outdir, "{}.prior".format(label))
        logger.debug("Writing priors to {}".format(prior_file))
        with open(prior_file, "w") as outfile:
            for key in self.keys():
                # repr() of each prior round-trips through from_file's eval.
                outfile.write(
                    "{} = {}\n".format(key, self[key]))

    def from_file(self, filename):
        """ Reads in a prior from a file specification

        Parameters
        ----------
        filename: str
            Name of the file to be read in

        Notes
        -----
        Each non-empty, non-comment line must have the form ``key = value``.
        ``value`` is evaluated with ``eval`` (needed to reconstruct Prior
        instances from their repr), so only read trusted prior files.
        """
        prior = {}
        with open(filename, 'r') as f:
            for line in f:
                stripped = line.strip()
                # Skip blank lines and comment lines; previously a blank
                # line crashed the parser (eval('') -> SyntaxError).
                if not stripped or stripped[0] == '#':
                    continue
                elements = stripped.split('=')
                key = elements[0].replace(' ', '')
                # Re-join in case the value itself contains '=' characters.
                val = '='.join(elements[1:])
                prior[key] = eval(val)
        self.update(prior)

    def from_dictionary(self, dictionary):
        """ Update the prior set from a dictionary of key -> prior/str pairs.

        String values are evaluated; if the result is a Prior (or a plain
        float/int/str) it replaces the string, otherwise the raw value is
        stored unchanged.
        """
        for key, val in dictionary.items():
            if isinstance(val, str):
                try:
                    prior = eval(val)
                    if isinstance(prior, (Prior, float, int, str)):
                        val = prior
                except (NameError, SyntaxError, TypeError):
                    logger.debug(
                        "Failed to load dictionary value {} correctly"
                        .format(key))
            self[key] = val

    def convert_floats_to_delta_functions(self):
        """ Convert all float parameters to delta functions """
        for key in self:
            if isinstance(self[key], Prior):
                continue
            elif isinstance(self[key], float) or isinstance(self[key], int):
                self[key] = DeltaFunction(self[key])
                logger.debug(
                    "{} converted to delta function prior.".format(key))
            else:
                logger.debug(
                    "{} cannot be converted to delta function prior."
                    .format(key))

    def fill_priors(self, likelihood, default_priors_file=None):
        """
        Fill dictionary of priors based on required parameters of likelihood

        Any floats in prior will be converted to delta function prior. Any
        required, non-specified parameters will use the default.

        Note: if `likelihood` has `non_standard_sampling_parameter_keys`, then
        this will set-up default priors for those as well.

        Parameters
        ----------
        likelihood: bilby.likelihood.GravitationalWaveTransient instance
            Used to infer the set of parameters to fill the prior with
        default_priors_file: str, optional
            If given, a file containing the default priors.

        Returns
        -------
        prior: dict
            The filled prior dictionary
        """
        self.convert_floats_to_delta_functions()
        missing_keys = set(likelihood.parameters) - set(self.keys())
        for missing_key in missing_keys:
            if not self.test_redundancy(missing_key):
                default_prior = create_default_prior(missing_key, default_priors_file)
                if default_prior is None:
                    set_val = likelihood.parameters[missing_key]
                    logger.warning(
                        "Parameter {} has no default prior and is set to {}, this"
                        " will not be sampled and may cause an error."
                        .format(missing_key, set_val))
                else:
                    self[missing_key] = default_prior
        for key in self:
            self.test_redundancy(key)

    def sample(self, size=None):
        """Draw samples from the prior set

        Parameters
        ----------
        size: int or tuple of ints, optional
            See numpy.random.uniform docs

        Returns
        -------
        dict: Dictionary of the samples
        """
        return self.sample_subset(keys=self.keys(), size=size)

    def sample_subset(self, keys=(), size=None):
        """Draw samples from the prior set for parameters which are not a DeltaFunction

        Parameters
        ----------
        keys: iterable
            Prior keys to draw samples from (default: none).
        size: int or tuple of ints, optional
            See numpy.random.uniform docs

        Returns
        -------
        dict: Dictionary of the drawn samples
        """
        # Note: the default used to be iter([]), a single shared (and
        # permanently exhausted) iterator created at definition time; an
        # empty tuple expresses the same intent safely.
        self.convert_floats_to_delta_functions()
        samples = dict()
        for key in keys:
            if isinstance(self[key], Prior):
                samples[key] = self[key].sample(size=size)
            else:
                logger.debug('{} not a known prior.'.format(key))
        return samples

    def prob(self, sample, **kwargs):
        """
        Parameters
        ----------
        sample: dict
            Dictionary of the samples of which we want to have the probability of
        kwargs:
            The keyword arguments are passed directly to `np.prod`

        Returns
        -------
        float: Joint probability of all individual sample probabilities
        """
        # np.prod, not np.product: the latter alias was removed in NumPy 2.0.
        return np.prod([self[key].prob(sample[key]) for key in sample], **kwargs)

    def ln_prob(self, sample):
        """
        Parameters
        ----------
        sample: dict
            Dictionary of the samples of which we want to have the log probability of

        Returns
        -------
        float: Joint log probability of all the individual sample probabilities
        """
        return np.sum([self[key].ln_prob(sample[key]) for key in sample])

    def rescale(self, keys, theta):
        """Rescale samples from unit cube to prior

        Parameters
        ----------
        keys: list
            List of prior keys to be rescaled
        theta: list
            List of randomly drawn values on a unit cube associated with the prior keys

        Returns
        -------
        list: List of floats containing the rescaled sample
        """
        return [self[key].rescale(sample) for key, sample in zip(keys, theta)]

    def test_redundancy(self, key):
        """Empty redundancy test, should be overwritten in subclasses"""
        return False
class PriorSet(PriorDict):
    def __init__(self, dictionary=None, filename=None):
        """ DEPRECATED: USE PriorDict INSTEAD

        Backward-compatibility alias: logs a deprecation warning and then
        delegates construction entirely to PriorDict with the same
        arguments.
        """
        logger.warning("The name 'PriorSet' is deprecated use 'PriorDict' instead")
        super(PriorSet, self).__init__(dictionary, filename)
def create_default_prior(name, default_priors_file=None):
    """Make a default prior for a parameter with a known name.

    Parameters
    ----------
    name: str
        Parameter name
    default_priors_file: str, optional
        If given, a file containing the default priors.

    Return
    ------
    prior: Prior
        Default prior distribution for that parameter; None when it is
        unknown.
    """
    # With no file of defaults there is nothing to look the name up in.
    if default_priors_file is None:
        logger.debug(
            "No prior file given.")
        return None
    default_priors = PriorDict(filename=default_priors_file)
    if name in default_priors.keys():
        return default_priors[name]
    logger.debug(
        "No default prior found for variable {}.".format(name))
    return None
class Prior(object):
_default_latex_labels = dict()
def __init__(self, name=None, latex_label=None, unit=None, minimum=-np.inf,
maximum=np.inf):
""" Implements a Prior object
Parameters
----------
name: str, optional
Name associated with prior.
latex_label: str, optional
Latex label associated with prior, used for plotting.
unit: str, optional
If given, a Latex string describing the units of the parameter.
minimum: float, optional
Minimum of the domain, default=-np.inf
maximum: float, optional
Maximum of the domain, default=np.inf
"""
self.name = name
self.latex_label = latex_label
self.unit = unit
self.minimum = minimum
self.maximum = maximum
def __call__(self):
"""Overrides the __call__ special method. Calls the sample method.
Returns
-------
float: The return value of the sample method.
"""
return self.sample()
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
if sorted(self.__dict__.keys()) != sorted(other.__dict__.keys()):
return False
for key in self.__dict__:
if type(self.__dict__[key]) is np.ndarray:
if not np.array_equal(self.__dict__[key], other.__dict__[key]):
return False
else:
if not self.__dict__[key] == other.__dict__[key]:
return False
return True
def sample(self, size=None):
"""Draw a sample from the prior
Parameters
----------
size: int or tuple of ints, optional
See numpy.random.uniform docs
Returns
-------
float: A random number between 0 and 1, rescaled to match the distribution of this Prior
"""
return self.rescale(np.random.uniform(0, 1, size))
def rescale(self, val):
"""
'Rescale' a sample from the unit line element to the prior.
This should be overwritten by each subclass.
Parameters
----------
val: float
A random number between 0 and 1
Returns
-------
None
"""
return None
def prob(self, val):
"""Return the prior probability of val, this should be overwritten
Parameters
----------
val: float
Returns
-------
np.nan
"""
return np.nan
def ln_prob(self, val):
"""Return the prior ln probability of val, this should be overwritten
Parameters
----------
val: float
Returns
-------
np.nan
"""
return np.log(self.prob(val))
def is_in_prior_range(self, val):
"""Returns True if val is in the prior boundaries, zero otherwise
Parameters
----------
val: float
Returns
-------
np.nan
"""
return (val >= self.minimum) & (val <= self.maximum)
@staticmethod
def test_valid_for_rescaling(val):
"""Test if 0 < val < 1
Parameters
----------
val: float
Raises
-------
ValueError: If val is not between 0 and 1
"""
val = np.atleast_1d(val)
tests = (val < 0) + (val > 1)
if np.any(tests):
raise ValueError("Number to be rescaled should be in [0, 1]")
def __repr__(self):
"""Overrides the special method __repr__.
Returns a representation of this instance that resembles how it is instantiated.
Works correctly for all child classes
Returns
-------
str: A string representation of this instance
"""
subclass_args = infer_args_from_method(self.__init__)
prior_name = self.__class__.__name__
property_names = [p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)]
dict_with_properties = self.__dict__.copy()
for key in property_names:
dict_with_properties[key] = getattr(self, key)
args = ', '.join(['{}={}'.format(key, repr(dict_with_properties[key])) for key in subclass_args])
return "{}({})".format(prior_name, args)
@property
def is_fixed(self):
"""
Returns True if the prior is fixed and should not be used in the sampler. Does this by | |
backColor[2]
pointColor = alpha + pointColor[0] * 256 * 256 + pointColor[1] * 256 \
+ pointColor[2]
# Make a call to the API, and save the result in a variable.
r = etapi.PerformCalibration(ctypes.c_int32(noPoints), \
ctypes.c_int32(location), ctypes.c_bool(randomizePoints), \
ctypes.c_bool(slowMode), ctypes.c_bool(audioFeedback), \
ctypes.c_int32(eye), ctypes.c_bool(calibrationImprovement), \
ctypes.c_bool(skipBadPoints), ctypes.c_bool(autoCalibration), \
ctypes.c_int32(backColor), ctypes.c_int32(pointColor), \
ctypes.c_char_p(imageName.encode("utf-8")))
# Check the result.
if not check_result(r):
self._error(r)
def WaitForCalibrationResult(self):
    """
    desc:
        Blocks until the calibration finishes (or an error occurs), then
        returns the calibration outcome.

    returns:
        desc: Status and improve values for the current calibration,
            Boolean values captured in a tuple (status, improve)
        type: tuple
    """
    # Output parameters that the API call fills in by reference.
    status = ctypes.c_int32()
    improve = ctypes.c_bool()
    # A wait time of -1 tells the API to block without a fixed timeout.
    wait_ms = ctypes.c_int(-1)
    result = etapi.WaitForCalibrationResult(ctypes.byref(status),
        ctypes.byref(improve), wait_ms)
    # Escalate any API-level failure.
    if not check_result(result):
        self._error(result)
    return (status.value, improve.value)
def DataStreaming(self, mode):
    """
    desc:
        Turns eye-tracker data streaming on or off. While streaming is
        enabled, the registered callback functions fire whenever new data
        becomes available, and IntelliGaze runs in "background mode": the
        application centre is invisible and IntelliGaze mouse control is
        suspended. With streaming disabled, the application centre and
        mouse control are active again.

    arguments:
        mode:
            desc:
                The streaming mode: 0 (disable streaming), 1 (stream raw
                data at the maximum tracker speed), 2 (stream eye events,
                i.e. fixations and saccades), 4 (stream Blickfang
                activation data), or 8 (EyeGesture data).
            type: int
    """
    # Forward the request to the API and escalate any failure it reports.
    result = etapi.DataStreaming(ctypes.c_int32(mode))
    if not check_result(result):
        self._error(result)
def ShowStatusWindow(self, posX, posY, size, opacity):
    """
    desc:
        Shows the eye tracker status window at the requested screen
        position. The window reports the relative position of the head
        and any eye tracking problems.

    arguments:
        posX:
            desc:
                Horizontal position on the screen (in pixels).
            type: int
        posY:
            desc:
                Vertical position on the screen (in pixels).
            type: int
        size:
            desc:
                Width of the status window (in pixels), in the range 100
                to 768.
            type: int
        opacity:
            desc:
                Opacity of the status window, expressed as a percentage.
            type: int
    """
    # Forward the request to the API and escalate any failure it reports.
    result = etapi.ShowStatusWindow(
        ctypes.c_int32(posX), ctypes.c_int32(posY),
        ctypes.c_int32(size), ctypes.c_int32(opacity))
    if not check_result(result):
        self._error(result)
def HideStatusWindow(self):
    """
    desc:
        Hides the eye tracker status window again; see ShowStatusWindow
        for how to display it.
    """
    # Forward the request to the API and escalate any failure it reports.
    result = etapi.HideStatusWindow()
    if not check_result(result):
        self._error(result)
def ExitServer(self):
    """
    desc:
        Shuts down the eye tracker server application.
    """
    # Forward the request to the API and escalate any failure it reports.
    result = etapi.ExitServer()
    if not check_result(result):
        self._error(result)
def QuitServer(self):
    """
    desc:
        Shuts down the eye tracker server application.

        NOTE: It is unclear whether ExitServer or QuitServer is the
        canonical call: ExitServer is used in the C API, while QuitServer
        is the name listed in the documentation.
    """
    # Forward the request to the API and escalate any failure it reports.
    result = etapi.QuitServer()
    if not check_result(result):
        self._error(result)
# # # # #
# UNSUPPORTED IN C API
# The following functions appear in the API, but are not supported in the C
# wrapper for the API. They are commented out for now, but retained in the
# code base in case support is added or required in future releases.
# def StartCalibration(self, noPoints=9, location=0, randomizePoints=True, \
# eye=0, calibrationImprovement=False, skipBadPoints=True,
# autoCalibration=True):
#
# """
# desc:
# Starts a client-controlled calibration. This means the client
# software is responsible for showing and moving the calibration
# points! The eye tracker will call the CalibrationDoneDelegate
# callback when it's finished or an error occurred.
#
# keywords:
# noPoints:
# desc:
# Number of points used in the calibration. Choose from 1, 5,
# 9, or 16. (Default = 9)
# type: int
# location:
# desc:
# Indication of where the calibration points should be
# presented. Choose from 0 (Full, outer points are 5% off
# the monitor edge), 1 (Center, outer points are 20% off
# the monitor edge), 2 (Bottom, points are in the lower half
# of the monitor), 3 (Horizontal, points are located in a
# horizontal line), and 4 (Vertical, points are located in
# a vertical line). (Default = 0)
# type: int
# randomizePoints:
# desc:
# Set to True to allow the tracker to randomise the order in
# which calibration points are shown. Some experienced users
# have a tendency to anticipate where points will be shown,
# and to produce a systematic calibration error by moving
# their eyes to the next point too quickly. Shuffling the
# points prevents this. (Default = True)
# type: bool
# eye:
# desc:
# Determines what eyes to calibrate and what eyes to track.
# Choose from 0 (calibrate both eyes), 1 (calibrate the left
# eye and track both eyes, "right glass eye"), 2 (calibrate
# the right eye and track both eyes, "left glass eye"), 3
# (calibrate and track only the left eye, "right pirate
# eye"), or 4 (calibrate and track only the right eye, "left
# pirate eye"). (Default = 0)
# type: int
# calibrationImprovement:
# desc:
# Set to True if outliers or skipped points from a previous
# calibrations should be re-calibrated. Can only be done
# when a previous calibration returned with an "Improvement"
# suggestion! (Default = False)
# type: bool
# skipBadPoints:
# desc:
# When set to True, IntelliGaze will not get stuck at
# uncalibratable points. It will skip them, and try to
# complete the calibration without them. (Default = True)
# type: bool
# autoCalibration:
# desc:
# Set to True to allow the tracker to detect fixations and
# accept points automatically. (Default = True)
# type: bool
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.StartCalibration(ctypes.c_int32(noPoints), \
# ctypes.c_int32(location), ctypes.c_bool(randomizePoints), \
# ctypes.c_int32(eye), ctypes.c_bool(calibrationImprovement), \
# ctypes.c_bool(skipBadPoints), ctypes.c_bool(autoCalibration))
# # Check the result.
# if not check_result(r):
# self._error(r)
# def StopCalibration(self):
#
# """
# desc:
# Interrupts the calibration procedure, and will cause the eye
# tracker to notify the client about the calibration result by
# calling the CalibrationDoneDelegate callback.
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.StopCalibration()
# # Check the result.
# if not check_result(r):
# self._error(r)
# def CalibrationStatus(self, isMoving, isHot, acceptPoint):
#
# """
# desc:
# Informs the eye tracker server about the current status of the
# calibration procedure. Note that this function allows client
# software to let the tracker know about the calibration,
# particularly whether the calibration target is moving, whether
# it's "hot" (OK to accept fixations for), and whether the point
# should be force-accepted. This data is required by the eye tracker
# to know when to search for fixations during the calibration
# procedure.
#
# arguments:
# isMoving:
# desc:
# Set this to True while the fixation target is moving.
# type: bool
# isHot:
# desc:
# Set this to True to make the eye tracker accept the next
# fixation it detects.
# type: bool
# acceptPoint:
# desc:
# If set to True, the eye tracker will accept the next
# fixation it detects to accept the calibration point. Use
# this parameter when doing a manual (not self-paced)
# calibration, i.e. set this to True when the operator hits
# a key to confirm fixation. (Not available in
# autoCalibration mode.)
# type: bool
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.StartCalibration(ctypes.c_bool(isMoving), \
# ctypes.c_bool(isHot), ctypes.c_bool(acceptPoint))
# # Check the result.
# if not check_result(r):
# self._error(r)
# def LoadCalibration(self, profileName):
#
# """
# desc:
# Tries to load a calibration for the passed profile name.
#
# arguments:
# profileName:
# desc:
# Name | |
for gathering data. If None, there is no limit.
Returns
-------
None
"""
#Note: same beginning as gather_slices (TODO: consolidate?)
if comm is None: return # no gathering needed!
#Perform broadcasts for each slice in order
my_rank = comm.Get_rank()
arIndx = [slice(None, None)] * arToFill.ndim
arIndx[0:len(arToFillInds)] = arToFillInds
axes = (axes,) if _compat.isint(axes) else axes
max_indices = [None] * len(axes)
if max_buffer_size is not None: # no maximum of buffer size
chunkBytes = arToFill.nbytes # start with the entire array as the "chunk"
for iaxis, axis in enumerate(axes):
# Consider restricting the chunk size along the iaxis-th axis.
# If we can achieve the desired max_buffer_size by restricting
# just along this axis, great. Otherwise, restrict to at most
# 1 index along this axis and keep going.
bytes_per_index = chunkBytes / arToFill.shape[axis]
max_inds = int(max_buffer_size / bytes_per_index)
if max_inds == 0:
max_indices[iaxis] = 1
chunkBytes /= arToFill.shape[axis]
else:
max_indices[iaxis] = max_inds
break
else:
_warnings.warn("gather_slices_by_owner: Could not achieve max_buffer_size")
# -- end part that is the same as gather_slices
#Get a list of the slices to broadcast, indexed by the rank of the owner proc
slices_by_owner = comm.allgather(slicesIOwn)
for owner, slices in enumerate(slices_by_owner):
for slcOrSlcTup in slices:
slcTup = (slcOrSlcTup,) if isinstance(slcOrSlcTup, slice) else slcOrSlcTup
assert(len(slcTup) == len(axes))
#Get the a list of the (sub-)slices along each axis, whose product
# (along the specified axes) gives the entire block given by slcTup
axisSlices = []
for iaxis, axis in enumerate(axes):
slc = slcTup[iaxis]
if max_indices[iaxis] is None or max_indices[iaxis] >= _slct.length(slc):
axisSlices.append([slc]) # arIndx[axis] = slc
else:
axisSlices.append(_slct.divide(slc, max_indices[iaxis]))
for axSlcs in _itertools.product(*axisSlices):
#create arIndx from per-axis (sub-)slices and broadcast
for iaxis, axis in enumerate(axes):
arIndx[axis] = axSlcs[iaxis]
#broadcast arIndx slice
buf = _findx(arToFill, arIndx, True) if (my_rank == owner) \
else _np.empty(_findx_shape(arToFill, arIndx), arToFill.dtype)
comm.Bcast(buf, root=owner)
if my_rank != owner: _fas(arToFill, arIndx, buf)
buf = None # free buffer mem asap
def gather_indices(indices, index_owners, arToFill, arToFillInds,
                   axes, comm, max_buffer_size=None):
    """
    Gathers data within a numpy array, `arToFill`, according to given indices.

    Upon entry it is assumed that the different processors within `comm` have
    computed different parts of `arToFill`, namely different slices or
    index-arrays of the `axis`-th axis.  At exit, data has been gathered such
    that all processors have the results for the entire `arToFill` (or at least
    for all the indices given).

    Parameters
    ----------
    indices : list
        A list of all the integer-arrays or slices (computed by *any* of
        the processors, not just the current one).  Each element of `indices`
        may be either a single slice/index-array or a tuple of such
        elements (when gathering across multiple dimensions).

    index_owners : dict
        A dictionary mapping the index of an element within `slices` to an
        integer rank of the processor responsible for communicating that
        slice/index-array's data to the rest of the processors.

    arToFill : numpy.ndarray
        The array which contains partial data upon entry and the gathered
        data upon exit.

    arToFillInds : list
        A list of slice or index-arrays specifying the (fixed) sub-array of
        `arToFill` that should be gathered into.  The elements of
        `arToFillInds` are taken to be indices for the leading dimension
        first, and any unspecified dimensions or `None` elements are
        assumed to be unrestricted (as if `slice(None,None)`).  Note that
        the combination of `arToFill` and `arToFillInds` is essentially like
        passing `arToFill[arToFillInds]` to this function, except it will
        work with index arrays as well as slices.

    axes : int or tuple of ints
        The axis or axes of `arToFill` on which the slices apply (which axis
        do the elements of `indices` refer to?).  Note that `len(axes)` must
        be equal to the number of sub-indices (i.e. the tuple length) of each
        element of `indices`.

    comm : mpi4py.MPI.Comm or None
        The communicator specifying the processors involved and used
        to perform the gather operation.

    max_buffer_size : int or None
        The maximum buffer size in bytes that is allowed to be used
        for gathering data. If None, there is no limit.

    Returns
    -------
    None
    """
    if comm is None: return  # no gathering needed!

    #Perform broadcasts for each slice in order
    my_rank = comm.Get_rank()

    # Fixed indexing into arToFill: leading dims come from arToFillInds and
    # remaining dims are unrestricted.  The entries at `axes` positions are
    # overwritten per-broadcast below.
    arIndx = [slice(None, None)] * arToFill.ndim
    arIndx[0:len(arToFillInds)] = arToFillInds

    axes = (axes,) if _compat.isint(axes) else axes

    # max_indices[iaxis] caps how many indices along axes[iaxis] may be
    # broadcast at once; None means unrestricted along that axis.
    max_indices = [None] * len(axes)
    if max_buffer_size is not None:  # a buffer-size limit was given: compute per-axis caps
        chunkBytes = arToFill.nbytes  # start with the entire array as the "chunk"
        for iaxis, axis in enumerate(axes):
            # Consider restricting the chunk size along the iaxis-th axis.
            #  If we can achieve the desired max_buffer_size by restricting
            #  just along this axis, great.  Otherwise, restrict to at most
            #  1 index along this axis and keep going.
            bytes_per_index = chunkBytes / arToFill.shape[axis]
            max_inds = int(max_buffer_size / bytes_per_index)
            if max_inds == 0:
                max_indices[iaxis] = 1
                chunkBytes /= arToFill.shape[axis]
            else:
                max_indices[iaxis] = max_inds
                break
        else:
            # for-loop exhausted without break: even a single index per axis
            # exceeds the limit, so warn that it cannot be honored.
            _warnings.warn("gather_indices: Could not achieve max_buffer_size")

    for iIndex, indOrIndTup in enumerate(indices):
        owner = index_owners[iIndex]  # owner's rank
        indTup = (indOrIndTup,) if not isinstance(indOrIndTup, tuple) else indOrIndTup
        assert(len(indTup) == len(axes))

        def to_slice_list(indexArrayOrSlice):
            """Breaks a slice or index array into a list of slices"""
            if isinstance(indexArrayOrSlice, slice):
                return [indexArrayOrSlice]  # easy!

            lst = indexArrayOrSlice
            if len(lst) == 0: return [slice(0, 0)]

            slc_lst = []
            i = 0; N = len(lst)
            while i < N:
                # Greedily extend a run of equally-spaced indices into one
                # (possibly strided) slice.
                start = lst[i]
                step = lst[i + 1] - lst[i] if i + 1 < N else None
                while i + 1 < N and lst[i + 1] - lst[i] == step: i += 1
                stop = lst[i] + 1
                slc_lst.append(slice(start, stop, None if step == 1 else step))
                i += 1

            return slc_lst

        #Get a list of the (sub-)indices along each axis, whose product
        # (along the specified axes) gives the entire block given by indTup
        axisSlices = []
        for iaxis, axis in enumerate(axes):
            ind = indTup[iaxis]
            sub_slices = []

            #break `ind`, which may be either a single slice or an index array,
            # into a list of slices that are broadcast one at a time (sometimes
            # these `ind_slice` slices themselves need to be broken up further
            # to obey max_buffer_size).
            for islice in to_slice_list(ind):
                if max_indices[iaxis] is None or max_indices[iaxis] >= _slct.length(islice):
                    sub_slices.append(islice)  # arIndx[axis] = slc
                else:
                    sub_slices.extend(_slct.divide(islice, max_indices[iaxis]))
            axisSlices.append(sub_slices)

        for axSlcs in _itertools.product(*axisSlices):
            #create arIndx from per-axis (sub-)slices and broadcast
            for iaxis, axis in enumerate(axes):
                arIndx[axis] = axSlcs[iaxis]

            #broadcast arIndx slice: the owner sends its copy of the
            # sub-array; every other rank receives into a fresh buffer and
            # writes the result back into arToFill.
            buf = _findx(arToFill, arIndx, True) if (my_rank == owner) \
                else _np.empty(_findx_shape(arToFill, arIndx), arToFill.dtype)
            comm.Bcast(buf, root=owner)
            if my_rank != owner: _fas(arToFill, arIndx, buf)
            buf = None  # free buffer mem asap
def distribute_for_dot(contracted_dim, comm):
    """
    Prepares for one or multiple distributed dot products given the dimension
    to be contracted (i.e. the number of columns of A or rows of B in dot(A,B)).

    The returned slice should be passed as `loc_slice` to :func:`mpidot`.

    Parameters
    ----------
    contracted_dim : int
        The dimension that will be contracted in ensuing :func:`mpidot`
        calls (see above).

    comm : mpi4py.MPI.Comm or None
        The communicator used to perform the distribution.

    Returns
    -------
    slice
        The "local" slice specifying the indices belonging to the current
        processor.  Should be passed to :func:`mpidot` as `loc_slice`.
    """
    loc_indices, _, _ = distribute_indices(
        list(range(contracted_dim)), comm, False)

    # When there are more processors than indices, this processor may be
    # assigned no indices at all; return an empty slice instead of crashing
    # on loc_indices[0] below.
    if len(loc_indices) == 0:
        return slice(0, 0)

    #Make sure local columns are contiguous
    start, stop = loc_indices[0], loc_indices[-1] + 1
    assert(loc_indices == list(range(start, stop)))
    return slice(start, stop)  # local column range as a slice
def mpidot(a, b, loc_slice, comm):
"""
Performs a distributed dot product, dot(a,b).
Parameters
----------
a,b : numpy.ndarray
Arrays to dot together.
loc_slice : slice
A slice specifying the indices along the contracted dimension belonging
to this processor (obtained from :func:`distribute_for_dot`)
comm : mpi4py.MPI.Comm or None
The communicator used to parallelize the dot product.
Returns
-------
numpy.ndarray
"""
if comm is None or comm.Get_size() == 1:
assert(loc_slice == slice(0, b.shape[0]))
return _np.dot(a, b)
from mpi4py import MPI # not at top so can import pygsti on cluster login nodes
loc_dot = _np.dot(a[:, loc_slice], b[loc_slice, :])
result | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# quick script for installing tap
#
##
# Standard-library and project imports used throughout the installer.
import subprocess,re,os,shutil,sys
import base64
from src.core.tapcore import ssh_keygen
from src.core.tapcore import motd
from src.core.tapcore import set_background
import getpass
# NOTE(review): sys is imported twice (here and in the grouped import
# above) -- harmless, but redundant.
import sys
# Bootstrap pexpect: if it is missing, try one apt-get install and retry;
# bail out with an explanatory message if it still cannot be imported.
try:
    import pexpect
except ImportError:
    subprocess.Popen("apt-get -y install python3-pexpect", shell=True).wait()
    try:
        import pexpect
    except ImportError:
        print("Install python3-pexpect first, then re-run setup.")
        sys.exit(1)
# Install the base tooling packages the platform expects (requires apt and
# root privileges -- presumably this runs on a Debian-based box as root).
print("[*] Installing some base modules requested...")
subprocess.Popen("apt-get -y install nfs-common tree htop tshark smbclient", shell=True).wait()
# add customized metasploit banner
if os.path.isdir("/root/.msf4/"):
    print("[*] Metasploit installed, installing custom banner and timestamp to /root/.msf4/config")
    # NOTE(review): overwrites any existing config; a `with open(...)` block
    # would guarantee the handle is closed on error.
    filewrite = open("/root/.msf4/config", "w")
    filewrite.write("[framework/core]\nSessionLogging=true\nLogLevel=5\nTimestampOutput=true\nPromptTimeFormat=%I:%H:%S\nConsoleLogging=true\nprompt=%grn[%grn%T] %grnTrustedSec %whiMSF%whi (s:%grn%S %whij:%grn%J%whi)\nload sounds\n[framework/ui/console]")
    filewrite.close()
def kill_tap():
    """Kill any running TAP and TAP heartbeat processes.

    Greps the process list for tap.py / heartbeat.py entries and sends a
    kill signal to each matching PID. Best-effort: lines that cannot be
    parsed are skipped.
    """
    proc = subprocess.Popen("ps -au | grep tap", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    for raw_line in proc.stdout:
        # Popen pipes yield bytes under Python 3; decode before matching.
        # (The original code matched str patterns against bytes, which raises
        # TypeError and was silently swallowed by a bare except -- so nothing
        # was ever killed.)
        line = raw_line.decode("utf-8", errors="replace")
        # kill the main TAP process (escape the dot so it isn't a wildcard)
        if re.search(r"tap\.py", line):
            print("[*] Killing running version of TAP..")
            try:
                pid = line.split(" ")[6]
            except IndexError:
                pid = None  # unexpected ps format -- skip this entry
            if pid is not None:
                subprocess.Popen("kill %s" % (pid), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                print("[*] Killed the TAP process: " + pid)
        # kill the heartbeat health check
        if re.search(r"heartbeat\.py", line):
            print("[*] Killing running version of TAP HEARTBEAT..")
            try:
                pid = line.split(" ")[6]
            except IndexError:
                pid = None  # unexpected ps format -- skip this entry
            if pid is not None:
                subprocess.Popen("kill %s" % (pid), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                print("[*] Killed the Heartbeat TAP process: " + pid)
# here we encrypt via aes, will return encrypted string based on secret key which is random
def encryptAES(data):
    """Encrypt `data` with AES-256 (ECB) under a freshly generated random key.

    Returns a single string "ciphertext::::key" where both parts are
    base64-encoded. NOTE: the key is returned alongside the ciphertext and
    stored locally by the caller, so this is obfuscation rather than real
    secrecy; ECB mode also leaks plaintext patterns.
    """
    # the character used for padding--with a block cipher such as AES, the value
    # you encrypt must be a multiple of BLOCK_SIZE in length. This character is
    # used to ensure that your value is always a multiple of BLOCK_SIZE
    PADDING = '{'
    BLOCK_SIZE = 32
    # one-liner to sufficiently pad the text to be encrypted
    pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
    # random 32-byte key => AES-256
    secret = os.urandom(BLOCK_SIZE)
    # AES.new() requires an explicit mode under pycryptodome / Python 3;
    # ECB matches the old pycrypto default this code relied on.
    cipher = AES.new(secret, AES.MODE_ECB)
    # encrypt() requires bytes in Python 3, so encode the padded plaintext,
    # then base64-encode the ciphertext for safe text transport.
    ciphertext = base64.b64encode(cipher.encrypt(pad(data).encode("utf-8")))
    encoded_key = base64.b64encode(secret)
    return ciphertext.decode("utf-8") + "::::" + encoded_key.decode("utf-8")
print (r"""
TTTTTTTTTTTTTTTTTTTTTTT AAA PPPPPPPPPPPPPPPPP
T:::::::::::::::::::::T A:::A P::::::::::::::::P
T:::::::::::::::::::::T A:::::A P::::::PPPPPP:::::P
T:::::TT:::::::TT:::::T A:::::::A PP:::::P P:::::P
TTTTTT T:::::T TTTTTT A:::::::::A P::::P P:::::P
T:::::T A:::::A:::::A P::::P P:::::P
T:::::T A:::::A A:::::A P::::PPPPPP:::::P
T:::::T A:::::A A:::::A P:::::::::::::PP
T:::::T A:::::A A:::::A P::::PPPPPPPPP
T:::::T A:::::AAAAAAAAA:::::A P::::P
T:::::T A:::::::::::::::::::::A P::::P
T:::::T A:::::AAAAAAAAAAAAA:::::A P::::P
TT:::::::TT A:::::A A:::::A PP::::::PP
T:::::::::T A:::::A A:::::A P::::::::P
T:::::::::T A:::::A A:::::A P::::::::P
TTTTTTTTTTTAAAAAAA AAAAAAAPPPPPPPPPP
The TrustedSec Attack Platform
Written by: <NAME> (@HackingDave)
https://github.com/trustedsec/tap
The self contained-deployable penetration testing kit
""")
print("""
Welcome to the TAP installer. TAP is a remote connection setup tool that will install a remote
pentest platform for you and automatically reverse SSH out back to home.
""")
if os.path.isfile("/etc/init.d/tap"):
answer = input("TAP detected. Do you want to uninstall [y/n:] ")
if answer.lower() == "yes" or answer.lower() == "y":
answer = "uninstall"
if not os.path.isfile("/etc/init.d/tap"):
answer = input("Do you want to start the installation of TAP: [y/n]: ")
# if they said yes
if answer.lower() == "y" or answer.lower() == "yes":
print("[*] Checking to see if TAP is currently running...")
# kill running processes
kill_tap()
print("[*] Beginning installation. This should only take a moment.")
# if directories aren't there then create them
if not os.path.isdir("/usr/share/tap"):
os.makedirs("/usr/share/tap")
# install to rc.local
print("[*] Adding TAP into startup through init scripts..")
if os.path.isdir("/etc/init.d"):
# remove old startup
if os.path.isfile("/etc/init.d/tap"): os.remove("/etc/init.d/tap")
# startup script here
fileopen = open("src/core/startup_tap", "r")
config = fileopen.read()
filewrite = open("/etc/init.d/tap", "w")
filewrite.write(config)
filewrite.close()
print("[*] Triggering update-rc.d on TAP to automatic start...")
subprocess.Popen("chmod +x /etc/init.d/tap", shell=True).wait()
subprocess.Popen("update-rc.d tap defaults", shell=True).wait()
# setting background
print("[*] Setting background..")
set_background()
# install git and update everything
print("[*] Updating everything beforehand...")
subprocess.Popen("apt-get update && apt-get --force-yes -y upgrade && apt-get --force-yes -y dist-upgrade", shell=True).wait()
subprocess.Popen("apt-get --force-yes -y install git python3-crypto python3-pexpect openssh-server net-tools", shell=True).wait()
from Crypto.Cipher import AES
choice = input("Do you want to keep TAP updated? (requires internet) [y/n]: ")
if choice == "y" or choice == "yes":
print("[*] Checking out latest TAP to /usr/share/tap")
# if old files are there
if os.path.isdir("/usr/share/tap/"):
shutil.rmtree('/usr/share/tap')
if not os.path.isdir("/usr/share/tap"):
os.makedirs("/usr/share/tap")
subprocess.Popen("cd /usr/share/;git clone https://github.com/trustedsec/tap tap/", shell=True).wait()
print("[*] Finished. If you want to update tap go to /usr/share/tap and type 'git pull'")
AUTO_UPDATE="ON"
else:
print("[*] Copying setup files over...")
AUTO_UPDATE="OFF"
if os.path.isdir("/usr/share/tap/"):
shutil.rmtree('/usr/share/tap')
if not os.path.isdir("/usr/share/tap"):
os.makedirs("/usr/share/tap")
subprocess.Popen("cp -rf * /usr/share/tap/", shell=True).wait()
print("[*] Next we need to configure the remote SSH server you will want to tunnel over.")
print("[*] This is the main remote SSH server you have running on the Internet that TAP will call back to.")
print("\nWe need to figure out which method you want to use. The first method will use SSH keys\nfor authentication (preferred). This will generate a pub/priv key pair on your machine\nand automatically upload the key to the remote server. When that happens, a password\nwill not be needed then. The second method will use an actual password for authentication\non the remote server. The password is encrypted with AES but not in a secure format (decryption keys are stored locally, need to be).\n\n")
choice1 = input("Choice 1: Use SSH keys, Choice 2: Use password (1,2)[1]: ")
password = ""
if choice1 == "1" or choice1 == "":
choice1 = "ssh_keys"
else:
choice1 = "password"
# generate ssh_key gen from setcore
if choice1 == "ssh_keys":
print("[*] SSH Key generation was selected, we will begin the process now.")
#password = get<PASSWORD>pass("Enter the passphrase for your new SSH key: ")
password = ""
ssh_keygen(password)
# if we are just using straight passwords
if choice1 == "password":
print("[*] This will ask for a username on the REMOTE system (root not recommended)")
print("The username and password being requested would be the username and password needed to log into the REMOTE system that you have exposed on the Internet for the reverse SSH connection. For example, the TAP box needs to connect OUTBOUND to a box on the Internet - this would be the username and password for that system. ROOT access is NOT needed. This is a simple SSH tunnel. Recommend restricted account in case this box gets taken and has creds on it. Better preference is to use SSH keys.")
username = input("Enter username for ssh [root]: ")
if username == "":
username = "root"
else:
password = getpass.getpass("Enter password for %s: " % (username))
if password != "":
print("[*] Encrypting the password now..")
password = encrypt<PASSWORD>(password)
store = password.split("::::")
password = store[0]
key = store[1]
# if the key directory isnt created, do it real quick
if not os.path.isdir("/root/.tap"):
os.makedirs("/root/.tap")
filewrite = open("/root/.tap/store", "w")
filewrite.write(key)
filewrite.close()
print("[!] Warning when specifying hostname - this implies that the remote TAP device will have DNS - otherwise this will fail.")
host = input("Enter the remote IP or hostname for SSH connect (remote external server): ")
port = input("Enter the PORT to the reverse SSH connect (remote external SSH port)[22]: ")
if port == "": port = "22"
print("[*] This next option will be the LOCAL port on the EXTERNAL box you will need to SSH into when TAP calls back. For example, when the SSH connection is sent from the TAP device to the box on the Internet, a local port is created on the remote box, so if you wanted to get into the tap, you would first SSH into the remote box, then ssh username@localhost -p <port you specify below>.")
localport = input("Enter the LOCAL port that will be on the remote SSH box [10003]: ")
socks = input("Enter the LOCAL port that will be used for the SOCKS HTTP proxy [10004]: ")
if localport == "": localport = "10003"
if socks == "": socks = "10004"
if AUTO_UPDATE == "ON":
print("[*] The update server is a path to pull NEW versions of the TAP device. Using git isn't recommended if you | |
# Repository: camUrban/PteraSoftware
"""This module contains vortex class definitions, and useful aerodynamic functions.
This module contains the following classes:
LineVortex: This class is used to contain line vortices.
HorseshoeVortex: This class is used to contain horseshoe vortices.
RingVortex: This class is used to contain ring vortices.
This module contains the following exceptions:
None
This module contains the following functions:
collapsed_velocities_from_horseshoe_vortices: This function takes in a group of
points, and the attributes of a group of horseshoe vortices. At every point,
it finds the cumulative induced velocity due to all of the horseshoe vortices.
expanded_velocities_from_horseshoe_vortices: This function takes in a group of
points, and the attributes of a group of horseshoe vortices. At every point,
it finds the induced velocity due to each horseshoe vortex.
collapsed_velocities_from_ring_vortices: This function takes in a group of
points, and the attributes of a group of ring vortices. At every point, it finds
the cumulative induced velocity due to all of the ring vortices.
expanded_velocities_from_ring_vortices: This function takes in a group of points,
and the attributes of a group of ring vortices. At every point, it finds the
induced velocity due to each ring vortex.
collapsed_velocities_from_line_vortices: This function takes in a group of
points, and the attributes of a group of line vortices. At every point, it finds
the cumulative induced velocity due to all of the line vortices.
expanded_velocities_from_line_vortices: This function takes in a group of points,
and the attributes of a group of line vortices. At every point, it finds the
induced velocity due to each line vortex.
"""
import math
import numpy as np
from numba import njit, prange
from . import functions
# Set the value of Squire's parameter that will be used by the induced velocity
# functions. Squire's parameter relates to the size of the vortex cores and the rate
# at which they grow. The value of this parameter is slightly controversial. It
# dramatically affects the stability of the result. I'm using this value, as cited for
# use in flapping-wing vehicles in "Role of Filament Strain in the Free-Vortex
# Modeling of Rotor Wakes" (Ananthan and Leishman, 2004). It is unitless.
squire = 10 ** -4

# Set the value of Lamb's constant that will be used by the induced velocity
# functions. Lamb's constant relates to the size of the vortex cores and the rate at
# which they grow. The value of this parameter is well agreed upon, and published in
# "Extended Unsteady Vortex-Lattice Method for Insect Flapping Wings" (Nguyen et al.,
# 2016). It is unitless.
lamb = 1.25643

# Set the value of the local machine error (float64 epsilon). This will be used to
# fix removable discontinuities in the induced velocity functions.
eps = np.finfo(float).eps
class LineVortex:
    """A straight vortex filament defined by two endpoints and a strength.

    This class contains the following public methods:
        None

    This class contains the following class attributes:
        None

    Subclassing:
        This class is not meant to be subclassed.
    """

    def __init__(self, origin, termination, strength):
        """Initialize the line vortex.

        :param origin: 1D array
            The (3,) position vector of the vortex filament's origin, in
            meters.
        :param termination: 1D array
            The (3,) position vector of the vortex filament's termination,
            in meters.
        :param strength: float
            The vortex strength, in meters squared per second.
        """
        self.origin = origin
        self.termination = termination
        self.strength = strength

        # Cache the origin-to-termination vector and the point halfway along
        # the filament, both of which downstream geometry code reads.
        self.vector = termination - origin
        self.center = origin + 0.5 * self.vector
class HorseshoeVortex:
    """This class is used to contain horseshoe vortices: one finite bound leg
    plus two quasi-infinite trailing legs.

    This class contains the following public methods:
        update_strength: This method updates the strength of this horseshoe vortex
        object, and the strength of its legs line vortex objects.

    This class contains the following class attributes:
        None

    Subclassing:
        This class is not meant to be subclassed.
    """

    def __init__(
        self,
        finite_leg_origin,
        finite_leg_termination,
        strength,
        infinite_leg_direction,
        infinite_leg_length,
    ):
        """This is the initialization method.

        :param finite_leg_origin: 1D array
            This is a vector containing the x, y, and z coordinates of the origin of
            the vortex's finite leg. It's a (,3) array. Its units are meters.
        :param finite_leg_termination: 1D array
            This is a vector containing the x, y, and z coordinates of the
            termination of the vortex's finite leg. It's a (,3) array. Its units
            are meters.
        :param strength: float
            This is the strength of the vortex in meters squared per second.
        :param infinite_leg_direction: 1D array
            This is a unit vector containing the direction that the infinite legs
            extend towards. It's a (,3) array.
        :param infinite_leg_length: float
            This is the length back to extend the quasi-infinite legs of the
            horseshoe vortex. Its units are meters.
        """
        self.finite_leg_origin = finite_leg_origin
        self.finite_leg_termination = finite_leg_termination
        self.strength = strength
        self.infinite_leg_direction = infinite_leg_direction
        self.infinite_leg_length = infinite_leg_length

        # The far endpoints of the two trailing legs sit a fixed offset away
        # from the finite leg's endpoints along the infinite leg direction.
        offset = infinite_leg_direction * infinite_leg_length
        self.right_leg_origin = finite_leg_origin + offset
        self.left_leg_termination = finite_leg_termination + offset

        # Build the three legs so they trace one continuous path: right
        # (trailing), finite (bound), then left (trailing).
        self.right_leg = LineVortex(
            origin=self.right_leg_origin,
            termination=self.finite_leg_origin,
            strength=strength,
        )
        self.finite_leg = LineVortex(
            origin=self.finite_leg_origin,
            termination=self.finite_leg_termination,
            strength=strength,
        )
        self.left_leg = LineVortex(
            origin=self.finite_leg_termination,
            termination=self.left_leg_termination,
            strength=strength,
        )

    def update_strength(self, strength):
        """This method updates the strength of this horseshoe vortex object, and the
        strength of its legs line vortex objects.

        :param strength: float
            This is the strength of this vortex, and of its line vortex legs. Its
            units are meters squared per second.

        :return: None
        """
        self.strength = strength
        for leg in (self.right_leg, self.finite_leg, self.left_leg):
            leg.strength = strength
class RingVortex:
"""This class is used to contain ring vortices.
This class contains the following public methods:
update_strength: This method updates the strength of this ring vortex object,
and the strength of its four legs' line vortex objects.
update_position: This method updates the position of the ring vortex, and the
positions of all its attributes.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def __init__(
self,
front_left_vertex,
front_right_vertex,
back_left_vertex,
back_right_vertex,
strength,
):
"""This is the initialization method.
:param front_left_vertex: 1D array
This is a vector containing the x, y, and z coordinates of the vortex's
front left point. It's a (,3) array with units of meters.
:param front_right_vertex: 1D array
This is a vector containing the x, y, and z coordinates of the vortex's
front right point. It's a (,3) array with units of meters.
:param back_left_vertex: 1D array
This is a vector containing the x, y, and z coordinates of the vortex's
back left point. It's a (,3) array with units of meters.
:param back_right_vertex: 1D array
This is a vector containing the x, y, and z coordinates of the vortex's
back right point. It's a (,3) array with units of meters.
:param strength: float
This is the strength of the vortex in meters squared per second.
"""
self.front_left_vertex = front_left_vertex
self.front_right_vertex = front_right_vertex
self.back_left_vertex = back_left_vertex
self.back_right_vertex = back_right_vertex
self.strength = strength
# Initialize the line vortices that make up the ring vortex.
self.front_leg = LineVortex(
origin=self.front_right_vertex,
termination=self.front_left_vertex,
strength=self.strength,
)
self.left_leg = LineVortex(
origin=self.front_left_vertex,
termination=self.back_left_vertex,
strength=self.strength,
)
self.back_leg = LineVortex(
origin=self.back_left_vertex,
termination=self.back_right_vertex,
strength=self.strength,
)
self.right_leg = LineVortex(
origin=self.back_right_vertex,
termination=self.front_right_vertex,
strength=self.strength,
)
# Initialize a variable to hold the centroid of the ring vortex.
self.center = functions.numba_centroid_of_quadrilateral(
self.front_left_vertex,
self.front_right_vertex,
self.back_left_vertex,
self.back_right_vertex,
)
# Initialize a variable to hold the age of the ring vortex in seconds.
self.age = 0
def update_strength(self, strength):
"""This method updates the strength of this ring vortex object, and the
strength of its four legs' line vortex objects.
:param strength: float
This is the strength of this vortex, and of its four legs' line vortices.
Its units are meters squared per second.
:return: None
"""
self.strength | |
Error on the observed nuv-u colour of a galaxy
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
:get_c_one:
Function to return one colour prediction, maybe predict_c_one if not using lookup table, and lookup_col_one if using a lookup table
RETURNS:
Array of same shape as :age: containing the likelihood for each galaxy at the given :theta:
"""
# why is this next line here, we don't use it at all in this function?
tq, tau = theta
pred_nuvu, pred_ur = get_c_one(theta, age)
return -0.5*N.log(2*N.pi*sigma_ur**2)-0.5*((ur-pred_ur)**2/sigma_ur**2)-0.5*N.log10(2*N.pi*sigma_nuvu**2)-0.5*((nuvu-pred_nuvu)**2/sigma_nuvu**2)
n=0
# NOTE(review): module-level counter; it is not used in this chunk — confirm
# it is consumed elsewhere before removing.
# had to move this to above predict_c_one() and get_colours()
""" Load the magnitude bandpass filters using idl save """
# readsav is presumably scipy.io.readsav (imported above this chunk); each
# bandpass is stored as a (wavelength, transmission) pair of arrays.
filters = readsav('ugriz.sav')
fuvwave= filters.ugriz.fuvwave[0]
fuvtrans = filters.ugriz.fuvtrans[0]
nuvwave= filters.ugriz.nuvwave[0]
nuvtrans = filters.ugriz.nuvtrans[0]
uwave= filters.ugriz.uwave[0]
utrans = filters.ugriz.utrans[0]
gwave= filters.ugriz.gwave[0]
gtrans = filters.ugriz.gtrans[0]
rwave= filters.ugriz.rwave[0]
rtrans = filters.ugriz.rtrans[0]
iwave= filters.ugriz.iwave[0]
itrans = filters.ugriz.itrans[0]
zwave= filters.ugriz.zwave[0]
ztrans = filters.ugriz.ztrans[0]
vwave= filters.ugriz.vwave[0]
vtrans = filters.ugriz.vtrans[0]
jwave= filters.ugriz.jwave[0]
jtrans = filters.ugriz.jtrans[0]
hwave= filters.ugriz.hwave[0]
htrans = filters.ugriz.htrans[0]
kwave= filters.ugriz.kwave[0]
ktrans = filters.ugriz.ktrans[0]
""" and the HST bandpass filters using numpy save """
""" filter transmission curves are from the SVO filter service,
e.g. http://svo2.cab.inta-csic.es/svo/theory/fps3/index.php?id=HST/WFC3_IR.F160W
Roughly speaking, the bandpasses
(F435W, F606W, F775W, F814W, F850LP, F105W, F125W, F140W, F160W)
correspond to
(B, V, i, I, z, Y, J, JH, H)
but these aren't exact copies of those filters already read in
and e.g. it's easy to confuse i and I anyway
so to be explicit, don't abbreviate to single-letter filters
"""
# .flat[0] presumably unwraps the 0-d object array numpy produces when a dict
# is saved to .npy — TODO confirm the file really holds a dict of curves.
hst_filters = N.load('HST_filters.npy').flat[0]
f435wave = hst_filters['HST_ACS_WFC.F435W_77']['wave']
f435trans = hst_filters['HST_ACS_WFC.F435W_77']['throughput']
f606wave = hst_filters['HST_ACS_WFC.F606W_77']['wave']
f606trans = hst_filters['HST_ACS_WFC.F606W_77']['throughput']
f775wave = hst_filters['HST_ACS_WFC.F775W_77']['wave']
f775trans = hst_filters['HST_ACS_WFC.F775W_77']['throughput']
f814wave = hst_filters['HST_ACS_WFC.F814W_77']['wave']
f814trans = hst_filters['HST_ACS_WFC.F814W_77']['throughput']
f850wave = hst_filters['HST_ACS_WFC.F850LP_77']['wave']
f850trans = hst_filters['HST_ACS_WFC.F850LP_77']['throughput']
f105wave = hst_filters['HST_WFC3_IR.F105W']['wave']
f105trans = hst_filters['HST_WFC3_IR.F105W']['throughput']
f125wave = hst_filters['HST_WFC3_IR.F125W']['wave']
f125trans = hst_filters['HST_WFC3_IR.F125W']['throughput']
f140wave = hst_filters['HST_WFC3_IR.F140W']['wave']
f140trans = hst_filters['HST_WFC3_IR.F140W']['throughput']
f160wave = hst_filters['HST_WFC3_IR.F160W']['wave']
f160trans = hst_filters['HST_WFC3_IR.F160W']['throughput']
# these are just temporary, don't really use them for science
hst_filters_z08 = N.load('HST_filters_z0.8.npy').flat[0]
f435wave_z08 = hst_filters_z08['HST_ACS_WFC.F435W_77']['wave']
f435trans_z08 = hst_filters_z08['HST_ACS_WFC.F435W_77']['throughput']
f606wave_z08 = hst_filters_z08['HST_ACS_WFC.F606W_77']['wave']
f606trans_z08 = hst_filters_z08['HST_ACS_WFC.F606W_77']['throughput']
f775wave_z08 = hst_filters_z08['HST_ACS_WFC.F775W_77']['wave']
f775trans_z08 = hst_filters_z08['HST_ACS_WFC.F775W_77']['throughput']
f814wave_z08 = hst_filters_z08['HST_ACS_WFC.F814W_77']['wave']
f814trans_z08 = hst_filters_z08['HST_ACS_WFC.F814W_77']['throughput']
f850wave_z08 = hst_filters_z08['HST_ACS_WFC.F850LP_77']['wave']
f850trans_z08 = hst_filters_z08['HST_ACS_WFC.F850LP_77']['throughput']
f105wave_z08 = hst_filters_z08['HST_WFC3_IR.F105W']['wave']
f105trans_z08 = hst_filters_z08['HST_WFC3_IR.F105W']['throughput']
f125wave_z08 = hst_filters_z08['HST_WFC3_IR.F125W']['wave']
f125trans_z08 = hst_filters_z08['HST_WFC3_IR.F125W']['throughput']
f140wave_z08 = hst_filters_z08['HST_WFC3_IR.F140W']['wave']
f140trans_z08 = hst_filters_z08['HST_WFC3_IR.F140W']['throughput']
f160wave_z08 = hst_filters_z08['HST_WFC3_IR.F160W']['wave']
f160trans_z08 = hst_filters_z08['HST_WFC3_IR.F160W']['throughput']
## TEMPORARY FOR HST PROPOSAL PURPOSES
# NOTE(review): from here on, the "nuv", "u", and "r" curves are actually the
# redshifted HST F435W, F606W, and F850LP curves loaded above — any later
# code defaulting to nuvwave/uwave/rwave inherits this substitution.
nuvwave = f435wave_z08
nuvtrans = f435trans_z08
uwave = f606wave_z08
utrans = f606trans_z08
rwave = f850wave_z08
rtrans = f850trans_z08
def expsfh(tq, tau, time):
    """ This function when given a single combination of [tq, tau] values will calculate the SFR at all times. First calculate the sSFR at all times as defined by Peng et al. (2010) - then the SFR at the specified time of quenching, tq and set the SFR at this value at all times before tq. Beyond this time the SFR is an exponentially declining function with timescale tau.

    INPUT:
    :tau:
        The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.

    :tq:
        The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.

    :time:
        An array of time values at which the SFR is calculated at each step.

    RETURNS:
    :sfr:
        Array of the same dimensions of time containing the sfr at each timestep.
    """
    # Peng et al. (2010) specific star formation rate at every time step.
    ssfr = 2.5*(((10**10.27)/1E10)**(-0.1))*(time/3.5)**(-2.2)
    # Hold the sSFR fixed at its t = 3.0 Gyr value for all earlier times.
    early = time.searchsorted(3.0)
    ssfr[:early] = N.interp(3.0, time, ssfr)
    ### definition is for 10^10 M_solar galaxies and per gyr - convert to M_solar/year ###
    sfr_at_quench = N.interp(tq, time, ssfr)*(1E10)/(1E9)
    # Constant SFR before the quenching onset, then an exponential decline
    # with timescale tau afterwards.
    quench = time.searchsorted(tq)
    sfr = N.full(len(time), sfr_at_quench)
    sfr[quench:] = sfr_at_quench*N.exp(-(time[quench:]-tq)/tau)
    return sfr
def expsfh_mass(ur, Mr, age, tq, tau, time):
    """Calculate exponential decline star formation rates at each time step input by matching to the mass of the observed galaxy at the observed time. This is calculated from the mass-to-light ratio that is a function of one color band u-r as in Baldry et al. (2006; see Figure 5) who fit to data from Glazebrook et al (2004) and Kauffmann et al (2003).

    INPUT:
    :ur:
        u-r optical colour, needed to calculate the mass of the observed galaxy

    :Mr:
        Absolute r-band magnitude, needed to calculate the mass of the observed galaxy

    :age:
        Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.

    :tq:
        The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.

    :tau:
        The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.

    :time:
        An array of time values at which the SFR is calculated at each step.

    RETURNS:
    :sfr:
        Array of the same dimensions of time containing the sfr at each timestep.
    """
    # Integrate the SFH up to the observed age so the total formed mass
    # matches the observed galaxy mass.
    t_end = age
    # Two-branch fit for log10(M/L_r) as a function of u-r colour
    # (Baldry et al. 2006, Figure 5).
    if ur <= 2.1:
        log_m_l = -0.95 + 0.56 * ur
    else:
        log_m_l = -0.16 + 0.18 * ur
    # Stellar mass from the absolute r magnitude (solar M_r = 4.62) and M/L.
    m_msun = 10**(((4.62 - Mr)/2.5) + log_m_l)
    # print() call (instead of the Python-2-only print statement) produces the
    # same output under both Python 2 and Python 3.
    print('Mass [M_solar] %s' % m_msun)
    # Pre-quench SFR chosen so that the constant phase plus the exponential
    # tail integrate to m_msun by t_end; /1E9 converts per-Gyr to per-year.
    c_sfr = (m_msun/(tq + tau*(1 - N.exp((tq - t_end)/tau)))) / 1E9
    a = time.searchsorted(tq)
    sfr = N.ones(len(time))*c_sfr
    sfr[a:] = c_sfr*N.exp(-(time[a:]-tq)/tau)
    return sfr
''' BDS modified predict_c_one and get_colors to take more general versions of
colour prediction requests: they can take any list of filter pairs and return
all of the corresponding colours. The default is still to behave the old way.
'''
def predict_c_one(theta, age, nuv=[nuvwave, nuvtrans], u=[uwave, utrans], r=[rwave, rtrans], filter_pairs=None):
""" This function predicts the u-r and nuv-u colours of a galaxy with a SFH defined by [tq, tau], according to the BC03 model at a given "age" i.e. observation time. It calculates the colours at all times then interpolates for the observed age - it has to do this in order to work out the cumulative mass across the SFH to determine how much each population of stars contributes to the flux at each time step.
:theta:
An array of size (1,2) containing the values [tq, tau] in Gyr.
:tq:
The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
:tau:
The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
If you want colors of the format (a-b), (b-c), you can specify that as:
nuv=[a_wave, a_trans], u=[b_wave, b_trans], r=[c_wave, c_trans]
OR for an arbitrary set of colors, instead use:
filter_pairs = [[a, b], [c, d], ... , [y, z]]
where each of the filters a, b, c, ... has the format [a_wave, a_trans] etc. as above.
Note that:
filter_pairs = [[a, b], [b, c]]
will have the same result as assigning a, b, and c to nuv, u, and r. However, the nuv=, u=, r= method is a bit faster.
RETURNS:
if not specifying filter_pairs:
:nuv_u_age:
Array the same shape as :age: with the nuv-u colour values at each given age for the specified :theta: values
:u_r_age:
Array the same shape as :age: with the u-r colour values at each given age for the specified :theta: values
otherwise:
:colours:
a list of N colours (or colour arrays, if :age: is an array) where N == len(filter_pairs), ordered as filter_pairs is.
modified 11/7/2018 by BDS to allow user to specify different colours from nuv-u and u-r (those are still default)
nuv still corresponds to the shortest-wavelength filter, r to the longest
modified 20/7/2018 by BDS to allow an arbitrary list of pairs of filters
"""
ti = N.arange(0, 0.01, 0.003)
t = N.linspace(0,14.0,100)
t = N.append(ti, t[1:])
tq, tau = theta
sfr = expsfh(tq, tau, t)
### Work out total flux at each time given the sfh model of tau and | |
monitored > four times per day
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow + 1, 0)
text = 'Blood Glucose Level (BGL) monitored > four times per day'
self._add_column_name(cell, text, alignment='left', bold=True)
columns = [
'Day of admission',
'Day two of admission',
]
self._insert_subrows(table=table, row=nrow, col=1, values=columns)
name = '# Blood Glucose Level (BGL) monitored > four times per day - Day of admission'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
name = '# Blood Glucose Level (BGL) monitored > four times per day - Day two of admission'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'BGL ≥ 10mmol/L within 48 hours of admission'
self._add_column_name(cell, text, alignment='left')
name = '# BGL ≥ 10mmol/L within 48 hours of admission'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Insulin given for first BGL ≥ 10mmol/L'
self._add_column_name(cell, text, alignment='left', bold=True, level=True)
name = '# Insulin given for first BGL ≥ 10mmol/L'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Insulin given within one hour from first BGL ≥ 10mmol/L #'
self._add_column_name(cell, text, alignment='left', bold=True, level=True)
name = '# Insulin given within one hour from first BGL ≥ 10mmol/L'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Swallow screening'
self._add_column_name(cell, text, alignment='left', bold=True, italic=True)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Formal swallow screen performed'
self._add_column_name(cell, text, alignment='left')
name = '# Formal swallow screen performed'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Swallow screen performed within 24 hours #'
self._add_column_name(cell, text, alignment='left', bold=True, level=True)
name = '# Swallow screen performed within 24 hours'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Swallow screen or swallow assessment performed before being given oral medications #'
self._add_column_name(cell, text, alignment='left', bold=True)
name = '# Swallow screen or swallow assessment performed before being given oral medications'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
nrow += 1
cell = self._merge_cells(table, nrow, 0, nrow, 1)
text = 'Swallow screen or swallow assessment performed before being given oral food or fluids #'
self._add_column_name(cell, text, alignment='left', bold=True)
name = '# Swallow screen or swallow assessment performed before being given oral food or fluids'
columns = self._get_column_values(name, baseline)
self._insert_values(table=table, row=nrow, values=columns)
# Create iterator through cells in the table
def iter_cells(table):
for row in table.rows:
for cell in row.cells:
yield cell
# Set font size of all cells in the table based on the report type
for cell in iter_cells(table):
cell.text_frame.autosize = MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT
for paragraph in cell.text_frame.paragraphs:
paragraph.font.size = self.table_font_size
    def generate_baseline_report(self, df=None):
        ''' Generate baseline data summary feedback.

        Builds a presentation from the qasc_baseline.pptx template: a summary
        table plus three explanatory text boxes on the first slide, and a
        clustered-column chart of five percentage measures on a second slide,
        then saves the result into the current working directory.

        :param df: dataframe to be used for values (default: None)
        :type df: dataframe

        NOTE(review): despite the None default, df is required — the chart
        construction below indexes df directly and will fail when df is None.
        '''
        self.table_font_size = Pt(11) # Set default font size of the table in baseline report
        # Define template for the presentation
        master = os.path.normpath(os.path.join(os.path.dirname(__file__), 'backgrounds', 'qasc_baseline.pptx'))
        # Look up the facility name for this site ID in the study dataframe
        hospital_name = self.study_df.loc[self.study_df['unique_identifier'] == self.site_id, 'facility_name'].iloc[0]
        # Create output filename containing qasc, current date and site ID
        output_file = f'qasc_{self.site_id}_{datetime.now().strftime("%Y-%m-%d")}.pptx'
        # set main text to be added into reports
        main_texts = [
            'Congratulations on completing your baseline audit. We have summarized the results for you in the table below. Please share these results with your team. These data can assist you when discussing the barriers and enablers to implementation of the FeSS clinical protocols at your hospital.',
            'It is important to please let us know if there are problems with the data that can be explained further (eg. was there a question the people entering data may not have understood properly?)',
            'Please don’t hesitate to contact the NRI if you require clarification on any of the items above.'
        ]
        # Create new Presentation object
        prs = Presentation(master)
        # Get first slide
        first_slide = prs.slides[0]
        # Set specification of the table (dimensions and position in cm)
        table_specs = {
            'height': Cm(18),
            'width': Cm(19),
            'left': Cm(1),
            'top': Cm(5)
        }
        # Create the 21x4 FeSS management summary table on the first slide
        self._create_table(
            slide=first_slide,
            table_specs=table_specs,
            title=f'Table 1: FeSS Management for {hospital_name}',
            trow=21,
            tcol=4,
            baseline=True)
        # Add the rest of explaining texts; one positioning spec per entry in
        # main_texts (index 0 above the table, 1 and 2 below it)
        specs = {
            0: {
                'height': Cm(2),
                'width': Cm(19),
                'left': Cm(1),
                'top': Cm(3)
            },
            1: {
                'height': Cm(1),
                'width': Cm(19),
                'left': Cm(1),
                'top': Cm(25.5)
            },
            2: {
                'height': Cm(1),
                'width': Cm(19),
                'left': Cm(1),
                'top': Cm(27)
            },
        }
        for i in range(0, len(main_texts)):
            self._add_textbox(specs[i], first_slide, main_texts[i])
        # Create graph on the second slide
        second_slide = prs.slides.add_slide(prs.slide_layouts[0])
        # Select the five percentage measures plotted in the chart; df must
        # contain exactly these columns
        graph_df = df[[
            '% Temperature monitored at least four times per day - Day of admission',
            '% Paracetamol (or other anti-pyretic) given with one hour from first temperature > 37.5°C',
            '% Blood Glucose Level (BGL) monitored > four times per day - Day of admission',
            '% Insulin given within one hour from first BGL ≥ 10mmol/L',
            '% Swallow screen performed within 24 hours',
        ]].copy()
        # Shorter labels for the chart's category axis
        new_column_names = ["Temp (Day 1)", "Paracetamol (1hr)", "BGL's (Day 1)", "Insulin (1hr)", "Swallow screen (24hrs)"]
        graph_df.rename(columns=dict(zip(graph_df.columns, new_column_names)),inplace=True)
        column_name = 'Baseline audit'
        # Transpose so each measure becomes a row and the single data row
        # (index 0) becomes the 'Baseline audit' series column
        graph_df = graph_df.T.rename(columns={0: column_name})
        # Create chart data
        chart_data = ChartData()
        chart_data.categories = new_column_names
        chart_data.add_series(column_name, graph_df[column_name].tolist())
        # Add chart on slide
        specs = {
            'height': Cm(10),
            'width': Cm(19),
            'left': Cm(1),
            'top': Cm(3)
        }
        chart = second_slide.shapes.add_chart(
            XL_CHART_TYPE.COLUMN_CLUSTERED, specs['left'],specs['top'], specs['width'],specs['height'], chart_data).chart
        plot = chart.plots[0]
        # All bars with the same color
        plot.vary_by_categories = False
        # Values are percentages, so cap the value axis at 100
        value_axis = chart.value_axis
        value_axis.maximum_scale = 100
        value_axis.major_gridlines.format.line.width = Pt(0.5)
        value_axis.major_gridlines.format.line.color.rgb = RGBColor(206, 206, 206) # Set color to light gray (CECECE)
        value_axis.format.line.color.rgb = RGBColor(0, 0, 0)
        # _xFill is a private python-pptx handle used by the transparency helper
        solidFill = value_axis.format.line.color._xFill
        self._set_transparency(100, solidFill)
        # change font size of values
        tick_labels = value_axis.tick_labels
        tick_labels.font.size = Pt(11)
        # Value for y-axis (change font size, name, and other things)
        category_axis = chart.category_axis
        # Set 100% transparency to category axis
        category_axis.format.line.color.rgb = RGBColor(206, 206, 206)
        solidFill = category_axis.format.line.color._xFill
        self._set_transparency(100, solidFill)
        # Change font size of category labels
        category_labels = category_axis.tick_labels
        category_labels.font.size = Pt(11)
        # Set graph of title
        graph_title = f'Figure 1: FeSS Management {hospital_name} Hospital'
        chart_text = chart.chart_title.text_frame
        chart_text.text = graph_title
        chart_text.paragraphs[0].font.size = Pt(12)
        chart_text.paragraphs[0].font.color.rgb = RGBColor(89, 89, 89)
        # Save presentation
        path = os.path.join(os.getcwd(), output_file)
        # NOTE(review): save_file() is called with the bare filename before
        # prs.save() writes the file — presumably it registers or prepares the
        # output location; confirm the intended ordering.
        save_file(output_file)
        prs.save(path)
def generate_pre_post_report(self):
''' Generate report with pre/post comparison. '''
# Set smaller font size of the table
self.table_font_size = Pt(9.5)
# Define template
master = os.path.normpath(os.path.join(os.path.dirname(__file__), 'backgrounds', 'qasc_comparison.pptx'))
# Get hospital name based on study ID
hospital_name = self.study_df.loc[self.study_df['unique_identifier'] == self.site_id, 'facility_name'].iloc[0]
# Create output filename containing qasc, current date and site ID
output_file = f'qasc_comp_{self.site_id}_{datetime.now().strftime("%Y-%m-%d")}.pptx'
# Create Presentation object
prs = Presentation(master)
# Get first slide
first_slide = prs.slides[0]
# Add title
title_text = f'QASC Europe Project: Post-Intervention audit summary {hospital_name} Hospital'
specs = {
'height': Cm(1),
'width': Cm(18),
'left': Cm(0.6),
'top': Cm(1.5),
}
self._add_textbox(specs, first_slide, title_text, bold=True, underline=True)
# Add first paragraph with explanation and congratulations
# A bit longer code because only some letters was made bold, so I had to created more runs in paragraph.
specs = {
'height': Cm(2),
'width': Cm(19.5),
'left': Cm(0.6),
'top': Cm(2),
}
txBox = first_slide.shapes.add_textbox(specs['left'], specs['top'], specs['width'], specs['height'])
txBox.text_frame.clear()
txBox.text_frame.word_wrap = True
self._add_run(
txBox,
'Congratulations on completing the QASC Europe project audits on the use of the FeSS (',
)
self._add_run(txBox, 'F', bold=True)
self._add_run(txBox, 'ever, ')
self._add_run(txBox, 'S', bold=True)
self._add_run(txBox, 'ugar, and ')
self._add_run(txBox, 'S', bold=True)
self._add_run(txBox, 'wallowing) protocols for stroke patients. The summaries below reflect ')
self._add_run(txBox, f"your hospital’s performance for the {self.pre_stats['n'].iloc[0]} stroke patients you reviewed for the baseline audit XX/XX/XXXX and the {self.post_stats['n'].iloc[0]} patients you reviewed during the post intervention period XX/XX/XXXX. ", bold=True)
self._add_run(txBox, ' We present | |
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
# print("attn_mask:", src_key_padding_mask.shape)
src2 = self.self_attn(q, k, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDualDecoderLayer(nn.Module):
    """Transformer decoder layer with an extra "content" cross-attention stream.

    Besides the usual self-attention and cross-attention over ``memory``, the
    post-norm path reshapes the target into a (height * width)-long sequence
    of ``cnt_d_model``-channel tokens, cross-attends it over ``cnt_memory``
    with a separate attention module, and fuses both cross-attention outputs
    into the residual stream. The reshapes require
    d_model == cnt_d_model * height and a target sequence length equal to
    width (both taken from ``spatial_size``).
    """

    def __init__(self, d_model, cnt_d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, spatial_size=(16, 64)):
        super().__init__()
        # Self-attention and main cross-attention run at d_model channels;
        # the content cross-attention runs at cnt_d_model channels.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.cnt_multihead_attn = nn.MultiheadAttention(cnt_d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # self.fusion_linear = nn.Linear(d_model * 2, d_model)
        # self.norm1 = nn.LayerNorm(d_model)
        # NOTE(review): norm1 is commented out, yet forward_pre() below still
        # calls self.norm1 — the pre-norm path raises AttributeError. The
        # post-norm path uses bg_norm/cnt_norm in its place.
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.bg_norm = nn.LayerNorm(d_model)
        self.cnt_norm = nn.LayerNorm(d_model)
        # NOTE(review): bg_norm_after and cnt_norm_after are created but never
        # used anywhere in this class — confirm whether they are dead
        # parameters before removing (removal changes the state dict).
        self.bg_norm_after = nn.LayerNorm(d_model)
        self.cnt_norm_after = nn.LayerNorm(d_model)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before
        self.cnt_d_model = cnt_d_model
        self.d_model = d_model
        # Spatial layout used to unfold/fold the target between the d_model
        # view and the cnt_d_model view.
        self.height = spatial_size[0]
        self.width = spatial_size[1]

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Add the positional embedding when one is supplied.
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory, cnt_memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     cnt_memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     cnt_memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     cnt_pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        """Post-norm path. Returns (tgt, cnt_w) where cnt_w are the content
        cross-attention weights."""
        # Self-attention: queries/keys carry the query positional embedding.
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        # tgt = self.norm1(tgt)
        ########################## cnt + bg fusion #############################
        tgt = self.bg_norm(tgt)
        cnt_tgt = self.cnt_norm(tgt)
        # Batch size is taken from pos, so pos must be provided on this path.
        N = pos.shape[1]
        # Unfold the target from (width, batch, cnt_d_model * height) into
        # (height * width, batch, cnt_d_model) so it can query the content
        # memory at cnt_d_model channels. NOTE(review): this assumes
        # d_model == cnt_d_model * height and len(tgt) == width — confirm.
        cnt_tgt = cnt_tgt.permute(1, 2, 0).reshape(N, self.cnt_d_model, self.height, self.width)
        cnt_tgt = cnt_tgt.reshape(N, self.cnt_d_model, self.height * self.width).permute(2, 0, 1)
        # print("memory:", cnt_tgt.shape, pos.shape, cnt_pos.shape, cnt_memory.shape)
        tgt2_cnt, cnt_w = self.cnt_multihead_attn(query=cnt_tgt, #self.with_pos_embed(cnt_tgt, query_pos),
                                                  key=self.with_pos_embed(cnt_memory, cnt_pos),
                                                  value=cnt_memory, attn_mask=cnt_memory_mask,
                                                  key_padding_mask=cnt_memory_key_padding_mask) # [0]
        # print("cnt_w:", cnt_w.shape, np.unique(cnt_w[0].data.cpu().numpy()))
        # Transfer from 64 channel to 1024 channel
        # i.e. fold back: (height * width, batch, cnt_d_model) ->
        # (width, batch, cnt_d_model * height), matching tgt's d_model layout.
        tgt2_cnt = tgt2_cnt.permute(1, 2, 0).reshape(N, self.cnt_d_model, self.height, self.width)
        tgt2_cnt = tgt2_cnt.reshape(N, self.cnt_d_model * self.height, self.width).permute(2, 0, 1)
        # Standard cross-attention over the main memory at d_model channels.
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos), # memory
                                   value=memory, attn_mask=memory_mask, # memory
                                   key_padding_mask=memory_key_padding_mask)[0]
        # overall_tgt = torch.cat([tgt2, tgt2_cnt], dim=-1)
        # tgt = tgt + self.dropout2(
        #     self.activation(tgt2))  # self.dropout2(tgt2) + self.dropout2(tgt2_cnt)
        #########################################################################
        # FFN on the cross-attention output; both cross-attention streams are
        # added into the residual together.
        tgt2 = self.norm2(tgt2)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2) + self.dropout3(tgt2_cnt)
        tgt = self.norm3(tgt)
        return tgt, cnt_w

    def forward_pre(self, tgt, memory, cnt_memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        """Pre-norm path.

        NOTE(review): this path is currently broken — self.norm1 is not
        defined in __init__ (see note there), forward() passes it more
        positional arguments than it accepts, and it returns a single tensor
        while forward_post returns a (tgt, cnt_w) tuple.
        """
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        # tgt2 = self.norm2(tgt)
        # tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
        #                            key=self.with_pos_embed(memory, pos),
        #                            value=memory, attn_mask=memory_mask,
        #                            key_padding_mask=memory_key_padding_mask)[0]
        # tgt = tgt + self.dropout2(tgt2)
        ########################## cnt + bg fusion #############################
        # Unlike forward_post, both cross-attentions here reuse the d_model
        # multihead_attn and the main memory mask/pos for the content memory.
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt2_cnt = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                       key=self.with_pos_embed(cnt_memory, pos),
                                       value=cnt_memory, attn_mask=memory_mask,
                                       key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2) + self.dropout2(tgt2_cnt)
        #########################################################################
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory, cnt_memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                cnt_memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                cnt_memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                cnt_pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        # print("memory in post:", memory.shape, pos.shape, cnt_pos.shape)
        if self.normalize_before:
            # NOTE(review): this call passes 10 positional arguments but
            # forward_pre accepts only 9 (it has no cnt-mask/cnt-pos
            # parameters), so it raises TypeError when normalize_before=True.
            return self.forward_pre(tgt, memory, cnt_memory, tgt_mask, memory_mask, cnt_memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt,
                                 memory,
                                 cnt_memory,
                                 tgt_mask,
                                 memory_mask,
                                 cnt_memory_mask,
                                 tgt_key_padding_mask,
                                 memory_key_padding_mask,
                                 cnt_memory_key_padding_mask,
                                 pos,
                                 cnt_pos,
                                 query_pos)
class TransformerDecoderLayer(nn.Module):
    """Standard (DETR-style) transformer decoder layer.

    Self-attention over the queries, cross-attention into the encoder
    memory, then a position-wise feed-forward block.  `normalize_before`
    selects the pre-norm (forward_pre) or post-norm (forward_post) variant.

    NOTE(review): forward_post returns (tgt, attn_weights) while forward_pre
    returns tgt alone, so forward()'s return type depends on
    `normalize_before` — confirm callers handle both shapes.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        # self.d_model_self = 1024
        # self.d_model = 64
        # self.height = 16
        # self.width = 64
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        # _get_activation_fn is defined elsewhere in this module
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Add the positional encoding when one is provided; no-op otherwise.
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        """Post-norm variant: each sub-block is followed by add&norm.

        Returns (tgt, attn_weights) where attn_weights are the cross-attention
        weights from nn.MultiheadAttention.
        """
        # Self-attention (positional embedding added to q/k only).
        q = k = self.with_pos_embed(tgt, query_pos)
        # print("tgt:", tgt.shape)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # Cross-attention into the encoder memory; keep the weights to return.
        tgt2, attn_weights = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                                 key=self.with_pos_embed(memory, pos),
                                                 value=memory, attn_mask=memory_mask,
                                                 key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # Position-wise feed-forward block.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt, attn_weights

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        """Pre-norm variant: LayerNorm is applied before each sub-block.

        Returns tgt only (no attention weights, unlike forward_post).
        """
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
class TransformerDecoderLayer_TP(nn.Module):
    """Decoder layer variant whose post-norm path skips self-attention.

    forward_post performs only cross-attention + FFN (the self-attention
    call is commented out), while forward_pre is a full pre-norm layer.

    NOTE(review): `self_attn` is constructed but only used on the pre-norm
    path; forward_post also returns (tgt, attn_weights) while forward_pre
    returns tgt alone — confirm callers handle both shapes.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        # Hard-coded geometry constants; only d_model is actually used for the
        # attention modules below.  TODO confirm the others are still needed.
        self.d_model_self = 1024
        self.d_model = d_model
        self.height = 16
        self.width = 64
        self.self_attn = nn.MultiheadAttention(self.d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(self.d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        # _get_activation_fn is defined elsewhere in this module
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # print("pos:", tensor.shape, pos.shape)
        # Add the positional encoding when one is provided; no-op otherwise.
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        """Post-norm step WITHOUT self-attention: cross-attention + FFN only.

        Returns (tgt, attn_weights).  `q`/`tgt_mask`/`tgt_key_padding_mask`
        are unused on this path (self-attention is commented out).
        """
        q = k = self.with_pos_embed(tgt, query_pos)
        # L, N, C = tgt.shape
        #tgt2 = self.self_attn(q, k, tgt, attn_mask=tgt_mask,
        #                      key_padding_mask=tgt_key_padding_mask)[0]
        #tgt = tgt + self.dropout1(tgt2)
        tgt2, attn_weights = self.multihead_attn(self.with_pos_embed(tgt, query_pos),
                                                 self.with_pos_embed(memory, pos),
                                                 memory, attn_mask=memory_mask,
                                                 key_padding_mask=memory_key_padding_mask)  # q, k, v
        # print("attn_weights:", np.unique(attn_weights[0].data.cpu().numpy()))
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # Position-wise feed-forward block.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt, attn_weights

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        """Full pre-norm step: self-attention, cross-attention, then FFN.

        Returns tgt only (no attention weights).
        """
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
class mish(nn.Module):
def __init__(self, ):
super(mish, self).__init__()
self.activated = True
| |
#!/usr/bin/env python
#
# analyse_solid_run.py: analyse and report on SOLiD sequencer runs
# Copyright (C) University of Manchester 2011-12,2019 <NAME>
#
########################################################################
#
# analyse_solid_run.py
#
#########################################################################
"""analyse_solid_run.py
Provides functionality for analysing a SOLiD run, to verify and report data
about the run, and suggest a layout scheme for the analysis directories.
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
import sys
import os
import io
import string
import shutil
import gzip
import argparse
import logging
logging.basicConfig(format="%(levelname)s %(message)s")
# Put .. onto Python search path for modules
SHARE_DIR = os.path.abspath(
os.path.normpath(
os.path.join(os.path.dirname(sys.argv[0]),'..')))
sys.path.append(SHARE_DIR)
import bcftbx.Experiment as Experiment
import bcftbx.Md5sum as Md5sum
import bcftbx.SolidData as SolidData
import bcftbx.Spreadsheet as Spreadsheet
#######################################################################
# Class definitions
#######################################################################
# No classes defined
#######################################################################
# Module Functions: program functions
#######################################################################
def report_run(solid_runs,report_paths=False):
    """Print a brief report about SOLiD runs.

    This generates a brief screen report about the content of the
    supplied SOLiD runs e.g. flow cells, layout, number of samples
    etc.

    Arguments:
      solid_runs: a list or tuple of SolidRun objects to report.
      report_paths: if True then also report the full paths for the
        primary data files for each library.
    """
    # Report the data for each run
    first_run = True
    for run in solid_runs:
        # Cosmetic: add separation between runs
        if not first_run:
            print("")
        else:
            first_run = False
        # Report overall slide layout
        slide_layout = run.slideLayout()
        # Banner: flow cell id + layout, framed by '#' rules of equal length
        title = "Flow Cell %s (%s)" % (str(run.run_info.flow_cell),
                                       str(slide_layout))
        print("%s\n%s\n%s" % ('#'*len(title),title,'#'*len(title)))
        print("I.D. : %s" % (run.run_info.name))
        print("Date : %s" % (run.run_info.date))
        print("Samples: %d" % len(run.samples))
        if run.is_paired_end:
            print("\nPaired-end run")
        #
        # Report projects for each sample
        for sample in run.samples:
            title = "\nSample %s" % sample
            title = title + '\n' + "="*len(title)
            print(title)
            for project in sample.projects:
                # Libraries in project
                libraries = project.prettyPrintLibraries()
                title = "Project %s: %s (%d libraries)" % (project.name,
                                                           libraries,
                                                           len(project.libraries))
                title = '\n' + title + '\n' + "-"*len(title)
                print(title)
                print("Pattern: %s/%s" % (sample,
                                          project.getLibraryNamePattern()))
                # Timestamps for primary data
                print("Timestamps:")
                for timestamp in project.getTimeStamps():
                    print("\t%s" % timestamp)
                # Report location of primary data (only when report_paths is set)
                for library in project.libraries:
                    if report_paths:
                        # Paired-end runs also have F5 reads alongside F3
                        files = [library.csfasta,library.qual]
                        if run.is_paired_end:
                            files.extend((library.csfasta_f5,library.qual_f5))
                        for f in files:
                            if f is not None:
                                print("%s" % f)
                            else:
                                # A None entry means the file was never located
                                print("Missing primary data for %s" %
                                      library.name)
def write_spreadsheet(solid_runs,spreadsheet):
    """Generate or append run data to an XLS-format spreadsheet

    Creates a new spreadsheet or appends to an existing one, writing
    new rows to summarise the data about the solid runs supplied as
    input.

    Arguments:
      solid_runs: a list or tuple of SolidRun objects to report.
      spreadsheet: the name of the XLS-format spreadsheet to write
        the data
    """
    # Check whether spreadsheet file already exists; only write the
    # header row when creating the file from scratch
    if os.path.exists(spreadsheet):
        write_header = False
    else:
        write_header = True
    # Only write date once
    write_date = True
    # Open spreadsheet (requires bcftbx.Spreadsheet to be imported)
    wb = Spreadsheet.Spreadsheet(spreadsheet,'SOLiD Runs')
    # Header row
    if write_header:
        wb.addTitleRow(['Ref No',
                        'Project Description',
                        'P.I.',
                        'Date',
                        'Library type',
                        'Sample & Layout Description',
                        'B/C samples',
                        'Total reads',
                        'I.D.',
                        'Cost'])
        # Spacer row
        wb.addEmptyRow(color='gray25')
    # Report the data for each run
    for run in solid_runs:
        # First line: date, flow cell layout, and id
        slide_layout = run.slideLayout()
        if slide_layout is None:
            # Unknown layout arrangement
            slide_layout = "%d samples" % len(run.samples)
        description = "FC"+str(run.run_info.flow_cell)+" ("+slide_layout+")"
        # Run with only one sample
        total_reads = ''
        if len(run.samples) == 1:
            description += ": "+str(run.samples[0].name)
            try:
                if run.samples[0].projects[0].isBarcoded():
                    # Barcoded sample, get stats
                    total_reads = run.samples[0].barcode_stats.totalReads()
                    if total_reads is None:
                        # Potential problem
                        total_reads = "NOT_FOUND"
                else:
                    # Not a barcoded sample
                    total_reads = "MANUAL_LOOKUP"
            except IndexError:
                # Some problem looking up barcode status
                total_reads = "NO_INFO"
        # Deal with date string
        if write_date:
            run_date = run.run_info.date
            write_date = False # Don't write date again
        else:
            run_date = ''
        run_id = run.run_info.name
        wb.addRow(['',
                   '',
                   '',
                   run_date,
                   '',
                   description,
                   '',
                   total_reads,
                   run_id])
        # Add one line per project in each sample
        index = 0
        for sample in run.samples:
            for project in sample.projects:
                libraries = project.prettyPrintLibraries()
                experimenters_initials = project.libraries[0].initials
                # Get initial description and total reads
                if len(run.samples) > 1:
                    # Multiple samples in one project
                    description = sample.name+": "
                    # Total reads
                    # For barcoded samples we should be able to extract
                    # those from the barcode statistics data
                    if project.isBarcoded():
                        total_reads = sample.barcode_stats.totalReads()
                        if total_reads is None:
                            # Potential problem
                            total_reads = "NOT_FOUND"
                    else:
                        # Not a barcoded sample, manual lookup
                        total_reads = "MANUAL_LOOKUP"
                else:
                    # All libraries belong to the same sample
                    description = ''
                    # Total reads already written once
                    total_reads = ''
                # Library type
                if project.isBarcoded():
                    library_type = "bar-coding"
                else:
                    library_type = ''
                # Add samples to the libraries
                description += str(len(project.libraries))+" samples "+\
                    libraries
                # Project description field
                # Essentially a placeholder with experimenter's initials
                # Bug fix: string.lowercase was removed in Python 3; use
                # string.ascii_lowercase to label projects a), b), c), ...
                project_description = "%s) %s [project description]" % \
                    (string.ascii_lowercase[index],experimenters_initials)
                index += 1
                # FIXME need to check that this total read info is
                # actually correct
                wb.addRow(['',
                           project_description,
                           '[P.I.]',
                           '',
                           library_type,
                           description,
                           len(project.libraries),
                           total_reads])
                wb.addEmptyRow()
    # Write the spreadsheet
    wb.write()
def suggest_analysis_layout(solid_runs):
    """Generate a bash script to build the analysis directory scheme

    Given a set of SolidRuns, print a set of script commands for running
    the build_analysis_dir.py program to create and populate the analysis
    directories.  The script can be edited before being executed by the
    user.

    Arguments:
      solid_runs: a list of SolidRun objects.
    """
    print("#!/bin/sh\n#\n# Script commands to build analysis directory structure")
    for run in solid_runs:
        # Analysis directory sits next to the CWD, named after the run
        analysis_top = os.path.abspath(
            os.path.join(os.getcwd(),os.path.basename(run.run_dir)))
        for sample in run.samples:
            for project in sample.projects:
                # One experiment definition per project
                expt = Experiment.Experiment()
                expt.name = project.getProjectName()
                expt.type = "expt"
                expt.sample = project.getSample().name
                expt.library = project.getLibraryNamePattern()
                # Assemble the full command line for this experiment
                command_parts = ['build_analysis_dir.py',
                                 "--top-dir=%s_analysis" % analysis_top,
                                 "--link=absolute",
                                 "--naming-scheme=partial",
                                 expt.describe(),
                                 run.run_dir]
                # Emit one backslash-continued command per project
                print("#\n%s" % (' \\\n').join(command_parts))
def suggest_rsync_command(solid_runs):
    """Generate a bash script to rsync data to another location

    Given a set of SolidRuns, print a set of script commands for running
    rsync to copy the data directories to another location.

    The script should be edited before being executed by the user.
    """
    print("#!/bin/sh\n#")
    print("# Script command to rsync a subset of data to another location")
    print("# Edit the script to remove the exclusions on the data sets to be copied")
    for run in solid_runs:
        # Start every run as a dry-run so nothing copies until reviewed
        print("rsync --dry-run -av -e ssh \\")
        # Exclude each library by default; the user deletes lines to opt in
        for sample in run.samples:
            for library in sample.libraries:
                print("--exclude=" + str(library) + " \\")
        print("%s user@remote.system:/destination/parent/dir" % run.run_dir)
def verify_runs(solid_dirs):
    """Do basic verification checks on SOLiD run directories

    For each SOLiD run directory, create a SolidRun object and check for the
    expected sample and library directories, and that primary data files
    (csfasta and qual) have been assigned and exist.

    Returns a UNIX-like status code: 0 indicates that the checks passed,
    1 indicates that they failed.

    Arguments:
      solid_dirs: a list of SOLiD sequencing directory names.

    Returns:
      0 if the run is verified, 1 if there is a problem.
    """
    print("Performing verification")
    status = 0
    for solid_dir in solid_dirs:
        # Initialise
        run_status = 0
        run = SolidData.SolidRun(solid_dir)
        if not run.verify():
            run_status = 1
        # Bug fix: the Python 2 trailing-comma print idiom no longer
        # suppresses the newline in Python 3; use end='' so the status
        # appears on the same line as the run name.
        print("%s:" % run.run_name, end='')
        if run_status == 0:
            print(" [PASSED]")
        else:
            print(" [FAILED]")
            status = 1
    # Completed
    # Bug fix: the original line was a syntax error left over from the
    # Python 2->3 conversion (unterminated 'print("Overall status:",').
    print("Overall status:", end='')
    if status == 0:
        print(" [PASSED]")
    else:
        print(" [FAILED]")
    return status
def copy_data(solid_runs,library_defns):
"""Copy selection of primary data files to current directory
Locates primary data files matching a sample/library specification
string of the form <sample_pattern>/<library_pattern>. The patterns
are matching against sample and library names, and can be either
exact or can include a trailing wildcard character (i.e. *) to match
multiple names. For example:
- 'SA_LH_POOL_49/LH1' matches the library called 'LH1' in the sample
'SA_LH_POOL_49';
- '*/LH1' matches all libraries called 'LH1' in any sample;
- '*/LH*' matches all libraries starting 'LH' in any sample;
- '*/*' matches all primary data files in all runs
The files are copied to the current directory.
Arguments:
solid_runs: list of populated SolidRun objects
library_defns: list of library definition strings (see above
for syntax/format)
"""
for library_defn in library_defns:
sample = library_defn.split('/')[0]
library = library_defn.split('/')[1]
print("Copy: look for samples matching pattern %s" % library_defn)
print("Data files will be copied to %s" % os.getcwd())
for run in solid_runs:
for lib in run.fetchLibraries(sample,library):
print("-> matched %s/%s" % (lib.parent_sample.name,lib.name))
primary_data_files | |
from copy import deepcopy
from itertools import product
# Internal format for formulas:
# Each subformula within a formula is a list
# For example, (p or ¬p) parses as [['p'], '˅', ['¬',['p']]]
# Parser accepts things such as "(if p then q)" and "if p then q"
# Unary connectives go out before the formula they apply to, binaries go out in the middle
# For first order, parses atomics as they come in (e.g. "Rax" parses as ["Rax"])
# Quantified strings e.g. ("Vx Px") as ['∀', 'x', ['Px']]
# Propositional atoms recognised by the parser
atomics = ['p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'o']
# Set-theory term letters (used by the set-operation parser below)
set_terms = ["A", "B", "C", "D", "E", "F", "G", "H", "J"]
# Binary set operations: intersection, union, difference, cartesian product
binary_set_operations = ['∩', '∪', '−', '×']
# Characters/names users may not employ for their own symbols
reserved_terms = atomics[:]
reserved_terms.extend(['(', ')', ',', "'", '"', "V", "E", "$", "%"])
# First-order vocabulary
FOL_individual_constants = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n']
FOL_variables = ['x', 'y', 'z', 'w', 'v', 'u']
FOL_predicates = ['P', 'Q', 'R', 'S', 'T', 'U', 'A', 'B', 'C']

# ----------------------------------------------------------------------------------------------------------------------
# PROPOSITIONAL LOGIC

def parse_propositional(string, logic):
    """Takes a string and transforms it into a formula of the format defined above

    Prepares the string (strips 'if', substitutes the logic's constants,
    normalises falsum, removes spaces) and hands it to parse_propositional2.

    Raises ValueError if the string is empty or not well formed.
    """
    # An empty string returns an error
    if not string:
        # Bug fix: the original was missing `raise`, so empty input fell
        # through and eventually crashed with IndexError inside the parser.
        raise ValueError("An empty string is not a well-formed propositional formula")
    # Delete the ifs
    string = string.replace('if ', '')
    # Parse constants (e.g. 'and' -> '∧') as declared by the logic
    for con in logic.parsed_constants:
        string = string.replace(con, logic.parsed_constants[con])
    string = string.replace("falsum", "⊥")
    string = string.replace("Falsum", "⊥")
    # Remove spaces
    string = string.replace(" ", "")
    # Trick so that binaries do not have to contain external parentheses:
    # first try with an added outer pair, then fall back to the bare string
    try:
        formula = parse_propositional2('(' + string + ')', logic)
        return formula
    except ValueError:
        pass
    formula = parse_propositional2(string, logic)
    return formula

def parse_propositional2(string, logic):
    """Recursive descent over a fully-parenthesised, space-free formula string."""
    # Atomics (and falsum) go back directly as singleton lists
    if string in atomics or string == '⊥':
        return [string]
    # It recognizes if it is in presence of a negated or binary formula
    # Checks if unary:
    if string[0] in logic.constants(1):
        return [string[0], parse_propositional2(string[1:], logic)]
    # Checks if binary (starts and ends with parentheses)
    elif string[0] == '(' and string[-1] == ')':
        # Searches for a constant that has 1 more left parenthesis open than
        # right: that connective is the main connective of this subformula
        num_parentheses_left = 0
        num_parentheses_right = 0
        for x in range(len(string)):
            if string[x] == '(':
                num_parentheses_left += 1
            elif string[x] == ')':
                num_parentheses_right += 1
            elif string[x] in logic.constants(2) and num_parentheses_left == num_parentheses_right + 1:
                return [parse_propositional2(string[1:x], logic), string[x],
                        parse_propositional2(string[x+1:-1], logic)]
        # If the string starts and ends with parentheses, but did not return at this point, raise an error
        raise ValueError(string + " is not a well-formed propositional formula")
    # If we did not enter any of the above, then the string is not a formula, and we just return an error
    else:
        raise ValueError(string + " is not a well-formed propositional formula")
def unparse_propositional_formula(formula, logic):
    """Render an internal-format formula back into a readable string.

    Works on a deep copy so the caller's formula is never mutated.
    """
    working_copy = unparse_propositional_parentheses(deepcopy(formula), logic)
    return unparse_propositional_rest(working_copy, logic)
def unparse_propositional_parentheses(formula, logic):
    """First unparsing pass: flatten chains of unary connectives in place.

    E.g. [¬, [¬, phi]] becomes [¬, ¬, phi]; atomics are reduced to their
    bare string.  Binary subformulas are recursed into but kept as-is.
    NOTE(review): mutates the list it is given — callers should pass a copy
    (unparse_propositional_formula does a deepcopy first).
    """
    # If atomic: unwrap ['p'] -> 'p'
    if len(formula) == 1:
        return formula[0]
    # If unary connective
    elif len(formula) == 2:
        formula[1] = unparse_propositional_parentheses(formula[1], logic)
        # If the next one is atomic (i.e. [¬, p])
        if type(formula[1]) == str:
            return formula
        # If the next one is unary (i.e. [¬, [¬, phi]] )
        elif len(formula[1]) == 2:
            formula = [formula[0], formula[1][0], formula[1][1]]  # Turns it into [¬, ¬, phi]
            return formula
        # If the next one has length 3
        elif len(formula[1]) == 3:
            # If a unary is in the middle [¬ [¬, ¬, phi]]
            if formula[1][1] in logic.constants(1):
                formula[1].insert(0, formula[0])
                formula = formula[1]
                return formula
            # If that does not happen, the next is a binary and should be left as is
            else:
                return formula
        # If no conditional was entered before, then length > 3, eg [¬, [¬, ¬, ¬, phi]]
        else:
            formula[1].insert(0, formula[0])
            formula = formula[1]
            return formula
    # If the formula is binary: recurse into both sides, keep the shape
    elif len(formula) == 3:
        formula[0] = unparse_propositional_parentheses(formula[0], logic)
        formula[2] = unparse_propositional_parentheses(formula[2], logic)
        return formula
def unparse_propositional_rest(formula, logic):
    """Second unparsing pass: render the flattened formula to a string.

    Expects the output of unparse_propositional_parentheses (atomics are
    bare strings, unary chains flattened to [¬, ¬, ..., phi]).
    """
    # If atomic (eg 'p') or falsum: already a string of length 1
    if len(formula) == 1:
        return formula
    # If binary: render both sides and wrap in parentheses
    elif len(formula) == 3 and formula[1] in logic.constants(2):
        formula0 = unparse_propositional_rest(formula[0], logic)
        formula2 = unparse_propositional_rest(formula[2], logic)
        unparsed_formula = f"({formula0} {formula[1]} {formula2})"
        return unparsed_formula
    # If not atomic or binary, it is some chain of unaries [¬, ¬, .., phi]
    else:
        last_formula = unparse_propositional_rest(formula[-1], logic)
        formula = formula[:-1]  # Remove unparsed Phi
        formula.append(last_formula)  # Add parsed Phi
        # Render the Python list and strip list syntax to get "¬ ¬ phi"
        unparsed_formula = str(formula)
        unparsed_formula = unparsed_formula.replace("'", "")
        unparsed_formula = unparsed_formula.replace(",", "")
        for ucon in logic.constants(1):
            # Remove space after unary connectives
            while ucon + " " in unparsed_formula:
                unparsed_formula = unparsed_formula.replace(ucon + " ", ucon)
        return unparsed_formula[1:-1]  # This is to remove the []
# Propositional arguments
def parse_propositional_argument(own_argument_string, logic):
    """Parse "prem1, prem2 / conclusion" into [[premises], conclusion, warning].

    WILL NOT CHECK IF THE ARGUMENT IS VALID DUE TO A CIRCULAR IMPORT ERROR -
    CHECK IN THE CALLING FUNCTION.

    The warning flag is True whenever the logic is not Classical.
    Raises ValueError (with a bracketed message) on malformed input.
    """
    warning = False
    own_argument_string = own_argument_string.strip()
    own_argument_string = own_argument_string.replace(" ", "")
    # Exactly one '/' separates premises from the conclusion
    if own_argument_string.count("/") != 1:
        raise ValueError("[Argument must contain a single conclusion prefaced with '/']")
    # A no-premise argument is given
    if own_argument_string[0] == "/":
        try:
            concl = parse_propositional(own_argument_string[1:], logic)
        except ValueError:
            raise ValueError('[The string given as conclusion is not a well-formed formula]')
        if not str(logic) == "Classical":
            warning = True
        return [[], concl, warning]
    # An argument with premises is given
    else:
        prems = []
        # Turn "p, q / r" into "(p,q,r)" and split with separate_arguments
        # (defined elsewhere in this module)
        own_argument_string = own_argument_string.replace('/', ',')
        formulas = separate_arguments('(' + own_argument_string + ')')
        for x in range(len(formulas[:-1])):
            try:
                prems.append(parse_propositional(formulas[x], logic))
            except ValueError:
                raise ValueError(f'[Premise {x+1} is not a well-formed formula]')
        try:
            concl = parse_propositional(formulas[-1], logic)
        except ValueError:
            raise ValueError('[Conclusion given is not a well-formed formula]')
        if not str(logic) == "Classical":
            warning = True
        return [prems, concl, warning]
def unparse_propositional_argument(argument, logic):
    """Argument comes in form [ [prem1, prem2], conclusion ].

    Returns "prem1, prem2 / conclusion" with each formula rendered by
    unparse_propositional_formula.
    """
    premises, conclusion = argument[0], argument[1]
    rendered_premises = ', '.join(
        unparse_propositional_formula(premise, logic) for premise in premises)
    rendered_conclusion = unparse_propositional_formula(conclusion, logic)
    return rendered_premises + ' / ' + rendered_conclusion
# ----------------------------------------------------------------------------------------------------------------------
# SET THEORY
def unparse_set_operation(string):
    """Rewrites a set operation in prefix notation into infix notation"""
    string = string.replace(" ", "")
    # Powerset: unparse the bracketed argument and re-wrap it
    if string[:2] == '℘(':
        return f'℘({unparse_set_operation(string[2:-1])})'
    # Binary operation: split the two arguments and recurse on each
    # (separate_arguments is defined elsewhere in this module)
    if string[0] in binary_set_operations:
        args = separate_arguments(string[1:])
        left = unparse_set_operation(args[0])
        right = unparse_set_operation(args[1])
        return f'({left} {string[0]} {right})'
    # Bare set term
    return string

def parse_set_operation(string):
    """This function prepares the formula, the next one parses it

    Operations are written in infix notation. Use U for union, I for
    intersection, - for complement, P for pset, X for cartesian product.
    Also accepts union, inters, cartp, complem, pset."""
    # Normalise all accepted spellings to the canonical symbols
    # (order matches the original; word forms are lowercase so the
    # single-letter replacements cannot clobber them)
    replacements = (('U', '∪'), ('I', '∩'), ('X', '×'), ('-', '−'), ('P', '℘'),
                    ('union', '∪'), ('inters', '∩'), ('cartp', '×'),
                    ('complem', '−'), ('pset', '℘'), (' ', ''))
    for alias, symbol in replacements:
        string = string.replace(alias, symbol)
    # Trick so that external parentheses don't have to be put
    try:
        return parse_set_operation2(f'({string})')
    except ValueError:
        return parse_set_operation2(string)

def parse_set_operation2(string):
    """Basically rewrites a set operation string into prefix notation"""
    # Bare set term
    if string in set_terms:
        return string
    # Unary operation (powerset): recurse on the bracketed argument
    if string[:2] == '℘(' and string[-1] == ')':
        return f'℘({parse_set_operation2(string[2:-1])})'
    # Binary operation: must be parenthesised at this level
    elif string[0] == '(' and string[-1] == ')':
        # The main operation is the one with exactly one more '(' than ')'
        # to its left
        open_count = 0
        close_count = 0
        for position in range(len(string)):
            char = string[position]
            if char == '(':
                open_count += 1
            elif char == ')':
                close_count += 1
            elif char in binary_set_operations and open_count == close_count + 1:
                left = parse_set_operation2(string[1:position])
                right = parse_set_operation2(string[position + 1:-1])
                return f'{char}({left},{right})'
        raise ValueError('Invalid set operation')
    raise ValueError('Invalid set operation')
def parse_own_sets(string):
"""Format is A = {...}; B = {...}"""
set_dict = dict()
list_dict = dict()
string = string.replace(" ", "")
args = string.split(";")
for arg in args:
if arg[0] not in set_terms:
incorrect_name = arg[:arg.index("=")]
raise ValueError(f'[{incorrect_name} is not a valid | |
import pygame, random, math, queue, os
from dataclasses import dataclass, field
from typing import Any
@dataclass(order=True)
class PrioritizedItem():
    """Priority-queue entry ordered by `priority` alone; `item` is excluded
    from comparisons so non-comparable payloads can share a queue."""
    priority: int
    item: Any=field(compare=False)
# --- Global game setup: window, constants, asset loading, audio -----------
pygame.init()
clock = pygame.time.Clock()
size = (600, 600)
screen = pygame.display.set_mode(size)

#Constants
rows = 30
columns = 30
tileWidth = 10
scale = 10

#Buttons
mainMenuButtons = ["play", "settings", "exit"]
gameButtons = ["home"]
settingsButtons = ["toggleMusic", "toggleSound", "home"]

#Stores directions
directions = {
    "left" : pygame.math.Vector2(-1, 0),
    "right" : pygame.math.Vector2(1, 0),
    "up" : pygame.math.Vector2(0, -1),
    "down" : pygame.math.Vector2(0, 1)
}

#Stores different fonts in a dictionary
fonts = {
    "large" : pygame.font.SysFont(None, 48),
    "medium" : pygame.font.SysFont(None, 24)
}

#Init: mutable global game state shared by the classes below
grid = []
rooms = []
entities = []
effects = []
player = None
levelCount = 1
score = 0

#Stores the list of all tile types which entities cannot walk through
collidable = ["wall", "border", "player", "lockedDoor", "enemy"]
#Stores all the different tile types within a list
tileTypesList = ["wall", "border", "floor", "player", "door", "lockedDoor", "enemy"]
#Creates an empty dictionary for tile types
tileTypes = {}
#Iterates through each tileType within tileTypesList
for tileType in tileTypesList:
    #Forms the directory for the image of the tile to be accessed through concatenation
    directory = "tiles/" + tileType + ".png"
    #Loads the image from the directory and scales it to a resolution of tileWidth x tileWidth pixels
    sprite = pygame.transform.scale(pygame.image.load(directory), (tileWidth * scale, tileWidth * scale))
    #Stores the sprite into the dictionary where its key is the name of the tileType
    tileTypes[tileType] = sprite

#Stores the list of all effect names and the number of frames they have
effectTypesList = {
    "hit" : 3,
    "death" : 4
}
#Creates an empty dictionary for effects to be stored within
effectTypes = {}
#Loads all the effects and organises them into a dictionary for later use
for effectType in effectTypesList:
    #Gets the number of frames of a particular type of effect
    numberOfFrames = effectTypesList[effectType]
    #Creates a list within the dictionary for the frames of the effect to be stored
    effectTypes[effectType] = []
    #Concatenates strings to form the directory of the folder which holds the effect frames
    folderDirectory = "effects/" + effectType + "/"
    #Loops through the individual frames within the folder
    for i in range(numberOfFrames):
        #Concatenates strings to form the directory of a frame
        directory = folderDirectory + str(i) + ".png"
        #Loads the image from the directory as well as scaling the image to a resolution of tileWidth x tileWidth pixels
        sprite = pygame.transform.scale(pygame.image.load(directory), (tileWidth * scale, tileWidth * scale))
        #Stores the loaded sprite into the effectTypes dictionary
        effectTypes[effectType].append(sprite)

#Stores list of sound names
soundNames = ["hit", "death", "roomComplete", "roomEnter"]
#Creates an empty dictionary for sound effects to be stored within
sounds = {}
#Loads all sound effects into the sounds dictionary
for soundName in soundNames:
    #Forms the directory for the particular sound to be loaded
    directory = "sfx/" + soundName + ".wav"
    #Stores the sound within the dictionary as a PyGame sound object
    sounds[soundName] = pygame.mixer.Sound(directory)

#Loads music
pygame.mixer.music.load("music.mp3")
#Adjusts volume
pygame.mixer.music.set_volume(0.2)
#Plays music infinitely
pygame.mixer.music.play(-1)

#Audio toggles (flipped from the settings menu)
musicEnabled = True
soundEnabled = True
class Tile():
    """A single grid cell: knows its (column, row) coordinates and tile type.

    Reads the module-level globals `tileTypes`, `player`, `grid`,
    `directions`, `rows` and `columns`.
    """

    def __init__(self, x, y, tileType):
        self.x = x
        self.y = y
        self.tileType = tileType

    def getSprite(self):
        #Fetches the sprite from the tileTypes dictionary
        return tileTypes[self.tileType]

    #Draws tile
    def draw(self):
        #Stores the result of the player being within bounds
        #as a boolean (only a 7x7 window around the player is drawn)
        xInBounds = player.x - 3 <= self.x <= player.x + 3
        yInBounds = player.y - 3 <= self.y <= player.y + 3
        #If both conditions are not met, then do not
        #draw the tile
        if not(xInBounds and yInBounds): return
        #Calls the getSprite method to fetch the tile's sprite
        sprite = self.getSprite()
        #Gets the offset to position the focus towards the player
        offset = getOffset()
        #Converts the 2D array coordinates to position measured in pixels
        position = (self.x * tileWidth * scale + offset.x, self.y * tileWidth * scale + offset.y)
        #Draws the sprite at specified position
        screen.blit(sprite, position)

    def getNeighbours(self):
        """Return the orthogonally adjacent tiles, excluding the border ring."""
        neighbours = []
        for direction in directions.items():
            xNew = int(self.x + direction[1].x)
            yNew = int(self.y + direction[1].y)
            #Bounds 1..columns-2 / 1..rows-2 keep the outer border out
            xInBounds = 1 <= xNew <= columns - 2
            yInBounds = 1 <= yNew <= rows - 2
            if not (xInBounds and yInBounds): continue
            neighbourTile = grid[yNew][xNew]
            neighbours.append(neighbourTile)
        return neighbours

    def getCost(self):
        """Movement cost used by pathfinding (lower is preferred)."""
        # NOTE(review): tile types other than floor/wall/border fall through
        # and return None implicitly — confirm pathfinding never asks for the
        # cost of e.g. "door" or "enemy" tiles.
        if self.tileType == "floor":
            return 1
        elif self.tileType == "wall":
            return 5
        elif self.tileType == "border":
            return 999
class Effect(Tile):
    """A short-lived animated overlay (e.g. a hit flash) that plays its
    frames once and then removes itself from the global effects list."""
    def __init__(self, x, y, tileType, frames):
        # Position and tile type come from the Tile base class
        super().__init__(x, y, tileType)
        # Own copy of the frame list so the shared template is untouched
        self.frames = frames.copy()
        # Each frame is shown for 3 ticks, so lifetime = frame count * 3
        self.initialTimer = len(self.frames) * 3
        self.timer = self.initialTimer
    def getSprite(self):
        """Pick the frame matching how long the effect has been alive."""
        elapsed = self.initialTimer - self.timer
        return self.frames[elapsed // 3]
    def update(self):
        """Advance the animation; remove the effect once it expires."""
        self.timer -= 1
        if self.timer <= 0:
            effects.remove(self)
#The entity class defines an object which can move around
#the level and attack other entities. It inherits methods
#and attributes from the tile class.
class Entity(Tile):
    """A tile-based actor that can move around the level and attack other
    entities. Position and drawing behaviour come from Tile."""
    def __init__(self, x, y, tileType):
        super().__init__(x, y, tileType)
        # Health pool; reduced when other entities attack this one
        self.maxHitpoints = 3
        self.hitpoints = self.maxHitpoints
        # Damage this entity deals to others per attack
        self.power = 1
    def move(self, direction):
        """Shift the entity one step along the given direction vector."""
        self.x += int(direction.x)
        self.y += int(direction.y)
    def getTargetEntity(self, direction):
        """Return the entity one step away in `direction`, if any."""
        targetX = self.x + direction.x
        targetY = self.y + direction.y
        for other in entities:
            if other.x == targetX and other.y == targetY:
                return other
    def willCollide(self, x, y):
        """True if another entity already occupies (x, y)."""
        return any(other.x == x and other.y == y and other != self
                   for other in entities)
    def attack(self, targetEntity):
        """Damage the target, spawning a hit effect and playing a sound."""
        targetEntity.hitpoints -= self.power
        createEffect(targetEntity.x, targetEntity.y, "hit")
        playSound("hit")
#The enemy class defines the enemy object which inherits
#methods and attributes from the entity class
class Enemy(Entity):
 """A hostile entity that chases and attacks the player.

 Stats scale with the global levelCount; killing one awards score.
 """
 def __init__(self, x, y, tileType, room):
  #Inherits methods and attributes from parent class
  super().__init__(x, y, tileType)
  #Ensures tile type is "enemy"
  self.tileType = "enemy"
  #Stores the room the enemy has spawned
  self.room = room
  #Cooldown (in ticks) for enemy movement/attacks
  self.actionTimer = 30
  #Set attributes of the enemy according to the level count
  global levelCount
  self.hitpoints = math.ceil(2 + 1 * levelCount)
  self.power = math.ceil(1.3 * levelCount)
 def update(self):
  #Called once per game tick: handles acting, dying and scoring
  global score
  if self.actionTimer > 0:
   #Decrements the action timer
   self.actionTimer -= 1
  else:
   #Calls the decide action method
   self.decideAction()
  if self.hitpoints <= 0:
   #Create death effect
   createEffect(self.x, self.y, "death")
   #Plays death sound
   playSound("death")
   #Remove all references of enemy
   entities.remove(self)
   self.room.enemies.remove(self)
   #Increase score when the enemy dies
   #Score increase depends on the level count
   score += 200 + 100 * levelCount
 def getDirection(self):
  #Returns a vector based on the player's
  #position from the enemy
  #NOTE(review): falls through and returns None when the enemy shares the
  #player's tile (no branch matches); decideAction would then fail on
  #direction.y -- confirm this position is unreachable in practice
  if self.x > player.x:
   return directions["left"]
  elif self.x < player.x:
   return directions["right"]
  elif self.y < player.y:
   return directions["down"]
  elif self.y > player.y:
   return directions["up"]
 def decideAction(self):
  #Resets the action timer
  self.actionTimer = 30
  #Gets direction of the player
  direction = self.getDirection()
  #Gets the target entity
  targetEntity = self.getTargetEntity(direction)
  #Gets the tile at that direction
  nextTile = grid[int(self.y + direction.y)][int(self.x + direction.x)]
  #Checks if the enemy will collide with another entity at that tile's position
  if self.willCollide(nextTile.x, nextTile.y):
   #If it does and the entity it has collided into is a player
   if targetEntity == player:
    #Then call the attack method while passing in the player object
    self.attack(player)
  else:
   #Calls the move method with the direction calculated
   self.move(direction)
def createEffect(x, y, effectType):
#Creates effect object at the given coordinates
effect = Effect(x, y, "effect", effectTypes[effectType])
#Stores the effect in a list | |
r"""
Yangians
AUTHORS:
- <NAME> (2013-10-08): Initial version
"""
#*****************************************************************************
# Copyright (C) 2013 <NAME> <tc<EMAIL> at <EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.misc_c import prod
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.hopf_algebras_with_basis import HopfAlgebrasWithBasis
from sage.categories.graded_hopf_algebras_with_basis import GradedHopfAlgebrasWithBasis
from sage.rings.all import ZZ
from sage.rings.infinity import infinity
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.sets.family import Family
from sage.sets.positive_integers import PositiveIntegers
from sage.monoids.indexed_free_monoid import IndexedFreeAbelianMonoid
from sage.combinat.free_module import CombinatorialFreeModule
from sage.algebras.associated_graded import AssociatedGradedAlgebra
import itertools
class GeneratorIndexingSet(UniqueRepresentation):
    """
    Helper class for the indexing set of the generators.

    Elements are triples ``(r, i, j)`` with ``r`` a positive integer
    (bounded by ``level`` when given) and ``i, j`` in ``index_set``.
    """
    def __init__(self, index_set, level=None):
        """
        Initialize ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: I = GeneratorIndexingSet((1,2))
        """
        self._index_set = index_set
        self._level = level

    def __repr__(self):
        """
        Return a string representation of ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: GeneratorIndexingSet((1,2))
            Cartesian product of Positive integers, (1, 2), (1, 2)
            sage: GeneratorIndexingSet((1,2), 4)
            Cartesian product of (1, 2, 3, 4), (1, 2), (1, 2)
        """
        if self._level is None:
            L = PositiveIntegers()
        else:
            L = tuple(range(1, self._level + 1))
        return "Cartesian product of {L}, {I}, {I}".format(L=L, I=self._index_set)

    def an_element(self):
        """
        Return an element of ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: I = GeneratorIndexingSet((1,2))
            sage: I.an_element()
            (3, 1, 1)
            sage: I = GeneratorIndexingSet((1,2), 5)
            sage: I.an_element()
            (3, 1, 1)
            sage: I = GeneratorIndexingSet((1,2), 1)
            sage: I.an_element()
            (1, 1, 1)
        """
        # With a small level cap (< 3) the generic choice (3, i, i) would
        # lie outside the set, so fall back to (1, i, i).
        if self._level is not None and self._level < 3:
            return (1, self._index_set[0], self._index_set[0])
        return (3, self._index_set[0], self._index_set[0])

    def cardinality(self):
        """
        Return the cardinality of ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: I = GeneratorIndexingSet((1,2))
            sage: I.cardinality()
            +Infinity
            sage: I = GeneratorIndexingSet((1,2), level=3)
            sage: I.cardinality() == 3 * 2 * 2
            True
        """
        if self._level is not None:
            return self._level * len(self._index_set) ** 2
        return infinity

    __len__ = cardinality

    def __call__(self, x):
        """
        Convert ``x`` into an element of ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: I = GeneratorIndexingSet((1,2))
            sage: I([1, 2])
            (1, 2)
        """
        return tuple(x)

    def __contains__(self, x):
        """
        Check containment of ``x`` in ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: I = GeneratorIndexingSet((1,2))
            sage: (4, 1, 2) in I
            True
            sage: [4, 2, 1] in I
            True
            sage: (-1, 1, 1) in I
            False
            sage: (1, 3, 1) in I
            False

        ::

            sage: I3 = GeneratorIndexingSet((1,2), 3)
            sage: (1, 1, 2) in I3
            True
            sage: (3, 1, 1) in I3
            True
            sage: (4, 1, 1) in I3
            False
        """
        return (isinstance(x, (tuple, list)) and len(x) == 3
                and x[0] in ZZ and x[0] > 0
                and (self._level is None or x[0] <= self._level)
                and x[1] in self._index_set
                and x[2] in self._index_set)

    def __iter__(self):
        """
        Iterate over ``self``.

        TESTS::

            sage: from sage.algebras.yangian import GeneratorIndexingSet
            sage: I = GeneratorIndexingSet((1,2))
            sage: it = iter(I)
            sage: [next(it) for dummy in range(5)]
            [(1, 1, 1), (1, 1, 2), (1, 2, 1), (1, 2, 2), (2, 1, 1)]
            sage: I = GeneratorIndexingSet((1,2), 3)
            sage: list(I)
            [(1, 1, 1), (1, 1, 2), (1, 2, 1), (1, 2, 2),
             (2, 1, 1), (2, 1, 2), (2, 2, 1), (2, 2, 2),
             (3, 1, 1), (3, 1, 2), (3, 2, 1), (3, 2, 2)]
        """
        I = self._index_set
        if self._level is not None:
            # Finite case: bounded levels, a single product suffices.
            for x in itertools.product(range(1, self._level + 1), I, I):
                yield x
            return
        # Infinite case: enumerate level-by-level so every triple is
        # reached after finitely many steps.
        for i in PositiveIntegers():
            for x in itertools.product(I, I):
                yield (i, x[0], x[1])
class Yangian(CombinatorialFreeModule):
r"""
The Yangian `Y(\mathfrak{gl}_n)`.
Let `A` be a commutative ring with unity. The *Yangian*
`Y(\mathfrak{gl}_n)`, associated with the Lie algebra `\mathfrak{gl}_n`
for `n \geq 1`, is defined to be the unital associative algebra
generated by `\{t_{ij}^{(r)} \mid 1 \leq i,j \leq n , r \geq 1\}`
subject to the relations
.. MATH::
[t_{ij}^{(M+1)}, t_{k\ell}^{(L)}] - [t_{ij}^{(M)}, t_{k\ell}^{(L+1)}]
= t_{kj}^{(M)} t_{i\ell}^{(L)} - t_{kj}^{(L)} t_{i\ell}^{(M)},
where `L,M \geq 0` and `t_{ij}^{(0)} = \delta_{ij} \cdot 1`. This
system of quadratic relations is equivalent to the system of
commutation relations
.. MATH::
[t_{ij}^{(r)}, t_{k\ell}^{(s)}] =
\sum_{p=0}^{\min\{r,s\}-1} \bigl(t_{kj}^{(p)} t_{i\ell}^{(r+s-1-p)}
- t_{kj}^{(r+s-1-p)} t_{i\ell}^{(p)} \bigr),
where `1 \leq i,j,k,\ell \leq n` and `r,s \geq 1`.
Let `u` be a formal variable and, for
`1 \leq i,j \leq n`, define
.. MATH::
t_{ij}(u) = \delta_{ij} + \sum_{r=1}^\infty t_{ij}^{(r)} u^{-r}
\in Y(\mathfrak{gl}_n)[\![u^{-1}]\!].
Thus, we can write the defining relations as
.. MATH::
\begin{aligned}
(u - v)[t_{ij}(u), t_{k\ell}(v)] & = t_{kj}(u) t_{i\ell}(v)
- t_{kj}(v) t_{i\ell}(u).
\end{aligned}
These series can be combined into a single matrix:
.. MATH::
T(u) := \sum_{i,j=1}^n t_{ij}(u) \otimes E_{ij} \in Y(\mathfrak{gl}_n)
[\![u^{-1}]\!] \otimes \operatorname{End}(\CC^n),
where `E_{ij}` is the matrix with a `1` in the `(i,j)` position
and zeros elsewhere.
For `m \geq 2`, define formal variables `u_1, \ldots, u_m`.
For any `1 \leq k \leq m`, set
.. MATH::
T_k(u_k) := \sum_{i,j=1}^n t_{ij}(u_k) \otimes (E_{ij})_k \in
Y(\mathfrak{gl}_n)[\![u_1^{-1},\dots,u_m^{-1}]\!] \otimes
\operatorname{End}(\CC^n)^{\otimes m},
where `(E_{ij})_k = 1^{\otimes (k-1)} \otimes E_{ij} \otimes
1^{\otimes (m-k)}`. If we consider `m = 2`, we can then also write
the defining relations as
.. MATH::
R(u - v) T_1(u) T_2(v) = T_2(v) T_1(u) R(u - v),
where `R(u) = 1 - Pu^{-1}` and `P` is the permutation operator that
swaps the two factors. Moreover, we can write the Hopf algebra
structure as
.. MATH::
\Delta \colon T(u) \mapsto T_{[1]}(u) T_{[2]}(u),
\qquad
S \colon T(u) \mapsto T^{-1}(u),
\qquad
\epsilon \colon T(u) \mapsto 1,
where `T_{[a]} = \sum_{i,j=1}^n (1^{\otimes a-1} \otimes t_{ij}(u)
\otimes 1^{2-a}) \otimes (E_{ij})_1`.
We can also impose two filtrations on `Y(\mathfrak{gl}_n)`: the
*natural* filtration `\deg t_{ij}^{(r)} = r` and the *loop*
filtration `\deg t_{ij}^{(r)} = r - 1`. The natural filtration has
a graded homomorphism with `U(\mathfrak{gl}_n)` by
`t_{ij}^{(r)} \mapsto (E^r)_{ij}` and an associated graded algebra
being polynomial algebra. Moreover, this shows a PBW theorem for
the Yangian, that for any fixed order, we can write elements as
unique linear combinations of ordered monomials using `t_{ij}^{(r)}`.
For the loop filtration, the associated graded algebra is isomorphic
(as Hopf algebras) to `U(\mathfrak{gl}_n[z])` given by
`\overline{t}_{ij}^{(r)} \mapsto E_{ij} x^{r-1}`, where
`\overline{t}_{ij}^{(r)}` is the image of `t_{ij}^{(r)}` in the
`(r - 1)`-th component of `\operatorname{gr}Y(\mathfrak{gl}_n)`.
INPUT:
- ``base_ring`` -- the base ring
- ``n`` -- the size `n`
- ``level`` -- (optional) the level of the Yangian
- ``variable_name`` -- (default: ``'t'``) the name of the variable
- ``filtration`` -- (default: ``'loop'``) the filtration and can be
one of the following:
* ``'natural'`` -- the filtration is given by `\deg t_{ij}^{(r)} = r`
* ``'loop'`` -- the filtration is given by `\deg t_{ij}^{(r)} = r - 1`
.. TODO::
Implement the antipode.
EXAMPLES::
sage: Y = Yangian(QQ, 4)
sage: t = Y.algebra_generators()
sage: t[6,2,1] * t[2,3,2]
-t(1)[2,2]*t(6)[3,1] + t(1)[3,1]*t(6)[2,2]
+ t(2)[3,2]*t(6)[2,1] - t(7)[3,1]
sage: t[6,2,1] * t[3,1,4]
t(1)[1,1]*t(7)[2,4] + t(1)[1,4]*t(6)[2,1] - t(1)[2,1]*t(6)[1,4]
- t(1)[2,4]*t(7)[1,1] + t(2)[1,1]*t(6)[2,4] - t(2)[2,4]*t(6)[1,1]
+ t(3)[1,4]*t(6)[2,1] + t(6)[2,4] + t(8)[2,4]
We check that the natural filtration has a homomorphism
to `U(\mathfrak{gl}_n)` as algebras::
sage: Y = Yangian(QQ, 4, filtration='natural')
sage: t = Y.algebra_generators()
sage: gl4 = lie_algebras.gl(QQ, 4)
sage: Ugl4 = gl4.pbw_basis()
sage: E = matrix(Ugl4, 4, 4, Ugl4.gens())
sage: Esq = E^2
sage: t[2,1,3] * t[1,2,1]
t(1)[2,1]*t(2)[1,3] - t(2)[2,3]
sage: Esq[0,2] * E[1,0] == E[1,0] * Esq[0,2] - Esq[1,2]
True
sage: Em = [E^k for k in range(1,5)]
sage: S = list(t.some_elements())[:30:3]
sage: def convert(x):
....: return sum(c * prod(Em[t[0]-1][t[1]-1,t[2]-1] ** e
....: for t,e in m._sorted_items())
....: for m,c in x)
sage: for x in S:
....: for y in S:
....: ret = x * y
....: rhs = convert(x) * convert(y)
....: assert rhs == convert(ret)
....: assert ret.maximal_degree() == rhs.maximal_degree()
REFERENCES:
- :wikipedia:`Yangian`
- [MNO1994]_
- [Mol2007]_
"""
@staticmethod
def __classcall_private__(cls, base_ring, n, level=None,
variable_name='t', filtration='loop'):
"""
Return the correct parent based upon input.
EXAMPLES::
sage: Y = Yangian(QQ, 4)
sage: Y2 = Yangian(QQ, 4)
sage: Y is Y2
True
sage: YL = Yangian(QQ, 4, 3)
sage: YL2 = Yangian(QQ, 4, 3)
sage: YL is YL2
True
"""
if | |
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if (is_extension_array_dtype(self.dtype) or
is_datetime64tz_dtype(self.dtype)):
# TODO(DatetimeArray): remove the second clause.
# TODO(GH-24345): Avoid potential double copy
result = np.asarray(self._values, dtype=dtype)
else:
result = self._values
if copy:
result = result.copy()
return result
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def max(self):
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
Return a ndarray of the maximum argument indexer.
See Also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
Return a ndarray of the minimum argument indexer.
See Also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
# We are explicity making element iterators.
if is_datetimelike(self._values):
return map(com.maybe_box_datetimelike, self._values)
elif is_extension_array_dtype(self._values):
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
 def _map_values(self, mapper, na_action=None):
  """
  An internal function that maps values using the input
  correspondence (which can be a dict, Series, or function).

  Parameters
  ----------
  mapper : function, dict, or Series
      The input correspondence object
  na_action : {None, 'ignore'}
      If 'ignore', propagate NA values, without passing them to the
      mapping function

  Returns
  -------
  applied : Union[Index, MultiIndex], inferred
      The output of the mapping function applied to the index.
      If the function returns a tuple with more than one element
      a MultiIndex will be returned.
  """
  # we can fastpath dict/Series to an efficient map
  # as we know that we are not going to have to yield
  # python types
  if isinstance(mapper, dict):
   if hasattr(mapper, '__missing__'):
    # If a dictionary subclass defines a default value method,
    # convert mapper to a lookup function (GH #15999).
    dict_with_default = mapper
    mapper = lambda x: dict_with_default[x]
   else:
    # Dictionary does not have a default. Thus it's safe to
    # convert to an Series for efficiency.
    # we specify the keys here to handle the
    # possibility that they are tuples
    from pandas import Series
    mapper = Series(mapper)
  if isinstance(mapper, ABCSeries):
   # Since values were input this means we came from either
   # a dict or a series and mapper should be an index
   if is_extension_type(self.dtype):
    values = self._values
   else:
    values = self.values
   # Align our values against the mapper's index, then take the
   # mapped values at those positions (missing -> NaN).
   indexer = mapper.index.get_indexer(values)
   new_values = algorithms.take_1d(mapper._values, indexer)
   return new_values
  # we must convert to python types
  if is_extension_type(self.dtype):
   values = self._values
   # ExtensionArray.map handles NA itself; na_action is unsupported here.
   if na_action is not None:
    raise NotImplementedError
   map_f = lambda values, f: values.map(f)
  else:
   values = self.astype(object)
   values = getattr(values, 'values', values)
   if na_action == 'ignore':
    # Skip NA positions by masking them out before inference.
    def map_f(values, f):
     return lib.map_infer_mask(values, f,
                               isna(values).view(np.uint8))
   else:
    map_f = lib.map_infer
  # mapper is a function
  new_values = map_f(values, mapper)
  return new_values
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
| |
def __init__(self, document_data):
self.document_data = document_data
self.field_paths = []
self.deleted_fields = []
self.server_timestamps = []
self.array_removes = {}
self.array_unions = {}
self.set_fields = {}
self.empty_document = False
prefix_path = FieldPath()
iterator = self._get_document_iterator(prefix_path)
for field_path, value in iterator:
if field_path == prefix_path and value is _EmptyDict:
self.empty_document = True
elif value is transforms.DELETE_FIELD:
self.deleted_fields.append(field_path)
elif value is transforms.SERVER_TIMESTAMP:
self.server_timestamps.append(field_path)
elif isinstance(value, transforms.ArrayRemove):
self.array_removes[field_path] = value.values
elif isinstance(value, transforms.ArrayUnion):
self.array_unions[field_path] = value.values
else:
self.field_paths.append(field_path)
set_field_value(self.set_fields, field_path, value)
def _get_document_iterator(self, prefix_path):
return extract_fields(self.document_data, prefix_path)
@property
def has_transforms(self):
return bool(self.server_timestamps or self.array_removes or self.array_unions)
@property
def transform_paths(self):
return sorted(
self.server_timestamps + list(self.array_removes) + list(self.array_unions)
)
def _get_update_mask(self, allow_empty_mask=False):
return None
def get_update_pb(self, document_path, exists=None, allow_empty_mask=False):
if exists is not None:
current_document = common_pb2.Precondition(exists=exists)
else:
current_document = None
update_pb = write_pb2.Write(
update=document_pb2.Document(
name=document_path, fields=encode_dict(self.set_fields)
),
update_mask=self._get_update_mask(allow_empty_mask),
current_document=current_document,
)
return update_pb
 def get_transform_pb(self, document_path, exists=None):
  """Build the ``Write`` protobuf holding this document's transforms.

  Combines server-timestamp, array-remove and array-union operations
  into one ``DocumentTransform``; ``exists`` (when given) is attached
  as a precondition on the write.
  """
  def make_array_value(values):
   # Encode a plain Python sequence as a protobuf ArrayValue.
   value_list = [encode_value(element) for element in values]
   return document_pb2.ArrayValue(values=value_list)
  # Pair each transform with its field path so the combined list can be
  # sorted by path before the protobuf is assembled.
  path_field_transforms = (
   [
    (
     path,
     write_pb2.DocumentTransform.FieldTransform(
      field_path=path.to_api_repr(),
      set_to_server_value=REQUEST_TIME_ENUM,
     ),
    )
    for path in self.server_timestamps
   ]
   + [
    (
     path,
     write_pb2.DocumentTransform.FieldTransform(
      field_path=path.to_api_repr(),
      remove_all_from_array=make_array_value(values),
     ),
    )
    for path, values in self.array_removes.items()
   ]
   + [
    (
     path,
     write_pb2.DocumentTransform.FieldTransform(
      field_path=path.to_api_repr(),
      append_missing_elements=make_array_value(values),
     ),
    )
    for path, values in self.array_unions.items()
   ]
  )
  # Sort by field path so the transform order is deterministic.
  field_transforms = [
   transform for path, transform in sorted(path_field_transforms)
  ]
  transform_pb = write_pb2.Write(
   transform=write_pb2.DocumentTransform(
    document=document_path, field_transforms=field_transforms
   )
  )
  if exists is not None:
   transform_pb.current_document.CopyFrom(
    common_pb2.Precondition(exists=exists)
   )
  return transform_pb
def pbs_for_create(document_path, document_data):
    """Make ``Write`` protobufs for ``create()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            creating a document.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One or two
        ``Write`` protobuf instances for ``create()``.

    Raises:
        ValueError: If ``document_data`` contains a DELETE_FIELD sentinel.
    """
    extractor = DocumentExtractor(document_data)
    if extractor.deleted_fields:
        raise ValueError("Cannot apply DELETE_FIELD in a create request.")

    write_pbs = []
    # Conformance tests require skipping the 'update_pb' if the document
    # contains only transforms.
    if extractor.empty_document or extractor.set_fields:
        write_pbs.append(extractor.get_update_pb(document_path, exists=False))
    if extractor.has_transforms:
        # The 'exists' precondition rides on the update write when there is
        # one; otherwise it must be attached to the transform write itself.
        exists = False if not write_pbs else None
        write_pbs.append(extractor.get_transform_pb(document_path, exists))
    return write_pbs
def pbs_for_set_no_merge(document_path, document_data):
    """Make ``Write`` protobufs for ``set()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            replacing a document.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One
        or two ``Write`` protobuf instances for ``set()``.

    Raises:
        ValueError: If ``document_data`` contains a DELETE_FIELD sentinel
            (deletions require a merge).
    """
    extractor = DocumentExtractor(document_data)
    if extractor.deleted_fields:
        raise ValueError(
            "Cannot apply DELETE_FIELD in a set request without "
            "specifying 'merge=True' or 'merge=[field_paths]'."
        )

    # Conformance tests require sending the 'update_pb' even if the
    # document contains only transforms.
    write_pbs = [extractor.get_update_pb(document_path)]
    if extractor.has_transforms:
        write_pbs.append(extractor.get_transform_pb(document_path))
    return write_pbs
class DocumentExtractorForMerge(DocumentExtractor):
 """ Break document data up into actual data and transforms.

 Extends DocumentExtractor with merge bookkeeping: which data paths,
 transform paths and merge paths participate in a ``set(..., merge=...)``
 call. ``apply_merge`` must be called before the write protobufs are built.
 """
 def __init__(self, document_data):
  super(DocumentExtractorForMerge, self).__init__(document_data)
  # Populated by apply_merge(): merged data paths, merged transform
  # paths, and the union of both (used for the update mask).
  self.data_merge = []
  self.transform_merge = []
  self.merge = []
 @property
 def has_updates(self):
  """Whether the merged write must include an update component."""
  # for whatever reason, the conformance tests want to see the parent
  # of nested transform paths in the update mask
  # (see set-st-merge-nonleaf-alone.textproto)
  update_paths = set(self.data_merge)
  for transform_path in self.transform_paths:
   if len(transform_path.parts) > 1:
    parent_fp = FieldPath(*transform_path.parts[:-1])
    update_paths.add(parent_fp)
  return bool(update_paths)
 def _apply_merge_all(self):
  # merge=True: every extracted field (and deletion) participates.
  self.data_merge = sorted(self.field_paths + self.deleted_fields)
  # TODO: other transforms
  self.transform_merge = self.transform_paths
  self.merge = sorted(self.data_merge + self.transform_paths)
 def _construct_merge_paths(self, merge):
  # Accept both FieldPath instances and dotted-string paths.
  for merge_field in merge:
   if isinstance(merge_field, FieldPath):
    yield merge_field
   else:
    yield FieldPath(*parse_field_path(merge_field))
 def _normalize_merge_paths(self, merge):
  merge_paths = sorted(self._construct_merge_paths(merge))
  # Raise if any merge path is a parent of another. Leverage sorting
  # to avoid quadratic behavior.
  for index in range(len(merge_paths) - 1):
   lhs, rhs = merge_paths[index], merge_paths[index + 1]
   if lhs.eq_or_parent(rhs):
    raise ValueError("Merge paths overlap: {}, {}".format(lhs, rhs))
  for merge_path in merge_paths:
   # Deleted fields need not exist in the document data.
   if merge_path in self.deleted_fields:
    continue
   try:
    get_field_value(self.document_data, merge_path)
   except KeyError:
    raise ValueError("Invalid merge path: {}".format(merge_path))
  return merge_paths
 def _apply_merge_paths(self, merge):
  if self.empty_document:
   raise ValueError("Cannot merge specific fields with empty document.")
  merge_paths = self._normalize_merge_paths(merge)
  del self.data_merge[:]
  del self.transform_merge[:]
  self.merge = merge_paths
  # Partition the merge paths into transform merges and data merges.
  for merge_path in merge_paths:
   if merge_path in self.transform_paths:
    self.transform_merge.append(merge_path)
   for field_path in self.field_paths:
    if merge_path.eq_or_parent(field_path):
     self.data_merge.append(field_path)
  # Clear out data for fields not merged.
  merged_set_fields = {}
  for field_path in self.data_merge:
   value = get_field_value(self.document_data, field_path)
   set_field_value(merged_set_fields, field_path, value)
  self.set_fields = merged_set_fields
  # Deletions outside the merge set are an error.
  unmerged_deleted_fields = [
   field_path
   for field_path in self.deleted_fields
   if field_path not in self.merge
  ]
  if unmerged_deleted_fields:
   raise ValueError(
    "Cannot delete unmerged fields: {}".format(unmerged_deleted_fields)
   )
  self.data_merge = sorted(self.data_merge + self.deleted_fields)
  # Keep only transforms which are within merge.
  merged_transform_paths = set()
  for merge_path in self.merge:
   tranform_merge_paths = [
    transform_path
    for transform_path in self.transform_paths
    if merge_path.eq_or_parent(transform_path)
   ]
   merged_transform_paths.update(tranform_merge_paths)
  self.server_timestamps = [
   path for path in self.server_timestamps if path in merged_transform_paths
  ]
  self.array_removes = {
   path: values
   for path, values in self.array_removes.items()
   if path in merged_transform_paths
  }
  self.array_unions = {
   path: values
   for path, values in self.array_unions.items()
   if path in merged_transform_paths
  }
 def apply_merge(self, merge):
  """Apply the ``merge`` option: ``True`` or a list of field paths."""
  if merge is True:  # merge all fields
   self._apply_merge_all()
  else:
   self._apply_merge_paths(merge)
 def _get_update_mask(self, allow_empty_mask=False):
  # Mask uses dotted / quoted paths.
  mask_paths = [
   field_path.to_api_repr()
   for field_path in self.merge
   if field_path not in self.transform_merge
  ]
  if mask_paths or allow_empty_mask:
   return common_pb2.DocumentMask(field_paths=mask_paths)
def pbs_for_set_with_merge(document_path, document_data, merge):
    """Make ``Write`` protobufs for ``set()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            replacing a document.
        merge (Optional[bool] or Optional[List<apispec>]):
            If True, merge all fields; else, merge only the named fields.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One
        or two ``Write`` protobuf instances for ``set()``.
    """
    extractor = DocumentExtractorForMerge(document_data)
    extractor.apply_merge(merge)

    # An empty document still produces an (empty-mask) update write.
    allow_empty = not document_data

    writes = []
    if allow_empty or extractor.has_updates:
        update_pb = extractor.get_update_pb(
            document_path, allow_empty_mask=allow_empty
        )
        writes.append(update_pb)
    if extractor.transform_paths:
        writes.append(extractor.get_transform_pb(document_path))
    return writes
class DocumentExtractorForUpdate(DocumentExtractor):
    """Break ``update()`` document data up into actual data and transforms."""

    def __init__(self, document_data):
        super(DocumentExtractorForUpdate, self).__init__(document_data)
        self.top_level_paths = sorted(
            FieldPath.from_string(key) for key in document_data
        )
        tops = set(self.top_level_paths)

        # Reject a path whose ancestor is also supplied (they would conflict).
        for candidate in self.top_level_paths:
            conflicts = [a for a in candidate.lineage() if a in tops]
            if conflicts:
                raise ValueError(
                    "Conflicting field path: {}, {}".format(
                        candidate, conflicts[0]
                    )
                )

        # Deletes are only legal at the top level of the update.
        for deleted in self.deleted_fields:
            if deleted not in tops:
                raise ValueError(
                    "Cannot update with nest delete: {}".format(deleted)
                )

    def _get_document_iterator(self, prefix_path):
        """Iterate fields, expanding dotted keys into nested field paths."""
        return extract_fields(self.document_data, prefix_path, expand_dots=True)

    def _get_update_mask(self, allow_empty_mask=False):
        """Always return a mask for an update (even if it is empty)."""
        mask_paths = []
        for top_path in self.top_level_paths:
            if top_path in self.transform_paths:
                # For a pure transform, mask the parent prefix (if any).
                parent = FieldPath(*top_path.parts[:-1])
                if parent.parts:
                    mask_paths.append(parent.to_api_repr())
            else:
                mask_paths.append(top_path.to_api_repr())
        return common_pb2.DocumentMask(field_paths=mask_paths)
def pbs_for_update(document_path, field_updates, option):
    """Make ``Write`` protobufs for ``update()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        field_updates (dict): Field names or paths to update and values
            to update with.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One
        or two ``Write`` protobuf instances for ``update()``.
    """
    extractor = DocumentExtractorForUpdate(field_updates)
    if extractor.empty_document:
        raise ValueError("Cannot update with an empty document.")

    if option is None:  # Default is to use ``exists=True``.
        option = ExistsOption(exists=True)

    write_pbs = []
    has_data_writes = extractor.field_paths or extractor.deleted_fields
    if has_data_writes:
        update_pb = extractor.get_update_pb(document_path)
        option.modify_write(update_pb)
        write_pbs.append(update_pb)

    if extractor.has_transforms:
        transform_pb = extractor.get_transform_pb(document_path)
        if not write_pbs:
            # Only apply the precondition to the transform write when there
            # is no update write to carry it.
            option.modify_write(transform_pb)
        write_pbs.append(transform_pb)

    return write_pbs
def pb_for_delete(document_path, option):
    """Make a ``Write`` protobuf for ``delete()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        google.cloud.firestore_v1beta1.types.Write: A
        ``Write`` protobuf instance for the ``delete()``.
    """
    delete_pb = write_pb2.Write(delete=document_path)
    if option is None:
        return delete_pb
    # Attach the caller-supplied precondition to the delete write.
    option.modify_write(delete_pb)
    return delete_pb
class ReadAfterWriteError(Exception):
    """Raised when a read is attempted after a write.

    "Read" methods that run inside a transaction raise this error when
    the transaction has already buffered write operations.
    """
def get_transaction_id(transaction, read_operation=True):
    """Get the transaction ID from a ``Transaction`` object.

    Args:
        transaction (Optional[~.firestore_v1beta1.transaction.\
            Transaction]): An existing transaction that this query will
            run in.
        read_operation (Optional[bool]): Indicates if the transaction ID
            will be used in a read operation. Defaults to :data:`True`.

    Returns:
        Optional[bytes]: The ID of the transaction, or :data:`None` if the
        ``transaction`` is :data:`None`.

    Raises:
        ValueError: If the ``transaction`` is not in progress (only if
            ``transaction`` is not :data:`None`).
        ReadAfterWriteError: If the ``transaction`` has writes stored on
            it and ``read_operation`` is :data:`True`.
    """
    if transaction is None:
        return None
    # Guard clauses replace the original nested ``else`` block.
    if not transaction.in_progress:
        raise ValueError(INACTIVE_TXN)
    if read_operation and len(transaction._write_pbs) > 0:
        raise ReadAfterWriteError(READ_AFTER_WRITE_ERROR)
    return transaction.id
def metadata_with_prefix(prefix, **kw):
"""Create RPC metadata containing a prefix.
Args:
prefix (str): appropriate resource | |
# <gh_stars>0  (dataset-scraper artifact; commented out so the module stays importable)
from abc import ABC, abstractmethod
from datetime import datetime, timedelta
from enum import Enum
import logging
import os
from os import PathLike
from pathlib import Path
import sys
from textwrap import indent
from typing import Iterator, List, Tuple, Union
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata
from nemspy.model.base import (
AttributeEntry,
ConnectionEntry,
EntryType,
FileForcingEntry,
GridRemapMethod,
INDENTATION,
MediationEntry,
MediatorEntry,
ModelEntry,
SequenceEntry,
VerbosityOption,
)
from nemspy.utilities import create_symlink
class Earth(AttributeEntry):
    """
    multi-model coupling container representing the entire Earth system
    Only one of each model type can be assigned to the Earth system model at a time.
    """
    entry_title = 'EARTH'
    def __init__(self, **models):
        """
        :param atm: atmospheric wind model
        :param wav: oceanic wave model
        :param ocn: oceanic circulation model
        :param hyd: terrestrial water model
        :param med: model mediator

        Keyword arguments whose (upper-cased) name is not an ``EntryType``
        member are stored as plain NEMS attributes instead of models.
        """
        if 'Verbosity' not in models:
            models['Verbosity'] = VerbosityOption.OFF
        # One slot per entry type; unassigned types stay ``None``.
        self.__models = {model_type: None for model_type in EntryType}
        attributes = {}
        for key, value in models.items():
            # NOTE(review): compares against EntryType member *names*, while
            # RunSequence.__init__ compares against member *values* — confirm
            # both spellings match the intended keyword forms.
            if key.upper() in {entry.name for entry in EntryType}:
                if isinstance(value, ModelEntry):
                    self[EntryType[key.upper()]] = value
            else:
                attributes[key] = value
        self.attributes = attributes
    @property
    def models(self):
        """
        list of models comprising the Earth system
        """
        return self.__models
    def __getitem__(self, model_type: EntryType) -> ModelEntry:
        # May return ``None`` for an unassigned model type.
        return self.__models[model_type]
    def __setitem__(self, model_type: EntryType, model: ModelEntry):
        assert model_type == model.entry_type
        if self.__models[model_type] is not None:
            # Replacing an existing model is allowed but logged.
            logging.debug(
                f'overwriting existing "{model_type.name}" model: ' f'{repr(self[model_type])}'
            )
        self.__models[model_type] = model
    def __contains__(self, model_type: EntryType):
        return model_type in self.__models
    def __iter__(self) -> Iterator[Tuple[EntryType, ModelEntry]]:
        # Yields (type, model) pairs, including unassigned (``None``) slots.
        for model_type, model in self.models.items():
            yield model_type, model
    def __str__(self) -> str:
        # Render the NEMS ``EARTH_component_list`` / ``EARTH_attributes`` block.
        attributes = [
            f'{attribute} = {value if not isinstance(value, Enum) else value.value}'
            for attribute, value in self.attributes.items()
        ]
        return '\n'.join(
            [
                f'{self.entry_title}_component_list: '
                f'{" ".join(model_type.value for model_type, model in self.models.items() if model is not None)}',
                f'{self.entry_title}_attributes::',
                indent('\n'.join(attributes), INDENTATION),
                '::',
            ]
        )
    def __repr__(self) -> str:
        models = [
            f'{model_type.name}={repr(model)}' for model_type, model in self.models.items()
        ]
        models += [f'{key}={value}' for key, value in self.attributes.items()]
        # 'Verbosity' is guaranteed present: __init__ injects a default.
        return (
            f'{self.__class__.__name__}({self.attributes["Verbosity"]}, {", ".join(models)})'
        )
class RunSequence(AttributeEntry, SequenceEntry):
    """
    multi-model container for model entries, defining the sequence in which they run within the modeled time loop
    NOTE: Currently, only one loop is supported. Nested loops will be implemented in a future version of NEMSpy.
    """
    entry_title = 'Run Sequence'

    def __init__(self, interval: timedelta, **kwargs):
        """
        :param interval: time interval to repeat the main loop in modeled time

        Keyword arguments whose (upper-cased) name matches an ``EntryType``
        value and whose value is a ``ModelEntry`` become models; everything
        else is stored as a plain attribute.
        """
        self.interval = interval
        if 'Verbosity' not in kwargs:
            kwargs['Verbosity'] = VerbosityOption.OFF
        self.__models = {}
        attributes = {}
        for key, value in kwargs.items():
            model_types = [model_type.value for model_type in EntryType]
            if key.upper() in model_types and isinstance(value, ModelEntry):
                self.__models[EntryType(key.upper())] = value
            else:
                attributes[key] = value
        self.attributes = attributes
        # The mediator participates in mediations but never appears as a
        # standalone entry in the run sequence.
        self.__sequence = [
            model for model in self.models if model.entry_type != EntryType.MEDIATOR
        ]
        self.__link_models()

    def append(self, entry: SequenceEntry):
        """
        add a sequence entry
        """
        if isinstance(entry, ModelEntry):
            model_type = entry.entry_type
            # Drop any prior model of this type before registering the new one.
            if model_type in self.__models:
                del self.__models[model_type]
            self[entry.entry_type] = entry
        self.__sequence.append(entry)

    def extend(self, sequence: List[SequenceEntry]):
        """
        add several sequence entries
        """
        for entry in sequence:
            self.append(entry)

    @property
    def sequence(self) -> List[SequenceEntry]:
        """
        list of sequence entries in order, including model entries and connections / mediations
        """
        return self.__sequence

    @sequence.setter
    def sequence(self, sequence: List[SequenceEntry]):
        """
        set the sequence by passing a list of entries in order

        :raises TypeError: if the same model type appears more than once
        """
        sequence = list(sequence)
        if sequence != self.__sequence:
            # Preserve the mediator across the model-registry rebuild.
            mediator = self.mediator
            self.__models = {}
            if mediator is not None:
                self.mediator = mediator
            for entry in sequence:
                if isinstance(entry, ModelEntry):
                    model_type = entry.entry_type
                    if model_type in self.__models:
                        raise TypeError(
                            f'duplicate model type ' f'"{model_type.name}" in given sequence'
                        )
                    self.__models[model_type] = entry
            self.__link_models()
            self.__sequence = sequence

    def connect(
        self, source: EntryType, target: EntryType, method: GridRemapMethod = None, **kwargs,
    ):
        """
        assign a simple connection (not a mediation) between two model entries within the sequence

        :param source: entry type of the sending model
        :param target: entry type of the receiving model
        :param method: grid remap method (defaults to ``REDISTRIBUTE``)
        :raises KeyError: if either endpoint is not in the sequence
        """
        if method is None:
            method = GridRemapMethod.REDISTRIBUTE
        # Implicitly create a mediator when one endpoint requires it.
        if EntryType.MEDIATOR in [source, target] and self.mediator is None:
            self.mediator = MediatorEntry(**kwargs)
        if source not in self.__models:
            raise KeyError(f'no {source.name} model in sequence')
        if target not in self.__models:
            raise KeyError(f'no {target.name} model in sequence')
        self.append(ConnectionEntry(self[source], self[target], method))

    @property
    def connections(self) -> List[Union[ConnectionEntry, MediationEntry]]:
        """
        list of all connections in the sequence
        """
        return [
            entry
            for entry in self.sequence
            if isinstance(entry, ConnectionEntry) or isinstance(entry, MediationEntry)
        ]

    @property
    def mediator(self) -> MediatorEntry:
        """
        shortcut property to the mediator entry
        """
        if EntryType.MEDIATOR in self:
            return self.__models[EntryType.MEDIATOR]
        else:
            return None

    @mediator.setter
    def mediator(self, mediator: MediatorEntry):
        """
        set the mediator entry (does not exist in the sequence by itself)
        """
        self[EntryType.MEDIATOR] = mediator

    def mediate(
        self,
        sources: List[EntryType] = None,
        functions: List[str] = None,
        targets: List[EntryType] = None,
        method: GridRemapMethod = None,
        processors: int = None,
        **attributes,
    ):
        """
        assign a mediation between two entries in the sequence

        Creates the mediator on first use; subsequent calls merge new
        attributes into it and may grow its processor assignment.
        """
        if 'name' not in attributes:
            attributes['name'] = 'mediator'
        if self.mediator is None:
            self.mediator = MediatorEntry(processors=processors, **attributes)
        else:
            self.mediator.attributes.update(attributes)
            if processors is not None:
                # increase mediation processor assignment if required
                if self.mediator.processors < processors:
                    self.mediator.processors = processors
        if sources is not None:
            sources = [self[source] for source in sources]
        if targets is not None:
            targets = [self[target] for target in targets]
        self.append(MediationEntry(self.mediator, sources, functions, targets, method))

    @property
    def mediations(self) -> List[MediationEntry]:
        """
        list of all mediations in the sequence
        """
        return [entry for entry in self.sequence if isinstance(entry, MediationEntry)]

    @property
    def earth(self) -> Earth:
        """
        Earth system assigned to the sequence
        """
        return Earth(
            **{model.entry_type.name: model for model in self.models}, **self.attributes
        )

    @property
    def processors(self) -> int:
        """
        total number of processors assigned to sequence entries
        """
        return sum(model.processors for model in self.__models.values())

    def __link_models(self):
        """
        link entries and assign processors
        """
        models = self.models
        # First unlink everything and reset processor offsets.
        # NOTE(review): only ``next`` pointers are cleared here; ``previous``
        # pointers are overwritten in the relink pass below — confirm no
        # stale ``previous`` survives when the sequence shrinks.
        for model in models:
            if model.previous is not None:
                model.previous.next = None
            if model.next is not None:
                model.next = None
            model.start_processor = 0
        # Then chain each model to its predecessor in sequence order.
        for model_index, model in enumerate(models):
            previous_model_index = model_index - 1
            if previous_model_index >= 0:
                model.previous = models[previous_model_index]

    def __setitem__(self, model_type: EntryType, model: ModelEntry):
        """Register ``model`` under ``model_type``, replacing any previous one."""
        assert model_type == model.entry_type
        if model_type in self.__models:
            existing_model = self.__models[model_type]
            logging.debug(
                f'overwriting {model_type.name} model ' f'"{existing_model}" with "{model}"'
            )
            # BUG FIX: ``list.remove`` takes the element itself; the previous
            # code passed ``self.__sequence.index(existing_model)`` (an int),
            # which always raised ``ValueError``. Guarded because the mediator
            # is registered in ``__models`` but never placed in the sequence.
            if existing_model in self.__sequence:
                self.__sequence.remove(existing_model)
        self.__models[model_type] = model
        self.__link_models()

    def __getitem__(self, model_type: EntryType) -> ModelEntry:
        return self.__models[model_type]

    @property
    def models(self) -> List[ModelEntry]:
        """
        list of models in the run sequence
        """
        models = [
            model
            for model_type, model in self.__models.items()
            if model_type in self and model_type is not EntryType.MEDIATOR
        ]
        # The mediator, when present, always leads the model list.
        if self.mediator is not None:
            models.insert(0, self.mediator)
        return models

    def __iter__(self) -> Iterator[ModelEntry]:
        for model in self.models:
            yield model

    def __contains__(self, model_type: EntryType) -> bool:
        return model_type in self.__models

    def __len__(self) -> int:
        return len(self.sequence)

    @property
    def sequence_entry(self) -> str:
        """Rendered form of this sequence when embedded in a configuration."""
        return str(self)

    def __str__(self) -> str:
        # Render the NEMS ``runSeq`` block with the loop interval in seconds.
        block = '\n'.join(
            [
                f'@{self.interval / timedelta(seconds=1):.0f}',
                indent(
                    '\n'.join(entry.sequence_entry for entry in self.__sequence), INDENTATION
                ),
                '@',
            ]
        )
        return '\n'.join([f'runSeq::', indent(block, INDENTATION), '::'])

    def __repr__(self) -> str:
        models = [f'{model.entry_type.name.lower()}={repr(model)}' for model in self.models]
        return f'{self.__class__.__name__}({repr(self.interval)}, {", ".join(models)})'
class ConfigurationFile(ABC):
    """
    abstraction of a configuration file
    """
    # NOTE(review): assigns the ``NotImplementedError`` *class* to a
    # ``str``-annotated attribute — presumably a sentinel forcing subclasses
    # to override ``name``; confirm this is intentional.
    name: str = NotImplementedError
    def __init__(self, sequence: RunSequence):
        """
        :param sequence: run sequence object containing models and order
        """
        self.sequence = sequence
    def __getitem__(self, entry_type: type) -> List[AttributeEntry]:
        # Filters entries by class; relies on subclasses providing
        # ``__iter__`` (not defined on this base class).
        return [entry for entry in self if isinstance(entry, entry_type)]
    @property
    def version_header(self) -> str:
        """
        comment header indicating filename and NEMSpy version
        """
        # Look up the installed 'nemspy' distribution; fall back to
        # 'unknown' when it is not installed (for-else: no break hit).
        installed_distributions = importlib_metadata.distributions()
        for distribution in installed_distributions:
            if (
                distribution.metadata['Name'] is not None
                and distribution.metadata['Name'].lower() == 'nemspy'
            ):
                version = distribution.version
                break
        else:
            version = 'unknown'
        return f'# `{self.name}` generated with NEMSpy {version}'
    def write(
        self, filename: PathLike, overwrite: bool = False, include_version: bool = False
    ) -> Path:
        """
        write this configuration to file
        :param filename: path to file
        :param overwrite: overwrite an existing file
        :param include_version: include NEMSpy version information
        :returns: path to written file
        """
        if not isinstance(filename, Path):
            filename = Path(filename)
        # ``ensure_directory`` is provided elsewhere in this module.
        ensure_directory(filename.parent)
        output = f'{self}\n'
        if include_version:
            output = f'{self.version_header}\n' f'{output}'
        # When given a directory, write under this file's canonical name.
        if filename.is_dir():
            filename = filename / self.name
        # NOTE(review): this "creating new file" message is logged even when
        # the file already exists (see the branch below) — confirm intended.
        logging.debug(
            f'creating new file "{os.path.relpath(filename.resolve(), Path.cwd())}"'
        )
        if filename.exists():
            logging.debug(
                f'{"overwriting" if overwrite else "skipping"} existing file "{os.path.relpath(filename.resolve(), Path.cwd())}"'
            )
        # Only write when the file is absent or overwriting was requested.
        if not filename.exists() or overwrite:
            with open(filename, 'w', newline='\n') as output_file:
                output_file.write(output)
        return filename
| |
# flake8: noqa
from contextlib import ExitStack
import click
from omegaconf import OmegaConf
from pfb.workers.main import cli
import pyscilog
pyscilog.init('pfb')
log = pyscilog.get_logger('SPIFIT')
@cli.command()
@click.option('-image', '--image', required=True,
              help="Path to model or restored image cube.")
@click.option('-resid', "--residual", required=False,
              help="Path to residual image cube.")
@click.option('-o', '--output-filename', required=True,
              help="Path to output directory + prefix.")
@click.option('-pp', '--psf-pars', nargs=3, type=float,
              help="Beam parameters matching FWHM of restoring beam "
                   "specified as emaj emin pa."
                   "By default these are taken from the fits header "
                   "of the residual image.")
@click.option('--circ-psf/--no-circ-psf', default=False)
@click.option('-th', '--threshold', default=10, type=float, show_default=True,
              help="Multiple of the rms in the residual to threshold on."
                   "Only components above threshold*rms will be fit.")
@click.option('-maxdr', '--maxdr', default=100, type=float, show_default=True,
              help="Maximum dynamic range used to determine the "
                   "threshold above which components need to be fit. "
                   "Only used if residual is not passed in.")
@click.option('-bw', '--band-weights', type=float,
              help="Per bands weights to use during the fit")
@click.option('-pb-min', '--pb-min', type=float, default=0.15,
              help="Set image to zero where pb falls below this value")
@click.option('-products', '--products', default='aeikIcmrb', type=str,
              help="Outputs to write. Letter correspond to: \n"
              "a - alpha map \n"
              "e - alpha error map \n"
              "i - I0 map \n"
              "k - I0 error map \n"
              "I - reconstructed cube form alpha and I0 \n"
              "c - restoring beam used for convolution \n"
              "m - convolved model \n"
              "r - convolved residual \n"
              "b - average power beam \n"
              "Default is to write all of them")
@click.option('-pf', "--padding-frac", default=0.5, type=float,
              show_default=True, help="Padding factor for FFT's.")
@click.option('-dc', "--dont-convolve", is_flag=True,
              help="Do not convolve by the clean beam before fitting")
@click.option('-rf', '--ref-freq', type=float,
              help='Reference frequency where the I0 map is sought. '
              "Will overwrite in fits headers of output.")
@click.option('-otype', '--out-dtype', default='f4', type=str,
              help="Data type of output. Default is single precision")
@click.option('-acr', '--add-convolved-residuals', is_flag=True,
              help='Flag to add in the convolved residuals before '
                   'fitting components')
@click.option('-bm', '--beam-model', default=None,
              help="Fits power beam model. It is assumed that the beam "
              "match the fits headers of --image. You can use the binterp "
              "worker to create compatible beam models")
@click.option('-ha', '--host-address',
              help='Address where the distributed client lives. '
                   'Will use a local cluster if no address is provided')
@click.option('-nw', '--nworkers', type=int, default=1,
              help='Number of workers for the client.')
@click.option('-ntpw', '--nthreads-per-worker', type=int,
              help='Number of dask threads per worker.')
@click.option('-nvt', '--nvthreads', type=int,
              help="Total number of threads to use for vertical scaling (eg. gridder, fft's etc.)")
@click.option('-mem', '--mem-limit', type=int,
              help="Memory limit in GB. Default uses all available memory")
@click.option('-nthreads', '--nthreads', type=int,
              help="Total available threads. Default uses all available threads")
def spifit(**kw):
    """
    Spectral index fitter
    """
    args = OmegaConf.create(kw)
    pyscilog.log_to_file(args.output_filename + '.log')
    from glob import glob
    from omegaconf import ListConfig

    def _expand_globs(spec):
        # Expand a glob string to a sorted path list, or a list of glob
        # strings to a list of sorted path lists (shape kept as before);
        # unexpected types yield None so callers can raise uniformly.
        if isinstance(spec, str):
            return sorted(glob(spec))
        if isinstance(spec, list) or isinstance(spec, ListConfig):
            # BUG FIX: was ``for i in len(...)`` which raised TypeError;
            # iterate indices with range() instead.
            return [sorted(glob(spec[i])) for i in range(len(spec))]
        return None

    # image is either a string or a list of strings that we want to glob on
    # (explicit checks replace assert-in-bare-except: asserts are stripped
    # under ``python -O`` and the bare except hid real errors)
    image = _expand_globs(args.image)
    if not image:
        raise ValueError(f"No image at {args.image}")
    args.image = image

    # same goes for the residual except that it may also be None
    if args.residual is not None:
        residual = _expand_globs(args.residual)
        if not residual:
            raise ValueError(f"No residual at {args.residual}")
        args.residual = residual
        # we also need the same number of residuals as images
        if len(args.image) != len(args.residual):
            raise ValueError("Number of images and residuals need to "
                             "match")
    else:
        print("No residual passed in!", file=log)

    # and finally the beam model
    if args.beam_model is not None:
        beam_model = _expand_globs(args.beam_model)
        if not beam_model:
            raise ValueError(f"No beam model at {args.beam_model}")
        args.beam_model = beam_model
        if len(args.image) != len(args.beam_model):
            raise ValueError("Number of images and beam models need to "
                             "match")
    else:
        print("Not doing any form of primary beam correction", file=log)

    # LB - TODO: can we sort them along freq at this point already?
    OmegaConf.set_struct(args, True)
    with ExitStack() as stack:
        from pfb import set_client
        args = set_client(args, stack, log)
        # TODO - prettier config printing
        print('Input Options:', file=log)
        for key in args.keys():
            print('     %25s = %s' % (key, args[key]), file=log)
        return _spifit(**args)
def _spifit(**kw):
args = OmegaConf.create(kw)
OmegaConf.set_struct(args, True)
import dask.array as da
import numpy as np
from astropy.io import fits
from africanus.model.spi.dask import fit_spi_components
from pfb.utils.fits import load_fits, save_fits, data_from_header, set_wcs
from pfb.utils.misc import convolve2gaussres
# get max gausspars
gaussparf = None
if args.psf_pars is None:
if args.residual is None:
ppsource = args.image
else:
ppsource = args.residual
for image in ppsource:
try:
pphdr = fits.getheader(image)
except Exception as e:
raise e
if 'BMAJ0' in pphdr.keys():
emaj = pphdr['BMAJ0']
emin = pphdr['BMIN0']
pa = pphdr['BPA0']
gausspars = [emaj, emin, pa]
freq_idx0 = 0
elif 'BMAJ1' in pphdr.keys():
emaj = pphdr['BMAJ1']
emin = pphdr['BMIN1']
pa = pphdr['BPA1']
gausspars = [emaj, emin, pa]
freq_idx0 = 1
elif 'BMAJ' in pphdr.keys():
emaj = pphdr['BMAJ']
emin = pphdr['BMIN']
pa = pphdr['BPA']
gausspars = [emaj, emin, pa]
freq_idx0 = 0
else:
raise ValueError("No beam parameters found in residual."
"You will have to provide them manually.")
if gaussparf is None:
gaussparf = gausspars
else:
# we need to take the max in both directions
gaussparf[0] = np.maximum(gaussparf[0], gausspars[0])
gaussparf[1] = np.maximum(gaussparf[1], gausspars[1])
else:
freq_idx0 = 0 # assumption
gaussparf = list(args.psf_pars)
if args.circ_psf:
e = np.maximum(gaussparf[0], gaussparf[1])
gaussparf[0] = e
gaussparf[1] = e
gaussparf[2] = 0.0
gaussparf = tuple(gaussparf)
print("Using emaj = %3.2e, emin = %3.2e, PA = %3.2e \n" % gaussparf, file=log)
# get required data products
image_dict = {}
for i in range(len(args.image)):
image_dict[i] = {}
# load model image
model = load_fits(args.image[i], dtype=args.out_dtype).squeeze()
mhdr = fits.getheader(args.image[i])
if model.ndim < 3:
model = model[None, :, :]
l_coord, ref_l = data_from_header(mhdr, axis=1)
l_coord -= ref_l
m_coord, ref_m = data_from_header(mhdr, axis=2)
m_coord -= ref_m
if mhdr["CTYPE4"].lower() == 'freq':
freq_axis = 4
stokes_axis = 3
elif mhdr["CTYPE3"].lower() == 'freq':
freq_axis = 3
stokes_axis = 4
else:
raise ValueError("Freq axis must be 3rd or 4th")
freqs, ref_freq = data_from_header(mhdr, axis=freq_axis)
image_dict[i]['freqs'] = freqs
nband = freqs.size
npix_l = l_coord.size
npix_m = m_coord.size
xx, yy = np.meshgrid(l_coord, m_coord, indexing='ij')
# load beam
if args.beam_model is not None:
bhdr = fits.getheader(args.beam_model[i])
l_coord_beam, ref_lb = data_from_header(bhdr, axis=1)
l_coord_beam -= ref_lb
if not np.array_equal(l_coord_beam, l_coord):
raise ValueError("l coordinates of beam model do not match "
"those of image. Use binterp to make "
"compatible beam images")
m_coord_beam, ref_mb = data_from_header(bhdr, axis=2)
m_coord_beam -= ref_mb
if not np.array_equal(m_coord_beam, m_coord):
raise ValueError("m coordinates of beam model do not match "
"those of image. Use binterp to make "
"compatible beam images")
freqs_beam, _ = data_from_header(bhdr, axis=freq_axis)
if not np.array_equal(freqs, freqs_beam):
raise ValueError("Freq coordinates of beam model do not match "
"those of image. Use binterp to make "
"compatible beam images")
beam_image = load_fits(args.beam_model[i],
dtype=args.out_dtype).squeeze()
if beam_image.ndim < 3:
beam_image = beam_image[None, :, :]
else:
beam_image = np.ones(model.shape, dtype=args.out_dtype)
image_dict[i]['beam'] = beam_image
if not args.dont_convolve:
print("Convolving model %i"%i, file=log)
# convolve model to desired resolution
model, gausskern = convolve2gaussres(model, xx, yy, gaussparf,
args.nthreads, None,
args.padding_frac)
image_dict[i]['model'] = model
# add in residuals and set threshold
if args.residual is not None:
msg = "of residual do not match those of model"
rhdr = fits.getheader(args.residual[i])
l_res, ref_lb = data_from_header(rhdr, axis=1)
l_res -= ref_lb
if not np.array_equal(l_res, l_coord):
raise ValueError("l coordinates " + msg)
m_res, ref_mb = data_from_header(rhdr, axis=2)
m_res -= ref_mb
if not np.array_equal(m_res, m_coord):
raise ValueError("m coordinates " + msg)
freqs_res, _ = data_from_header(rhdr, axis=freq_axis)
if not np.array_equal(freqs, freqs_res):
raise ValueError("Freqs " + msg)
resid = load_fits(args.residual[i],
dtype=args.out_dtype).squeeze()
if resid.ndim < 3:
resid = resid[None, :, :]
# convolve residual to same resolution as model
gausspari = ()
for b in range(nband):
key = 'BMAJ' + str(b + freq_idx0)
if key in rhdr.keys():
emaj = rhdr[key]
emin = rhdr[key]
pa = rhdr[key]
gausspari += ((emaj, emin, pa),)
elif 'BMAJ' in rhdr.keys():
emaj = rhdr['BMAJ']
emin = rhdr['BMIN']
pa = rhdr['BPA']
gausspari += | |
# <reponame>webclinic017/gs-quant  (dataset-scraper artifact; commented out so the module stays importable)
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from gs_quant.base import *
from gs_quant.common import *
import datetime
from typing import Dict, Optional, Tuple, Union
from dataclasses import dataclass, field
from dataclasses_json import LetterCase, config, dataclass_json
from enum import Enum
class ApprovalStatus(EnumBase, Enum):
    """Current status of an approval

    Member names and values are identical strings, matching the
    service's wire format.
    """
    Draft = 'Draft'
    Cancelled = 'Cancelled'
    Submitted = 'Submitted'
    Approved = 'Approved'
    Approving = 'Approving'
    Rejected = 'Rejected'
    Locked = 'Locked'
    Error = 'Error'
class IndicesCurrency(EnumBase, Enum):
    """Currencies supported for Indices

    ISO 4217 currency codes. Do not reorder: enum definition order is
    observable via iteration. BRL was appended after the alphabetical run,
    presumably added later.
    """
    USD = 'USD'
    EUR = 'EUR'
    GBP = 'GBP'
    CAD = 'CAD'
    AUD = 'AUD'
    CHF = 'CHF'
    CNH = 'CNH'
    CNY = 'CNY'
    DKK = 'DKK'
    HKD = 'HKD'
    IDR = 'IDR'
    ILS = 'ILS'
    INR = 'INR'
    JPY = 'JPY'
    KRW = 'KRW'
    KWD = 'KWD'
    MXN = 'MXN'
    MYR = 'MYR'
    NOK = 'NOK'
    NZD = 'NZD'
    PHP = 'PHP'
    PLN = 'PLN'
    RUB = 'RUB'
    SAR = 'SAR'
    SEK = 'SEK'
    SGD = 'SGD'
    THB = 'THB'
    TRY = 'TRY'
    TWD = 'TWD'
    ZAR = 'ZAR'
    BRL = 'BRL'
@dataclass
class IndicesConstructRequestTypes(Base):
    """Marker base class for indices construction request payloads."""
    pass
@dataclass
class IndicesConstructResponseTypes(Base):
    """Marker base class for indices construction response payloads."""
    pass
@dataclass
class IndicesRebalanceActionTypes(Base):
    """Marker base class for indices rebalance action payloads."""
    pass
@dataclass
class IndicesRebalanceInputTypes(Base):
    """Marker base class for indices rebalance input payloads."""
    pass
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ApprovalComment(Base):
    """A timestamped comment message attached to an approval."""
    timestamp: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    # ``name`` uses name_metadata like every other dataclass in this module.
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketRiskParams(Base):
    """Risk-model parameters for a custom basket (all fields optional)."""
    risk_model: Optional[str] = field(default=None, metadata=field_metadata)
    fx_hedged: Optional[bool] = field(default=None, metadata=field_metadata)
    delete: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsRebalanceAction(IndicesRebalanceActionTypes):
    """A rebalance action on a custom basket, with an optional comment."""
    comment: Optional[str] = field(default=None, metadata=field_metadata)
    action_type: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsResponse(IndicesConstructResponseTypes):
    """Response carrying status, report id and asset id for a basket request."""
    status: Optional[str] = field(default=None, metadata=field_metadata)
    report_id: Optional[str] = field(default=None, metadata=field_metadata)
    asset_id: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectActionRequest(IndicesRebalanceActionTypes):
    """An iSelect action request with trader attestations."""
    # NOTE(review): annotated ``str`` (non-Optional) yet defaulted to None —
    # the module-wide pattern for "required in spirit" fields; confirm.
    action_comment: str = field(default=None, metadata=field_metadata)
    trader_attestations: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    user_action: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectIndexParameter(Base):
    """A single iSelect index parameter as a name/value pair."""
    name: Optional[str] = field(default=None, metadata=field_metadata)
    value: Optional[Union[float, str]] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectIndexParameters(Base):
    """iSelect index parameter as a name/value pair.

    NOTE(review): structurally identical to ``ISelectIndexParameter`` —
    presumably both spellings exist for schema compatibility; confirm
    before consolidating.
    """
    name: Optional[str] = field(default=None, metadata=field_metadata)
    value: Optional[Union[float, str]] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectSeries(Base):
    """A data series identified by an identifier and identifier type."""
    data: Optional[tuple] = field(default=None, metadata=field_metadata)
    identifier: Optional[str] = field(default=None, metadata=field_metadata)
    identifier_type: Optional[str] = field(default=None, metadata=field_metadata)
    # Unlike most classes here, ``name`` is a regular field (field_metadata).
    name: Optional[str] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class IndicesPositionInput(Base):
    """A single index position: an asset and its weight."""
    # Non-Optional annotations with None defaults follow this module's
    # convention for effectively-required fields.
    asset_id: str = field(default=None, metadata=field_metadata)
    weight: float = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class PositionPriceInput(Base):
    """A position used for pricing: an asset id plus optional quantity, weight and notional."""
    asset_id: str = field(default=None, metadata=field_metadata)
    quantity: Optional[float] = field(default=None, metadata=field_metadata)
    weight: Optional[float] = field(default=None, metadata=field_metadata)
    notional: Optional[float] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class PublishParameters(Base):
    """Flags controlling basket publication to external vendors and price-history inclusion.

    All publish flags default to ``False``, i.e. publication is opt-in.
    """
    publish_to_bloomberg: bool = field(default=False, metadata=field_metadata)
    include_price_history: bool = field(default=False, metadata=field_metadata)
    publish_to_reuters: Optional[bool] = field(default=False, metadata=field_metadata)
    publish_to_factset: Optional[bool] = field(default=False, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CreditCustomBasketPricingParameters(Base):
    """Pricing parameters for a credit custom basket.

    Defaults to a 16:00:00 quote time and 'Mid' quote side; quote source, quoting
    and weighting types have no default and must be supplied.
    """
    quote_source: BasketValuationSource = field(default=None, metadata=field_metadata)
    quote_time: str = field(default='16:00:00', metadata=field_metadata)
    quote_side: Side = field(default='Mid', metadata=field_metadata)
    quoting_type: QuoteType = field(default=None, metadata=field_metadata)
    weighting_type: WeightingType = field(default=None, metadata=field_metadata)
    currency: Optional[Currency] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsPricingParameters(Base):
    """Pricing parameters for custom basket creation/rebalance (data sets, divisor,
    initial price, target notional, pricing date, vendor, weighting strategy)."""
    currency: Optional[IndicesCurrency] = field(default=None, metadata=field_metadata)
    asset_data_set_id: Optional[str] = field(default=None, metadata=field_metadata)
    divisor: Optional[float] = field(default=None, metadata=field_metadata)
    fx_data_set_id: Optional[str] = field(default=None, metadata=field_metadata)
    fallback_date: Optional[str] = field(default=None, metadata=field_metadata)
    initial_price: Optional[float] = field(default=None, metadata=field_metadata)
    target_notional: Optional[float] = field(default=None, metadata=field_metadata)
    pricing_date: Optional[datetime.date] = field(default=None, metadata=field_metadata)
    vendor: Optional[MarketDataVendor] = field(default=None, metadata=field_metadata)
    weighting_strategy: Optional[str] = field(default=None, metadata=field_metadata)
    reweight: Optional[bool] = field(default=False, metadata=field_metadata)
    # Default overwrite data set per the service schema.
    asset_overwrite_data_set_id: Optional[str] = field(default='BASKET_EOD_OVERWRITE', metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsRiskScheduleInputs(Base):
    """Inputs for scheduling custom basket risk calculations: a tuple of risk model parameters."""
    risk_models: Optional[Tuple[CustomBasketRiskParams, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectConstituentColumn(Base):
    """Column definition for an iSelect constituent grid.

    Several attributes carry a trailing underscore because their wire names
    (``id``, ``field``, ``class``, ``filter``) collide with Python keywords or
    builtins; ``config(field_name=...)`` restores the wire name on serialization.
    """
    id_: str = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    field_: str = field(default=None, metadata=config(field_name='field', exclude=exclude_none))
    name: str = field(default=None, metadata=field_metadata)
    aggregator_string: Optional[str] = field(default=None, metadata=field_metadata)
    class_: Optional[str] = field(default=None, metadata=config(field_name='class', exclude=exclude_none))
    filter_: Optional[str] = field(default=None, metadata=config(field_name='filter', exclude=exclude_none))
    formatter_string: Optional[str] = field(default=None, metadata=field_metadata)
    ID: Optional[int] = field(default=None, metadata=field_metadata)
    max_width: Optional[int] = field(default=None, metadata=field_metadata)
    min_width: Optional[int] = field(default=None, metadata=field_metadata)
    precision: Optional[int] = field(default=None, metadata=field_metadata)
    sortable: Optional[int] = field(default=None, metadata=field_metadata)
    tooltip: Optional[str] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class IndicesPositionSet(Base):
    """A dated set of index positions."""
    positions: Tuple[IndicesPositionInput, ...] = field(default=None, metadata=field_metadata)
    position_date: datetime.date = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CreditCustomBasketCreateInputs(IndicesConstructRequestTypes):
    """Inputs for creating a credit custom basket.

    Defaults to a 'Price Return' calculation and a 'Credit' asset class.
    """
    ticker: str = field(default=None, metadata=field_metadata)
    name: str = field(default=None, metadata=field_metadata)
    pricing_parameters: CreditCustomBasketPricingParameters = field(default=None, metadata=field_metadata)
    position_set: Tuple[PositionPriceInput, ...] = field(default=None, metadata=field_metadata)
    return_type: IndexCalculationType = field(default='Price Return', metadata=field_metadata)
    styles: Tuple[str, ...] = field(default=None, metadata=field_metadata)
    asset_class: Optional[AssetClass] = field(default='Credit', metadata=field_metadata)
    description: Optional[str] = field(default=None, metadata=field_metadata)
    related_content: Optional[GIRDomain] = field(default=None, metadata=field_metadata)
    portfolio_id: Optional[str] = field(default=None, metadata=field_metadata)
    publish_parameters: Optional[PublishParameters] = field(default=None, metadata=field_metadata)
    index_notes: Optional[str] = field(default=None, metadata=field_metadata)
    flagship: Optional[bool] = field(default=False, metadata=field_metadata)
    on_behalf_of: Optional[str] = field(default=None, metadata=field_metadata)
    clone_parent_id: Optional[str] = field(default=None, metadata=field_metadata)
    hedge_id: Optional[str] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsBackcastInputs(Base):
    """Inputs for backcasting a custom basket: a tuple of historical, dated position sets."""
    position_set: Tuple[IndicesPositionSet, ...] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsCreateInputs(IndicesConstructRequestTypes):
    """Inputs for creating a custom basket.

    ``allow_ca_restricted_assets`` keeps its non-standard wire name
    ``allowCARestrictedAssets`` via explicit ``config`` (the default camelCase
    conversion would not produce the ``CA`` capitalization).
    """
    ticker: str = field(default=None, metadata=field_metadata)
    name: str = field(default=None, metadata=field_metadata)
    pricing_parameters: CustomBasketsPricingParameters = field(default=None, metadata=field_metadata)
    position_set: Tuple[PositionPriceInput, ...] = field(default=None, metadata=field_metadata)
    return_type: str = field(default=None, metadata=field_metadata)
    description: Optional[str] = field(default=None, metadata=field_metadata)
    styles: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    related_content: Optional[GIRDomain] = field(default=None, metadata=field_metadata)
    portfolio_id: Optional[str] = field(default=None, metadata=field_metadata)
    hedge_id: Optional[str] = field(default=None, metadata=field_metadata)
    clone_parent_id: Optional[str] = field(default=None, metadata=field_metadata)
    publish_parameters: Optional[PublishParameters] = field(default=None, metadata=field_metadata)
    index_notes: Optional[str] = field(default=None, metadata=field_metadata)
    flagship: Optional[bool] = field(default=None, metadata=field_metadata)
    on_behalf_of: Optional[str] = field(default=None, metadata=field_metadata)
    allow_limited_access_assets: Optional[bool] = field(default=False, metadata=field_metadata)
    allow_ca_restricted_assets: Optional[bool] = field(default=False, metadata=config(field_name='allowCARestrictedAssets', exclude=exclude_none))
    vendor: Optional[str] = field(default=None, metadata=field_metadata)
    default_backcast: Optional[bool] = field(default=True, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsEditInputs(Base):
    """Inputs for editing custom basket metadata (name, description, styles,
    publication, notes and related identifiers); all fields are optional."""
    name: Optional[str] = field(default=None, metadata=field_metadata)
    description: Optional[str] = field(default=None, metadata=field_metadata)
    styles: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    related_content: Optional[GIRDomain] = field(default=None, metadata=field_metadata)
    publish_parameters: Optional[PublishParameters] = field(default=None, metadata=field_metadata)
    index_notes: Optional[str] = field(default=None, metadata=field_metadata)
    index_not_trading_reasons: Optional[IndexNotTradingReasons] = field(default=None, metadata=field_metadata)
    flagship: Optional[bool] = field(default=None, metadata=field_metadata)
    clone_parent_id: Optional[str] = field(default=None, metadata=field_metadata)
    hedge_id: Optional[str] = field(default=None, metadata=field_metadata)
    portfolio_id: Optional[str] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class CustomBasketsRebalanceInputs(Base):
    """Inputs for rebalancing a custom basket.

    ``allow_ca_restricted_assets`` keeps its non-standard wire name
    ``allowCARestrictedAssets`` via explicit ``config``.
    """
    position_set: Optional[Tuple[PositionPriceInput, ...]] = field(default=None, metadata=field_metadata)
    publish_parameters: Optional[PublishParameters] = field(default=None, metadata=field_metadata)
    pricing_parameters: Optional[CustomBasketsPricingParameters] = field(default=None, metadata=field_metadata)
    allow_limited_access_assets: Optional[bool] = field(default=False, metadata=field_metadata)
    allow_ca_restricted_assets: Optional[bool] = field(default=False, metadata=config(field_name='allowCARestrictedAssets', exclude=exclude_none))
    allow_system_approval: Optional[bool] = field(default=False, metadata=field_metadata)
    clone_parent_id: Optional[str] = field(default=None, metadata=field_metadata)
    hedge_id: Optional[str] = field(default=None, metadata=field_metadata)
    portfolio_id: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DynamicConstructionResponse(IndicesConstructResponseTypes):
    """Response describing dynamic index construction state: grid columns,
    validations, parameters and overall status/validity."""
    action: Optional[object] = field(default=None, metadata=field_metadata)
    columns: Optional[Tuple[ISelectConstituentColumn, ...]] = field(default=None, metadata=field_metadata)
    constituent_validations: Optional[tuple] = field(default=None, metadata=field_metadata)
    date_validation_status: Optional[str] = field(default=None, metadata=field_metadata)
    types: Optional[tuple] = field(default=None, metadata=field_metadata)
    date_validations: Optional[tuple] = field(default=None, metadata=field_metadata)
    new_parameters: Optional[Tuple[ISelectNewParameter, ...]] = field(default=None, metadata=field_metadata)
    index_type: Optional[str] = field(default=None, metadata=field_metadata)
    index_parameter_definitions: Optional[tuple] = field(default=None, metadata=field_metadata)
    index_metadata: Optional[tuple] = field(default=None, metadata=field_metadata)
    index_parameters: Optional[tuple] = field(default=None, metadata=field_metadata)
    index_parameter_validation: Optional[tuple] = field(default=None, metadata=field_metadata)
    status: Optional[object] = field(default=None, metadata=field_metadata)
    valid: Optional[int] = field(default=None, metadata=field_metadata)
    validation_messages: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectRebalance(Base):
    """An iSelect rebalance submission: new weights/parameters and the rebalance date."""
    new_weights: Optional[Tuple[ISelectNewWeight, ...]] = field(default=None, metadata=field_metadata)
    rebalance_date: Optional[str] = field(default=None, metadata=field_metadata)
    new_parameters: Optional[Tuple[ISelectNewParameter, ...]] = field(default=None, metadata=field_metadata)
    index_parameters: Optional[Tuple[ISelectIndexParameters, ...]] = field(default=None, metadata=field_metadata)
    waiver_requested: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectRequest(IndicesRebalanceInputTypes):
    """An iSelect rebalance request (date, request counter and new weights/units/parameters)."""
    rebalance_date: str = field(default=None, metadata=field_metadata)
    request_counter: int = field(default=None, metadata=field_metadata)
    use_new_rebalance_interface: bool = field(default=None, metadata=field_metadata)
    new_parameters: Optional[Tuple[ISelectNewParameter, ...]] = field(default=None, metadata=field_metadata)
    index_parameters: Optional[Tuple[ISelectIndexParameter, ...]] = field(default=None, metadata=field_metadata)
    new_weights: Optional[Tuple[ISelectNewWeight, ...]] = field(default=None, metadata=field_metadata)
    new_units: Optional[Tuple[ISelectNewUnit, ...]] = field(default=None, metadata=field_metadata)
    entry_type: Optional[str] = field(default=None, metadata=field_metadata)
    waiver_requested: Optional[bool] = field(default=None, metadata=field_metadata)
    presubmit: Optional[bool] = field(default=None, metadata=field_metadata)
    requester_id: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ISelectResponse(Base):
action: Optional[object] = field(default=None, metadata=config(field_name='Action', exclude=exclude_none))
action_comment: Optional[str] = field(default=None, metadata=config(field_name='ActionComment', exclude=exclude_none))
asset_name: Optional[str] = field(default=None, metadata=field_metadata)
asset_short_name: Optional[str] = field(default=None, metadata=field_metadata)
available_action_confirms: Optional[Tuple[Tuple[str, ...], ...]] = field(default=None, metadata=field_metadata)
available_actions: Optional[tuple] = field(default=None, metadata=field_metadata)
available_rebalance_dates: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
constituent_validations: Optional[tuple] = field(default=None, metadata=field_metadata)
date_validation_status: Optional[str] = field(default=None, metadata=field_metadata)
date_validations: Optional[tuple] = field(default=None, metadata=field_metadata)
entry_mode: Optional[str] = field(default=None, metadata=field_metadata)
entry_type: Optional[str] = field(default=None, metadata=field_metadata)
internal_rebalance: Optional[int] = field(default=None, metadata=field_metadata)
index_parameter_definitions: Optional[tuple] = field(default=None, metadata=field_metadata)
index_parameters: Optional[tuple] = field(default=None, metadata=field_metadata)
index_parameter_validation: Optional[tuple] = field(default=None, metadata=field_metadata)
new_units: Optional[Tuple[ISelectNewUnit, ...]] = field(default=None, metadata=field_metadata)
new_weights: Optional[Tuple[ISelectNewWeight, ...]] = field(default=None, metadata=field_metadata)
notification_date: Optional[str] = field(default=None, metadata=field_metadata)
rebalance_date: Optional[str] = field(default=None, metadata=field_metadata)
rebalance_determination_date: Optional[str] = field(default=None, metadata=field_metadata)
reb_determination_index_level: Optional[float] = | |
= ObjectTypeClass
self.ObjectType = ObjectType
self.ObjectLabel = ObjectLabel
self.SlantPlane = SlantPlane
self.GroundPlane = GroundPlane
self.Size = Size
self.Orientation = Orientation
if isinstance(Articulation, str):
self.Articulation = CompoundCommentType(Value=Articulation)
elif isinstance(Articulation, list):
self.Articulation = CompoundCommentType(Comments=Articulation)
elif isinstance(Articulation, dict):
self.Articulation = CompoundCommentType(**Articulation)
else:
self.Articulation = Articulation
if isinstance(Configuration, str):
self.Configuration = CompoundCommentType(Value=Configuration)
elif isinstance(Configuration, list):
self.Configuration = CompoundCommentType(Comments=Configuration)
elif isinstance(Configuration, dict):
self.Configuration = CompoundCommentType(**Configuration)
else:
self.Configuration = Configuration
self.Accessories = Accessories
self.PaintScheme = PaintScheme
self.Camouflage = Camouflage
self.Obscuration = Obscuration
self.ObscurationPercent = ObscurationPercent
self.ImageLevelObscuration = ImageLevelObscuration
self.ImageLocation = ImageLocation
self.GeoLocation = GeoLocation
self.TargetToClutterRatio = TargetToClutterRatio
self.VisualQualityMetric = VisualQualityMetric
self.UnderlyingTerrain = UnderlyingTerrain
self.OverlyingTerrain = OverlyingTerrain
self.TerrainTexture = TerrainTexture
self.SeasonalCover = SeasonalCover
super(TheObjectType, self).__init__(**kwargs)
def _check_placement(self, rows, cols, row_bounds, col_bounds, overlap_cutoff=0.5):
"""
Checks the bounds condition for the provided box.
Here inclusion is defined by what proportion of the area of the proposed
chip is actually contained inside the image bounds.
Parameters
----------
rows : int|float
The number of rows in the image.
cols : int|float
The number of columns in the image.
row_bounds : List
Of the form `[row min, row max]`
col_bounds : List
Of the form `[col min, col max]`
overlap_cutoff : float
Determines the transition from in the periphery to out of the image.
Returns
-------
int
1 - completely in the image
2 - the proposed chip has `overlap_cutoff <= fractional contained area < 1`
3 - the proposed chip has `fractional contained area < overlap_cutoff`
"""
if row_bounds[1] <= row_bounds[0] or col_bounds[1] <= col_bounds[0]:
raise ValueError('bounds out of order ({}, {})'.format(row_bounds, col_bounds))
if 0 <= row_bounds[0] and rows < row_bounds[1] and 0 <= col_bounds[0] and cols < col_bounds[1]:
return 1 # completely in bounds
row_size = row_bounds[1] - row_bounds[0]
col_size = col_bounds[1] - col_bounds[0]
first_row, last_row = max(0, row_bounds[0]), min(rows, row_bounds[1])
first_col, last_col = max(0, col_bounds[0]), min(cols, col_bounds[1])
area_overlap = (last_row - first_row)*(last_col - first_col)
if area_overlap >= overlap_cutoff*row_size*col_size:
return 2 # the item is at the periphery
else:
return 3 # it should be considered out of range
def set_image_location_from_sicd(self, sicd, populate_in_periphery=False):
    """
    Set the image location information with respect to the given SICD,
    assuming that the physical coordinates are populated.

    Parameters
    ----------
    sicd : SICDType
    populate_in_periphery : bool
        Populate the image location even when the object is only peripherally
        contained in the image?

    Returns
    -------
    int
        -1 - insufficient metadata to proceed or other failure
        0 - nothing to be done
        1 - successful
        2 - object in the image periphery, populating based on `populate_in_periphery`
        3 - object not in the image field
    """

    if self.ImageLocation is not None:
        # no need to infer anything, it's already populated
        return 0
    if self.GeoLocation is None:
        logger.warning(
            'GeoLocation is not populated,\n\t'
            'so the image location can not be inferred')
        return -1
    if not sicd.can_project_coordinates():
        logger.warning(_no_projection_text)
        return -1

    # gets the prospective image location
    image_location = ImageLocationType.from_geolocation(self.GeoLocation, sicd)
    if image_location is None:
        return -1
    # get nominal object size in meters and pixels
    if self.Size is None:
        # no physical size available - fall back to a small nominal footprint
        row_size = 2.0
        col_size = 2.0
    else:
        max_size = self.Size.get_max_diameter()
        if max_size == 0:
            max_size = 10.0  # todo: fix this...
        row_size = max_size/sicd.Grid.Row.SS
        col_size = max_size/sicd.Grid.Col.SS
    # check bounding information
    rows = sicd.ImageData.NumRows
    cols = sicd.ImageData.NumCols
    center_pixel = image_location.CenterPixel.get_array(dtype='float64')
    row_bounds = [center_pixel[0] - 0.5*row_size, center_pixel[0] + 0.5*row_size]
    col_bounds = [center_pixel[1] - 0.5*col_size, center_pixel[1] + 0.5*col_size]
    placement = self._check_placement(rows, cols, row_bounds, col_bounds)
    if placement == 3:
        # out of the image field - leave ImageLocation unset
        return placement
    if placement == 2 and not populate_in_periphery:
        # only peripherally contained, and peripheral population is disabled
        return placement
    # BUGFIX: the previous version also assigned `self.ImageLocation` *before*
    # the placement check, which populated the location even for out-of-image
    # and excluded-peripheral objects, contradicting the documented contract.
    # The assignment now happens only after the placement check passes.
    self.ImageLocation = image_location
    return placement
def set_geo_location_from_sicd(self, sicd, projection_type='HAE', **proj_kwargs):
    """
    Populate the geographical location from the image location, projected
    through the given SICD. Does nothing when `GeoLocation` is already set,
    and logs a warning (without raising) when the required inputs are missing.

    .. Note::
        This assumes that the image coordinates are with respect to the given
        image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
        which will be added here.

    Parameters
    ----------
    sicd : SICDType
    projection_type : str
        The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
        requires configuration for the DEM pathway described in
        :func:`sarpy.geometry.point_projection.image_to_ground_dem`.
    proj_kwargs
        The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.
    """

    if self.GeoLocation is not None:
        return  # already populated - nothing to infer

    image_location_missing = self.ImageLocation is None
    if image_location_missing:
        logger.warning(
            'ImageLocation is not populated,\n\t'
            'so the geographical location can not be inferred')
        return

    projection_possible = sicd.can_project_coordinates()
    if not projection_possible:
        logger.warning(_no_projection_text)
        return

    self.GeoLocation = GeoLocationType.from_image_location(
        self.ImageLocation, sicd, projection_type=projection_type, **proj_kwargs)
def set_chip_details_from_sicd(self, sicd, layover_shift=False, populate_in_periphery=False, padding_fraction=0.05, minimum_pad=0):
    """
    Set the chip information with respect to the given SICD, assuming that the
    image location and size are defined.

    Parameters
    ----------
    sicd : SICDType
    layover_shift : bool
        Shift based on layover direction? This should be `True` if the identification of
        the bounds and/or center pixel do not include any layover, as in
        populating location from known ground truth. This should be `False` if
        the identification of bounds and/or center pixel do include layover,
        potentially as based on annotation of the imagery itself in pixel
        space.
    populate_in_periphery : bool
        Should we populate for peripheral?
    padding_fraction : None|float
        Default fraction of box dimension by which to pad.
    minimum_pad : int|float
        The minimum number of pixels by which to pad for the chip definition

    Returns
    -------
    int
        -1 - insufficient metadata to proceed
        0 - nothing to be done
        1 - successful
        2 - object in the image periphery, populating based on `populate_in_periphery`
        3 - object not in the image field
    """

    if self.SlantPlane is not None:
        # no need to infer anything, it's already populated
        return 0
    if self.Size is None:
        logger.warning(
            'Size is not populated,\n\t'
            'so the chip size can not be inferred')
        return -1
    if self.ImageLocation is None:
        # try to set from geolocation
        return_value = self.set_image_location_from_sicd(sicd, populate_in_periphery=populate_in_periphery)
        if return_value in [-1, 3] or (return_value == 2 and not populate_in_periphery):
            return return_value
    # get nominal object size, in meters
    max_size = self.Size.get_max_diameter()  # in meters
    row_size = max_size/sicd.Grid.Row.SS  # in pixels
    col_size = max_size/sicd.Grid.Col.SS  # in pixels
    # get nominal image box
    image_location = self.ImageLocation
    pixel_box = image_location.get_nominal_box(row_length=row_size, col_length=col_size)
    ground_unit_norm = wgs_84_norm(sicd.GeoData.SCP.ECF.get_array())
    slant_plane_unit_norm = numpy.cross(sicd.Grid.Row.UVectECF.get_array(), sicd.Grid.Col.UVectECF.get_array())
    # NOTE(review): this dot product scales physical height into the slant plane;
    # it presumes Row/Col unit vectors are orthonormal so the cross product is a
    # unit normal - confirm against the SICD grid population.
    magnitude_factor = ground_unit_norm.dot(slant_plane_unit_norm)
    # determines the relative size of things in slant plane versus ground plane
    # get nominal layover vector - should be pointed generally towards the top (negative rows value)
    layover_magnitude = sicd.SCPCOA.LayoverMagnitude
    if layover_magnitude is None:
        # fall back to a nominal layover magnitude when the SICD does not provide one
        layover_magnitude = 0.25
    layover_size = self.Size.Height*layover_magnitude*magnitude_factor
    if sicd.SCPCOA.LayoverAng is None:
        layover_angle = 0.0
    else:
        layover_angle = numpy.deg2rad(sicd.SCPCOA.LayoverAng - sicd.SCPCOA.AzimAng)
    layover_vector = layover_size*numpy.array(
        [numpy.cos(layover_angle)/sicd.Grid.Row.SS, numpy.sin(layover_angle)/sicd.Grid.Col.SS])
    # craft the layover box
    if layover_shift:
        layover_box = pixel_box + layover_vector
    else:
        layover_box = pixel_box
    # determine the maximum and minimum pixel values here
    min_rows = min(numpy.min(pixel_box[:, 0]), numpy.min(layover_box[:, 0]))
    max_rows = max(numpy.max(pixel_box[:, 0]), numpy.max(layover_box[:, 0]))
    min_cols = min(numpy.min(pixel_box[:, 1]), numpy.min(layover_box[:, 1]))
    max_cols = max(numpy.max(pixel_box[:, 1]), numpy.max(layover_box[:, 1]))
    # determine the padding amount
    padding_fraction = 0.0 if padding_fraction is None else float(padding_fraction)
    if padding_fraction < 0.0:
        padding_fraction = 0.0
    row_pad = max(minimum_pad, padding_fraction*(max_rows-min_rows))
    col_pad = max(minimum_pad, padding_fraction*(max_cols-min_cols))
    # check our bounding information
    rows = sicd.ImageData.NumRows
    cols = sicd.ImageData.NumCols
    chip_rows = [min_rows - row_pad, max_rows + row_pad]
    chip_cols = [min_cols - col_pad, max_cols + col_pad]
    placement = self._check_placement(rows, cols, chip_rows, chip_cols)
    if placement == 3 or (placement == 2 and not populate_in_periphery):
        return placement
    # set the physical data ideal chip size
    physical = PhysicalType.from_ranges(chip_rows, chip_cols, rows, cols)
    # determine nominal shadow vector
    shadow_magnitude = sicd.SCPCOA.ShadowMagnitude
    if shadow_magnitude is None:
        # fall back to a nominal shadow magnitude when the SICD does not provide one
        shadow_magnitude = 1.0
    shadow_size = self.Size.Height*shadow_magnitude*magnitude_factor
    shadow_angle = sicd.SCPCOA.Shadow
    # default to pi radians (directly down-image) when no shadow angle is given
    shadow_angle = numpy.pi if shadow_angle is None else numpy.deg2rad(shadow_angle)
    shadow_vector = -shadow_size*numpy.array(
        [numpy.cos(shadow_angle)/sicd.Grid.Row.SS, numpy.sin(shadow_angle)/sicd.Grid.Col.SS])
    shadow_box = pixel_box + shadow_vector
    # expand the chip bounds to also contain the shadow
    min_rows = min(min_rows, numpy.min(shadow_box[:, 0]))
    max_rows = max(max_rows, numpy.max(shadow_box[:, 0]))
    min_cols = min(min_cols, numpy.min(shadow_box[:, 1]))
    max_cols = max(max_cols, numpy.max(shadow_box[:, 1]))
    chip_rows = [min_rows - row_pad, max_rows + row_pad]
    chip_cols = [min_cols - col_pad, max_cols + col_pad]
    # set the physical with shadows data ideal chip size
    physical_with_shadows = PhysicalType.from_ranges(chip_rows, chip_cols, rows, cols)
    self.SlantPlane = PlanePhysicalType(
        Physical=physical,
        PhysicalWithShadows=physical_with_shadows)
    return placement
def get_image_geometry_object_for_sicd(self, include_chip=False):
"""
Gets the geometry element describing the image geometry for a sicd.
Returns
-------
| |
# cybox/bindings/network_socket_object.py
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from mixbox.binding_utils import *
from . import cybox_common
from . import socket_address_object
class SocketOptionsType(GeneratedsSuper):
"""The SocketOptionsType specifies any particular options used by the
socket. If an options is supported only by specific address
families or socket types, that's indicated in parentheses."""
subclass = None
superclass = None
def __init__(self, IP_MULTICAST_IF=None, IP_MULTICAST_IF2=None, IP_MULTICAST_LOOP=None, IP_TOS=None, SO_BROADCAST=None, SO_CONDITIONAL_ACCEPT=None, SO_KEEPALIVE=None, SO_DONTROUTE=None, SO_LINGER=None, SO_DONTLINGER=None, SO_OOBINLINE=None, SO_RCVBUF=None, SO_GROUP_PRIORITY=None, SO_REUSEADDR=None, SO_DEBUG=None, SO_RCVTIMEO=None, SO_SNDBUF=None, SO_SNDTIMEO=None, SO_UPDATE_ACCEPT_CONTEXT=None, SO_TIMEOUT=None, TCP_NODELAY=None):
    """Initialize with one optional value per socket option; all default to None (unset)."""
    # Generated binding code: each argument is stored verbatim on an
    # identically-named attribute. Attribute names mirror the XML element names.
    self.IP_MULTICAST_IF = IP_MULTICAST_IF
    self.IP_MULTICAST_IF2 = IP_MULTICAST_IF2
    self.IP_MULTICAST_LOOP = IP_MULTICAST_LOOP
    self.IP_TOS = IP_TOS
    self.SO_BROADCAST = SO_BROADCAST
    self.SO_CONDITIONAL_ACCEPT = SO_CONDITIONAL_ACCEPT
    self.SO_KEEPALIVE = SO_KEEPALIVE
    self.SO_DONTROUTE = SO_DONTROUTE
    self.SO_LINGER = SO_LINGER
    self.SO_DONTLINGER = SO_DONTLINGER
    self.SO_OOBINLINE = SO_OOBINLINE
    self.SO_RCVBUF = SO_RCVBUF
    self.SO_GROUP_PRIORITY = SO_GROUP_PRIORITY
    self.SO_REUSEADDR = SO_REUSEADDR
    self.SO_DEBUG = SO_DEBUG
    self.SO_RCVTIMEO = SO_RCVTIMEO
    self.SO_SNDBUF = SO_SNDBUF
    self.SO_SNDTIMEO = SO_SNDTIMEO
    self.SO_UPDATE_ACCEPT_CONTEXT = SO_UPDATE_ACCEPT_CONTEXT
    self.SO_TIMEOUT = SO_TIMEOUT
    self.TCP_NODELAY = TCP_NODELAY
@staticmethod
def factory(*args_, **kwargs_):
    """Build a SocketOptionsType, preferring a registered subclass override when set."""
    target_class = SocketOptionsType.subclass if SocketOptionsType.subclass else SocketOptionsType
    return target_class(*args_, **kwargs_)
# Generated accessor pairs: one trivial get_X/set_X per socket-option attribute.
# Kept verbatim for compatibility with the generateDS-style binding contract.
def get_IP_MULTICAST_IF(self): return self.IP_MULTICAST_IF
def set_IP_MULTICAST_IF(self, IP_MULTICAST_IF): self.IP_MULTICAST_IF = IP_MULTICAST_IF
# Validation hook for string-valued option properties; intentionally a no-op
# in the generated bindings.
def validate_StringObjectPropertyType(self, value):
    # Validate type cybox_common.StringObjectPropertyType, a restriction on None.
    pass
def get_IP_MULTICAST_IF2(self): return self.IP_MULTICAST_IF2
def set_IP_MULTICAST_IF2(self, IP_MULTICAST_IF2): self.IP_MULTICAST_IF2 = IP_MULTICAST_IF2
def get_IP_MULTICAST_LOOP(self): return self.IP_MULTICAST_LOOP
def set_IP_MULTICAST_LOOP(self, IP_MULTICAST_LOOP): self.IP_MULTICAST_LOOP = IP_MULTICAST_LOOP
def get_IP_TOS(self): return self.IP_TOS
def set_IP_TOS(self, IP_TOS): self.IP_TOS = IP_TOS
def get_SO_BROADCAST(self): return self.SO_BROADCAST
def set_SO_BROADCAST(self, SO_BROADCAST): self.SO_BROADCAST = SO_BROADCAST
def get_SO_CONDITIONAL_ACCEPT(self): return self.SO_CONDITIONAL_ACCEPT
def set_SO_CONDITIONAL_ACCEPT(self, SO_CONDITIONAL_ACCEPT): self.SO_CONDITIONAL_ACCEPT = SO_CONDITIONAL_ACCEPT
def get_SO_KEEPALIVE(self): return self.SO_KEEPALIVE
def set_SO_KEEPALIVE(self, SO_KEEPALIVE): self.SO_KEEPALIVE = SO_KEEPALIVE
def get_SO_DONTROUTE(self): return self.SO_DONTROUTE
def set_SO_DONTROUTE(self, SO_DONTROUTE): self.SO_DONTROUTE = SO_DONTROUTE
def get_SO_LINGER(self): return self.SO_LINGER
def set_SO_LINGER(self, SO_LINGER): self.SO_LINGER = SO_LINGER
# Validation hook for unsigned-integer option properties; also a no-op.
def validate_UnsignedIntegerObjectPropertyType(self, value):
    # Validate type cybox_common.UnsignedIntegerObjectPropertyType, a restriction on None.
    pass
def get_SO_DONTLINGER(self): return self.SO_DONTLINGER
def set_SO_DONTLINGER(self, SO_DONTLINGER): self.SO_DONTLINGER = SO_DONTLINGER
def get_SO_OOBINLINE(self): return self.SO_OOBINLINE
def set_SO_OOBINLINE(self, SO_OOBINLINE): self.SO_OOBINLINE = SO_OOBINLINE
def get_SO_RCVBUF(self): return self.SO_RCVBUF
def set_SO_RCVBUF(self, SO_RCVBUF): self.SO_RCVBUF = SO_RCVBUF
def get_SO_GROUP_PRIORITY(self): return self.SO_GROUP_PRIORITY
def set_SO_GROUP_PRIORITY(self, SO_GROUP_PRIORITY): self.SO_GROUP_PRIORITY = SO_GROUP_PRIORITY
def get_SO_REUSEADDR(self): return self.SO_REUSEADDR
def set_SO_REUSEADDR(self, SO_REUSEADDR): self.SO_REUSEADDR = SO_REUSEADDR
def get_SO_DEBUG(self): return self.SO_DEBUG
def set_SO_DEBUG(self, SO_DEBUG): self.SO_DEBUG = SO_DEBUG
def get_SO_RCVTIMEO(self): return self.SO_RCVTIMEO
def set_SO_RCVTIMEO(self, SO_RCVTIMEO): self.SO_RCVTIMEO = SO_RCVTIMEO
def get_SO_SNDBUF(self): return self.SO_SNDBUF
def set_SO_SNDBUF(self, SO_SNDBUF): self.SO_SNDBUF = SO_SNDBUF
def get_SO_SNDTIMEO(self): return self.SO_SNDTIMEO
def set_SO_SNDTIMEO(self, SO_SNDTIMEO): self.SO_SNDTIMEO = SO_SNDTIMEO
def get_SO_UPDATE_ACCEPT_CONTEXT(self): return self.SO_UPDATE_ACCEPT_CONTEXT
def set_SO_UPDATE_ACCEPT_CONTEXT(self, SO_UPDATE_ACCEPT_CONTEXT): self.SO_UPDATE_ACCEPT_CONTEXT = SO_UPDATE_ACCEPT_CONTEXT
def get_SO_TIMEOUT(self): return self.SO_TIMEOUT
def set_SO_TIMEOUT(self, SO_TIMEOUT): self.SO_TIMEOUT = SO_TIMEOUT
def get_TCP_NODELAY(self): return self.TCP_NODELAY
def set_TCP_NODELAY(self, TCP_NODELAY): self.TCP_NODELAY = TCP_NODELAY
def hasContent_(self):
if (
self.IP_MULTICAST_IF is not None or
self.IP_MULTICAST_IF2 is not None or
self.IP_MULTICAST_LOOP is not None or
self.IP_TOS is not None or
self.SO_BROADCAST is not None or
self.SO_CONDITIONAL_ACCEPT is not None or
self.SO_KEEPALIVE is not None or
self.SO_DONTROUTE is not None or
self.SO_LINGER is not None or
self.SO_DONTLINGER is not None or
self.SO_OOBINLINE is not None or
self.SO_RCVBUF is not None or
self.SO_GROUP_PRIORITY is not None or
self.SO_REUSEADDR is not None or
self.SO_DEBUG is not None or
self.SO_RCVTIMEO is not None or
self.SO_SNDBUF is not None or
self.SO_SNDTIMEO is not None or
self.SO_UPDATE_ACCEPT_CONTEXT is not None or
self.SO_TIMEOUT is not None or
self.TCP_NODELAY is not None
):
return True
else:
return False
    def export(self, lwrite, level, namespace_='NetworkSocketObj:', name_='SocketOptionsType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML through the *lwrite* callable.

        Emits a self-closing tag when no member is set; otherwise opens the
        tag, delegates the members to exportChildren(), and writes the
        closing tag. *level* controls the indentation depth.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(lwrite, level, pretty_print)
        lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(lwrite, level, already_processed, namespace_, name_='SocketOptionsType')
        if self.hasContent_():
            lwrite('>%s' % (eol_, ))
            self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(lwrite, level, pretty_print)
            lwrite('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No populated members: emit <tag/> instead of an open/close pair.
            lwrite('/>%s' % (eol_, ))
    def exportAttributes(self, lwrite, level, already_processed, namespace_='NetworkSocketObj:', name_='SocketOptionsType'):
        """Generated no-op hook: nothing is written here for this type."""
        pass
    def exportChildren(self, lwrite, level, namespace_='NetworkSocketObj:', name_='SocketOptionsType', fromsubclass_=False, pretty_print=True):
        """Write each populated member as a child XML element.

        Complex members (cybox_common property objects) delegate to their own
        export(); boolean members are written inline via gds_format_boolean.
        The element order below mirrors the schema and must be preserved.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.IP_MULTICAST_IF is not None:
            self.IP_MULTICAST_IF.export(lwrite, level, 'NetworkSocketObj:', name_='IP_MULTICAST_IF', pretty_print=pretty_print)
        if self.IP_MULTICAST_IF2 is not None:
            self.IP_MULTICAST_IF2.export(lwrite, level, 'NetworkSocketObj:', name_='IP_MULTICAST_IF2', pretty_print=pretty_print)
        if self.IP_MULTICAST_LOOP is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sIP_MULTICAST_LOOP>%s</%sIP_MULTICAST_LOOP>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.IP_MULTICAST_LOOP, input_name='IP_MULTICAST_LOOP'), 'NetworkSocketObj:', eol_))
        if self.IP_TOS is not None:
            self.IP_TOS.export(lwrite, level, 'NetworkSocketObj:', name_='IP_TOS', pretty_print=pretty_print)
        if self.SO_BROADCAST is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_BROADCAST>%s</%sSO_BROADCAST>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_BROADCAST, input_name='SO_BROADCAST'), 'NetworkSocketObj:', eol_))
        if self.SO_CONDITIONAL_ACCEPT is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_CONDITIONAL_ACCEPT>%s</%sSO_CONDITIONAL_ACCEPT>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_CONDITIONAL_ACCEPT, input_name='SO_CONDITIONAL_ACCEPT'), 'NetworkSocketObj:', eol_))
        if self.SO_KEEPALIVE is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_KEEPALIVE>%s</%sSO_KEEPALIVE>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_KEEPALIVE, input_name='SO_KEEPALIVE'), 'NetworkSocketObj:', eol_))
        if self.SO_DONTROUTE is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_DONTROUTE>%s</%sSO_DONTROUTE>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_DONTROUTE, input_name='SO_DONTROUTE'), 'NetworkSocketObj:', eol_))
        if self.SO_LINGER is not None:
            self.SO_LINGER.export(lwrite, level, 'NetworkSocketObj:', name_='SO_LINGER', pretty_print=pretty_print)
        if self.SO_DONTLINGER is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_DONTLINGER>%s</%sSO_DONTLINGER>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_DONTLINGER, input_name='SO_DONTLINGER'), 'NetworkSocketObj:', eol_))
        if self.SO_OOBINLINE is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_OOBINLINE>%s</%sSO_OOBINLINE>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_OOBINLINE, input_name='SO_OOBINLINE'), 'NetworkSocketObj:', eol_))
        if self.SO_RCVBUF is not None:
            self.SO_RCVBUF.export(lwrite, level, 'NetworkSocketObj:', name_='SO_RCVBUF', pretty_print=pretty_print)
        if self.SO_GROUP_PRIORITY is not None:
            self.SO_GROUP_PRIORITY.export(lwrite, level, 'NetworkSocketObj:', name_='SO_GROUP_PRIORITY', pretty_print=pretty_print)
        if self.SO_REUSEADDR is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_REUSEADDR>%s</%sSO_REUSEADDR>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_REUSEADDR, input_name='SO_REUSEADDR'), 'NetworkSocketObj:', eol_))
        if self.SO_DEBUG is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sSO_DEBUG>%s</%sSO_DEBUG>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.SO_DEBUG, input_name='SO_DEBUG'), 'NetworkSocketObj:', eol_))
        if self.SO_RCVTIMEO is not None:
            self.SO_RCVTIMEO.export(lwrite, level, 'NetworkSocketObj:', name_='SO_RCVTIMEO', pretty_print=pretty_print)
        if self.SO_SNDBUF is not None:
            self.SO_SNDBUF.export(lwrite, level, 'NetworkSocketObj:', name_='SO_SNDBUF', pretty_print=pretty_print)
        if self.SO_SNDTIMEO is not None:
            self.SO_SNDTIMEO.export(lwrite, level, 'NetworkSocketObj:', name_='SO_SNDTIMEO', pretty_print=pretty_print)
        if self.SO_UPDATE_ACCEPT_CONTEXT is not None:
            self.SO_UPDATE_ACCEPT_CONTEXT.export(lwrite, level, 'NetworkSocketObj:', name_='SO_UPDATE_ACCEPT_CONTEXT', pretty_print=pretty_print)
        if self.SO_TIMEOUT is not None:
            self.SO_TIMEOUT.export(lwrite, level, 'NetworkSocketObj:', name_='SO_TIMEOUT', pretty_print=pretty_print)
        if self.TCP_NODELAY is not None:
            showIndent(lwrite, level, pretty_print)
            lwrite('<%sTCP_NODELAY>%s</%sTCP_NODELAY>%s' % ('NetworkSocketObj:', self.gds_format_boolean(self.TCP_NODELAY, input_name='TCP_NODELAY'), 'NetworkSocketObj:', eol_))
    def build(self, node):
        """Populate this object from an ElementTree/lxml element *node*."""
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any XML namespace prefix to get the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Generated no-op hook: no XML attributes are parsed for this type."""
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'IP_MULTICAST_IF':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_IP_MULTICAST_IF(obj_)
elif nodeName_ == 'IP_MULTICAST_IF2':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_IP_MULTICAST_IF2(obj_)
elif nodeName_ == 'IP_MULTICAST_LOOP':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'IP_MULTICAST_LOOP')
self.IP_MULTICAST_LOOP = ival_
elif nodeName_ == 'IP_TOS':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_IP_TOS(obj_)
elif nodeName_ == 'SO_BROADCAST':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_BROADCAST')
self.SO_BROADCAST = ival_
elif nodeName_ == 'SO_CONDITIONAL_ACCEPT':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_CONDITIONAL_ACCEPT')
self.SO_CONDITIONAL_ACCEPT = ival_
elif nodeName_ == 'SO_KEEPALIVE':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_KEEPALIVE')
self.SO_KEEPALIVE = ival_
elif nodeName_ == 'SO_DONTROUTE':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_DONTROUTE')
self.SO_DONTROUTE = ival_
elif nodeName_ == 'SO_LINGER':
obj_ = cybox_common.UnsignedIntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_SO_LINGER(obj_)
elif nodeName_ == 'SO_DONTLINGER':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_DONTLINGER')
self.SO_DONTLINGER = ival_
elif nodeName_ == 'SO_OOBINLINE':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_OOBINLINE')
self.SO_OOBINLINE = ival_
elif nodeName_ == 'SO_RCVBUF':
obj_ = cybox_common.UnsignedIntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_SO_RCVBUF(obj_)
elif nodeName_ == 'SO_GROUP_PRIORITY':
obj_ = cybox_common.UnsignedIntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_SO_GROUP_PRIORITY(obj_)
elif nodeName_ == 'SO_REUSEADDR':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_REUSEADDR')
self.SO_REUSEADDR = ival_
elif nodeName_ == 'SO_DEBUG':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'SO_DEBUG')
self.SO_DEBUG = ival_
elif nodeName_ == 'SO_RCVTIMEO':
obj_ = cybox_common.UnsignedIntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_SO_RCVTIMEO(obj_)
elif nodeName_ == 'SO_SNDBUF':
obj_ = cybox_common.UnsignedIntegerObjectPropertyType.factory()
obj_.build(child_)
| |
import click
import ast
import requests
import pandas as pd
import numpy as np
from pysradb.sraweb import SRAweb
import requests
import pysam
import re
import pyfastx
import pkg_resources # part of setuptools
import json
import warnings
import sys
import os
from .run_workflow import make_snakes
# Allows passing strings to CLI and eval as python objects
# From https://stackoverflow.com/questions/47631914/how-to-pass-several-list-of-arguments-to-click-option
# Had to add the "str(value)" part or default would throw ValueErrors.
class PythonLiteralOption(click.Option):
    """Click option whose string value is parsed as a Python literal.

    Lets the CLI accept strings such as "{'use_conda': True}" and evaluate
    them safely into Python objects with ast.literal_eval.
    """

    def type_cast_value(self, ctx, value):
        """Coerce *value* to str and parse it as a Python literal.

        Raises click.BadParameter when the text is not a valid literal.
        """
        try:
            return ast.literal_eval(str(value))
        except (ValueError, SyntaxError):
            # literal_eval raises SyntaxError (not only ValueError) on
            # malformed input such as "{'a': }"; catch both so the user
            # gets a clean CLI error instead of a traceback.
            raise click.BadParameter(value)
# Constants
AVAILABLE_MODES = [
"DRIP",
"DRIPc",
"qDRIP",
"sDRIP",
"ssDRIP",
"R-ChIP",
"RR-ChIP",
"RDIP",
"S1-DRIP",
"DRIVE",
"RNH-CnR",
"MapR",
"RNA-Seq"
]
SRA_URL = "https://www.ncbi.nlm.nih.gov/sra/"
SRA_COLS = [
"experiment",
"study_accession",
"experiment_title",
"experiment_accession",
"organism_taxid ",
"run_accession",
"library_layout",
"run_total_bases",
"run_total_spots",
]
redict = {
"fastq": "^.+\\.[fastq]+[\\.gz]*$|^.+\\.[fastq]+[\\.gz]*\\~.+\\.[fastq]+[\\.gz]*$",
"bam": "^.+\\.bam$",
"public": "^GSM[0-9]+$|^SRX[0-9]+$",
}
# __file__=os.path.abspath("../RLPipes/rlpipes/cli.py")
this_dir = os.path.dirname(__file__)
DATA_PATH = os.path.abspath(
os.path.join(this_dir, "src", "data", "available_genomes.tsv.xz")
)
GENSIZE_PATH = os.path.abspath(
os.path.join(this_dir, "src", "data", "eff_gen_size.tsv.xz")
)
SRC_DIR = os.path.abspath(os.path.join(this_dir, "src"))
N_BAM_READS_CHECK = 1000
# Set verion
__version__ = pkg_resources.require("rlpipes")[0].version
# Help text
snakeHelp = """
Dict of arguments passed to the snakemake python API. Default: "{'use_conda': True}".
Read the snakemake API reference for the full list of options.
"""
modeHelp = """
The type of sequencing (e.g., "DRIP"). The available options are currently:
DRIP, DRIPc, qDRIP, sDRIP, ssDRIP, R-ChIP, RR-ChIP, RDIP, S1-DRIP, DRIVE, RNH-CnR, and MapR
"""
groupHelp = """
Column(s) which identify biologically-meaningful grouping(s) of samples (i.e., conditions).
Can be any column name from the `samples` CSV file. If using public data accessions,
it may also include "study". NOTE: If --groupby is set and there R-loop-mapping and expression
samples within groups, expression-matched analysis will be run. This can be disabled with the --noexp flag.\n
Example #1: "RSeqCLI build outdir/ samples.csv --groupcols tissue"\n
samples.csv:\n
experiment, mode, tissue\n
\tGSM1720615, DRIP, NT2\n
\tGSM1720616, DRIP, NT2\n
\tGSM1720619, DRIP, K562\n
\n
Example #2: "RSeqCLI build outdir/ samples.csv --groupby tissue"\n
samples.csv:\n
experiment, mode, tissue\n
\tGSM1720615, DRIP, NT2\n
\tGSM1720616, DRIP, NT2\n
\tGSM1720613, DRIPc, NT2\n
\tGSM1720614, DRIPc, NT2\n
\tGSM1720622, RNA-seq, NT2\n
\tGSM1720623, RNA-seq, NT2\n
\n
"""
expHelp = """
If set, no expression-matched analysis will be performed.
"""
# Get the shared options
# From https://stackoverflow.com/questions/40182157/shared-options-and-flags-between-commands
# Options shared by the verify/run subcommands; attached via add_options().
verify_run_options = [
    click.option(
        "--smargs",
        "-s",
        cls=PythonLiteralOption,
        help=snakeHelp,
        default="{'use_conda': True}",
    ),
    click.option(
        "--threads", "-t", help="Number of threads to use. Default: 1", default=1
    ),
    click.option(
        "--bwamem2",
        is_flag=True,
        # NOTE(review): "avaialble" typo in the user-facing help string below;
        # fixing it would change CLI output, so it is left as-is here.
        help="Align with BWA-MEM2 instead of BWA. BWA MEM2 Needs > 70GB RAM avaialble to build index, but shows > 3x speed increase. Default: False.",
        default=False,
    ),
    click.option(
        "--macs3",
        help="Call peaks using macs3 instead of macs2",
        is_flag=True,
        default=False,
    ),
    click.option(
        "--groupby",
        "-G",
        help=groupHelp
    ),
    click.option(
        "--noexp",
        help=expHelp,
        is_flag=True,
        default=False
    ),
    click.option(
        "--noreport",
        help="If set, RSeq reports will not be generated.",
        is_flag=True,
        default=False
    ),
    click.option(
        "--debug",
        is_flag=True,
        help="Run pipeline on subsampled number of reads (for testing).",
        default=False,
    ),
    click.option(
        "--tsv",
        is_flag=True,
        help="Obtain config from config.tsv file instead of config.json.",
        default=False,
    ),
    click.option(
        "--useaws",
        is_flag=True,
        help="If set, prefetch from SRA tools will be used to download any public SRA data instead of AWS S3.",
        default=False,
    )
]
# Decorator factory that attaches a shared list of click options to a command.
def add_options(options):
    """Return a decorator applying every option in *options* to a function.

    Options are applied last-to-first so they stack in their declared order.
    """
    def _add_options(func):
        for opt in options[::-1]:
            func = opt(func)
        return func
    return _add_options
def validate_genome(ctx, param, value):
    """Click callback: ensure *value* is a known UCSC genome ID.

    None passes through untouched; any other value is checked against the
    bundled available_genomes table. Returns *value* unchanged.

    Raises click.BadParameter for unknown genome IDs.
    """
    if value is not None:
        available_genomes = pd.read_table(DATA_PATH)
        # Explicit membership test instead of try/assert: `assert` is
        # stripped under `python -O`, which would silently disable validation.
        if value not in available_genomes.UCSC_orgID.to_list():
            raise click.BadParameter(
                "'" + value + "' is not a valid UCSC genome ID (e.g., 'hg38' is valid)."
            )
    return value
def validate_mode(ctx, param, value):
    """Click callback: ensure *value* is one of AVAILABLE_MODES.

    None passes through untouched. Returns *value* unchanged.

    Raises click.BadParameter for unsupported modes.
    """
    if value is not None:
        # Explicit check instead of try/assert: `assert` is stripped under
        # `python -O`, which would silently disable this validation.
        if value not in AVAILABLE_MODES:
            raise click.BadParameter(
                "'"
                + value
                + "' is not a valid mode. (RSeqCLI build --help for more info)"
            )
    return value
def validate_run_dir(ctx, param, value):
    """Click callback: create RUN_DIR if needed and return its absolute path.

    Raises click.BadParameter when the path cannot be created or points at
    an existing non-directory.
    """
    try:
        os.makedirs(value, exist_ok=True)
    except FileNotFoundError:
        msg = (
            "'" + value + "' could not be created using `os.makedirs("
            + value + ", exist_ok=True)` please re-check this path."
        )
        raise click.BadParameter(msg)
    except FileExistsError:
        # The path exists but is a regular file, not a directory.
        msg = "RUN_DIR must be a directory. User supplied '" + value + "'"
        raise click.BadParameter(msg)
    return os.path.abspath(value)
def validate_run_dir_prepped(ctx, param, value):
    """Click callback: verify RUN_DIR was built (contains config.json).

    Returns the absolute path of *value*.

    Raises click.BadParameter when the directory or its config.json is missing.
    """
    config_path = os.path.join(value, "config.json")
    # Explicit check instead of try/assert: `assert` is stripped under
    # `python -O`, which would silently disable this validation.
    if not (os.path.exists(value) and os.path.exists(config_path)):
        raise click.BadParameter(
            "Configuration file '"
            + config_path
            + "' is not found. Have you run 'RSeqCLI build' yet?"
        )
    return os.path.abspath(value)
def bam_info(bamfile, n_bam_reads_check=1000):
    """Sample the first reads of a BAM file to infer layout and read length.

    Requires pysam.

    Parameters:
        bamfile: path to a BAM file.
        n_bam_reads_check: number of leading reads to sample (default 1000).

    Returns:
        dict with:
            paired_end: True when more than half of the sampled reads are paired.
            read_len: mean inferred read length over the sample.
    """
    # Temporarily silence htslib warnings (e.g. about a missing index).
    save = pysam.set_verbosity(0)
    samfile = pysam.AlignmentFile(bamfile, "rb")
    pysam.set_verbosity(save)
    try:
        num_paired = sum(x.is_paired for x in samfile.head(n=n_bam_reads_check))
        # NOTE(review): divides by the requested sample size, not the number of
        # reads actually returned, so read_len is underestimated for BAMs with
        # fewer than n_bam_reads_check reads (behavior preserved from original).
        read_len = (
            sum(x.infer_read_length() for x in samfile.head(n=n_bam_reads_check))
            // n_bam_reads_check
        )
    finally:
        # The original never closed the handle; always release it.
        samfile.close()
    return {"paired_end": num_paired > n_bam_reads_check / 2, "read_len": read_len}
def validate_samples(ctx, param, value):
"""Validate and wrangle sampels input"""
# value = "../RLPipes/tests/test_data/fq_test_samples_1.csv"
samps = pd.read_csv(value)
# First, check for matching pattern
exp = samps.experiment[0]
try:
samptype = [key for key, val in redict.items() if re.match(val, exp)][0]
except IndexError:
raise click.BadParameter(
message="Unable to detect data format for file " + exp
)
samps["file_type"] = samptype
# Wrangle controls if provided
if "control" in samps.columns:
controls = True
samps = pd.concat(
[
samps,
samps.assign(experiment=samps.control).assign(condition="Input").assign(control=pd.NA).dropna(subset=["experiment"]),
]
)
samps = samps.assign(
control=samps.control.apply(lambda x: pd.NA if pd.isna(x) else x)
).drop_duplicates()
else:
controls = False
samps["control"] = ""
if samptype == "public":
# Init SRAdb
db = SRAweb(os.environ.get("NCBI_API_KEY", None))
def getsra(x):
"""Except for unreachable accessions in SRA"""
try:
data=db.sra_metadata(x)
data['experiment'] = x
except SystemExit:
data=pd.DataFrame({
'experiment': x,
'study_accession': pd.NA,
'experiment_title': pd.NA,
'experiment_accession': pd.NA,
'organism_taxid ': pd.NA,
'run_accession': pd.NA,
'library_layout': pd.NA,
'run_total_bases': pd.NA,
'run_total_spots': pd.NA
}, index=[0])
return data
# Query the SRAdb and wrangle with original data
newSamps = pd.concat(
samps.experiment.progress_apply(lambda x: getsra(x)).values.tolist()
)[SRA_COLS]
# Drop NaNs
newSamps.dropna(subset=["experiment_accession"], inplace=True)
newSamps.dropna(subset=["run_total_bases"], inplace=True)
# Remove samples which have been retracted
newSamps = newSamps[newSamps['run_total_bases'] != '']
# Get the read length
newSamps = newSamps.astype(
{"run_total_bases": "int64", "run_total_spots": "int64"}
)
newSamps["read_length"] = newSamps.run_total_bases // newSamps.run_total_spots
# Get the latest genomes.
# From https://stackoverflow.com/questions/15705630/get-the-rows-which-have-the-max-value-in-groups-using-groupby
available_genome = pd.read_table(DATA_PATH)
latest_genomes = available_genome[
available_genome.groupby(axis=0, by=["taxId"])["year"].transform(max)
== available_genome["year"]
]
latest_genomes = latest_genomes.rename(columns={"taxId": "organism_taxid "})
newSamps["organism_taxid "] = newSamps["organism_taxid "].astype(np.int64)
# Necessary to avoid taxid conflict between sacCer2/3
newSamps.loc[newSamps["organism_taxid "] == 4932, "organism_taxid "] = 559292
newSamps = newSamps.set_index("organism_taxid ")
latest_genomes = latest_genomes.set_index("organism_taxid ")
newSamps = newSamps.join(latest_genomes, how="left")
newSamps = (
newSamps[
[
"experiment",
"study_accession",
"experiment_title",
"experiment_accession",
"run_accession",
"library_layout",
"UCSC_orgID",
"read_length",
]
]
.rename(
columns={
"experiment": "experiment_original",
"study_accession": "study",
"experiment_title": "name",
"library_layout": "paired_end",
"experiment_accession": "experiment",
"run_accession": "run",
"UCSC_orgID": "genome",
}
)
)
# Set paired end
newSamps["paired_end"] = newSamps["paired_end"] == "PAIRED"
# Get srx to orig mapping
srx_to_orig=newSamps[['experiment', 'experiment_original']]
# Set index as exp orig
newSamps=newSamps.set_index("experiment_original")
if "genome" in samps.columns:
newSamps = newSamps.drop("genome", axis=1)
# Finally, join the dataframes by experiment...
samps['experiment_original'] = samps['experiment']
samps['control_original'] = samps['control']
samps=samps.drop('experiment', axis=1).drop('control', axis=1)
samps=pd.merge(
samps,
newSamps,
on = "experiment_original"
).drop_duplicates()
# And control
srx_to_origctr=srx_to_orig.rename(
columns={
"experiment": "control",
"experiment_original": "control_original",
}
)
samps=pd.merge(
samps,
srx_to_origctr,
how="left",
on = "control_original"
).drop_duplicates()
samps = samps.assign(
control=samps.control.apply(lambda x: pd.NA if pd.isna(x) else x)
).drop_duplicates()
else:
if samptype == "bam":
# Check which are paired-end
samps["paired_end"] = [
bam_info(bam, N_BAM_READS_CHECK)["paired_end"]
for bam in samps["experiment"]
]
samps["read_length"] = [
bam_info(bam, N_BAM_READS_CHECK)["read_len"] for bam in samps["experiment"]
]
samps["name"] = [
os.path.splitext(os.path.basename(exp))[0] for exp in samps["experiment"]
]
if controls:
samps["control"] = [
os.path.splitext(os.path.basename(exp))[0] if not pd.isna(exp) else exp
for exp in samps["control"]
]
else:
samps["control"] = ""
samps["run"] = [os.path.abspath(bam) for bam in samps["experiment"]]
elif samptype == "fastq":
# Check which are paired-end
samps["paired_end"] = [bool(re.match(".+\\~.+", exp)) for exp in samps["experiment"]]
def get_readlen(fq, lines=500):
"""
Get Read Length from a fastq file
params:
fq: Path to a FASTQ file
line: Number of lines to scan. Default: 500
"""
seqlst=[]
for name,seq,qual in pyfastx.Fastq(fq, build_index=False):
seqlst.append(seq)
if len(seqlst) > lines:
break
toavg = [len(x) for x in seqlst]
return round(sum(toavg) / len(toavg))
samps["read_length"] = [
get_readlen(re.sub('\\~.+', "", exp )) for exp in samps["experiment"]
]
def get_samplename(fq):
splt = os.path.splitext(os.path.basename(fq))
if splt[1] == ".gz":
nm=os.path.splitext(splt[0])[0]
else:
nm=splt[0]
return re.sub('[\\._]{1}[R1-2]+$', "", nm)
samps["name"] = [
get_samplename(re.sub('\\~.+', "", exp)) for exp in samps["experiment"]
]
if controls:
samps["control"] = [
get_samplename(re.sub('\\~.+', "", exp)) if not pd.isna(exp) else exp
for exp in samps["control"]
]
else:
samps["control"] = ""
def get_fq_path(fq, pe):
if | |
headers = dingtalkyida__1__0_models.GetPlatformResourceHeaders()
return await self.get_platform_resource_with_options_async(request, headers, runtime)
    def get_platform_resource_with_options(
        self,
        request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
        headers: dingtalkyida__1__0_models.GetPlatformResourceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
        """GET /v1.0/yida/apps/platformResources.

        Generated OpenAPI wrapper: validates the request model, copies only
        the populated request fields into the query string, forwards caller
        headers plus the DingTalk access token, and maps the raw reply onto
        the response model.
        """
        UtilClient.validate_model(request)
        query = {}
        # Only fields that are actually set end up in the query string.
        if not UtilClient.is_unset(request.instance_id):
            query['instanceId'] = request.instance_id
        if not UtilClient.is_unset(request.access_key):
            query['accessKey'] = request.access_key
        if not UtilClient.is_unset(request.caller_uid):
            query['callerUid'] = request.caller_uid
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetPlatformResourceResponse(),
            self.do_roarequest('GetPlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/platformResources', 'json', req, runtime)
        )
    async def get_platform_resource_with_options_async(
        self,
        request: dingtalkyida__1__0_models.GetPlatformResourceRequest,
        headers: dingtalkyida__1__0_models.GetPlatformResourceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.GetPlatformResourceResponse:
        """Async variant of get_platform_resource_with_options.

        GET /v1.0/yida/apps/platformResources; identical request building,
        awaiting the async transport instead.
        """
        UtilClient.validate_model(request)
        query = {}
        # Only fields that are actually set end up in the query string.
        if not UtilClient.is_unset(request.instance_id):
            query['instanceId'] = request.instance_id
        if not UtilClient.is_unset(request.access_key):
            query['accessKey'] = request.access_key
        if not UtilClient.is_unset(request.caller_uid):
            query['callerUid'] = request.caller_uid
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.GetPlatformResourceResponse(),
            await self.do_roarequest_async('GetPlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/platformResources', 'json', req, runtime)
        )
def list_connector_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListConnectorInformationHeaders()
return self.list_connector_information_with_options(instance_id, request, headers, runtime)
async def list_connector_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListConnectorInformationHeaders()
return await self.list_connector_information_with_options_async(instance_id, request, headers, runtime)
    def list_connector_information_with_options(
        self,
        instance_id: str,
        request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
        headers: dingtalkyida__1__0_models.ListConnectorInformationHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
        """GET /v1.0/yida/plugins/infos/{instance_id} (paged).

        Generated OpenAPI wrapper: validates the request model, copies only
        the populated request fields into the query string, forwards caller
        headers plus the DingTalk access token, and maps the raw reply onto
        the response model.
        """
        UtilClient.validate_model(request)
        query = {}
        # Only fields that are actually set end up in the query string.
        if not UtilClient.is_unset(request.access_key):
            query['accessKey'] = request.access_key
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.caller_uid):
            query['callerUid'] = request.caller_uid
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.ListConnectorInformationResponse(),
            self.do_roarequest('ListConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/plugins/infos/{instance_id}', 'json', req, runtime)
        )
    async def list_connector_information_with_options_async(
        self,
        instance_id: str,
        request: dingtalkyida__1__0_models.ListConnectorInformationRequest,
        headers: dingtalkyida__1__0_models.ListConnectorInformationHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.ListConnectorInformationResponse:
        """Async variant of list_connector_information_with_options.

        GET /v1.0/yida/plugins/infos/{instance_id}; identical request
        building, awaiting the async transport instead.
        """
        UtilClient.validate_model(request)
        query = {}
        # Only fields that are actually set end up in the query string.
        if not UtilClient.is_unset(request.access_key):
            query['accessKey'] = request.access_key
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.caller_uid):
            query['callerUid'] = request.caller_uid
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.ListConnectorInformationResponse(),
            await self.do_roarequest_async('ListConnectorInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/plugins/infos/{instance_id}', 'json', req, runtime)
        )
def register_accounts(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RegisterAccountsHeaders()
return self.register_accounts_with_options(request, headers, runtime)
async def register_accounts_async(
self,
request: dingtalkyida__1__0_models.RegisterAccountsRequest,
) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RegisterAccountsHeaders()
return await self.register_accounts_with_options_async(request, headers, runtime)
    def register_accounts_with_options(
        self,
        request: dingtalkyida__1__0_models.RegisterAccountsRequest,
        headers: dingtalkyida__1__0_models.RegisterAccountsHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
        """POST /v1.0/yida/applicationAuthorizations/accounts/register.

        Generated OpenAPI wrapper: validates the request model, copies only
        the populated request fields into the JSON body, forwards caller
        headers plus the DingTalk access token, and maps the raw reply onto
        the response model.
        """
        UtilClient.validate_model(request)
        body = {}
        # Only fields that are actually set end up in the request body.
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.access_key):
            body['accessKey'] = request.access_key
        if not UtilClient.is_unset(request.active_code):
            body['activeCode'] = request.active_code
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.RegisterAccountsResponse(),
            self.do_roarequest('RegisterAccounts', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/accounts/register', 'json', req, runtime)
        )
    async def register_accounts_with_options_async(
        self,
        request: dingtalkyida__1__0_models.RegisterAccountsRequest,
        headers: dingtalkyida__1__0_models.RegisterAccountsHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkyida__1__0_models.RegisterAccountsResponse:
        """Async variant of register_accounts_with_options.

        POST /v1.0/yida/applicationAuthorizations/accounts/register; identical
        request building, awaiting the async transport instead.
        """
        UtilClient.validate_model(request)
        body = {}
        # Only fields that are actually set end up in the request body.
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.access_key):
            body['accessKey'] = request.access_key
        if not UtilClient.is_unset(request.active_code):
            body['activeCode'] = request.active_code
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkyida__1__0_models.RegisterAccountsResponse(),
            await self.do_roarequest_async('RegisterAccounts', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/accounts/register', 'json', req, runtime)
        )
def get_notify_me(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetNotifyMeHeaders()
return self.get_notify_me_with_options(user_id, request, headers, runtime)
async def get_notify_me_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetNotifyMeHeaders()
return await self.get_notify_me_with_options_async(user_id, request, headers, runtime)
def get_notify_me_with_options(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
headers: dingtalkyida__1__0_models.GetNotifyMeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.instance_create_from_time_gmt):
query['instanceCreateFromTimeGMT'] = request.instance_create_from_time_gmt
if not UtilClient.is_unset(request.instance_create_to_time_gmt):
query['instanceCreateToTimeGMT'] = request.instance_create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetNotifyMeResponse(),
self.do_roarequest('GetNotifyMe', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpNotifications/{user_id}', 'json', req, runtime)
)
async def get_notify_me_with_options_async(
self,
user_id: str,
request: dingtalkyida__1__0_models.GetNotifyMeRequest,
headers: dingtalkyida__1__0_models.GetNotifyMeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetNotifyMeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.token):
query['token'] = request.token
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.app_types):
query['appTypes'] = request.app_types
if not UtilClient.is_unset(request.process_codes):
query['processCodes'] = request.process_codes
if not UtilClient.is_unset(request.instance_create_from_time_gmt):
query['instanceCreateFromTimeGMT'] = request.instance_create_from_time_gmt
if not UtilClient.is_unset(request.instance_create_to_time_gmt):
query['instanceCreateToTimeGMT'] = request.instance_create_to_time_gmt
if not UtilClient.is_unset(request.create_from_time_gmt):
query['createFromTimeGMT'] = request.create_from_time_gmt
if not UtilClient.is_unset(request.create_to_time_gmt):
query['createToTimeGMT'] = request.create_to_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetNotifyMeResponse(),
await self.do_roarequest_async('GetNotifyMe', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/corpNotifications/{user_id}', 'json', req, runtime)
)
def expire_commodity(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExpireCommodityHeaders()
return self.expire_commodity_with_options(request, headers, runtime)
async def expire_commodity_async(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExpireCommodityHeaders()
return await self.expire_commodity_with_options_async(request, headers, runtime)
def expire_commodity_with_options(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
headers: dingtalkyida__1__0_models.ExpireCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExpireCommodityResponse(),
self.do_roarequest('ExpireCommodity', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/appAuth/commodities/expire', 'json', req, runtime)
)
async def expire_commodity_with_options_async(
self,
request: dingtalkyida__1__0_models.ExpireCommodityRequest,
headers: dingtalkyida__1__0_models.ExpireCommodityHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExpireCommodityResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExpireCommodityResponse(),
await self.do_roarequest_async('ExpireCommodity', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/appAuth/commodities/expire', 'json', req, runtime)
)
def get_instance_by_id(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceByIdHeaders()
return self.get_instance_by_id_with_options(id, request, headers, runtime)
async def get_instance_by_id_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetInstanceByIdHeaders()
return await self.get_instance_by_id_with_options_async(id, request, headers, runtime)
def get_instance_by_id_with_options(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
headers: dingtalkyida__1__0_models.GetInstanceByIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceByIdResponse(),
self.do_roarequest('GetInstanceById', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instancesInfos/{id}', 'json', req, runtime)
)
async def get_instance_by_id_with_options_async(
self,
id: str,
request: dingtalkyida__1__0_models.GetInstanceByIdRequest,
headers: dingtalkyida__1__0_models.GetInstanceByIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetInstanceByIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetInstanceByIdResponse(),
await self.do_roarequest_async('GetInstanceById', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/instancesInfos/{id}', 'json', req, runtime)
)
def redirect_task(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RedirectTaskHeaders()
return self.redirect_task_with_options(request, headers, runtime)
async def redirect_task_async(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RedirectTaskHeaders()
return await self.redirect_task_with_options_async(request, headers, runtime)
def redirect_task_with_options(
self,
request: dingtalkyida__1__0_models.RedirectTaskRequest,
headers: dingtalkyida__1__0_models.RedirectTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RedirectTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.by_manager):
body['byManager'] = request.by_manager
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not | |
'%d%d' % (m, i)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (i, m)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (j, m)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (m, j)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (k, m)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (m, k)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (l, m)
if is_prime(int(chk)) is False:
continue
chk = '%d%d' % (m, l)
if is_prime(int(chk)) is False:
continue
print('[60]: ', (i + j + k + l + m))
return
return
'''
Problem 61
Triangle, square, pentagonal, hexagonal, heptagonal, and octagonal numbers are all figurate (polygonal) numbers and are generated by the following formulae:
Triangle
P3,n=n(n+1)/2
1, 3, 6, 10, 15, ...
Square
P4,n=n2
1, 4, 9, 16, 25, ...
Pentagonal
P5,n=n(3n−1)/2
1, 5, 12, 22, 35, ...
Hexagonal
P6,n=n(2n−1)
1, 6, 15, 28, 45, ...
Heptagonal
P7,n=n(5n−3)/2
1, 7, 18, 34, 55, ...
Octagonal
P8,n=n(3n−2)
1, 8, 21, 40, 65, ...
The ordered set of three 4-digit numbers: 8128, 2882, 8281,
has three interesting properties.
The set is cyclic, in that the last two digits of each number
is the first two digits of the next number
(including the last number with the first).
Each polygonal type:
triangle (P3,127=8128),
square (P4, 91=8281),
pentagonal (P5 ,44=2882),
is represented by a different number in the set.
This is the only set of 4-digit numbers with this property.
Find the sum of the only ordered set of six cyclic 4-digit numbers for which each polygonal type:
triangle, square, pentagonal, hexagonal, heptagonal, and octagonal,
is represented by a different number in the set.
'''
def p61():
    """Project Euler 61: find the unique ordered set of six cyclic 4-digit
    numbers in which each polygonal type (triangle..octagonal) appears once.

    Prints the chain and its sum, and returns the sum (28684), or None if
    no chain exists.  Self-contained: generates the figurate numbers
    directly instead of testing every 4-digit number with is_* helpers,
    and replaces the original six-deep copy-pasted loop nest with a DFS.
    """
    def _polygonal_set(s):
        # 4-digit values of P_{s,n} = ((s-2)n^2 - (s-4)n) / 2.
        vals = set()
        n = 1
        while True:
            p = ((s - 2) * n * n - (s - 4) * n) // 2
            if p >= 10000:
                return vals
            if p >= 1000:
                vals.add(p)
            n += 1

    sets = {s: _polygonal_set(s) for s in range(3, 9)}

    def _extend(chain, used):
        # Depth-first: extend the chain with a number from an unused type
        # whose first two digits match the last two of the chain's tail.
        if len(chain) == 6:
            return chain if chain[-1] % 100 == chain[0] // 100 else None
        prefix = chain[-1] % 100
        for s, vals in sets.items():
            if s in used:
                continue
            for v in vals:
                if v // 100 == prefix:
                    found = _extend(chain + [v], used | {s})
                    if found:
                        return found
        return None

    # Anchor the search on the octagonal numbers (the smallest set).
    for start in sorted(sets[8]):
        found = _extend([start], {8})
        if found:
            print(*found)
            total = sum(found)
            print('[61]: ', total)
            return total
    return None
'''
Problem 62
The cube, 41063625 (3453), can be permuted to produce two other cubes: 56623104 (3843) and 66430125 (4053).
In fact, 41063625 is the smallest cube which has exactly three permutations of its digits which are also cube.
Find the smallest cube for which exactly five permutations of its digits are cube.
'''
def p62():
    """Project Euler 62: smallest cube with exactly five digit-permutation
    cubes among the cubes of 1..10000.

    Groups cubes by their sorted digit string (permutations share a key),
    then takes the minimum cube over all families of size exactly five.
    Prints and returns the answer (127035954683).
    """
    families = {}
    for n in range(1, 10001):
        cube = n ** 3
        key = ''.join(sorted(str(cube)))  # canonical form of the digit multiset
        families.setdefault(key, []).append(cube)
    smallest = min(min(cubes) for cubes in families.values() if len(cubes) == 5)
    print('[62]: ', smallest)
    return smallest
'''
Problem 63
The 5-digit number, 16807=7^5, is also a fifth power.
Similarly, the 9-digit number, 134217728=8^9, is a ninth power.
How many n-digit positive integers exist which are also an nth power?
'''
def p63():
    """Project Euler 63: count n-digit positive integers that are nth powers.

    For base >= 10 the power always has more than n digits, so bases 1..9
    suffice; for each base the digit count of base**n falls behind n
    permanently once it does at all, so a simple while loop works (no need
    for itertools.count).  Prints and returns the total (49).
    """
    total = 0
    for base in range(1, 10):
        power = 1
        while len(str(base ** power)) == power:
            total += 1
            power += 1
    print('[63]: ', total)
    return total
'''
Problem 64
All square roots are periodic when written as continued fractions
and can be written in the form:
It can be seen that the sequence is repeating.
For conciseness, we use the notation √23 = [4;(1,3,1,8)],
to indicate that the block (1,3,1,8) repeats indefinitely.
The first ten continued fraction representations of (irrational) square roots are:
√2=[1;(2)], period=1
√3=[1;(1,2)], period=2
√5=[2;(4)], period=1
√6=[2;(2,4)], period=2
√7=[2;(1,1,1,4)], period=4
√8=[2;(1,4)], period=2
√10=[3;(6)], period=1
√11=[3;(3,6)], period=2
√12= [3;(2,6)], period=2
√13=[3;(1,1,1,1,6)], period=5 <-- odd period
Exactly four continued fractions, for N ≤ 13, have an odd period.
How many continued fractions for N ≤ 10000 have an odd period?
'''
def continued_frac_duration(S):
    """Return 1 if the continued-fraction period of sqrt(S) is odd, else 0.

    Bug fix: the original returned -1 for perfect squares, which is truthy,
    so p64's `if continued_frac_duration(i)` counted the 100 perfect squares
    below 10001 as odd periods (1422 instead of the correct 1322).  Perfect
    squares have no periodic expansion and now yield 0.

    Also switched from float sqrt to exact integer arithmetic (math.isqrt,
    Python 3.8+), avoiding any float-precision risk for large S.
    """
    from math import isqrt
    a0 = isqrt(S)
    if a0 * a0 == S:
        return 0  # perfect square: no period, and falsy for callers
    # Standard integer algorithm for the periodic continued fraction of
    # sqrt(S): iterate (m, d, a) until d returns to 1, counting steps.
    m, d, a = 0, 1, a0
    period = 0
    while period == 0 or d != 1:
        m = d * a - m
        d = (S - m * m) // d
        a = (a0 + m) // d
        period += 1
    return period % 2
def p64():
    """Project Euler 64: count N <= 10000 whose sqrt continued fraction
    has an odd period (continued_frac_duration returns a truthy value)."""
    odd_periods = sum(1 for n in range(1, 10001) if continued_frac_duration(n))
    print(odd_periods)
'''
Problem 65
The square root of 2 can be written as an infinite continued fraction.
2 + ...
The infinite continued fraction can be written, √2 = [1;(2)], (2) indicates that 2 repeats ad infinitum. In a similar way, √23 = [4;(1,3,1,8)].
It turns out that the sequence of partial values of continued fractions for square roots provide the best rational approximations. Let us consider the convergents for √2.
Hence the sequence of the first ten convergents for √2 are:
1, 3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408, 1393/985, 3363/2378, ...
What is most surprising is that the important mathematical constant,
e = [2; 1,2,1, 1,4,1, 1,6,1 , ... , 1,2k,1, ...].
The first ten terms in the sequence of convergents for e are:
2, 3, 8/3, 11/4, 19/7, 87/32, 106/39, 193/71, 1264/465, 1457/536, ...
The sum of digits in the numerator of the 10th convergent is 1+4+5+7=17.
Find the sum of digits in the numerator of the 100th convergent of the continued fraction for e.
'''
def p65():
    """Project Euler 65: digit sum of the numerator of the 100th convergent
    of the continued fraction for e.

    e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]: a_1 = 2 and, for k >= 2,
    a_k = 2k/3 when k is divisible by 3, else 1.  Numerators follow the
    standard recurrence h_k = a_k * h_{k-1} + h_{k-2}, which replaces the
    original opaque three-slot ring-buffer bookkeeping.  Prints and returns
    the digit sum (272).
    """
    h_prev2, h_prev1 = 1, 2  # h_0 = 1 (convention), h_1 = a_1 = 2
    for k in range(2, 101):
        a = 2 * k // 3 if k % 3 == 0 else 1
        h_prev2, h_prev1 = h_prev1, a * h_prev1 + h_prev2
    digit_sum = sum(int(c) for c in str(h_prev1))
    print('[65]: ', digit_sum)
    return digit_sum
'''
Problem 66
Consider quadratic Diophantine equations of the form:
x2 – Dy2 = 1
For example, when D=13, the minimal solution in x is 6492 – 13×1802 = 1.
It can be assumed that there are no solutions in positive integers when D is square.
By finding minimal solutions in x for D = {2, 3, 5, 6, 7}, we obtain the following:
3^2 – 2×2^2 = 1
2^2 – 3×1^2 = 1
9^2 – 5×4^2 = 1
5^2 – 6×2^2 = 1
8^2 – 7×3^2 = 1
Hence, by considering minimal solutions in x for D ≤ 7, the largest x is obtained when D=5.
Find the value of D ≤ 1000 in minimal solutions of x for which the largest value of x is obtained.
'''
# http://mathworld.wolfram.com/FloorFunction.html
# https://github.com/JonSeijo/pell-equation-solver/blob/master/pell.py
# TODO: Maximum recursion depth | |
<gh_stars>0
non_sparse_consts = [
{"algorithm": "zlib",
"name": "zinflate_lengthStarts",
"size": "L",
"array": [ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258 ]},
{"algorithm": "zlib",
"name": "zinflate_lengthExtraBits",
"size": "L",
"array": [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 ]},
{"algorithm": "zlib",
"name": "zinflate_distanceStarts",
"size": "L",
"array": [ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577 ]},
{"algorithm": "zlib",
"name": "zinflate_distanceExtraBits",
"size": "L",
"array": [ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ]},
{"algorithm": "zlib",
"name": "zdeflate_lengthCodes",
"size": "L",
"array": [ 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 ]},
{"algorithm": "DES",
"name": "DES_ip",
"size": "B",
"array": [ 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 ]},
{"algorithm": "DES",
"name": "DES_fp",
"size": "B",
"array": [ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 ]},
{"algorithm": "DES",
"name": "DES_ei",
"size": "B",
"array": [ 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11,
12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21,
22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 ]},
{"algorithm": "DES",
"name": "DES_sbox1",
"size": "B",
"array": [ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 ]},
{"algorithm": "DES",
"name": "DES_sbox2",
"size": "B",
"array": [ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 ]},
{"algorithm": "DES",
"name": "DES_sbox3",
"size": "B",
"array": [ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 ]},
{"algorithm": "DES",
"name": "DES_sbox4",
"size": "B",
"array": [ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 ]},
{"algorithm": "DES",
"name": "DES_sbox5",
"size": "B",
"array": [ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 ]},
{"algorithm": "DES",
"name": "DES_sbox6",
"size": "B",
"array": [ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 ]},
{"algorithm": "DES",
"name": "DES_sbox7",
"size": "B",
"array": [ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 ]},
{"algorithm": "DES",
"name": "DES_sbox8",
"size": "B",
"array": [ 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 ]},
{"algorithm": "DES",
"name": "DES_p32i",
"size": "B",
"array": [ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10,
2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 | |
<gh_stars>0
# Copyright (c) 2018 NVIDIA Corporation
"""
RNN-based encoders
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from OpenSeq2Seq.open_seq2seq.parts.rnns.utils import single_cell
from .encoder import Encoder
class UnidirectionalRNNEncoderWithEmbedding(Encoder):
  """
  Uni-directional RNN encoder with embeddings.
  Can support various RNN cell types.
  """
  @staticmethod
  def get_required_params():
    # Config keys that must be present, merged with the base Encoder's.
    return dict(Encoder.get_required_params(), **{
      'src_vocab_size': int,
      'src_emb_size': int,
      'core_cell': None,
      'core_cell_params': dict,
      'encoder_layers': int,
      'encoder_use_skip_connections': bool,
    })
  @staticmethod
  def get_optional_params():
    # Optional config keys, merged with the base Encoder's.
    # NOTE(review): the dp keep probs are read with [] in _encode when
    # mode == 'train', so they are effectively required for training.
    return dict(Encoder.get_optional_params(), **{
      'encoder_dp_input_keep_prob': float,
      'encoder_dp_output_keep_prob': float,
      'time_major': bool,
      'use_swap_memory': bool,
      'proj_size': int,
      'num_groups': int,
    })
  def __init__(self, params, model,
               name="unidir_rnn_encoder_with_emb", mode='train'):
    """Initializes uni-directional encoder with embeddings.
    Args:
      params (dict): dictionary with encoder parameters
        Must define:
        * src_vocab_size - data vocabulary size
        * src_emb_size - size of embedding to use
        * core_cell - RNN cell class to use
        * core_cell_params - dict of parameters passed to the cell
        * encoder_layers - number of layers
        * encoder_dp_input_keep_prob -
        * encoder_dp_output_keep_prob -
        * encoder_use_skip_connections - true/false
        * time_major (optional)
        * use_swap_memory (optional)
        * mode - train or infer
        ... add any cell-specific parameters here as well
    """
    super(UnidirectionalRNNEncoderWithEmbedding, self).__init__(
      params,
      model,
      name=name,
      mode=mode,
    )
    self._src_vocab_size = self.params['src_vocab_size']
    self._src_emb_size = self.params['src_emb_size']
    # Both are created lazily inside _encode() (graph-construction time).
    self._enc_emb_w = None
    self._encoder_cell_fw = None
  def _encode(self, input_dict):
    """Encodes data into representation.
    Args:
      input_dict: a Python dictionary.
        Must define:
          * source_tensors - a pair (src_inputs, src_lengths) where
            src_inputs is a Tensor of shape [batch_size, time] or
            [time, batch_size]
            (depending on time_major param)
            and src_lengths is a Tensor of shape [batch_size]
    Returns:
      a Python dictionary with:
      * outputs - a Tensor of shape
                  [batch_size, time, representation_dim]
                  or [time, batch_size, representation_dim]
      * state - final RNN state (structure depends on the cell type)
      * src_lengths - (copy ref from input) a Tensor of shape [batch_size]
      * encoder_input - (copy ref from input) the source token ids
    """
    # TODO: make a separate level of config for cell_params?
    source_sequence = input_dict['source_tensors'][0]
    source_length = input_dict['source_tensors'][1]
    # Embedding matrix is always stored in float32; the lookup result is
    # cast to the configured dtype below.
    self._enc_emb_w = tf.get_variable(
      name="EncoderEmbeddingMatrix",
      shape=[self._src_vocab_size, self._src_emb_size],
      dtype=tf.float32,
    )
    # Dropout is only applied during training.
    if self._mode == "train":
      dp_input_keep_prob = self.params['encoder_dp_input_keep_prob']
      dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
    else:
      dp_input_keep_prob = 1.0
      dp_output_keep_prob = 1.0
    # One cell per layer, each wrapped with dropout / residual connections.
    fwd_cells = [
      single_cell(
        cell_class=self.params['core_cell'],
        cell_params=self.params.get('core_cell_params', {}),
        dp_input_keep_prob=dp_input_keep_prob,
        dp_output_keep_prob=dp_output_keep_prob,
        residual_connections=self.params['encoder_use_skip_connections']
      ) for _ in range(self.params['encoder_layers'])
    ]
    # pylint: disable=no-member
    self._encoder_cell_fw = tf.contrib.rnn.MultiRNNCell(fwd_cells)
    time_major = self.params.get("time_major", False)
    use_swap_memory = self.params.get("use_swap_memory", False)
    embedded_inputs = tf.cast(
      tf.nn.embedding_lookup(
        self.enc_emb_w,
        source_sequence,
      ),
      self.params['dtype'],
    )
    encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
      cell=self._encoder_cell_fw,
      inputs=embedded_inputs,
      sequence_length=source_length,
      time_major=time_major,
      swap_memory=use_swap_memory,
      dtype=embedded_inputs.dtype,
    )
    return {'outputs': encoder_outputs,
            'state': encoder_state,
            'src_lengths': source_length,
            'encoder_input': source_sequence}
  @property
  def src_vocab_size(self):
    # Size of the source vocabulary (rows of the embedding matrix).
    return self._src_vocab_size
  @property
  def src_emb_size(self):
    # Embedding dimensionality (columns of the embedding matrix).
    return self._src_emb_size
  @property
  def enc_emb_w(self):
    # Embedding variable; None until _encode() has been called.
    return self._enc_emb_w
class BidirectionalRNNEncoderWithEmbedding(Encoder):
"""
Bi-directional RNN-based encoder with embeddings.
Can support various RNN cell types.
"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'src_vocab_size': int,
'src_emb_size': int,
'encoder_layers': int,
'encoder_use_skip_connections': bool,
'core_cell': None,
'core_cell_params': dict,
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'encoder_dp_input_keep_prob': float,
'encoder_dp_output_keep_prob': float,
'time_major': bool,
'use_swap_memory': bool,
'proj_size': int,
'num_groups': int,
})
def __init__(self, params, model,
name="bidir_rnn_encoder_with_emb", mode='train'):
"""Initializes bi-directional encoder with embeddings.
Args:
params (dict): dictionary with encoder parameters
Must define:
* src_vocab_size - data vocabulary size
* src_emb_size - size of embedding to use
* encoder_cell_units - number of units in RNN cell
* encoder_cell_type - cell type: lstm, gru, etc.
* encoder_layers - number of layers
* encoder_dp_input_keep_prob -
* encoder_dp_output_keep_prob -
* encoder_use_skip_connections - true/false
* time_major (optional)
* use_swap_memory (optional)
* mode - train or infer
... add any cell-specific parameters here as well
Returns:
encoder_params
"""
super(BidirectionalRNNEncoderWithEmbedding, self).__init__(
params, model, name=name, mode=mode,
)
self._src_vocab_size = self.params['src_vocab_size']
self._src_emb_size = self.params['src_emb_size']
self._enc_emb_w = None
self._encoder_cell_fw = None
self._encoder_cell_bw = None
def _encode(self, input_dict):
"""Encodes data into representation.
Args:
input_dict: a Python dictionary.
Must define:
*src_inputs - a Tensor of shape [batch_size, time] or
[time, batch_size]
(depending on time_major param)
* src_lengths - a Tensor of shape [batch_size]
Returns:
a Python dictionary with:
* encoder_outputs - a Tensor of shape
[batch_size, time, representation_dim]
or [time, batch_size, representation_dim]
* encoder_state - a Tensor of shape [batch_size, dim]
* src_lengths - (copy ref from input) a Tensor of shape [batch_size]
"""
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
time_major = self.params.get("time_major", False)
use_swap_memory = self.params.get("use_swap_memory", False)
self._enc_emb_w = tf.get_variable(
name="EncoderEmbeddingMatrix",
shape=[self._src_vocab_size, self._src_emb_size],
dtype=tf.float32
)
if self._mode == "train":
dp_input_keep_prob = self.params['encoder_dp_input_keep_prob']
dp_output_keep_prob = self.params['encoder_dp_output_keep_prob']
else:
dp_input_keep_prob = 1.0
dp_output_keep_prob = 1.0
fwd_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
residual_connections=self.params['encoder_use_skip_connections'],
) for _ in range(self.params['encoder_layers'])
]
bwd_cells = [
single_cell(
cell_class=self.params['core_cell'],
cell_params=self.params.get('core_cell_params', {}),
dp_input_keep_prob=dp_input_keep_prob,
dp_output_keep_prob=dp_output_keep_prob,
residual_connections=self.params['encoder_use_skip_connections'],
) for _ in range(self.params['encoder_layers'])
]
with tf.variable_scope("FW"):
# pylint: disable=no-member
self._encoder_cell_fw = tf.contrib.rnn.MultiRNNCell(fwd_cells)
with tf.variable_scope("BW"):
# pylint: disable=no-member
self._encoder_cell_bw = tf.contrib.rnn.MultiRNNCell(bwd_cells)
embedded_inputs = tf.cast(
tf.nn.embedding_lookup(
self.enc_emb_w,
source_sequence,
),
self.params['dtype']
)
encoder_output, encoder_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self._encoder_cell_fw,
cell_bw=self._encoder_cell_bw,
inputs=embedded_inputs,
sequence_length=source_length,
time_major=time_major,
swap_memory=use_swap_memory,
dtype=embedded_inputs.dtype,
)
encoder_outputs = tf.concat(encoder_output, 2)
return {'outputs': encoder_outputs,
'state': encoder_state,
'src_lengths': source_length,
'encoder_input': source_sequence}
@property
def src_vocab_size(self):
    """Size of the source vocabulary (first dimension of the embedding matrix)."""
    return self._src_vocab_size
@property
def src_emb_size(self):
    """Dimensionality of the source embeddings (second dimension of the embedding matrix)."""
    return self._src_emb_size
@property
def enc_emb_w(self):
    """Encoder embedding matrix variable; None until _encode() creates it."""
    return self._enc_emb_w
class GNMTLikeEncoderWithEmbedding(Encoder):
    """
    Encoder similar to the one used in
    GNMT model: https://arxiv.org/abs/1609.08144.
    Must have at least 2 layers: one bidirectional layer followed by a
    stack of unidirectional layers with residual connections from the
    third layer onwards.
    """

    @staticmethod
    def get_required_params():
        return dict(Encoder.get_required_params(), **{
            'src_vocab_size': int,
            'src_emb_size': int,
            'core_cell': None,
            'core_cell_params': dict,
            'encoder_layers': int,
            'encoder_use_skip_connections': bool,
        })

    @staticmethod
    def get_optional_params():
        return dict(Encoder.get_optional_params(), **{
            'encoder_dp_input_keep_prob': float,
            'encoder_dp_output_keep_prob': float,
            'time_major': bool,
            'use_swap_memory': bool,
            'proj_size': int,
            'num_groups': int,
        })

    def __init__(self, params, model,
                 name="gnmt_encoder_with_emb", mode='train'):
        """Encodes data into representation.

        Args:
          params (dict): a Python dictionary.
            Must define:
              * src_inputs - a Tensor of shape [batch_size, time] or
                [time, batch_size] (depending on time_major param)
              * src_lengths - a Tensor of shape [batch_size]

        Returns:
          a Python dictionary with:
            * encoder_outputs - a Tensor of shape
              [batch_size, time, representation_dim]
              or [time, batch_size, representation_dim]
            * encoder_state - a Tensor of shape [batch_size, dim]
            * src_lengths - (copy ref from input) a Tensor of shape [batch_size]
        """
        super(GNMTLikeEncoderWithEmbedding, self).__init__(
            params, model, name=name, mode=mode,
        )
        self._src_vocab_size = self.params['src_vocab_size']
        self._src_emb_size = self.params['src_emb_size']
        # cells and the embedding matrix are built lazily in _encode()
        self._encoder_l1_cell_fw = None
        self._encoder_l1_cell_bw = None
        self._encoder_cells = None
        self._enc_emb_w = None

    def _encode(self, input_dict):
        """Builds the GNMT-style encoder graph.

        Layer 1 is bidirectional; layers 2..N are unidirectional, with
        residual connections added starting from the third layer overall.
        """
        # Fail fast, before any graph variables are created.
        if self.params['encoder_layers'] < 2:
            raise ValueError("GNMT encoder must have at least 2 layers")
        source_sequence = input_dict['source_tensors'][0]
        source_length = input_dict['source_tensors'][1]
        self._enc_emb_w = tf.get_variable(
            name="EncoderEmbeddingMatrix",
            shape=[self._src_vocab_size, self._src_emb_size],
            dtype=tf.float32,
        )
        # First (bidirectional) layer never uses dropout.
        with tf.variable_scope("Level1FW"):
            self._encoder_l1_cell_fw = single_cell(
                cell_class=self.params['core_cell'],
                cell_params=self.params.get('core_cell_params', {}),
                dp_input_keep_prob=1.0,
                dp_output_keep_prob=1.0,
                residual_connections=False,
            )
        with tf.variable_scope("Level1BW"):
            self._encoder_l1_cell_bw = single_cell(
                cell_class=self.params['core_cell'],
                cell_params=self.params.get('core_cell_params', {}),
                dp_input_keep_prob=1.0,
                dp_output_keep_prob=1.0,
                residual_connections=False,
            )
        if self._mode == "train":
            # These are declared in get_optional_params(); default to 1.0
            # (no dropout) instead of raising KeyError when not supplied.
            dp_input_keep_prob = self.params.get(
                'encoder_dp_input_keep_prob', 1.0)
            dp_output_keep_prob = self.params.get(
                'encoder_dp_output_keep_prob', 1.0)
        else:
            dp_input_keep_prob = 1.0
            dp_output_keep_prob = 1.0
        with tf.variable_scope("UniDirLevel"):
            self._encoder_cells = [
                single_cell(
                    cell_class=self.params['core_cell'],
                    cell_params=self.params.get('core_cell_params', {}),
                    dp_input_keep_prob=dp_input_keep_prob,
                    dp_output_keep_prob=dp_output_keep_prob,
                    residual_connections=False,
                ) for _ in range(self.params['encoder_layers'] - 1)
            ]
            # Add residual connections starting from the third layer
            # overall (index 0 here is the second layer, right after the
            # bidirectional first layer, and stays non-residual).
            for idx, cell in enumerate(self._encoder_cells):
                if idx > 0:
                    # pylint: disable=no-member
                    self._encoder_cells[idx] = tf.contrib.rnn.ResidualWrapper(cell)
        time_major = self.params.get("time_major", False)
        use_swap_memory = self.params.get("use_swap_memory", False)
        embedded_inputs = tf.cast(
            tf.nn.embedding_lookup(
                self.enc_emb_w,
                source_sequence,
            ),
            self.params['dtype'],
        )
        # first bi-directional layer
        _encoder_output, _ = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=self._encoder_l1_cell_fw,
            cell_bw=self._encoder_l1_cell_bw,
            inputs=embedded_inputs,
            sequence_length=source_length,
            swap_memory=use_swap_memory,
            time_major=time_major,
            dtype=embedded_inputs.dtype,
        )
        # Concatenate forward and backward outputs along the feature axis.
        encoder_l1_outputs = tf.concat(_encoder_output, 2)
        # stack of unidirectional layers
        # pylint: disable=no-member
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
            cell=tf.contrib.rnn.MultiRNNCell(self._encoder_cells),
            inputs=encoder_l1_outputs,
            sequence_length=source_length,
            swap_memory=use_swap_memory,
            time_major=time_major,
            dtype=encoder_l1_outputs.dtype,
        )
        return {'outputs': encoder_outputs,
                'state': encoder_state,
                'src_lengths': source_length,
                'encoder_input': source_sequence}

    @property
    def src_vocab_size(self):
        """Size of the source vocabulary."""
        return self._src_vocab_size

    @property
    def src_emb_size(self):
        """Dimensionality of the source embeddings."""
        return self._src_emb_size

    @property
    def enc_emb_w(self):
        """Encoder embedding matrix variable; None until _encode() runs."""
        return self._enc_emb_w
class GNMTLikeEncoderWithEmbedding_cuDNN(Encoder):
"""
Encoder similar to the one used in
GNMT model: https://arxiv.org/abs/1609.08144.
Must have at least 2 layers. Uses cuDNN RNN blocks for efficiency
"""
@staticmethod
def get_required_params():
    """Required config keys for the cuDNN GNMT-like encoder."""
    required = dict(Encoder.get_required_params())
    required.update({
        'src_vocab_size': int,
        'src_emb_size': int,
        'encoder_cell_units': int,
        'encoder_cell_type': ['lstm', 'gru'],
        'encoder_layers': int,
    })
    return required
@staticmethod
def get_optional_params():
    """Optional config keys for the cuDNN GNMT-like encoder."""
    optional = dict(Encoder.get_optional_params())
    optional['encoder_dp_output_keep_prob'] = float
    return optional
def __init__(self, params, model,
             name="gnmt_encoder_with_emb_cudnn", mode='train'):
    """Initializes the cuDNN GNMT-like encoder.

    Stores the source vocabulary size and embedding dimensionality from
    ``params``; the embedding matrix itself is created later, in
    ``_encode()``.

    Args:
      params (dict): configuration dictionary; must contain
        'src_vocab_size' and 'src_emb_size' (see ``get_required_params``).
      model: the model this encoder belongs to.
      name (str): variable-scope name for the encoder.
      mode (str): 'train', 'eval' or 'infer' — assumed from the mode
        values used elsewhere in this file; confirm against the Encoder
        base class.
    """
    super(GNMTLikeEncoderWithEmbedding_cuDNN, self).__init__(
        params, model, name=name, mode=mode,
    )
    self._src_vocab_size = self.params['src_vocab_size']
    self._src_emb_size = self.params['src_emb_size']
    # created in _encode()
    self._enc_emb_w = None
def _encode(self, input_dict):
source_sequence = input_dict['source_tensors'][0]
source_length = input_dict['source_tensors'][1]
self._enc_emb_w = | |
= Var(within=Reals,bounds=(0,45),initialize=0)
# Decision variables x1877..x2384: continuous (Reals), bounded to [0, 45],
# initialized at 0. Loop form of the generated one-assignment-per-line
# declarations; setattr(m, name, v) is equivalent to `m.<name> = v`.
for _i in range(1877, 2385):
    setattr(m, "x%d" % _i, Var(within=Reals, bounds=(0, 45), initialize=0))
m.x2385 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2386 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2387 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2388 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2389 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2390 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2391 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2392 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2393 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2394 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2395 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2396 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2397 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2398 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2399 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2400 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2401 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2402 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2403 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2404 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2405 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2406 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2407 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2408 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2409 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2410 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2411 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2412 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2413 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2414 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2415 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2416 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2417 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2418 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2419 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2420 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2421 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2422 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2423 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2424 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2425 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2426 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2427 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2428 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2429 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2430 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2431 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2432 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2433 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2434 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2435 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2436 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2437 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2438 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2439 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2440 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2441 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2442 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2443 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2444 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2445 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2446 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2447 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2448 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2449 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2450 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2451 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2452 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2453 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2454 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2455 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2456 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2457 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2458 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2459 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2460 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2461 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2462 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2463 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2464 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2465 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2466 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2467 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2468 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2469 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2470 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2471 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2472 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2473 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2474 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2475 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2476 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2477 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2478 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2479 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2480 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2481 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2482 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2483 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2484 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2485 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2486 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2487 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2488 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2489 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2490 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2491 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2492 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2493 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2494 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2495 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2496 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2497 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2498 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2499 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2500 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2501 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2502 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2503 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2504 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2505 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2506 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2507 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2508 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2509 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2510 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2511 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2512 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2513 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2514 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2515 = Var(within=Reals,bounds=(0,45),initialize=0)
m.x2516 | |
# Source repository: michaelansel/evennia
"""
Account (OOC) commands. These are stored on the Account object
and self.caller is thus always an Account, not an Object/Character.
These commands go in the AccountCmdset and are accessible also
when puppeting a Character (although with lower priority)
These commands use the account_caller property which tells the command
parent (MuxCommand, usually) to setup caller correctly. They use
self.account to make sure to always use the account object rather than
self.caller (which change depending on the level you are calling from)
The property self.character can be used to access the character when
these commands are triggered with a connected character (such as the
case of the @ooc command), it is None if we are OOC.
Note that under MULTISESSION_MODE > 2, Account commands should use
self.msg() and similar methods to reroute returns to the correct
method. Otherwise all text will be returned to all connected sessions.
"""
from builtins import range
import time
from django.conf import settings
from evennia.server.sessionhandler import SESSIONS
from evennia.utils import utils, create, search, evtable
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
_MAX_NR_CHARACTERS = settings.MAX_NR_CHARACTERS
_MULTISESSION_MODE = settings.MULTISESSION_MODE
# limit symbol import for API
__all__ = ("CmdOOCLook", "CmdIC", "CmdOOC", "CmdPassword", "CmdQuit",
"CmdCharCreate", "CmdOption", "CmdSessions", "CmdWho",
"CmdColorTest", "CmdQuell")
class MuxAccountLookCommand(COMMAND_DEFAULT_CLASS):
    """
    Custom parent (only) parsing for OOC looking, sets a "playable"
    property on the command based on the parsing.
    """

    def parse(self):
        """Parse input and populate self.playable for the look helpers."""
        super(MuxAccountLookCommand, self).parse()

        if _MULTISESSION_MODE < 2:
            # single-character mode: the playable list is never consulted
            self.playable = None
            return

        playable = self.account.db._playable_characters
        if playable is not None:
            # prune stale None entries left behind by characters that were
            # deleted since the list was last stored
            if None in playable:
                playable = [char for char in playable if char]
                self.account.db._playable_characters = playable
        # store the playable property
        if self.args:
            # an explicit name was given - resolve it against the
            # lower-cased keys of the playable characters
            by_key = dict((utils.to_str(char.key.lower()), char)
                          for char in playable)
            self.playable = by_key.get(self.args.lower(), None)
        else:
            # no argument: expose the whole playable list
            self.playable = playable
# Obs - these are all intended to be stored on the Account, and as such,
# use self.account instead of self.caller, just to be sure. Also self.msg()
# is used to make sure returns go to the right session
# note that this is inheriting from MuxAccountLookCommand,
# and has the .playable property.
class CmdOOCLook(MuxAccountLookCommand):
    """
    look while out-of-character
    Usage:
      look
    Look in the ooc state.
    """

    # OOC flavor of "look": an Account has no in-game location or "self",
    # so when a character is puppeted the ordinary (higher-priority) look
    # command handles things instead.
    key = "look"
    aliases = ["l", "ls"]
    locks = "cmd:all()"
    help_category = "General"

    # this is used by the parent to set self.caller to the Account
    account_caller = True

    def func(self):
        """Show the OOC look result for this account."""
        if _MULTISESSION_MODE >= 2:
            # delegate to the account-level look helper
            self.msg(self.account.at_look(target=self.playable, session=self.session))
        else:
            # single-character mode has no OOC "room" to look at
            self.msg("You are out-of-character (OOC).\nUse |w@ic|n to get back into the game.")
class CmdCharCreate(COMMAND_DEFAULT_CLASS):
    """
    create a new character
    Usage:
      @charcreate <charname> [= desc]
    Create a new character, optionally giving it a description. You
    may use upper-case letters in the name - you will nevertheless
    always be able to access your character using lower-case letters
    if you want.
    """
    key = "@charcreate"
    locks = "cmd:pperm(Player)"
    help_category = "General"
    # this is used by the parent
    account_caller = True
    def func(self):
        """create the new character"""
        account = self.account
        if not self.args:
            self.msg("Usage: @charcreate <charname> [= description]")
            return
        # lhs = character name, rhs = optional description (split on '=')
        key = self.lhs
        desc = self.rhs
        # per-account cap on created characters (superusers are exempt)
        charmax = _MAX_NR_CHARACTERS
        if not account.is_superuser and \
           (account.db._playable_characters and
                len(account.db._playable_characters) >= charmax):
            self.msg("You may only create a maximum of %i characters." % charmax)
            return
        from evennia.objects.models import ObjectDB
        typeclass = settings.BASE_CHARACTER_TYPECLASS
        if ObjectDB.objects.filter(db_typeclass_path=typeclass, db_key__iexact=key):
            # check if this Character already exists. Note that we are only
            # searching the base character typeclass here, not any child
            # classes.
            self.msg("|rA character named '|w%s|r' already exists.|n" % key)
            return
        # create the character
        start_location = ObjectDB.objects.get_id(settings.START_LOCATION)
        default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
        permissions = settings.PERMISSION_ACCOUNT_DEFAULT
        new_character = create.create_object(typeclass, key=key,
                                             location=start_location,
                                             home=default_home,
                                             permissions=permissions)
        # only allow creator (and developers) to puppet this char
        # TODO remove hardcoded permission names?
        new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Developer) or pperm(Developer)" %
                                (new_character.id, account.id))
        # register the new character on the account
        # NOTE(review): assumes account.db._playable_characters is already a
        # list at this point - confirm it is initialized elsewhere
        account.db._playable_characters.append(new_character)
        if desc:
            new_character.db.desc = desc
        elif not new_character.db.desc:
            # fall back to a placeholder description
            new_character.db.desc = "This is a character."
        self.msg("Created new character %s. Use |w@ic %s|n to enter the game as this character."
                 % (new_character.key, new_character.key))
class CmdCharDelete(COMMAND_DEFAULT_CLASS):
    """
    delete a character - this cannot be undone!
    Usage:
      @chardelete <charname>
    Permanently deletes one of your characters.
    """
    key = "@chardelete"
    locks = "cmd:pperm(Player)"
    help_category = "General"
    # this is used by the parent; was missing here although every sibling
    # account command sets it (and the module docstring says they should)
    account_caller = True

    def func(self):
        """delete the character"""
        account = self.account
        if not self.args:
            self.msg("Usage: @chardelete <charactername>")
            return
        # use the playable_characters list to search
        match = [char for char in utils.make_iter(account.db._playable_characters)
                 if char.key.lower() == self.args.lower()]
        if not match:
            self.msg("You have no such character to delete.")
            return
        elif len(match) > 1:
            self.msg("Aborting - there are two characters with the same name. Ask an admin to delete the right one.")
            return
        else:  # one match
            from evennia.utils.evmenu import get_input

            def _callback(caller, callback_prompt, result):
                # runs when the player answers the confirmation prompt
                if result.lower() == "yes":
                    delobj = caller.ndb._char_to_delete
                    key = delobj.key
                    caller.db._playable_characters = [pc for pc in caller.db._playable_characters if pc != delobj]
                    delobj.delete()
                    self.msg("Character '%s' was permanently deleted." % key)
                else:
                    self.msg("Deletion was aborted.")
                # always clear the temporary reference - previously this was
                # only done on abort, leaving a dangling reference to a
                # deleted object after a successful delete
                del caller.ndb._char_to_delete

            match = match[0]
            account.ndb._char_to_delete = match
            prompt = "|rThis will permanently destroy '%s'. This cannot be undone.|n Continue yes/[no]?"
            get_input(account, prompt % match.key, _callback)
class CmdIC(COMMAND_DEFAULT_CLASS):
    """
    control an object you have permission to puppet
    Usage:
      @ic <character>
    Go in-character (IC) as a given Character.
    This will attempt to "become" a different object assuming you have
    the right to do so. Note that it's the ACCOUNT character that puppets
    characters/objects and which needs to have the correct permission!
    You cannot become an object that is already controlled by another
    account. In principle <character> can be any in-game object as long
    as you the account have access right to puppet it.
    """
    key = "@ic"
    # lock must be all() for different puppeted objects to access it.
    locks = "cmd:all()"
    aliases = "@puppet"
    help_category = "General"
    # this is used by the parent
    account_caller = True

    def func(self):
        """Resolve the puppet target and hand control over to it."""
        account = self.account
        session = self.session

        target = None
        if not self.args:
            # no argument given: fall back to the last puppeted character
            target = account.db._last_puppet
            if not target:
                self.msg("Usage: @ic <character>")
                return
        if not target:
            # resolve the given name, keeping only objects we may puppet
            candidates = [obj for obj in search.object_search(self.args)
                          if obj.access(account, "puppet")]
            if not candidates:
                self.msg("That is not a valid character choice.")
                return
            if len(candidates) > 1:
                listing = ", ".join("%s(#%s)" % (obj.key, obj.id) for obj in candidates)
                self.msg("Multiple targets with the same name:\n %s" % listing)
                return
            target = candidates[0]
        try:
            account.puppet_object(session, target)
            account.db._last_puppet = target
        except RuntimeError as exc:
            self.msg("|rYou cannot become |C%s|n: %s" % (target.name, exc))
# note that this is inheriting from MuxAccountLookCommand,
# and as such has the .playable property.
class CmdOOC(MuxAccountLookCommand):
    """
    stop puppeting and go ooc
    Usage:
      @ooc
    Go out-of-character (OOC).
    This will leave your current character and put you in a incorporeal OOC state.
    """
    key = "@ooc"
    locks = "cmd:pperm(Player)"
    aliases = "@unpuppet"
    help_category = "General"
    # this is used by the parent
    account_caller = True

    def func(self):
        """Unpuppet the current character and show the OOC view."""
        account = self.account
        session = self.session

        old_char = account.get_puppet(session)
        if not old_char:
            self.msg("You are already OOC.")
            return

        # remember the character so @ic without arguments can return to it
        account.db._last_puppet = old_char

        # disconnect
        try:
            account.unpuppet_object(session)
            self.msg("\n|GYou go OOC.|n\n")
            if _MULTISESSION_MODE >= 2:
                self.msg(account.at_look(target=self.playable, session=session))
                return
            # only one character allowed in this mode
            self.msg("You are out-of-character (OOC).\nUse |w@ic|n to get back into the game.")
        except RuntimeError as exc:
            self.msg("|rCould not unpuppet from |c%s|n: %s" % (old_char, exc))
class CmdSessions(COMMAND_DEFAULT_CLASS):
"""
check your connected session(s)
Usage:
@sessions
Lists the sessions currently connected to your account.
"""
key = "@sessions"
locks = "cmd:all()"
help_category = "General"
# this is used by the parent
account_caller = True
def func(self):
"""Implement function"""
account = self.account
sessions = account.sessions.all()
table = evtable.EvTable("|wsessid",
"|wprotocol",
"|whost",
"|wpuppet/character",
"|wlocation")
for sess in sorted(sessions, key=lambda x: x.sessid):
char = account.get_puppet(sess)
table.add_row(str(sess.sessid), str(sess.protocol_key),
isinstance(sess.address, tuple) and sess.address[0] or sess.address,
char and str(char) or "None",
char and str(char.location) or "N/A")
self.msg("|wYour current session(s):|n\n%s" % | |
the gobbli download directory if it doesn't already exist there.
Stream the download to avoid running out of memory.
Args:
url: URL for the file.
filename: If passed, use this as the filename instead of the best-effort one
determined from the URL.
Returns:
The path to the downloaded file.
"""
if filename is None:
# Kind of hacky... pull out the last path component as the filename and strip
# a trailing query, if any
local_filename = url.split("/")[-1]
try:
query_start_ndx = local_filename.index("?")
except ValueError:
query_start_ndx = -1
if query_start_ndx != -1:
local_filename = local_filename[:query_start_ndx]
else:
local_filename = filename
local_filepath = download_dir() / local_filename
if local_filepath.exists():
LOGGER.debug(f"Download for URL '{url}' already exists at '{local_filepath}'")
return local_filepath
LOGGER.debug(f"Downloading URL '{url}' to '{local_filepath}'")
try:
with requests.get(url, stream=True) as r:
with open(local_filepath, "wb") as f:
shutil.copyfileobj(r.raw, f)
except Exception as e:
# Don't leave the file in a partially downloaded state
try:
local_filepath.unlink()
LOGGER.debug(f"Removed partially downloaded file at '{local_filepath}'")
except Exception:
warnings.warn(
"Failed to remove partially downloaded file at '{local_filepath}'."
)
raise e
return local_filepath
def _extract_tar_junk_path(tarfile_obj: tarfile.TarFile, archive_extract_dir: Path):
    """
    Extract a tarfile into ``archive_extract_dir``, discarding any directory
    structure inside the archive so every file lands directly in the target.
    """
    for member in tarfile_obj.getmembers():
        if member.isdir():
            # directories carry no content once paths are flattened
            continue
        # rewrite the member name down to its final path component
        member.name = Path(member.name).name
        LOGGER.debug(
            f"Extracting member '{member.name}' to '{archive_extract_dir / member.name}'"
        )
        tarfile_obj.extract(member, path=archive_extract_dir)
def _extract_zip_junk_path(zipfile_obj: zipfile.ZipFile, archive_extract_dir: Path):
    """
    Extract a zip file into ``archive_extract_dir``, discarding any directory
    structure inside the archive.
    """
    for info in zipfile_obj.infolist():
        if info.is_dir():
            # nothing to write for directory entries
            continue
        flat_name = Path(info.filename).name
        target = archive_extract_dir / flat_name
        LOGGER.debug(f"Extracting member '{flat_name}' to '{target}'")
        # stream the member straight to its flattened destination
        with zipfile_obj.open(info, "r") as src:
            with target.open("wb") as dst:
                shutil.copyfileobj(src, dst)
_SUPPORTED_ARCHIVE_EXTENSIONS = (".gz", ".zip")


def is_archive(filepath: Path) -> bool:
    """
    Args:
        filepath: Path to a file.
    Returns:
        Whether the file is an archive supported by :func:`extract_archive`.
    """
    # str.endswith accepts a tuple of suffixes, testing them all in one call
    return filepath.name.endswith(_SUPPORTED_ARCHIVE_EXTENSIONS)
def extract_archive(
    archive_path: Path, archive_extract_dir: Path, junk_paths: bool = False
):
    """
    Extract an archive to the given directory.
    Args:
        archive_path: Path to the archive file.
        archive_extract_dir: Extract the archive to this directory.
        junk_paths: If True, disregard the archive's internal directory hierarchy
          and extract all files directly to the output directory.
    Returns:
        Path to the directory containing the extracted file contents.
    Raises:
        ValueError: If the file's extension is not one of the supported
          archive formats (.tar.gz, .gz, .zip).
    """
    LOGGER.debug(f"Extracting archive '{archive_path}'")
    archive_extract_dir.mkdir(exist_ok=True, parents=True)
    # NOTE: the suffix checks are order-sensitive -- '.tar.gz' must be tested
    # before the more general '.gz' branch.
    if archive_path.name.endswith(".tar.gz"):
        with tarfile.open(archive_path, "r:gz") as archive_tar:
            if junk_paths:
                _extract_tar_junk_path(archive_tar, archive_extract_dir)
            else:
                LOGGER.debug(f"Extracting all members to '{archive_extract_dir}'")
                # NOTE(review): extractall trusts member paths; for archives
                # from untrusted sources this allows path traversal ("tar
                # slip") -- consider validating member names
                archive_tar.extractall(archive_extract_dir)
    elif archive_path.name.endswith(".gz"):
        LOGGER.debug(f"Extracting gzipped file to '{archive_extract_dir}'")
        with gzip.open(archive_path, "rb") as archive_gz:
            # Strip the trailing '.gz' and use the original filename as the new filename
            with open(archive_extract_dir / archive_path.name[:-3], "wb") as f:
                shutil.copyfileobj(archive_gz, f)
    elif archive_path.name.endswith(".zip"):
        with zipfile.ZipFile(archive_path, "r") as archive_zip:
            if junk_paths:
                _extract_zip_junk_path(archive_zip, archive_extract_dir)
            else:
                LOGGER.debug(f"Extracting all members to '{archive_extract_dir}'")
                archive_zip.extractall(archive_extract_dir)
    else:
        raise ValueError(f"Unsupported archive file: {archive_path}")
    return archive_extract_dir
def download_archive(
    archive_url: str,
    archive_extract_dir: Path,
    junk_paths: bool = False,
    filename: Optional[str] = None,
) -> Path:
    """
    Download an archive and extract its contents into the given directory.
    If the downloaded file turns out to be corrupted (e.g. a leftover from a
    previously cancelled partial download), the download is retried once.
    Args:
        archive_url: URL for the archive.
        archive_extract_dir: Download the archive and extract it to this directory.
        junk_paths: If True, disregard the archive's internal directory hierarchy
          and extract all files directly to the output directory.
        filename: If given, store the downloaded file under this name instead of
          one automatically inferred from the URL.
    Returns:
        Path to the directory containing the extracted file contents.
    """
    download_path = download_file(archive_url, filename=filename)
    try:
        return extract_archive(
            download_path, archive_extract_dir, junk_paths=junk_paths
        )
    except (zipfile.BadZipFile, tarfile.ReadError, OSError, EOFError):
        LOGGER.warning(
            f"Downloaded archive at '{download_path}' is corrupted. Retrying..."
        )
        download_path.unlink()
    # Second (and final) attempt with a fresh download; any error now
    # propagates to the caller.
    download_path = download_file(archive_url, filename=filename)
    return extract_archive(
        download_path, archive_extract_dir, junk_paths=junk_paths
    )
def dir_to_blob(dir_path: Path) -> bytes:
    """
    Serialize a directory into an in-memory gzip-compressed tar blob.
    Useful for storing a directory's contents in an in-memory object store.
    Extract with :func:`blob_to_dir`.
    Args:
        dir_path: Path to the directory to be archived.
    Returns:
        The compressed directory as a binary buffer.
    """
    buffer = io.BytesIO()
    with tarfile.open(fileobj=buffer, mode="w:gz") as archive:
        # arcname="." stores members with relative paths so they extract
        # cleanly into any target directory
        archive.add(str(dir_path), arcname=".", recursive=True)
    # the archive is fully flushed once the tarfile context closes
    return buffer.getvalue()
def blob_to_dir(blob: bytes, dir_path: Path):
    """
    Unpack a compressed directory blob (as produced by :func:`dir_to_blob`)
    into the given directory.
    Args:
        blob: The compressed directory as a binary buffer.
        dir_path: Path to extract the directory to.
    """
    buffer = io.BytesIO(blob)
    with tarfile.open(fileobj=buffer, mode="r:gz") as archive:
        archive.extractall(dir_path)
T1 = TypeVar("T1")
T2 = TypeVar("T2")


def shuffle_together(l1: List[T1], l2: List[T2], seed: Optional[int] = None):
    """
    Shuffle two lists together in-place, so their order is random but the
    individual elements still correspond. Ex. shuffle a list of texts and a
    list of labels so the labels still correspond correctly to the texts.
    The lists must be the same length for this to make sense.
    Args:
        l1: The first list to be shuffled.
        l2: The second list to be shuffled.
        seed: Seed for the random number generator, if any
    Raises:
        ValueError: If the lists have different lengths.
    """
    if not len(l1) == len(l2):
        raise ValueError("Lists have unequal length.")
    if len(l1) == 0:
        # nothing to shuffle; also avoids zip(*[]) failing below
        return
    zipped = list(zip(l1, l2))
    if seed is not None:
        # Use a dedicated Random instance so a seeded call no longer clobbers
        # the process-global RNG state for unrelated callers.  Random(seed)
        # produces the exact same Mersenne Twister sequence as
        # random.seed(seed) + random.shuffle, so results are unchanged.
        random.Random(seed).shuffle(zipped)
    else:
        random.shuffle(zipped)
    l1[:], l2[:] = zip(*zipped)
def _train_sentencepiece(spm: Any, texts: List[str], model_path: Path, vocab_size: int):
    """
    Train a Sentencepiece model on the given data and save it to the given
    location.
    Args:
        spm: Imported sentencepiece module
        texts: Data to train on.
        model_path: Path to write the trained model to.
        vocab_size: Size of the vocabulary for the model to train.
    """
    # The sentencepiece trainer reads its input from disk, so dump the texts
    # to a temp file, one text per line.
    # NOTE(review): reopening a NamedTemporaryFile by name does not work on
    # Windows -- presumably this is only run on POSIX systems; confirm.
    with tempfile.NamedTemporaryFile(mode="w") as f:
        f.write("\n".join(texts))
        f.flush()  # ensure everything is on disk before the trainer reads it
        # log levels:
        # https://github.com/google/sentencepiece/blob/d4dd947fe71c4fa4ee24ad8297beee32887d8828/src/common.h#L132
        # Default is waaay too chatty
        spm.SentencePieceTrainer.train(
            f"--input={f.name} --model_prefix={model_path} --vocab_size={vocab_size} --minloglevel=2"
        )
@enum.unique
class TokenizeMethod(enum.Enum):
    """
    Enum describing the different canned tokenization methods gobbli supports.
    Processes requiring tokenization should generally allow a user to pass in
    a custom tokenization function if their needs aren't met by one of these.
    Attributes:
      SPLIT: Naive tokenization based on whitespace.  Probably only useful for testing.
        Tokens will be lowercased.
      SPACY: Simple tokenization using spaCy's English language model.
        Tokens will be lowercased, and non-alphabetic tokens will be filtered out.
      SENTENCEPIECE: `SentencePiece <https://github.com/google/sentencepiece>`__-based tokenization.
    """

    # Values are the lowercase method names (useful for serialization /
    # user-facing configuration of the chosen method).
    SPLIT = "split"
    SPACY = "spacy"
    SENTENCEPIECE = "sentencepiece"
def tokenize(
method: TokenizeMethod,
texts: List[str],
model_path: Optional[Path] = None,
vocab_size: int = 2000,
) -> List[List[str]]:
"""
Tokenize a list of texts using a predefined method.
Args:
texts: Texts to tokenize.
method: The type of tokenization to apply.
model_path: Path to save a trained model to. Required if the tokenization method
requires training a model; otherwise ignored. If the model doesn't exist, it will
be trained; if it does, the trained model will be reused.
vocab_size: Number of terms in the vocabulary for tokenization methods with a fixed
vocabulary size. You may need to lower this if you get tokenization errors or
raise it if your texts have a very diverse vocabulary.
Returns:
List of tokenized texts.
"""
if method == TokenizeMethod.SPLIT:
return [[tok.lower() for tok in text.split()] for text in texts]
elif method == TokenizeMethod.SPACY:
try:
from spacy.lang.en import English
except ImportError:
raise ImportError(
"spaCy tokenization method requires spaCy and an english language "
"model to be installed."
)
nlp = English()
tokenizer = nlp.Defaults.create_tokenizer(nlp)
processed_texts = []
for doc in tokenizer.pipe(texts):
processed_texts.append([tok.lower_ for tok in doc if tok.is_alpha])
return processed_texts
elif method == TokenizeMethod.SENTENCEPIECE:
try:
import sentencepiece as spm
except ImportError:
raise ImportError(
"SentencePiece tokenization requires the sentencepiece module "
"to be installed."
)
# Train only if the model file path doesn't already exist
# If we weren't given a path to save to, just make a temp file which will
# be discarded after the run
sp = spm.SentencePieceProcessor()
if model_path is None:
with tempfile.TemporaryDirectory() as temp_model_dir:
temp_model_path = Path(temp_model_dir) / "temp"
_train_sentencepiece(spm, texts, temp_model_path, | |
data.
"""
import torch.nn as nn
import torch.nn.functional as F
# Minimal conv stack used only to demonstrate how Conv2d / MaxPool2d
# transform the input shape (see the shape printout in the loop below).
simple_model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1),
    nn.MaxPool2d(2, 2)
)
"""Refer to [Sylvian's post](https://sgugger.github.io/convolution-in-depth.html) for an explanation of `kernel_size`, `stride` and `padding`. """
# Sanity check: push one batch through the demo model and inspect the
# resulting shape (the MaxPool2d layer halves height and width).
for images, labels in train_dl:
    print('images.shape:', images.shape)
    out = simple_model(images)
    print('out.shape:', out.shape)
    break
"""The `Conv2d` layer transforms a 3-channel image to an 8-channel *feature map*, and the `MaxPool2d` layer halves the height and width. The feature map gets smaller as we add more layers, until we are finally left with a small feature map, which can be flattened into a vector. We can then add some fully connected layers at the end to get vector of size 10 for each image.
<img src="https://i.imgur.com/KKtPOKE.png" style="max-width:540px">
Let's define the model by extending an `ImageClassificationBase` class which contains helper methods for training & validation.
"""
class ImageClassificationBase(nn.Module):
    """Base class bundling the training/validation plumbing shared by the
    image-classification models in this script; subclasses only need to
    implement ``forward``."""

    def training_step(self, batch):
        """Run one training batch and return the cross-entropy loss."""
        images, labels = batch
        out = self(images)                   # Generate predictions
        # (removed leftover debug prints of out.shape / labels.shape)
        loss = F.cross_entropy(out, labels)  # Calculate loss
        return loss

    def validation_step(self, batch):
        """Run one validation batch; return its loss and accuracy."""
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        acc = accuracy(out, labels)          # Calculate accuracy
        # detach the loss so validation results don't retain the graph
        return {'val_loss': loss.detach(), 'val_acc': acc}

    def validation_epoch_end(self, outputs):
        """Aggregate per-batch validation dicts into epoch-level floats."""
        batch_losses = [x['val_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()  # Combine losses
        batch_accs = [x['val_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()     # Combine accuracies
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        """Print a one-line progress summary for the finished epoch."""
        print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
def accuracy(outputs, labels):
    """Fraction of rows in *outputs* whose arg-max equals *labels*.

    Returns the fraction as a 0-dim float tensor.
    """
    _, predicted = torch.max(outputs, dim=1)
    n_correct = torch.sum(predicted == labels).item()
    return torch.tensor(n_correct / len(predicted))
"""
We'll use `nn.Sequential` to chain the layers and activations functions into a single network architecture."""
class Cifar10CnnModel(ImageClassificationBase):
    """CNN for CIFAR-10: three conv/pool stages followed by an MLP head."""

    def __init__(self):
        super().__init__()
        # Each stage: two 3x3 convs (padding keeps H/W) then a 2x2 max-pool
        # that halves the spatial size: 32 -> 16 -> 8 -> 4.
        stages = [(3, 32, 64), (64, 128, 128), (128, 256, 256)]
        layers = []
        for c_in, c_mid, c_out in stages:
            layers += [
                nn.Conv2d(c_in, c_mid, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Conv2d(c_mid, c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.MaxPool2d(2, 2),
            ]
        layers += [
            nn.Flatten(),                  # 256 x 4 x 4 -> 4096
            nn.Linear(256 * 4 * 4, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 10),            # one logit per CIFAR-10 class
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, xb):
        """Apply the full network to a batch of images."""
        return self.network(xb)
# Instantiate the CNN; evaluating `model` on its own line prints the layer summary.
model = Cifar10CnnModel()
model
"""Let's verify that the model produces the expected output on a batch of training data. The 10 outputs for each image can be interpreted as probabilities for the 10 target classes (after applying softmax), and the class with the highest probability is chosen as the label predicted by the model for the input image. Check out [Part 3 (logistic regression)](https://jovian.ml/aakashns/03-logistic-regression#C50) for a more detailed discussion on interpreting the outputs, applying softmax and identifying the predicted labels."""
# Sanity check: run one batch through the untrained model and inspect the
# raw (pre-softmax) scores for the first image.
for images, labels in train_dl:
    print('images.shape:', images.shape)
    out = model(images)
    print('out.shape:', out.shape)
    print('out[0]:', out[0])
    break
"""To seamlessly use a GPU, if one is available, we define a couple of helper functions (`get_default_device` & `to_device`) and a helper class `DeviceDataLoader` to move our model & data to the GPU as required. These are described in more detail in the [previous tutorial](https://jovian.ml/aakashns/04-feedforward-nn#C21)."""
def get_default_device():
    """Return the preferred torch device: CUDA when available, else CPU."""
    return torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def to_device(data, device):
    """Recursively move tensor(s) to *device*.

    Lists and tuples are traversed element-wise; note that a tuple input
    comes back as a list (matching the original behaviour).
    """
    if not isinstance(data, (list, tuple)):
        return data.to(device, non_blocking=True)
    return [to_device(item, device) for item in data]
class DeviceDataLoader():
    """Iterable wrapper that ships each batch from a dataloader onto a device."""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        """Yield batches from the wrapped loader, moved to the target device."""
        for batch in self.dl:
            yield to_device(batch, self.device)

    def __len__(self):
        """Number of batches in the wrapped loader."""
        return len(self.dl)
"""Based on where you're running this notebook, your default device could be a CPU (`torch.device('cpu')`) or a GPU (`torch.device('cuda')`)"""
device = get_default_device()  # cuda if a GPU is visible, else cpu
device
"""We can now wrap our training and validation data loaders using `DeviceDataLoader` for automatically transferring batches of data to the GPU (if available), and use `to_device` to move our model to the GPU (if available)."""
# Wrap the loaders so every batch lands on `device`, and move the model there too.
train_dl = DeviceDataLoader(train_dl, device)
val_dl = DeviceDataLoader(val_dl, device)
to_device(model, device);
"""Once again, let's save and commit the notebook before we proceed further."""
"""## Training the Model
We'll define two functions: `fit` and `evaluate` to train the model using gradient descent and evaluate its performance on the validation set. For a detailed walkthrough of these functions, check out the [previous tutorial](https://jovian.ai/aakashns/03-logistic-regression).
"""
@torch.no_grad()
def evaluate(model, val_loader):
    """Run *model* over *val_loader* in eval mode and aggregate the metrics.

    Gradients are disabled for the whole pass via ``torch.no_grad``.
    """
    model.eval()
    per_batch = []
    for batch in val_loader:
        per_batch.append(model.validation_step(batch))
    return model.validation_epoch_end(per_batch)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
    """Train *model* with gradient descent and validate after each epoch.

    Returns a list with one result dict per epoch containing
    ``train_loss``, ``val_loss`` and ``val_acc``.
    """
    optimizer = opt_func(model.parameters(), lr)
    history = []
    for epoch in range(epochs):
        # --- training phase ---
        model.train()
        epoch_losses = []
        for batch in train_loader:
            loss = model.training_step(batch)
            epoch_losses.append(loss)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # --- validation phase ---
        result = evaluate(model, val_loader)
        result['train_loss'] = torch.stack(epoch_losses).mean().item()
        model.epoch_end(epoch, result)
        history.append(result)
    return history
"""Before we begin training, let's instantiate the model once again and see how it performs on the validation set with the initial set of parameters."""
# Fresh model on the target device; baseline metrics before any training.
model = to_device(Cifar10CnnModel(), device)
evaluate(model, val_dl)
"""The initial accuracy is around 10%, which is what one might expect from a randomly initialized model (since it has a 1 in 10 chance of getting a label right by guessing randomly).
We'll use the following *hyperparameters* (learning rate, no. of epochs, batch_size etc.) to train our model. As an exercise, you can try changing these to see if you can achieve a higher accuracy in a shorter time.
"""
# Hyperparameters for this run.
num_epochs = 10
opt_func = torch.optim.Adam  # Adam instead of plain SGD
lr = 0.001
"""It's important to record the hyperparameters of every experiment you do, to replicate it later and compare it against other experiments. We can record them using `jovian.log_hyperparams`."""
history = fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
"""Just as we have recorded the hyperparameters, we can also record the final metrics achieved by the model using `jovian.log_metrics` for reference, analysis and comparison."""
"""We can also plot the validation set accuracies to study how the model improves over time."""
def plot_accuracies(history):
    """Plot validation accuracy per epoch from a fit() history list."""
    acc_curve = [entry['val_acc'] for entry in history]
    plt.plot(acc_curve, '-x')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.title('Accuracy vs. No. of epochs');
plot_accuracies(history)
"""Our model reaches an accuracy of around 75%, and by looking at the graph, it seems unlikely that the model will achieve an accuracy higher than 80% even after training for a long time. This suggests that we might need to use a more powerful model to capture the relationship between the images and the labels more accurately. This can be done by adding more convolutional layers to our model, or increasing the no. of channels in each convolutional layer, or by using regularization techniques.
We can also plot the training and validation losses to study the trend.
"""
def plot_losses(history):
    """Plot training and validation loss curves from a fit() history list."""
    train_curve = [entry.get('train_loss') for entry in history]
    val_curve = [entry['val_loss'] for entry in history]
    plt.plot(train_curve, '-bx')
    plt.plot(val_curve, '-rx')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['Training', 'Validation'])
    plt.title('Loss vs. No. of epochs');
plot_losses(history)
"""Initially, both the training and validation losses seem to decrease over time. However, if you train the model for long enough, you will notice that the training loss continues to decrease, while the validation loss stops decreasing, and even starts to increase after a certain point!
<img src="https://i.stack.imgur.com/1QU0m.png" style="max-width:400px;">
This phenomenon is called **overfitting**, and it is the no. 1 reason why many machine learning models give rather terrible results on real-world data. It happens because the model, in an attempt to minimize the loss, starts to learn patterns that are unique to the training data, sometimes even memorizing specific training examples. Because of this, the model does not generalize well to previously unseen data.
Following are some common strategies for avoiding overfitting:
- Gathering and generating more training data, or adding noise to it
- Using regularization techniques like batch normalization & dropout
- Early stopping of model's training, when validation loss starts to increase
We will cover these topics in more detail in the next tutorial in this series, and learn how we can reach an accuracy of **over 90%** by making minor but important changes to our model.
Before continuing, let us save our work to the cloud using `jovian.commit`.
"""
"""When you try different experiments (by chaging the learning rate, batch size, optimizer etc.) and record hyperparameters and metrics with each version of your notebook, you can use the [**Compare**](https://jovian.ml/aakashns/05-cifar10-cnn/compare) view on | |
<gh_stars>100-1000
import hail as hl
from hail.expr.expressions import expr_float64, expr_numeric, analyze
from hail.typecheck import typecheck, oneof, sequenceof, nullable
from hail.utils import wrap_to_list, new_temp_file
@typecheck(weight_expr=expr_float64,
ld_score_expr=expr_numeric,
chi_sq_exprs=oneof(expr_float64,
sequenceof(expr_float64)),
n_samples_exprs=oneof(expr_numeric,
sequenceof(expr_numeric)),
n_blocks=int,
two_step_threshold=int,
n_reference_panel_variants=nullable(int))
def ld_score_regression(weight_expr,
ld_score_expr,
chi_sq_exprs,
n_samples_exprs,
n_blocks=200,
two_step_threshold=30,
n_reference_panel_variants=None) -> hl.Table:
r"""Estimate SNP-heritability and level of confounding biases from genome-wide association study
(GWAS) summary statistics.
Given a set or multiple sets of GWAS summary statistics, :func:`.ld_score_regression` estimates the heritability
of a trait or set of traits and the level of confounding biases present in
the underlying studies by regressing chi-squared statistics on LD scores,
leveraging the model:
.. math::
\mathrm{E}[\chi_j^2] = 1 + Na + \frac{Nh_g^2}{M}l_j
* :math:`\mathrm{E}[\chi_j^2]` is the expected chi-squared statistic
for variant :math:`j` resulting from a test of association between
variant :math:`j` and a trait.
* :math:`l_j = \sum_{k} r_{jk}^2` is the LD score of variant
:math:`j`, calculated as the sum of squared correlation coefficients
between variant :math:`j` and nearby variants. See :func:`ld_score`
for further details.
* :math:`a` captures the contribution of confounding biases, such as
cryptic relatedness and uncontrolled population structure, to the
association test statistic.
* :math:`h_g^2` is the SNP-heritability, or the proportion of variation
in the trait explained by the effects of variants included in the
regression model above.
* :math:`M` is the number of variants used to estimate :math:`h_g^2`.
* :math:`N` is the number of samples in the underlying association study.
For more details on the method implemented in this function, see:
* `LD Score regression distinguishes confounding from polygenicity in genome-wide association studies (Bulik-Sullivan et al, 2015) <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4495769/>`__
Examples
--------
Run the method on a matrix table of summary statistics, where the rows
are variants and the columns are different phenotypes:
>>> mt_gwas = ld_score_all_phenos_sumstats
>>> ht_results = hl.experimental.ld_score_regression(
... weight_expr=mt_gwas['ld_score'],
... ld_score_expr=mt_gwas['ld_score'],
... chi_sq_exprs=mt_gwas['chi_squared'],
... n_samples_exprs=mt_gwas['n'])
Run the method on a table with summary statistics for a single
phenotype:
>>> ht_gwas = ld_score_one_pheno_sumstats
>>> ht_results = hl.experimental.ld_score_regression(
... weight_expr=ht_gwas['ld_score'],
... ld_score_expr=ht_gwas['ld_score'],
... chi_sq_exprs=ht_gwas['chi_squared_50_irnt'],
... n_samples_exprs=ht_gwas['n_50_irnt'])
Run the method on a table with summary statistics for multiple
phenotypes:
>>> ht_gwas = ld_score_one_pheno_sumstats
>>> ht_results = hl.experimental.ld_score_regression(
... weight_expr=ht_gwas['ld_score'],
... ld_score_expr=ht_gwas['ld_score'],
... chi_sq_exprs=[ht_gwas['chi_squared_50_irnt'],
... ht_gwas['chi_squared_20160']],
... n_samples_exprs=[ht_gwas['n_50_irnt'],
... ht_gwas['n_20160']])
Notes
-----
The ``exprs`` provided as arguments to :func:`.ld_score_regression`
must all be from the same object, either a :class:`~.Table` or a
:class:`~.MatrixTable`.
**If the arguments originate from a table:**
* The table must be keyed by fields ``locus`` of type
:class:`.tlocus` and ``alleles``, a :class:`.tarray` of
:py:data:`.tstr` elements.
* ``weight_expr``, ``ld_score_expr``, ``chi_sq_exprs``, and
``n_samples_exprs`` are must be row-indexed fields.
* The number of expressions passed to ``n_samples_exprs`` must be
equal to one or the number of expressions passed to
``chi_sq_exprs``. If just one expression is passed to
``n_samples_exprs``, that sample size expression is assumed to
apply to all sets of statistics passed to ``chi_sq_exprs``.
Otherwise, the expressions passed to ``chi_sq_exprs`` and
``n_samples_exprs`` are matched by index.
* The ``phenotype`` field that keys the table returned by
:func:`.ld_score_regression` will have generic :obj:`int` values
``0``, ``1``, etc. corresponding to the ``0th``, ``1st``, etc.
expressions passed to the ``chi_sq_exprs`` argument.
**If the arguments originate from a matrix table:**
* The dimensions of the matrix table must be variants
(rows) by phenotypes (columns).
* The rows of the matrix table must be keyed by fields
``locus`` of type :class:`.tlocus` and ``alleles``,
a :class:`.tarray` of :py:data:`.tstr` elements.
* The columns of the matrix table must be keyed by a field
of type :py:data:`.tstr` that uniquely identifies phenotypes
represented in the matrix table. The column key must be a single
expression; compound keys are not accepted.
* ``weight_expr`` and ``ld_score_expr`` must be row-indexed
fields.
* ``chi_sq_exprs`` must be a single entry-indexed field
(not a list of fields).
* ``n_samples_exprs`` must be a single entry-indexed field
(not a list of fields).
* The ``phenotype`` field that keys the table returned by
:func:`.ld_score_regression` will have values corresponding to the
column keys of the input matrix table.
This function returns a :class:`~.Table` with one row per set of summary
statistics passed to the ``chi_sq_exprs`` argument. The following
row-indexed fields are included in the table:
* **phenotype** (:py:data:`.tstr`) -- The name of the phenotype. The
returned table is keyed by this field. See the notes below for
details on the possible values of this field.
* **mean_chi_sq** (:py:data:`.tfloat64`) -- The mean chi-squared
test statistic for the given phenotype.
* **intercept** (`Struct`) -- Contains fields:
- **estimate** (:py:data:`.tfloat64`) -- A point estimate of the
intercept :math:`1 + Na`.
- **standard_error** (:py:data:`.tfloat64`) -- An estimate of
the standard error of this point estimate.
* **snp_heritability** (`Struct`) -- Contains fields:
- **estimate** (:py:data:`.tfloat64`) -- A point estimate of the
SNP-heritability :math:`h_g^2`.
- **standard_error** (:py:data:`.tfloat64`) -- An estimate of
the standard error of this point estimate.
Warning
-------
:func:`.ld_score_regression` considers only the rows for which both row
fields ``weight_expr`` and ``ld_score_expr`` are defined. Rows with missing
values in either field are removed prior to fitting the LD score
regression model.
Parameters
----------
weight_expr : :class:`.Float64Expression`
Row-indexed expression for the LD scores used to derive
variant weights in the model.
ld_score_expr : :class:`.Float64Expression`
Row-indexed expression for the LD scores used as covariates
in the model.
chi_sq_exprs : :class:`.Float64Expression` or :obj:`list` of
:class:`.Float64Expression`
One or more row-indexed (if table) or entry-indexed
(if matrix table) expressions for chi-squared
statistics resulting from genome-wide association
studies (GWAS).
n_samples_exprs: :class:`.NumericExpression` or :obj:`list` of
:class:`.NumericExpression`
One or more row-indexed (if table) or entry-indexed
(if matrix table) expressions indicating the number of
samples used in the studies that generated the test
statistics supplied to ``chi_sq_exprs``.
n_blocks : :obj:`int`
The number of blocks used in the jackknife approach to
estimating standard errors.
two_step_threshold : :obj:`int`
Variants with chi-squared statistics greater than this
value are excluded in the first step of the two-step
procedure used to fit the model.
n_reference_panel_variants : :obj:`int`, optional
Number of variants used to estimate the
SNP-heritability :math:`h_g^2`.
Returns
-------
:class:`~.Table`
Table keyed by ``phenotype`` with intercept and heritability estimates
for each phenotype passed to the function."""
chi_sq_exprs = wrap_to_list(chi_sq_exprs)
n_samples_exprs = wrap_to_list(n_samples_exprs)
assert ((len(chi_sq_exprs) == len(n_samples_exprs))
or (len(n_samples_exprs) == 1))
__k = 2 # number of covariates, including intercept
ds = chi_sq_exprs[0]._indices.source
analyze('ld_score_regression/weight_expr',
weight_expr,
ds._row_indices)
analyze('ld_score_regression/ld_score_expr',
ld_score_expr,
ds._row_indices)
# format input dataset
if isinstance(ds, hl.MatrixTable):
if len(chi_sq_exprs) != 1:
raise ValueError("""Only one chi_sq_expr allowed if originating
from a matrix table.""")
if len(n_samples_exprs) != 1:
raise ValueError("""Only one n_samples_expr allowed if
originating from a matrix table.""")
col_key = list(ds.col_key)
if len(col_key) != 1:
raise ValueError("""Matrix table must be keyed by a single
phenotype field.""")
analyze('ld_score_regression/chi_squared_expr',
chi_sq_exprs[0],
ds._entry_indices)
analyze('ld_score_regression/n_samples_expr',
n_samples_exprs[0],
ds._entry_indices)
ds = ds._select_all(row_exprs={'__locus': ds.locus,
'__alleles': ds.alleles,
'__w_initial': weight_expr,
'__w_initial_floor': hl.max(weight_expr,
1.0),
'__x': ld_score_expr,
'__x_floor': hl.max(ld_score_expr,
1.0)},
row_key=['__locus', '__alleles'],
col_exprs={'__y_name': ds[col_key[0]]},
col_key=['__y_name'],
entry_exprs={'__y': chi_sq_exprs[0],
'__n': n_samples_exprs[0]})
ds = ds.annotate_entries(**{'__w': ds.__w_initial})
ds = ds.filter_rows(hl.is_defined(ds.__locus)
& hl.is_defined(ds.__alleles)
& hl.is_defined(ds.__w_initial)
& hl.is_defined(ds.__x))
else:
assert isinstance(ds, hl.Table)
for y in chi_sq_exprs:
analyze('ld_score_regression/chi_squared_expr', y, ds._row_indices)
for n in n_samples_exprs:
analyze('ld_score_regression/n_samples_expr', n, ds._row_indices)
ys = ['__y{:}'.format(i) for i, _ in enumerate(chi_sq_exprs)]
ws = ['__w{:}'.format(i) for i, _ in enumerate(chi_sq_exprs)]
ns = ['__n{:}'.format(i) for i, _ in enumerate(n_samples_exprs)]
ds = ds.select(**dict(**{'__locus': ds.locus,
'__alleles': ds.alleles,
'__w_initial': weight_expr,
'__x': ld_score_expr},
**{y: chi_sq_exprs[i]
for i, y in enumerate(ys)},
**{w: weight_expr for w in ws},
**{n: n_samples_exprs[i]
for i, n in enumerate(ns)}))
ds = ds.key_by(ds.__locus, ds.__alleles)
table_tmp_file = new_temp_file()
ds.write(table_tmp_file)
ds = hl.read_table(table_tmp_file)
hts = [ds.select(**{'__w_initial': ds.__w_initial,
'__w_initial_floor': hl.max(ds.__w_initial,
1.0),
'__x': ds.__x,
'__x_floor': hl.max(ds.__x, 1.0),
'__y_name': i,
'__y': ds[ys[i]],
'__w': ds[ws[i]],
'__n': hl.int(ds[ns[i]])})
for i, y in enumerate(ys)]
mts = [ht.to_matrix_table(row_key=['__locus',
'__alleles'],
col_key=['__y_name'],
row_fields=['__w_initial',
'__w_initial_floor',
'__x',
'__x_floor'])
for ht in hts]
ds = mts[0]
for i in range(1, len(ys)):
ds = ds.union_cols(mts[i])
ds = ds.filter_rows(hl.is_defined(ds.__locus)
& hl.is_defined(ds.__alleles)
& hl.is_defined(ds.__w_initial)
& hl.is_defined(ds.__x))
mt_tmp_file1 = new_temp_file()
ds.write(mt_tmp_file1)
mt = hl.read_matrix_table(mt_tmp_file1)
if not n_reference_panel_variants:
M = mt.count_rows()
else:
M = n_reference_panel_variants
mt = mt.annotate_entries(__in_step1=(hl.is_defined(mt.__y)
& (mt.__y < two_step_threshold)),
__in_step2=hl.is_defined(mt.__y))
mt = mt.annotate_cols(__col_idx=hl.int(hl.scan.count()),
__m_step1=hl.agg.count_where(mt.__in_step1),
__m_step2=hl.agg.count_where(mt.__in_step2))
| |
block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
:param migrate_data: implementation specific params
"""
raise NotImplementedError()
def cleanup(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None, destroy_vifs=True):
    """Cleanup the instance resources.

    Instance should have been destroyed from the Hypervisor before calling
    this method.

    :param context: security context
    :param instance: Instance object as returned by DB layer.
    :param network_info: instance network information
    :param block_device_info: Information about block devices that should
                              be detached from the instance.
    :param destroy_disks: Indicates if disks should be destroyed
    :param migrate_data: implementation specific params
    :param destroy_vifs: Indicates if virtual interfaces should be unplugged
    """
    raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the specified instance.

    After this is called successfully, the instance's state
    goes back to power_state.RUNNING. The virtualization
    platform should ensure that the reboot action has completed
    successfully even in cases in which the underlying domain/vm
    is paused or halted/stopped.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param network_info: instance network information
    :param reboot_type: Either a HARD or SOFT reboot
    :param block_device_info: Info pertaining to attached volumes
    :param bad_volumes_callback: Function to handle any bad volumes
        encountered
    """
    raise NotImplementedError()
def get_console_pool_info(self, console_type):
    """Return connection information for a console pool of the given type.

    :param console_type: type of console pool to query
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_console_output(self, context, instance):
    """Get console output for an instance.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :returns: the console log output of the instance
    """
    raise NotImplementedError()
def get_vnc_console(self, context, instance):
    """Get connection info for a vnc console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :returns: an instance of console.type.ConsoleVNC
    """
    raise NotImplementedError()
def get_spice_console(self, context, instance):
    """Get connection info for a spice console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :returns: an instance of console.type.ConsoleSpice
    """
    raise NotImplementedError()
def get_rdp_console(self, context, instance):
    """Get connection info for a rdp console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :returns: an instance of console.type.ConsoleRDP
    """
    raise NotImplementedError()
def get_serial_console(self, context, instance):
    """Get connection info for a serial console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :returns: an instance of console.type.ConsoleSerial
    """
    raise NotImplementedError()
def get_mks_console(self, context, instance):
    """Get connection info for a MKS console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :returns: an instance of console.type.ConsoleMKS
    """
    raise NotImplementedError()
def get_diagnostics(self, instance):
    """Return diagnostics data about the given instance.

    :param nova.objects.instance.Instance instance:
        The instance to which the diagnostic data should be returned.
    :return: Largely overlaps with the return value of the newer interface
        :func:`get_instance_diagnostics`
    :rtype: dict
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_instance_diagnostics(self, instance):
    """Return diagnostics data about the given instance.

    :param nova.objects.instance.Instance instance:
        The instance to which the diagnostic data should be returned.
    :return: Largely overlaps with the return value of the older interface
        :func:`get_diagnostics`
    :rtype: nova.virt.diagnostics.Diagnostics
    """
    raise NotImplementedError()
def get_all_bw_counters(self, instances):
    """Return bandwidth usage counters for each interface on each
    running VM.

    :param instances: nova.objects.instance.InstanceList
    :returns: bandwidth usage counters, one per interface per instance
    """
    raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
    """Return usage info for volumes attached to vms on a given host.

    :param context: security context
    :param compute_host_bdms: block device mappings for instances on this
        host (per the parameter name -- confirm exact structure with callers)
    """
    raise NotImplementedError()
def get_host_ip_addr(self):
    """Retrieve the IP address of the dom0 (the hypervisor host).

    :returns: the host IP address
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def attach_volume(self, context, connection_info, instance, mountpoint,
                  disk_bus=None, device_type=None, encryption=None):
    """Attach the disk to the instance at mountpoint using info.

    :param context: security context
    :param connection_info: connection information for the volume
    :param instance: nova.objects.instance.Instance
    :param mountpoint: device path at which the volume is attached inside
        the guest (e.g. '/dev/vdb')
    :param disk_bus: hypervisor-specific bus type for the disk, if any
    :param device_type: hypervisor-specific device type, if any
    :param encryption: volume encryption metadata, if any
    """
    raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint,
                  encryption=None):
    """Detach the disk attached to the instance.

    :param connection_info: connection information for the volume
    :param instance: nova.objects.instance.Instance
    :param mountpoint: device path the volume is attached at inside the guest
    :param encryption: volume encryption metadata, if any
    """
    raise NotImplementedError()
def swap_volume(self, old_connection_info, new_connection_info,
                instance, mountpoint, resize_to):
    """Replace the volume attached to the given `instance`.

    :param dict old_connection_info:
        The volume for this connection gets detached from the given
        `instance`.
    :param dict new_connection_info:
        The volume for this connection gets attached to the given
        `instance`.
    :param nova.objects.instance.Instance instance:
        The instance whose volume gets replaced by another one.
    :param str mountpoint:
        The mountpoint in the instance where the volume for
        `old_connection_info` is attached to.
    :param int resize_to:
        If the new volume is larger than the old volume, it gets resized
        to the given size (in Gigabyte) of `resize_to`.
    :return: None
    """
    raise NotImplementedError()
def attach_interface(self, instance, image_meta, vif):
    """Use hotplug to add a network interface to a running instance.

    The counter action to this is :func:`detach_interface`.

    :param nova.objects.instance.Instance instance:
        The instance which will get an additional network interface.
    :param nova.objects.ImageMeta image_meta:
        The metadata of the image of the instance.
    :param nova.network.model.NetworkInfo vif:
        The object which has the information about the interface to attach.
    :raise nova.exception.NovaException: If the attach fails.
    :return: None
    """
    # Hypervisor drivers must override this; the base implementation raises.
    raise NotImplementedError()
def detach_interface(self, instance, vif):
    """Use hotunplug to remove a network interface from a running instance.

    The counter action to this is :func:`attach_interface`.

    :param nova.objects.instance.Instance instance:
        The instance which gets a network interface removed.
    :param nova.network.model.NetworkInfo vif:
        The object which has the information about the interface to detach.
    :raise nova.exception.NovaException: If the detach fails.
    :return: None
    """
    # Hypervisor drivers must override this; the base implementation raises.
    raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
                               flavor, network_info,
                               block_device_info=None,
                               timeout=0, retry_interval=0):
    """Transfers the disk of a running instance in multiple phases, turning
    off the instance before the end.

    :param context: security context
    :param nova.objects.instance.Instance instance:
        The instance whose disk should be migrated.
    :param str dest:
        The IP address of the destination host.
    :param nova.objects.flavor.Flavor flavor:
        The flavor of the instance whose disk get migrated.
    :param nova.network.model.NetworkInfo network_info:
        The network information of the given `instance`.
    :param dict block_device_info:
        Information about the block devices.
    :param int timeout:
        The time in seconds to wait for the guest OS to shutdown.
    :param int retry_interval:
        How often to signal guest while waiting for it to shutdown.
    :return: A list of disk information dicts in JSON format.
    :rtype: str
    """
    raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
    """Snapshots the specified instance.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param image_id: Reference to a pre-created image that will
                     hold the snapshot.
    :param update_task_state: callable used to report progress of the
        snapshot task (exact signature not visible here -- confirm with
        callers)
    """
    raise NotImplementedError()
def post_interrupted_snapshot_cleanup(self, context, instance):
    """Cleans up any resources left after an interrupted snapshot.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    """
    # Intentionally a no-op by default; drivers with snapshot state to
    # clean up should override.
    pass
def finish_migration(self, context, migration, instance, disk_info,
                     network_info, image_meta, resize_instance,
                     block_device_info=None, power_on=True):
    """Completes a resize/migration.

    :param context: the context for the migration/resize
    :param migration: the migrate/resize information
    :param instance: nova.objects.instance.Instance being migrated/resized
    :param disk_info: the newly transferred disk information
    :param network_info: instance network information
    :param nova.objects.ImageMeta image_meta:
        The metadata of the image of the instance.
    :param resize_instance: True if the instance is being resized,
                            False otherwise
    :param block_device_info: instance volume block device info
    :param power_on: True if the instance should be powered on, False
                     otherwise
    """
    # Hypervisor drivers must override this; the base implementation raises.
    raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
    """Confirms a resize/migration, destroying the source VM.

    :param migration: the migrate/resize information
    :param instance: nova.objects.instance.Instance
    :param network_info: instance network information
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def finish_revert_migration(self, context, instance, network_info,
                            block_device_info=None, power_on=True):
    """Finish reverting a resize/migration.

    :param context: the context for the finish_revert_migration
    :param instance: nova.objects.instance.Instance being migrated/resized
    :param network_info: instance network information
    :param block_device_info: instance volume block device info
    :param power_on: True if the instance should be powered on, False
                     otherwise
    """
    # Hypervisor drivers must override this; the base implementation raises.
    raise NotImplementedError()
def pause(self, instance):
    """Pause the given instance.

    A paused instance doesn't use CPU cycles of the host anymore. The
    state of the VM could be stored in the memory or storage space of the
    host, depending on the underlying hypervisor technology.
    A "stronger" version of `pause` is :func:`suspend`.
    The counter action for `pause` is :func:`unpause`.

    :param nova.objects.instance.Instance instance:
        The instance which should be paused.
    :return: None
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def unpause(self, instance):
    """Unpause the given paused instance.

    The paused instance gets unpaused and will use CPU cycles of the
    host again. The counter action for `unpause` is :func:`pause`.
    Depending on the underlying hypervisor technology, the guest has the
    same state as before the `pause`.

    :param nova.objects.instance.Instance instance:
        The instance which should be unpaused.
    :return: None
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def suspend(self, context, instance):
"""Suspend the specified instance.
A suspended instance doesn't use CPU cycles or memory of the host
| |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1, 2017
Updated Mon Oct 22, 2018
@author: <EMAIL>
"""
import numpy as np
import os
import EXOSIMS.MissionSim as MissionSim
import sympy
from sympy.solvers import solve
import scipy.integrate as integrate
import scipy.interpolate as interpolate
import scipy.optimize as optimize
import astropy.constants as const
import astropy.units as u
try:
import cPickle as pickle
except:
import pickle
from ortools.linear_solver import pywraplp
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
class DoSFuncs(object):
'''Calculates depth of search values for a given input EXOSIMS json script.
Occurrence rates are determined from the EXOSIMS PlanetPopulation specified.
'core_contrast' must be specified in the input json script as either a
path to a fits file or a constant value, otherwise the default contrast
value from EXOSIMS will be used
path must be specified
Args:
path (str):
path to json script for EXOSIMS
abins (int):
number of semi-major axis bins for depth of search grid (optional)
Rbins (int):
number of planetary radius bins for depth of search grid (optional)
maxTime (float):
maximum total integration time in days (optional)
intCutoff (float):
integration cutoff time per target in days (optional)
dMag (float):
limiting dMag value for integration time calculation (optional)
WA_targ (astropy Quantity):
working angle for target astrophysical contrast (optional)
Attributes:
result (dict):
dictionary containing results of the depth of search calculations
Keys include:
NumObs (dict):
dictionary containing number of observations, key is: 'all'
aedges (ndarray):
1D array of semi-major axis bin edges in AU
Redges (ndarray):
1D array of planetary radius bin edges in R_earth
DoS (dict):
dictionary containing 2D array of depth of search key is: 'all'
occ_rates (dict):
dictionary containing 2D array of occurrence rates determined
from EXOSIMS PlanetPopulation, key is: 'all'
DoS_occ (dict):
dictionary containing 2D array of depth of search convolved
                with the extrapolated occurrence rates, key is: 'all'
sim (object):
EXOSIMS.MissionSim object used to generate target list and
integration times
outspec (dict):
EXOSIMS.MissionSim output specification
'''
def __init__(self, path=None, abins=100, Rbins=30, maxTime=365.0, intCutoff=30.0, dMag=None, WA_targ=None):
if path is None:
raise ValueError('path must be specified')
if path is not None:
# generate EXOSIMS.MissionSim object to calculate integration times
self.sim = MissionSim.MissionSim(scriptfile=path)
print 'Acquired EXOSIMS data from %r' % (path)
if dMag is not None:
try:
float(dMag)
except TypeError:
print 'dMag can have only one value'
if WA_targ is not None:
try:
float(WA_targ.value)
except AttributeError:
print 'WA_targ must be astropy Quantity'
except TypeError:
print 'WA_targ can have only one value'
self.result = {}
# minimum and maximum values of semi-major axis and planetary radius
# NO astropy Quantities
amin = self.sim.PlanetPopulation.arange[0].to('AU').value
amax = self.sim.PlanetPopulation.arange[1].to('AU').value
Rmin = self.sim.PlanetPopulation.Rprange[0].to('earthRad').value
assert Rmin < 45.0, 'Minimum planetary radius is above extrapolation range'
if Rmin < 0.35:
print 'Rmin reset to 0.35*R_earth'
Rmin = 0.35
Rmax = self.sim.PlanetPopulation.Rprange[1].to('earthRad').value
assert Rmax > 0.35, 'Maximum planetary radius is below extrapolation range'
if Rmax > 45.0:
print 'Rmax reset to 45.0*R_earth'
assert Rmax > Rmin, 'Maximum planetary radius is less than minimum planetary radius'
# need to get Cmin from contrast curve
mode = filter(lambda mode: mode['detectionMode'] == True, self.sim.OpticalSystem.observingModes)[0]
WA = np.linspace(mode['IWA'], mode['OWA'], 50)
syst = mode['syst']
lam = mode['lam']
if dMag is None:
# use dMagLim when dMag not specified
dMag = self.sim.Completeness.dMagLim
fZ = self.sim.ZodiacalLight.fZ0
fEZ = self.sim.ZodiacalLight.fEZ0
if WA_targ is None:
core_contrast = syst['core_contrast'](lam,WA)
contrast = interpolate.interp1d(WA.to('arcsec').value, core_contrast, \
kind='cubic', fill_value=1.0)
# find minimum value of contrast
opt = optimize.minimize_scalar(contrast, \
bounds=[mode['IWA'].to('arcsec').value, \
mode['OWA'].to('arcsec').value],\
method='bounded')
Cmin = opt.fun
WA_targ = opt.x*u.arcsec
t_int1 = self.sim.OpticalSystem.calc_intTime(self.sim.TargetList,np.array([0]),fZ,fEZ,dMag,WA_targ,mode)
t_int1 = np.repeat(t_int1.value,len(WA))*t_int1.unit
sInds = np.repeat(0,len(WA))
fZ1 = np.repeat(fZ.value,len(WA))*fZ.unit
fEZ1 = np.repeat(fEZ.value,len(WA))*fEZ.unit
core_contrast = 10.0**(-0.4*self.sim.OpticalSystem.calc_dMag_per_intTime(t_int1,self.sim.TargetList,sInds,fZ1,fEZ1,WA,mode))
contrast = interpolate.interp1d(WA.to('arcsec').value,core_contrast,kind='cubic',fill_value=1.0)
opt = optimize.minimize_scalar(contrast,bounds=[mode['IWA'].to('arcsec').value,mode['OWA'].to('arcsec').value],method='bounded')
Cmin = opt.fun
# find expected values of p and R
if self.sim.PlanetPopulation.prange[0] != self.sim.PlanetPopulation.prange[1]:
if hasattr(self.sim.PlanetPopulation,'ps'):
f = lambda R: self.sim.PlanetPopulation.get_p_from_Rp(R*u.earthRad)*self.sim.PlanetPopulation.dist_radius(R)
pexp, err = integrate.quad(f,self.sim.PlanetPopulation.Rprange[0].value,\
self.sim.PlanetPopulation.Rprange[1].value,\
epsabs=0,epsrel=1e-6,limit=100)
else:
f = lambda p: p*self.sim.PlanetPopulation.dist_albedo(p)
pexp, err = integrate.quad(f,self.sim.PlanetPopulation.prange[0],\
self.sim.PlanetPopulation.prange[1],\
epsabs=0,epsrel=1e-6,limit=100)
else:
pexp = self.sim.PlanetPopulation.prange[0]
print 'Expected value of geometric albedo: %r' % (pexp)
if self.sim.PlanetPopulation.Rprange[0] != self.sim.PlanetPopulation.Rprange[1]:
f = lambda R: R*self.sim.PlanetPopulation.dist_radius(R)
Rexp, err = integrate.quad(f,self.sim.PlanetPopulation.Rprange[0].to('earthRad').value,\
self.sim.PlanetPopulation.Rprange[1].to('earthRad').value,\
epsabs=0,epsrel=1e-4,limit=100)
Rexp *= u.earthRad.to('AU')
else:
Rexp = self.sim.PlanetPopulation.Rprange[0].to('AU').value
# minimum and maximum separations
smin = (np.tan(mode['IWA'])*self.sim.TargetList.dist).to('AU').value
smax = (np.tan(mode['OWA'])*self.sim.TargetList.dist).to('AU').value
smax[smax>amax] = amax
# include only stars where smin > amin
bigger = np.where(smin>amin)[0]
self.sim.TargetList.revise_lists(bigger)
smin = smin[bigger]
smax = smax[bigger]
# include only stars where smin < amax
smaller = np.where(smin<amax)[0]
self.sim.TargetList.revise_lists(smaller)
smin = smin[smaller]
smax = smax[smaller]
# calculate integration times
sInds = np.arange(self.sim.TargetList.nStars)
# calculate maximum integration time
t_int = self.sim.OpticalSystem.calc_intTime(self.sim.TargetList, sInds, fZ, fEZ, dMag, WA_targ, mode)
# remove integration times above cutoff
cutoff = np.where(t_int.to('day').value<intCutoff)[0]
self.sim.TargetList.revise_lists(cutoff)
smin = smin[cutoff]
smax = smax[cutoff]
t_int = t_int[cutoff]
print 'Beginning ck calculations'
ck = self.find_ck(amin,amax,smin,smax,Cmin,pexp,Rexp)
# offset to account for zero ck values with nonzero completeness
ck += ck[ck>0.0].min()*1e-2
print 'Finished ck calculations'
print 'Beginning ortools calculations to determine list of observed stars'
sInds = self.select_obs(t_int.to('day').value,maxTime,ck)
print 'Finished ortools calculations'
# include only stars chosen for observation
self.sim.TargetList.revise_lists(sInds)
smin = smin[sInds]
smax = smax[sInds]
t_int = t_int[sInds]
ck = ck[sInds]
# get contrast array for given integration times
sInds2 = np.arange(self.sim.TargetList.nStars)
fZ2 = np.repeat(fZ.value,len(WA))*fZ.unit
fEZ2 = np.repeat(fEZ.value,len(WA))*fEZ.unit
C_inst = np.zeros((len(sInds2),len(WA)))
for i in xrange(len(sInds2)):
t_int2 = np.repeat(t_int[i].value,len(WA))*t_int.unit
sInds2a = np.repeat(sInds2[i],len(WA))
C_inst[i,:] = 10.0**(-0.4*self.sim.OpticalSystem.calc_dMag_per_intTime(t_int2,self.sim.TargetList,sInds2a,fZ2,fEZ2,WA,mode))
# store number of observed stars in result
self.result['NumObs'] = {"all": self.sim.TargetList.nStars}
print 'Number of observed targets: %r' % self.sim.TargetList.nStars
# find bin edges for semi-major axis and planetary radius in AU
aedges = np.logspace(np.log10(amin), np.log10(amax), abins+1)
Redges = np.logspace(np.log10(Rmin*u.earthRad.to('AU')), \
np.log10(Rmax*u.earthRad.to('AU')), Rbins+1)
# store aedges and Redges in result
self.result['aedges'] = aedges
self.result['Redges'] = Redges/u.earthRad.to('AU')
aa, RR = np.meshgrid(aedges,Redges) # in AU
# get depth of search
print 'Beginning depth of search calculations for observed stars'
if self.sim.TargetList.nStars > 0:
DoS = self.DoS_sum(aedges, aa, Redges, RR, pexp, smin, smax, \
self.sim.TargetList.dist.to('pc').value, C_inst, WA.to('arcsecond').value)
else:
DoS = np.zeros((aa.shape[0]-1,aa.shape[1]-1))
print 'Finished depth of search calculations'
# store DoS in result
self.result['DoS'] = {"all": DoS}
# find occurrence rate grid
Redges /= u.earthRad.to('AU')
etas = np.zeros((len(Redges)-1,len(aedges)-1))
# get joint pdf of semi-major axis and radius
if hasattr(self.sim.PlanetPopulation,'dist_sma_radius'):
func = lambda a,R: self.sim.PlanetPopulation.dist_sma_radius(a,R)
else:
func = lambda a,R: self.sim.PlanetPopulation.dist_sma(a)*self.sim.PlanetPopulation.dist_radius(R)
aa, RR = np.meshgrid(aedges,Redges)
r_norm = Redges[1:] - Redges[:-1]
a_norm = aedges[1:] - aedges[:-1]
norma, normR = np.meshgrid(a_norm,r_norm)
tmp = func(aa,RR)
etas = 0.25*(tmp[:-1,:-1]+tmp[1:,:-1]+tmp[:-1,1:]+tmp[1:,1:])*norma*normR
# for i in xrange(len(Redges)-1):
# print('{} out of {}'.format(i+1,len(Redges)-1))
# for j in xrange(len(aedges)-1):
# etas[i,j] = integrate.dblquad(func,Redges[i],Redges[i+1],lambda x: aedges[j],lambda x: aedges[j+1])[0]
etas *= self.sim.PlanetPopulation.eta
self.result['occ_rates'] = {"all": etas}
# Multiply depth of search with occurrence rates
print 'Multiplying depth of search grid with occurrence rate grid'
DoS_occ = DoS*etas*norma*normR
self.result['DoS_occ'] = {"all": DoS_occ}
# store MissionSim output specification dictionary
self.outspec = self.sim.genOutSpec()
print 'Calculations finished'
def one_DoS_grid(self,a,R,p,smin,smax,Cmin):
'''Calculates completeness for one star on constant semi-major axis--
planetary radius grid
Args:
a (ndarray):
2D array of semi-major axis values in AU
R (ndarray):
2D array of planetary radius values in AU
p (float):
average geometric albedo value
smin (float):
minimum separation in AU
smax (float):
maximum separation in AU
Cmin (ndarray):
2D array of minimum contrast
Returns:
f (ndarray):
2D array of depth of search values for one star on 2D grid
'''
a = np.array(a, ndmin=1, copy=False)
R = np.array(R, ndmin=1, copy=False)
Cmin = np.array(Cmin, ndmin=1, copy=False)
f = np.zeros(a.shape)
# work on smax < a first
fg = f[smax<a]
ag = a[smax<a]
Rg = R[smax<a]
Cgmin = Cmin[smax<a]
b1g = np.arcsin(smin/ag)
b2g = np.pi-np.arcsin(smin/ag)
b3g = np.arcsin(smax/ag)
b4g = np.pi-np.arcsin(smax/ag)
C1g = (p*(Rg/ag)**2*np.cos(b1g/2.0)**4)
C2g = (p*(Rg/ag)**2*np.cos(b2g/2.0)**4)
C3g = (p*(Rg/ag)**2*np.cos(b3g/2.0)**4)
C4g = (p*(Rg/ag)**2*np.cos(b4g/2.0)**4)
C2g[C2g<Cgmin] = Cgmin[C2g<Cgmin]
C3g[C3g<Cgmin] = Cgmin[C3g<Cgmin]
vals = C3g > C1g
C3g[vals] = 0.0
C1g[vals] = 0.0
vals = C2g > C4g
C2g[vals] = 0.0
C4g[vals] = 0.0
fg = (ag/np.sqrt(p*Rg**2)*(np.sqrt(C4g)-np.sqrt(C2g)+np.sqrt(C1g)-np.sqrt(C3g)))
fl = f[smax>=a]
al = a[smax>=a]
Rl | |
= []
for item in armor:
if item != {}:
item_details = []
item_name = item_name_formatter(item["name"])
rarity = item["rarity"].capitalize()
enchants = enchant_formatter(item["attributes"]["enchantments"])
item_details.append(item_name)
item_details.append(rarity)
item_details.append(enchants)
items.append(item_details)
item_list = []
for item in items:
item_name = item[0]
rarity = item[1]
enchants = item[2]
item_desc = f"Rarity: {rarity}" \
f"\nEnchants: {enchants}"
item_details = {
"item_name": item_name,
"item_desc": item_desc
}
item_list.append(item_details)
if profile == '':
embed = discord.Embed(
title=f"{playername}'s Equipped Armor",
color=0xf00000,
description=f"Data taken from most recently used profile."
)
else:
embed = discord.Embed(
title=f"{playername}'s Equipped Armor",
color=0xf00000,
description=f"Data taken from profile: {profile}"
)
for item in range(len(item_list)):
item = len(item_list) - item - 1
item_name = item_list[item]["item_name"]
item_desc = item_list[item]["item_desc"]
embed.add_field(name=item_name, value=item_desc, inline=False)
embed.set_thumbnail(url=f"https://crafatar.com/avatars/{playeruuid}?size=40&default=MHF_Steve&overlay.png")
embed.add_field(
name="** **",
value="Also try: `j.skills`, `j.accessories`, `j.inventory`, `j.dungeons`, `j.auctions`",
inline=False
)
await ctx.send(embed=embed)
except Exception as e:
await ctx.send('Error getting player data. If this persists, feel free to dm me: Moonflower#8861')
logger.exception(e)
@commands.command(aliases=['dungeon'])
async def dungeons(self, ctx, *param):
data = await checkdungeonplayer(ctx, param)
if data is None:
return
playerstats = data[1]
profile = data[0]
playername = data[2]
playeruuid = data[3]
# all checks passed
try:
dungeonstats = playerstats["members"][playeruuid]["dungeons"]
levelreq = [50, 125, 235, 395, 625, 955, 1425, 2095, 3045, 4385, 6275, 8940, 12700,
17960, 25340, 35640, 50040, 70040, 97640, 135640, 188140, 259640, 356640,
488640, 668640, 911640, 1239640, 1684640, 2284640, 3084640, 4149640, 5559640,
7459640, 9959640, 13259640, 17559640, 23159640, 30359640, 39559640, 51559640,
66559640, 85559640, 109559640, 139559640, 177559640, 225559640, 285559640,
360559640, 453559640]
# get dungeon type data
type_data = {}
dungeontypes = dungeonstats["dungeon_types"]
for dungeontype in dungeontypes:
totalxp = dungeontypes[dungeontype]["experience"]
levels = 0
xp_needed = 50
progress = 0
for level in range(len(levelreq)):
if totalxp < levelreq[level]:
levels = level
xp_needed = round(levelreq[level] - totalxp)
if level != 0:
level_xp = levelreq[level] - levelreq[level - 1]
else:
level_xp = 50
progress = round((level_xp - xp_needed) / level_xp, 2)
xp_needed = price_formatter(xp_needed)
break
totalxp = price_formatter(round(totalxp))
type_data[dungeontype] = {
"level": levels,
"total": totalxp,
"progress_bar": '',
"progress": progress,
"xp_needed": xp_needed
}
if type_data[dungeontype]["xp_needed"] == '0':
progressbar = "[====maxed====]"
else:
progressbar = '['
equals = round(type_data[dungeontype]["progress"] / (1 / 12))
dashes = 12 - equals
progressbar = progressbar + equals * '=' + '|' + dashes * '-' + ']'
type_data[dungeontype]["progress_bar"] = progressbar
# get class data
class_data = {}
classlevels = dungeonstats["player_classes"]
for dungeon_class in classlevels:
totalxp = classlevels[dungeon_class]["experience"]
levels = 0
xp_needed = 50
progress = 0
for level in range(len(levelreq)):
if totalxp < levelreq[level]:
levels = level
xp_needed = round(levelreq[level] - totalxp)
if level != 0:
level_xp = levelreq[level] - levelreq[level - 1]
else:
level_xp = 50
progress = round((level_xp - xp_needed) / level_xp, 2)
xp_needed = price_formatter(xp_needed)
break
totalxp = price_formatter(round(totalxp))
class_data[dungeon_class] = {
"level": levels,
"total": totalxp,
"progress_bar": '',
"progress": progress,
"xp_needed": xp_needed
}
if class_data[dungeon_class]["xp_needed"] == '0':
progressbar = "[====maxed====]"
else:
progressbar = '['
equals = round(class_data[dungeon_class]["progress"] / (1 / 12))
dashes = 12 - equals
progressbar = progressbar + equals * '=' + '|' + dashes * '-' + ']'
class_data[dungeon_class]["progress_bar"] = progressbar
embed = discord.Embed(
title=f"{playername}'s Dungeon Stats",
color=0xf00000,
description=f"Data taken from profile: {profile}"
)
embed.add_field(
name=str('☠️️ Catacombs Level ' + str(type_data["catacombs"]["level"])),
value=str('`' + type_data["catacombs"]["progress_bar"] + '`' +
str(int(type_data["catacombs"]["progress"] * 100)) + '%' +
"\nTotal XP: " + type_data["catacombs"]["total"] +
'\nNext Level: ' + type_data["catacombs"]["xp_needed"] + 'XP'),
inline=True
)
embed.add_field(
name=str('🚑 Healer Level ' + str(class_data["healer"]["level"])),
value=str('`' + class_data["healer"]["progress_bar"] + '`' +
str(int(class_data["healer"]["progress"] * 100)) + '%' +
"\nTotal XP: " + class_data["healer"]["total"] +
'\nNext Level: ' + class_data["healer"]["xp_needed"] + 'XP'),
inline=True
)
embed.add_field(
name=str('🔮️ Mage Level ' + str(class_data["mage"]["level"])),
value=str('`' + class_data["mage"]["progress_bar"] + '`' +
str(int(class_data["mage"]["progress"] * 100)) + '%' +
"\nTotal XP: " + class_data["mage"]["total"] +
'\nNext Level: ' + class_data["mage"]["xp_needed"] + 'XP'),
inline=True
)
embed.add_field(
name=str('⚔️ Berserk Level ' + str(class_data["berserk"]["level"])),
value=str('`' + class_data["berserk"]["progress_bar"] + '`' +
str(int(class_data["berserk"]["progress"] * 100)) + '%' +
"\nTotal XP: " + class_data["berserk"]["total"] +
'\nNext Level: ' + class_data["berserk"]["xp_needed"] + 'XP'),
inline=True
)
embed.add_field(
name=str('🏹️ Archer Level ' + str(class_data["archer"]["level"])),
value=str('`' + class_data["archer"]["progress_bar"] + '`' +
str(int(class_data["archer"]["progress"] * 100)) + '%' +
"\nTotal XP: " + class_data["archer"]["total"] +
'\nNext Level: ' + class_data["archer"]["xp_needed"] + 'XP'),
inline=True
)
embed.add_field(
name=str('🛡️ Tank Level ' + str(class_data["tank"]["level"])),
value=str('`' + class_data["tank"]["progress_bar"] + '`' +
str(int(class_data["tank"]["progress"] * 100)) + '%' +
"\nTotal XP: " + class_data["tank"]["total"] +
'\nNext Level: ' + class_data["tank"]["xp_needed"] + 'XP'),
inline=True
)
embed.set_thumbnail(url=f"https://crafatar.com/avatars/{playeruuid}?size=40&default=MHF_Steve&overlay.png")
embed.add_field(
name="** **",
value="Also try: `j.skills`, `j.accessories`, `j.armor`, `j.inventory`, `j.auctions`",
inline=False
)
await ctx.send(embed=embed)
except Exception as e:
await ctx.send(f'Error getting player data: Player has no dungeon data on profile {profile}.')
logger.exception(e)
return
    @commands.command(aliases=["ah"])
    async def auctions(self, ctx, username):
        """Send an embed listing a player's ongoing auctions (or a "none found" footer)."""
        mcdata = await checkusername(username)
        if mcdata == -1:
            await ctx.send('Invalid Username!')
            return
        # mcdata carries the canonical name and uuid for the player
        uuid = mcdata[1]
        username = mcdata[0]
        data = getauctiondata()
        if data == {}:
            # empty auction cache: the data refresher has not repopulated it yet
            await ctx.send("Jerry is in the middle of an automatic reboot. Please try again in a few minutes. Thanks!")
            return
        if uuid in data:
            userauctions = data[uuid]
            for item in userauctions:
                # format time til end
                # "end" is a millisecond epoch timestamp; dropping the last three
                # digits converts it to seconds for fromtimestamp
                end = datetime.fromtimestamp(int(str(item["end"])[:-3]))
                endingin = end - datetime.now()
                # str(timedelta) looks like "2 days, 3:04:05" or just "3:04:05";
                # splitting on spaces puts the day count (if any) first and the
                # "H:MM:SS" clock part last
                timetilend = str(endingin).split(' ')
                if len(timetilend) > 1:
                    daystilend = int(timetilend[0])
                else:
                    daystilend = 0
                if daystilend < 0:
                    # negative timedelta renders as "-1 day, ..." -> auction over
                    item["endingin"] = 'Ended!'
                else:
                    hourstilend = int(timetilend[-1].split(':')[0])
                    minstilend = int(timetilend[-1].split(':')[1])
                    hourstilend += daystilend * 24
                    item["endingin"] = str(hourstilend) + 'h ' + str(minstilend) + 'm'
            # make the embed
            embed = discord.Embed(title=username + "'s Auctions", color=0xf00000)
            for item in userauctions:
                itemname = item["item_name"]
                startingbid = price_formatter(item["starting_bid"])
                highestbid = price_formatter(item["highest_bid"])
                tier = item["tier"].capitalize()
                endingin = item["endingin"]
                # BIN auctions have no bidding, so they get a shorter field
                if not item["bin"]:
                    embed.add_field(
                        name=itemname,
                        value=f"Tier: {tier} \nStarting Bid: {startingbid} \nHighest Bid: {highestbid} \nEnds In: {endingin}",
                        inline=False)
                else:
                    embed.add_field(
                        name=itemname,
                        value=f"Tier: {tier} \nBIN: {startingbid} \nEnds In: {endingin}",
                        inline=False)
            embed.set_footer(text="Showing " + str(len(userauctions)) + " ongoing auctions")
            embed.set_thumbnail(url=f"https://crafatar.com/avatars/{uuid}?size=500&default=MHF_Steve&overlay.png")
        else:
            embed = discord.Embed(title=username + "'s Auctions", color=0xf00000)
            embed.set_footer(text="No ongoing auctions found")
        await ctx.send(embed=embed)
@commands.command(aliases=['bin'])
async def lowestbin(self, ctx, *itemname):
itemname = ' '.join(itemname).lower()
logger.info(f"Finding lowest BIN for {itemname}.")
with open('auction/bindata.json', 'r') as b:
bins = json.load(b)
if bins == {}:
await ctx.send("Jerry is in the middle of an automatic reboot. Please try again in a few minutes. Thanks!")
return
lowestbins = {}
for item in bins:
# check item name
if itemname.lower() in item.lower():
for auction in bins[item]:
# check if it's the lowest price for its rarity
rarity = auction["tier"]
if rarity not in lowestbins:
lowestbins[rarity] = {}
lowest = {}
lowest["price"] = auction["starting_bid"]
lowest["auctioneer"] = auction["auctioneer"]
lowestbins[rarity] = lowest
else:
if auction["starting_bid"] < lowestbins[rarity]["price"]:
lowest = {}
lowest["price"] = auction["starting_bid"]
lowest["auctioneer"] = auction["auctioneer"]
lowestbins[rarity] = lowest
if lowestbins != {}:
embed = discord.Embed(title="Lowest BIN Prices", color=0xf00000)
embed.set_footer(text=f"keyword = {itemname}")
for rarity in lowestbins:
rarity2 = rarity[0].upper() + rarity[1:]
name = await checkuuid(lowestbins[rarity]["auctioneer"])
embed.add_field(
name=rarity2,
value=(name + '\n' + price_formatter(lowestbins[rarity]["price"]) + ' coins'),
inline=False
)
await ctx.send(embed=embed)
else:
await ctx.send(f'No BINs found with keyword {itemname}.')
@commands.command(aliases=['bz'])
async def bazaar(self, ctx, *itemname):
try:
with open('bazaar/bazaardata.json') as f:
data = json.load(f)
if data == {}:
await ctx.send("Jerry is in the middle of an automatic reboot. Please try again in a few minutes. Thanks!")
return
itemname = '_'.join(itemname).upper()
if itemname not in data:
found = False
for name in data:
if itemname in name:
itemname = name
found = True
if found is False:
await ctx.send("Item is not in the bazaar data. Try checking the spelling.")
return
buy_info = '[1x] ' + bz_price_formatter(round(data[itemname]["buy_price"], 1)) + ' coins'\
+ '\n' + '[10x] ' + bz_price_formatter(round(data[itemname]["buy_price"] * 10, 1)) + ' coins'\
+ '\n' + '[64x] ' + bz_price_formatter(round(data[itemname]["buy_price"] * 64, 1)) + ' coins'
sell_info = '[1x] ' + bz_price_formatter(round(data[itemname]["sell_price"], 1)) + ' coins'\
+ '\n' + '[10x] ' + bz_price_formatter(round(data[itemname]["sell_price"] * 10, 1)) + ' coins'\
+ '\n' + '[64x] ' + bz_price_formatter(round(data[itemname]["sell_price"] * 64, 1)) + ' coins'
name = bz_name_formatter(itemname)
embed = discord.Embed(title=f"Bazaar Data for {name}", color=0xf00000)
embed.add_field(name=f"Buy Price:", value=buy_info)
embed.add_field(name=f"Sell Price:", value=sell_info)
await ctx.send(embed=embed)
except Exception as e:
await ctx.send('There was an error | |
equipment: a list of dictionaries specifying which equipment objects should be constructed
activities: list of dictionaries specifying which activities should be performed during the simulation
Each of the values the sites and equipment lists, are a dictionary specifying "id", "name",
"type" and "properties". Here "id" can be used to refer to this site / equipment in other parts of the
configuration, "name" is used to initialize the objects name (required by core.Identifiable).
The "type" must be a list of mixin class names which will be used to construct a dynamic class for the
object. For example: ["HasStorage", "HasResource", "Locatable"]. The core.Identifiable and core.Log class will
always be added automatically by the Simulation class.
The "properties" must be a dictionary which is used to construct the arguments for initializing the object.
For example, if "HasContainer" is included in the "type" list, the "properties" dictionary must include a "capacity"
which has the value that will be passed to the constructor of HasContainer. In this case, the "properties"
dictionary can also optionally specify the "level".
Each of the values of the activities list, is a dictionary specifying an "id", "type", and other fields depending
on the type. The supported types are "move", "single_run", "sequential", "conditional", and "delayed".
For a "move" type activity, the dictionary should also contain a "mover", "destination" and can optionally contain
a "moverProperties" dictionary containing an "engineOrder".
For a "single_run" type activity, the dictionary should also contain an "origin", "destination", "loader", "mover",
"unloader" and can optionally contain a "moverProperties" dictionary containing an "engineOrder" and/or "load".
    For a "sequential" type activity, the dictionary should also contain "activities". This is a list of activities
    (dictionaries as before) which will be performed sequentially in the order in which they appear in the list.
For a "conditional" type activity, the dictionary should also contain a "condition" and "activities", where the
"activities" is another list of activities which will be performed until the event corresponding with the condition
occurs.
For a "delayed" type activity, the dictionary should also contain a "condition" and "activities", where the
"activities" is another list of activities which will be performed after the event corresponding with the condition
occurs.
The "condition" of a "conditional" or "delayed" type activity is a dictionary containing an "operator" and one other
field depending on the type. The operator can be "is_full", "is_empty", "is_done", "any_of" and "all_of".
For the "is_full" operator, the dictionary should contain an "operand" which must be the id of the object (site or
equipment) of which the container should be full for the event to occur.
For the "is_empty" operator, the dictionary should contain an "operand" which must be the id of the object (site or
equipment) of which the container should be empty for the event to occur.
    For the "is_done" operator, the dictionary should contain an "operand" which must be the id of an activity which
should be finished for the event to occur. To instantiate such an event, the operand activity must already be
instantiated. The Simulation class takes care of instantiating its activities in an order which ensures this is the
case. However, if there is no such order because activities contain "is_done" conditions which circularly reference
each other, a ValueError will be raised.
For the "any_of" operator, the dictionary should contain "conditions", a list of (sub)conditions of which any must
occur for the event to occur.
For the "all_of" operator, the dictionary should contain "conditions", a list of (sub)conditions which all must
occur for the event to occur.
"""
    def __init__(self, sites, equipment, activities, *args, **kwargs):
        """Construct all configured objects and start the activity processes.

        Order matters: activities look up sites and equipment by id, and
        object construction receives the sites dict for reference resolution.
        """
        super().__init__(*args, **kwargs)
        self.__init_sites(sites)
        self.__init_equipment(equipment)
        self.__init_activities(activities)
def __init_sites(self, sites):
self.sites = {}
for site in sites:
self.sites[site["id"]] = self.__init_object_from_json(site)
def __init_equipment(self, equipment):
self.equipment = {}
for equipment_piece in equipment:
self.equipment[equipment_piece["id"]] = self.__init_object_from_json(
equipment_piece
)
def __init_activities(self, activities):
self.activities = {}
activity_log_class = type("ActivityLog", (core.Log, core.Identifiable), {})
uninstantiated_activities = activities
while len(uninstantiated_activities) > 0:
still_uninstantiated = self.__try_to_init_activities(
activities, activity_log_class
)
if len(still_uninstantiated) == len(uninstantiated_activities):
raise ValueError(
"Unable to instantiate activities {}; their is_done conditions form a circle.".format(
", ".join(
activity["id"] for activity in uninstantiated_activities
)
)
)
uninstantiated_activities = still_uninstantiated
def __try_to_init_activities(self, activities, activity_log_class):
failed_activities = []
for activity in activities:
successful = self.__try_to_init_activity(activity, activity_log_class)
if not successful:
failed_activities.append(activity)
return failed_activities
def __try_to_init_activity(self, activity, activity_log_class):
try:
process_control = self.get_process_control(activity)
except KeyError:
return False
id = activity["id"]
activity_log = activity_log_class(env=self.env, name=id)
process = self.env.process(
process_control(activity_log=activity_log, env=self.env)
)
self.activities[id] = {"activity_log": activity_log, "process": process}
return True
def get_process_control(self, activity, stop_reservation_waiting_event=None):
activity_type = activity["type"]
if activity_type == "move":
mover = self.equipment[activity["mover"]]
mover_properties = self.get_mover_properties_kwargs(activity)
destination = self.sites[activity["destination"]]
kwargs = {"mover": mover, "destination": destination}
if "engine_order" in mover_properties:
kwargs["engine_order"] = mover_properties["engine_order"]
return partial(move_process, **kwargs)
if activity_type == "single_run":
kwargs = self.get_mover_properties_kwargs(activity)
kwargs["mover"] = self.equipment[activity["mover"]]
kwargs["origin"] = self.sites[activity["origin"]]
kwargs["destination"] = self.sites[activity["destination"]]
kwargs["loader"] = self.equipment[activity["loader"]]
kwargs["unloader"] = self.equipment[activity["unloader"]]
if stop_reservation_waiting_event is not None:
kwargs[
"stop_reservation_waiting_event"
] = stop_reservation_waiting_event
return partial(single_run_process, **kwargs)
if activity_type == "conditional":
stop_event = self.get_condition_event(activity["condition"])
sub_processes = [
self.get_process_control(act, stop_reservation_waiting_event=stop_event)
for act in activity["activities"]
]
return partial(
conditional_process, stop_event=stop_event, sub_processes=sub_processes
)
if activity_type == "sequential":
sub_processes = [
self.get_process_control(act) for act in activity["activities"]
]
return partial(sequential_process, sub_processes=sub_processes)
if activity_type == "delayed":
sub_processes = [
self.get_process_control(act) for act in activity["activities"]
]
start_event = self.get_condition_event(activity["condition"])
return partial(
delayed_process, start_event=start_event, sub_processes=sub_processes
)
raise ValueError("Unrecognized activity type: " + activity_type)
@staticmethod
def get_mover_properties_kwargs(activity):
if "moverProperties" not in activity:
return {}
kwargs = {}
mover_options = activity["moverProperties"]
if "engineOrder" in mover_options:
kwargs["engine_order"] = mover_options["engineOrder"]
if "load" in mover_options:
kwargs["filling"] = mover_options["load"]
return kwargs
def get_level_event_operand(self, condition):
operand_key = condition["operand"]
try:
operand = (
self.sites[operand_key]
if operand_key in self.sites
else self.equipment[operand_key]
)
except KeyError:
# rethrow a KeyError as a ValueError to avoid assuming there is a circular dependency
raise ValueError(
'No object with id "{}" present in configuration'.format(operand_key)
)
return operand
def get_sub_condition_events(self, condition):
conditions = condition["conditions"]
events = [self.get_condition_event(condition) for condition in conditions]
return events
def get_condition_event(self, condition):
operator = condition["operator"]
if operator == "is_full":
operand = self.get_level_event_operand(condition)
return operand.container.get_full_event
elif operator == "is_empty":
operand = self.get_level_event_operand(condition)
return operand.container.get_empty_event
elif operator == "is_done":
operand_key = condition["operand"]
return self.activities[operand_key][
"process"
] # potential KeyError is caught in try_to_init_activity
elif operator == "any_of":
sub_events = self.get_sub_condition_events(condition)
return self.env.any_of(events=sub_events)
elif operator == "all_of":
sub_events = self.get_sub_condition_events(condition)
return self.env.all_of(events=sub_events)
else:
raise ValueError("Unrecognized operator type: " + operator)
def __init_object_from_json(self, object_json):
class_name = object_json["id"]
name = object_json["name"]
type = object_json["type"]
properties = object_json["properties"]
klass = get_class_from_type_list(class_name, type)
kwargs = get_kwargs_from_properties(self.env, name, properties, self.sites)
try:
new_object = klass(**kwargs)
except TypeError as type_err:
# create a useful error message
message_template = "Unable to instantiate {} for {}: {} with arguments {}"
message = message_template.format(klass, class_name, type_err, kwargs)
raise ValueError(message)
add_object_properties(new_object, properties)
return new_object
def get_logging(self):
json = {}
sites_logging = []
for key, site in self.sites.items():
sites_logging.append(
self.get_as_feature_collection(key, site.get_log_as_json())
)
json["sites"] = sites_logging
equipment_logging = []
for key, equipment in self.equipment.items():
equipment_logging.append(
self.get_as_feature_collection(key, equipment.get_log_as_json())
)
json["equipment"] = equipment_logging
activity_logging = []
for key, activity in self.activities.items():
activity_logging.append(
self.get_as_feature_collection(
key, activity["activity_log"].get_log_as_json()
)
)
json["activities"] = activity_logging
return json
@staticmethod
def get_as_feature_collection(id, features):
return dict(type="FeatureCollection", id=id, features=features)
def get_class_from_type_list(class_name, type_list):
    """Create a dynamic class named class_name from the requested core mixins.

    core.Identifiable and core.Log are always prepended; core.DebugArgs is
    always appended.
    """
    bases = [core.Identifiable, core.Log]
    bases.extend(string_to_class(text) for text in type_list)
    bases.append(core.DebugArgs)
    return type(class_name, tuple(bases), {})
def string_to_class(text):
    """Look up a mixin class by name on the core module.

    :param text: name of a class defined in core (e.g. "HasContainer")
    :raises ValueError: if core has no attribute with that name
    """
    try:
        return getattr(core, text)
    except AttributeError:
        raise ValueError("Invalid core class name given: " + text)
def get_kwargs_from_properties(environment, name, properties, sites):
kwargs = {"env": environment, "name": name}
# some checks on the configuration could be added here,
# for example, if both level and capacity are given, is level <= capacity, level >= 0, capacity >= 0 etc.
# for compute functions:
# - check if there are enough entries for interp1d / interp2d,
# - check if functions of for example level have a range from 0 to max level (capacity)
# Locatable
if "geometry" in properties:
kwargs["geometry"] = shapely.geometry.asShape(properties["geometry"]).centroid
if | |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class BlockchainPlatformClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.blockchain.BlockchainPlatformClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
    """
    Creates a new BlockchainPlatformClientCompositeOperations object

    :param BlockchainPlatformClient client:
        The service client which will be wrapped by this object
    """
    # Extra keyword arguments are accepted for forward compatibility but
    # are currently unused.
    self.client = client
def change_blockchain_platform_compartment_and_wait_for_state(self, blockchain_platform_id, change_blockchain_platform_compartment_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.change_blockchain_platform_compartment` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
    to enter the given state(s).

    :param str blockchain_platform_id: (required)
        Unique service identifier.

    :param oci.blockchain.models.ChangeBlockchainPlatformCompartmentDetails change_blockchain_platform_compartment_details: (required)
        Input payload to move the resource to a different compartment.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.change_blockchain_platform_compartment`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: normalize to fresh containers per call.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}
    operation_result = self.client.change_blockchain_platform_compartment(blockchain_platform_id, change_blockchain_platform_compartment_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    # The id of the work request to poll is returned in a response header.
    wait_for_resource_id = operation_result.headers['opc-work-request-id']

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_work_request(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the partial results so callers can inspect what completed.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_blockchain_platform_and_wait_for_state(self, create_blockchain_platform_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.create_blockchain_platform` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
    to enter the given state(s).

    :param oci.blockchain.models.CreateBlockchainPlatformDetails create_blockchain_platform_details: (required)
        Details for the new service.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.create_blockchain_platform`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: normalize to fresh containers per call.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}
    operation_result = self.client.create_blockchain_platform(create_blockchain_platform_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    # The id of the work request to poll is returned in a response header.
    wait_for_resource_id = operation_result.headers['opc-work-request-id']

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_work_request(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the partial results so callers can inspect what completed.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_osn_and_wait_for_state(self, blockchain_platform_id, create_osn_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.create_osn` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
    to enter the given state(s).

    :param str blockchain_platform_id: (required)
        Unique service identifier.

    :param oci.blockchain.models.CreateOsnDetails create_osn_details: (required)
        Input payload to create blockchain platform OSN. The payload cannot be empty.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.create_osn`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: normalize to fresh containers per call.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}
    operation_result = self.client.create_osn(blockchain_platform_id, create_osn_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    # The id of the work request to poll is returned in a response header.
    wait_for_resource_id = operation_result.headers['opc-work-request-id']

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_work_request(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the partial results so callers can inspect what completed.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_peer_and_wait_for_state(self, blockchain_platform_id, create_peer_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.create_peer` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
    to enter the given state(s).

    :param str blockchain_platform_id: (required)
        Unique service identifier.

    :param oci.blockchain.models.CreatePeerDetails create_peer_details: (required)
        Input payload to create a blockchain platform peer. The payload cannot be empty.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.create_peer`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: normalize to fresh containers per call.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}
    operation_result = self.client.create_peer(blockchain_platform_id, create_peer_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    # The id of the work request to poll is returned in a response header.
    wait_for_resource_id = operation_result.headers['opc-work-request-id']

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_work_request(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the partial results so callers can inspect what completed.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_blockchain_platform_and_wait_for_state(self, blockchain_platform_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.delete_blockchain_platform` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
    to enter the given state(s).

    :param str blockchain_platform_id: (required)
        Unique service identifier.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.delete_blockchain_platform`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: normalize to fresh containers per call.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}
    operation_result = None
    try:
        operation_result = self.client.delete_blockchain_platform(blockchain_platform_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # The resource is already gone, which for a delete is success.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e

    if not wait_for_states:
        return operation_result
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    # The id of the work request to poll is returned in a response header.
    wait_for_resource_id = operation_result.headers['opc-work-request-id']

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_work_request(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the partial results so callers can inspect what completed.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_osn_and_wait_for_state(self, blockchain_platform_id, osn_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.delete_osn` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
    to enter the given state(s).

    :param str blockchain_platform_id: (required)
        Unique service identifier.

    :param str osn_id: (required)
        OSN identifier.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.delete_osn`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: normalize to fresh containers per call.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}
    operation_result = None
    try:
        operation_result = self.client.delete_osn(blockchain_platform_id, osn_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # The resource is already gone, which for a delete is success.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e

    if not wait_for_states:
        return operation_result
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    # The id of the work request to poll is returned in a response header.
    wait_for_resource_id = operation_result.headers['opc-work-request-id']

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_work_request(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the partial results so callers can inspect what completed.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_peer_and_wait_for_state(self, blockchain_platform_id, peer_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.blockchain.BlockchainPlatformClient.delete_peer` and waits for the :py:class:`~oci.blockchain.models.WorkRequest`
to enter the given state(s).
:param str blockchain_platform_id: (required)
Unique service identifier.
:param str peer_id: (required)
Peer identifier.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.blockchain.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.blockchain.BlockchainPlatformClient.delete_peer`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_peer(blockchain_platform_id, peer_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = | |
a "*" marker, indicating this is a place where the left
# and right pages line up. Get the lowest y coordinate of a change
# above this point and the highest y coordinate of a change after
# this point. If there's no overlap, we can split the PDF here.
try:
y1 = max(b["y"]+b["height"] for j, b in enumerate(changes)
if j < i and b != "*" and b["pdf"]["index"] == pdf and b["page"] == (page, split_index))
y2 = min(b["y"] for j, b in enumerate(changes)
if j > i and b != "*" and b["pdf"]["index"] == pdf and b["page"] == (page, split_index))
except ValueError:
# Nothing either before or after this point, so no need to split.
continue
if y1+1 >= y2:
# This is not a good place to split the page.
continue
# Split the PDF page between the bottom of the previous box and
# the top of the next box.
split_coord = int(round((y1+y2)/2))
# Make a new image for the next split-off part.
im = pages[pdf][(page, split_index)]
pages[pdf][(page, split_index)] = im.crop(
[0, 0, im.size[0], split_coord])
pages[pdf][(page, split_index+1)] = im.crop([0,
split_coord, im.size[0], im.size[1]])
# Re-do all of the coordinates of boxes after the split point:
# map them to the newly split-off part.
for j, b in enumerate(changes):
if j > i and b != "*" and b["pdf"]["index"] == pdf and b["page"] == (page, split_index):
b["page"] = (page, split_index+1)
b["y"] -= split_coord
split_index += 1
# Re-group the pages by where we made a split on both sides.
page_groups = [({}, {})]
for i, box in enumerate(changes):
if box != "*":
page_groups[-1][box["pdf"]["index"]][box["page"]
] = pages[box["pdf"]["index"]][box["page"]]
else:
# Did we split at this location?
pages_before = set((b["pdf"]["index"], b["page"])
for j, b in enumerate(changes) if j < i and b != "*")
pages_after = set((b["pdf"]["index"], b["page"])
for j, b in enumerate(changes) if j > i and b != "*")
if len(pages_before & pages_after) == 0:
# no page is on both sides of this asterisk, so start a new group
page_groups.append(({}, {}))
return page_groups
def draw_red_boxes(changes, pages, styles):
    """Mark each change in red on its page image, per that PDF's style."""
    for change in changes:
        if change == "*":
            # Alignment markers carry no geometry; nothing to draw yet.
            continue
        # Style is 'box', 'strike' or 'underline', chosen per PDF.
        style = styles[change["pdf"]["index"]]
        # The Image of the page this change sits on.
        page_image = pages[change["pdf"]["index"]][change["page"]]
        draw = ImageDraw.Draw(page_image)
        left = change["x"]
        top = change["y"]
        right = change["x"] + change["width"]
        bottom = change["y"] + change["height"]
        if style == "box":
            draw.rectangle((left, top, right, bottom), outline="red")
        elif style == "strike":
            middle = top + change["height"] / 2
            draw.line((left, middle, right, middle), fill="red")
        elif style == "underline":
            draw.line((left, bottom, right, bottom), fill="red")
        del draw
def zealous_crop(page_groups):
    """Crop whitespace from all pages in place.

    Vertical margins are cropped per page, but the horizontal crop uses the
    same extremes across every page of a side so the columns stay aligned.
    """
    for idx in (0, 1):
        # Scan all pages of this side for the horizontal content extremes.
        minx = None
        maxx = None
        width = None
        for grp in page_groups:
            for pdf in grp[idx].values():
                # .invert() requires a grayscale image; getbbox() then gives
                # the bounding box of the non-white content.
                bbox = ImageOps.invert(pdf.convert("L")).getbbox()
                if bbox is None:
                    continue  # empty page
                minx = min(bbox[0], minx) if minx is not None else bbox[0]
                maxx = max(bbox[2], maxx) if maxx is not None else bbox[2]
                width = max(
                    width, pdf.size[0]) if width is not None else pdf.size[0]
        if width is not None:
            minx = max(0, minx-int(.02*width))  # add back some margins
            maxx = min(width, maxx+int(.02*width))
        # Crop each page: vertical per page, horizontal with shared extremes.
        for grp in page_groups:
            for pg in grp[idx]:
                im = grp[idx][pg]
                # .invert() requires a grayscale image
                bbox = ImageOps.invert(im.convert("L")).getbbox()
                if bbox is None:
                    bbox = [0, 0, im.size[0], im.size[1]]  # empty page
                vpad = int(.02*im.size[1])
                im = im.crop(
                    (0, max(0, bbox[1]-vpad), im.size[0], min(im.size[1], bbox[3]+vpad)))
                # Fix: if every page on this side was empty, minx/maxx are
                # still None and crop() would fail — skip the horizontal crop.
                if minx is not None and os.environ.get("HORZCROP", "1") != "0":
                    im = im.crop((minx, 0, maxx, im.size[1]))
                grp[idx][pg] = im
def stack_pages(page_groups):
    """Stack the two PDFs' pages into a single side-by-side image.

    Each entry of *page_groups* holds the pages for the left (index 0) and
    right (index 1) columns; spacers are inserted so aligned groups start
    level on both sides.
    """
    # Compute the dimensions of the final image.
    col_height = [0, 0]
    col_width = 0
    page_group_spacers = []
    for grp in page_groups:
        for idx in (0, 1):
            for im in grp[idx].values():
                col_height[idx] += im.size[1]
                col_width = max(col_width, im.size[0])
        # Pad the shorter column so the next group starts level on both sides.
        dy = col_height[1] - col_height[0]
        if abs(dy) < 10:
            dy = 0  # don't add tiny spacers
        page_group_spacers.append((dy if dy > 0 else 0, -dy if dy < 0 else 0))
        col_height[0] += page_group_spacers[-1][0]
        col_height[1] += page_group_spacers[-1][1]
    height = max(col_height)
    # Draw image with some background lines.
    img = Image.new("RGBA", (col_width*2+1, height), "#F3F3F3")
    draw = ImageDraw.Draw(img)
    for x in range(0, col_width*2+1, 50):
        draw.line((x, 0, x, img.size[1]), fill="#E3E3E3")
    # Paste in the page.
    for idx in (0, 1):
        y = 0
        for i, grp in enumerate(page_groups):
            # Sub-pages sort by (logical page, split index), i.e. top-down.
            for pg in sorted(grp[idx]):
                pgimg = grp[idx][pg]
                img.paste(pgimg, (0 if idx == 0 else (col_width+1), y))
                if pg[0] > 1 and pg[1] == 0:
                    # Draw lines between physical pages. Since we split
                    # pages into sub-pages, check that the sub-page index
                    # pg[1] is the start of a logical page. Draw lines
                    # above pages, but not on the first page pg[0] == 1.
                    draw.line((0 if idx == 0 else col_width, y,
                               col_width*(idx+1), y), fill="black")
                y += pgimg.size[1]
            y += page_group_spacers[i][idx]
    # Draw a vertical line between the two sides.
    draw.line((col_width, 0, col_width, height), fill="black")
    del draw
    return img
def merge_boxes_if_possible(a, b):
    """Combine b into a if a and b appear to be sequential words and
    return True.

    Mutates *a* in place when merging: expands its extent, appends b's
    text, and advances its index so the next box can chain onto it.
    """
    # Need same PDF
    if a['pdf'] != b['pdf']:
        return False
    # Need same page
    if a['page'] != b['page']:
        return False
    # Need sequential boxes (since we do this after diffing)
    if a['index'] + 1 != b['index']:
        return False
    # Guard against zero/negative-height boxes, which would otherwise
    # divide by zero when computing the overlap ratio below.
    if b['height'] <= 0:
        return False
    a_min_y = a['y']
    a_max_y = a['y'] + a['height']
    b_min_y = b['y']
    b_max_y = b['y'] + b['height']
    overlap_min_y = max(a_min_y, b_min_y)
    overlap_max_y = min(a_max_y, b_max_y)
    # If the new box lies vertically mostly within the old box, combine them
    overlap_ratio = (overlap_max_y - overlap_min_y) / b['height']
    if overlap_ratio > 0.7:
        # expand width
        a['width'] = b['x'] + b['width'] - a['x']
        # expand y and height
        a['y'] = min(a_min_y, b_min_y)
        a['height'] = max(a_max_y, b_max_y) - a['y']
        # combine text
        a["text"] += b["text"]
        # so that in the next iteration we can expand it again
        a["index"] += 1
        return True
    return False
def simplify_changes(boxes):
    """Merge word-level change boxes that were sequential in the input.

    Bounding boxes may be word-by-word, so neighboring boxes belonging to
    one semantic change would otherwise render as discontiguous rectangles.
    """
    merged = []
    for box in boxes:
        can_try_merge = box != "*" and merged and merged[-1] != "*"
        if can_try_merge and merge_boxes_if_possible(merged[-1], box):
            continue
        merged.append(box)
    return merged
# Rasterizes a page of a PDF.
def pdftopng(pdffile, pagenumber, width):
    """Rasterize one page of a PDF to an RGBA Image using pdftoppm."""
    command = [
        "pdftoppm",
        "-f", str(pagenumber),
        "-l", str(pagenumber),
        "-scale-to", str(width),
        "-png",
        pdffile,
    ]
    raw_png = subprocess.check_output(command)
    return Image.open(io.BytesIO(raw_png)).convert("RGBA")
def main():
import argparse
description = ('Calculates the differences between two specified files in PDF format '
'(or changes specified on standard input) and outputs to standard output '
'side-by-side images with the differences marked (in PNG format).')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('files', nargs='*', # Use '*' to allow --changes with zero files
help='calculate differences between the two named files')
parser.add_argument('-c', '--changes', action='store_true', default=False,
help='read change description from standard input, ignoring files')
parser.add_argument('-s', '--style', metavar='box|strike|underline,box|stroke|underline',
default='strike,underline',
help='how to mark the differences in the two files (default: strike, underline)')
parser.add_argument('-f', '--format', choices=['png', 'gif', 'jpeg', 'ppm', 'tiff'], default='png',
help='output format in which to render (default: png)')
parser.add_argument('-t', '--top-margin', metavar='margin', default=0., type=float,
help='top margin (ignored area) end in percent of page height (default 0.0)')
parser.add_argument('-b', '--bottom-margin', metavar='margin', default=100., type=float,
help='bottom margin (ignored area) begin in percent of page height (default 100.0)')
parser.add_argument('-r', '--result-width', default=900, type=int,
help='width of the result image (width of image in px)')
args = parser.parse_args()
def invalid_usage(msg):
sys.stderr.write('ERROR: %s%s' % (msg, os.linesep))
parser.print_usage(sys.stderr)
sys.exit(1)
# Validate style
style = args.style.split(',')
if len(style) != 2:
invalid_usage(
'Exactly two style values must be specified, if --style is used.')
for i in [0, 1]:
if style[i] != 'box' and style[i] != 'strike' and style[i] != 'underline':
invalid_usage(
'--style values must be box, strike or underline, not "%s".' % (style[i]))
# Ensure one of | |
-> Tuple[Point, ...]:
"""Return a tuple of labelled points, in the order they were labelled."""
self._fix_array()
return tuple(point for point in self._points if not point.isnan())
def _fix_array(self):
    """Fix PointArray after nodes have been added or removed.

    This updates the PointArray as required by comparing the cached
    list of nodes to the nodes in the `Skeleton` object (which may
    have changed).
    """
    # Check if cached skeleton nodes are different than current nodes
    if self._nodes != self.skeleton.nodes:
        # Create new PointArray (or PredictedPointArray)
        cls = type(self._points)
        new_array = cls.make_default(len(self.skeleton.nodes))
        # Add points into new array
        for i, node in enumerate(self._nodes):
            # Only carry over points whose node still exists, re-indexed
            # to the node's (possibly new) position in the skeleton.
            if node in self.skeleton.nodes:
                new_array[self.skeleton.nodes.index(node)] = self._points[i]
        # Update points and nodes for this instance
        self._points = new_array
        self._nodes = self.skeleton.nodes
def get_points_array(
    self, copy: bool = True, invisible_as_nan: bool = False, full: bool = False
) -> Union[np.ndarray, np.recarray]:
    """Return the instance's points in array form.

    Args:
        copy: If True, the return a copy of the points array as an ndarray.
            If False, return a view of the underlying recarray.
        invisible_as_nan: Should invisible points be marked as NaN.
            If copy is False, then invisible_as_nan is ignored since we
            don't want to set invisible points to NaNs in original data.
        full: If True, return all data for points. Otherwise, return just
            the x and y coordinates.

    Returns:
        Either a recarray (if copy is False) or an ndarray (if copy True).

        The order of the rows corresponds to the ordering of the skeleton
        nodes. Any skeleton node not defined will have NaNs present.

        Columns in recarray are accessed by name, e.g., ["x"], ["y"].
        Columns in ndarray are accessed by number. The order matches
        the order in `Point.dtype` or `PredictedPoint.dtype`.
    """
    # Make sure the cached array agrees with the current skeleton first.
    self._fix_array()
    if not copy:
        # These are views into the underlying recarray; mutating the
        # result mutates the instance's point data.
        if full:
            return self._points
        else:
            return self._points[["x", "y"]]
    else:
        if full:
            parray = structured_to_unstructured(self._points)
        else:
            parray = structured_to_unstructured(self._points[["x", "y"]])

        # Note that invisible_as_nan assumes copy is True.
        if invisible_as_nan:
            parray[~self._points.visible] = math.nan

        return parray
def fill_missing(
    self, max_x: Optional[float] = None, max_y: Optional[float] = None
):
    """Add points for skeleton nodes that are missing in the instance.

    This is useful when modifying the skeleton so the nodes appears in the GUI.

    Args:
        max_x: If specified, make sure points are not added outside of valid range.
        max_y: If specified, make sure points are not added outside of valid range.
    """
    self._fix_array()
    # Clamp the bounding box to the valid coordinate range.
    y1, x1, y2, x2 = self.bounding_box
    y1, x1 = max(y1, 0), max(x1, 0)
    if max_x is not None:
        x2 = min(x2, max_x)
    if max_y is not None:
        y2 = min(y2, max_y)
    # NOTE(review): `w` is the vertical extent (y2 - y1) and `h` the
    # horizontal extent (x2 - x1), yet `w` scales the x offset below —
    # the axes look swapped; confirm the intended behavior before relying
    # on points landing inside the bounding box.
    w, h = y2 - y1, x2 - x1
    for node in self.skeleton.nodes:
        if node not in self.nodes or self[node].isnan():
            # Place the new (invisible) point at a random spot, then clamp
            # it back into the valid range.
            off = np.array([w, h]) * np.random.rand(2)
            x, y = off + np.array([x1, y1])
            y, x = max(y, 0), max(x, 0)
            if max_x is not None:
                x = min(x, max_x)
            if max_y is not None:
                y = min(y, max_y)
            self[node] = Point(x=x, y=y, visible=False)
@property
def points_array(self) -> np.ndarray:
    """Return array of x and y coordinates for visible points.

    Row in array corresponds to order of points in skeleton. Invisible points will
    be denoted by NaNs.

    Returns:
        A numpy array of shape `(n_nodes, 2)` point coordinates.
    """
    # get_points_array defaults to copy=True, so this is a fresh ndarray,
    # not a view of the underlying point data.
    return self.get_points_array(invisible_as_nan=True)
def numpy(self) -> np.ndarray:
    """Return the instance node coordinates as a numpy array.

    Alias for `points_array`.

    Returns:
        Array of shape `(n_nodes, 2)` containing the coordinates
        of the instance's nodes. Missing/not visible nodes will be replaced with
        `NaN`.
    """
    # Thin alias; all behavior lives in the points_array property.
    return self.points_array
def transform_points(self, transformation_matrix):
    """Apply affine transformation matrix to points in the instance.

    Args:
        transformation_matrix: Affine transformation matrix as a numpy array of
            shape `(3, 3)`.
    """
    pts = self.get_points_array(copy=True, full=False, invisible_as_nan=False)
    has_translation_column = transformation_matrix.shape[1] == 3
    if has_translation_column:
        # Split the matrix into its linear part and translation column.
        linear_part = transformation_matrix[:, :2]
        offset = transformation_matrix[:, 2]
        transformed_pts = pts @ linear_part.T + offset
    else:
        transformed_pts = pts @ transformation_matrix.T
    self._points["x"] = transformed_pts[:, 0]
    self._points["y"] = transformed_pts[:, 1]
@property
def centroid(self) -> np.ndarray:
    """Return instance centroid as an array of `(x, y)` coordinates

    Notes:
        This computes the centroid as the median of the visible points.
    """
    return np.nanmedian(self.points_array, axis=0)
@property
def bounding_box(self) -> np.ndarray:
    """Return bounding box containing all points in `[y1, x1, y2, x2]` format."""
    pts = self.points_array
    if np.isnan(pts).all():
        # No visible points at all: the box is undefined.
        return np.array([np.nan, np.nan, np.nan, np.nan])
    # points_array rows are (x, y); reverse to (y, x) ordering.
    top_left = np.nanmin(pts, axis=0)[::-1]
    bottom_right = np.nanmax(pts, axis=0)[::-1]
    return np.concatenate([top_left, bottom_right])
@property
def midpoint(self) -> np.ndarray:
    """Return the center of the bounding box of the instance points."""
    # NOTE(review): this returns half the bounding-box extents
    # ((x2-x1)/2, (y2-y1)/2), not the center point ((x1+x2)/2, (y1+y2)/2)
    # that the docstring describes — confirm which behavior callers expect
    # before changing either the code or the docstring.
    y1, x1, y2, x2 = self.bounding_box
    return np.array([(x2 - x1) / 2, (y2 - y1) / 2])
@property
def n_visible_points(self) -> int:
    """Return the number of visible points in this instance."""
    return sum(1 for pt in self.points if pt.visible)
def __len__(self) -> int:
    """Return the number of visible points in this instance."""
    # Alias for the n_visible_points property.
    return self.n_visible_points
@property
def video(self) -> Optional[Video]:
    """Return the video of the labeled frame this instance is associated with."""
    # An instance not attached to a labeled frame has no video.
    return None if self.frame is None else self.frame.video
@property
def frame_idx(self) -> Optional[int]:
    """Return the index of the labeled frame this instance is associated with."""
    # An instance not attached to a labeled frame has no frame index.
    return None if self.frame is None else self.frame.frame_idx
@classmethod
def from_pointsarray(
    cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None
) -> "Instance":
    """Create an instance from an array of points.

    Args:
        points: A numpy array of shape `(n_nodes, 2)` and dtype `float32` that
            contains the points in (x, y) coordinates of each node. Missing nodes
            should be represented as `NaN`.
        skeleton: A `sleap.Skeleton` instance with `n_nodes` nodes to associate with
            the instance.
        track: Optional `sleap.Track` object to associate with the instance.

    Returns:
        A new `Instance` object.
    """
    # Rows containing any NaN are treated as missing nodes and skipped.
    labeled_points = {
        node_name: Point(x=xy[0], y=xy[1])
        for xy, node_name in zip(points, skeleton.node_names)
        if not np.isnan(xy).any()
    }
    return cls(points=labeled_points, skeleton=skeleton, track=track)
@classmethod
def from_numpy(
    cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None
) -> "Instance":
    """Create an instance from a numpy array.

    Args:
        points: A numpy array of shape `(n_nodes, 2)` and dtype `float32` that
            contains the points in (x, y) coordinates of each node. Missing nodes
            should be represented as `NaN`.
        skeleton: A `sleap.Skeleton` instance with `n_nodes` nodes to associate with
            the instance.
        track: Optional `sleap.Track` object to associate with the instance.

    Returns:
        A new `Instance` object.

    Notes:
        This is an alias for `Instance.from_pointsarray()`.
    """
    # Thin wrapper; all logic lives in from_pointsarray.
    return cls.from_pointsarray(points, skeleton, track=track)
def _merge_nodes_data(self, base_node: str, merge_node: str):
    """Copy point data from one node to another.

    Args:
        base_node: Name of node that will be merged into.
        merge_node: Name of node that will be removed after merge.

    Notes:
        This is used when merging skeleton nodes and should not be called directly.
    """
    base_pt = self[base_node]
    merge_pt = self[merge_node]
    # Nothing to copy from an unset point.
    if merge_pt.isnan():
        return
    # Keep the base point if it already carries usable (visible) data.
    if not (base_pt.isnan() or not base_pt.visible):
        return
    base_pt.x = merge_pt.x
    base_pt.y = merge_pt.y
    base_pt.visible = merge_pt.visible
    base_pt.complete = merge_pt.complete
    # Prediction scores only exist on predicted point types.
    if hasattr(base_pt, "score"):
        base_pt.score = merge_pt.score
@attr.s(eq=False, order=False, slots=True, repr=False, str=False)
class PredictedInstance(Instance):
"""
A predicted instance is an output of the inference procedure.
Args:
score: The instance-level grouping prediction score.
tracking_score: The instance-level track matching score.
"""
score: float = attr.ib(default=0.0, converter=float)
tracking_score: float = attr.ib(default=0.0, converter=float)
# The underlying Point array type that this instances point array should be.
_point_array_type = PredictedPointArray
def __attrs_post_init__(self):
    # Run the base Instance post-init (point array setup, etc.).
    super(PredictedInstance, self).__attrs_post_init__()

    # A prediction cannot itself be derived from another prediction.
    if self.from_predicted is not None:
        raise ValueError("PredictedInstance should not have from_predicted.")
def __repr__(self) -> str:
    """Return a readable summary of this predicted instance and its points."""
    formatted_points = ", ".join(
        f"{node.name}: ({pt.x:.1f}, {pt.y:.1f}, {pt.score:.2f})"
        for node, pt in self.nodes_points
    )
    return (
        "PredictedInstance("
        f"video={self.video}, "
        f"frame_idx={self.frame_idx}, "
        f"points=[{formatted_points}], "
        f"score={self.score:.2f}, "
        f"track={self.track}, "
        f"tracking_score={self.tracking_score:.2f}"
        ")"
    )
@property
def points_and_scores_array(self) -> np.ndarray:
    """Return the instance points and scores as an array.

    This will be a `(n_nodes, 3)` array of `(x, y, score)` for each predicted
    point.

    Rows in the array correspond to the order of points in skeleton. Invisible
    points will be represented as NaNs.
    """
    pts = self.get_points_array(full=True, copy=True, invisible_as_nan=True)
    # Columns 0, 1 and 4 of the full point array hold (x, y, score); the
    # middle columns are presumably visibility/completeness flags -- confirm
    # against get_points_array.
    return pts[:, (0, 1, 4)]  # (x, y, score)
@property
def | |
<gh_stars>10-100
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of a
object detection dataset, and computes frequently used detection metrics such as
Precision, Recall, CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
import logging
import numpy as np
import os
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation
from object_detection.utils import np_box_list
class ObjectDetectionEvaluation(object):
    """Evaluate Object Detection Result.

    Accumulates ground truth boxes/labels and detection results image by
    image, then computes per-class average precision, precision/recall curves
    and CorLoc. Ground truth boxes may belong to one or more named subsets;
    AP, precision and recall are computed independently per subset, while
    CorLoc is computed over all ground truth regardless of subset.
    """

    def __init__(self,
                 num_groundtruth_classes,
                 matching_iou_threshold=0.5,
                 nms_type='standard',
                 nms_iou_threshold=1.0,
                 nms_max_output_boxes=10000,
                 soft_nms_sigma=0.5,
                 subset_names=('default',)):
        """Creates an evaluator.

        Args:
          num_groundtruth_classes: number of object classes.
          matching_iou_threshold: IOU at or above which a detection matches a
            ground truth box.
          nms_type: non-maximum suppression variant forwarded to
            PerImageEvaluation.
          nms_iou_threshold: IOU threshold used by NMS.
          nms_max_output_boxes: maximum number of boxes kept by NMS.
          soft_nms_sigma: sigma parameter for soft-NMS.
          subset_names: names of ground truth subsets evaluated separately.
        """
        self.per_image_eval = per_image_evaluation.PerImageEvaluation(
            num_groundtruth_classes,
            matching_iou_threshold,
            nms_type,
            nms_iou_threshold,
            nms_max_output_boxes,
            soft_nms_sigma)
        self.num_class = num_groundtruth_classes
        self.subset_names = subset_names
        # All accumulator state is initialized by the clear_* methods so that
        # construction and resetting cannot drift apart.
        self.clear_groundtruths()
        self.clear_detections()

    def clear_groundtruths(self):
        """Reset all accumulated ground truth state."""
        self.groundtruth_boxes = {}
        self.groundtruth_class_labels = {}
        self.groundtruth_subset = {s: {} for s in self.subset_names}
        self.num_gt_instances_per_class = {
            s: np.zeros(self.num_class, dtype=int) for s in self.subset_names}
        self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)

    def clear_detections(self):
        """Reset all accumulated detection state and computed metrics."""
        self.detection_keys = set()
        self.scores_per_class = {
            s: [[] for _ in range(self.num_class)] for s in self.subset_names}
        self.tp_fp_labels_per_class = {
            s: [[] for _ in range(self.num_class)] for s in self.subset_names}
        self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
        # AP stays NaN until evaluate() fills it for classes with ground truth.
        self.average_precision_per_class = {
            s: np.full(self.num_class, np.nan) for s in self.subset_names}
        self.precisions_per_class = {s: [] for s in self.subset_names}
        self.recalls_per_class = {s: [] for s in self.subset_names}
        self.corloc_per_class = np.ones(self.num_class, dtype=float)

    def add_single_ground_truth_image_info(self,
                                           image_key,
                                           groundtruth_boxes,
                                           groundtruth_class_labels,
                                           groundtruth_subset=None):
        """Add ground truth info of a single image into the evaluation database.

        Args:
          image_key: sha256 key of image content
          groundtruth_boxes: A numpy array of shape [M, 4] representing object
            box coordinates [y_min, x_min, y_max, x_max]
          groundtruth_class_labels: A 1-d numpy array of length M representing
            class labels
          groundtruth_subset: A list of M subset strings, each of which is
            subset names joined with '|'. An object box may belong to multiple
            subsets. If None, every box is assigned to the 'default' subset.

        Raises:
          ValueError: if a subset string names a subset not in subset_names.
        """
        if image_key in self.groundtruth_boxes:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning(
                'image %s has already been added to the ground truth database.',
                image_key)
            return
        self.groundtruth_boxes[image_key] = groundtruth_boxes
        self.groundtruth_class_labels[image_key] = groundtruth_class_labels
        num_boxes = groundtruth_boxes.shape[0]
        # Determine subset membership for each box.
        if groundtruth_subset is None:
            groundtruth_subset = ['default'] * num_boxes
        # Start with every box excluded from every subset, then mark members.
        # (np.bool was removed from NumPy; the builtin bool is equivalent.)
        for subset in self.subset_names:
            self.groundtruth_subset[subset][image_key] = np.zeros(
                (num_boxes,), dtype=bool)
        for box_idx, subsets in enumerate(groundtruth_subset):
            for subset in subsets.split('|'):
                if subset == '':
                    continue
                if subset not in self.subset_names:
                    # BUG FIX: the original message was never interpolated.
                    raise ValueError('%s is not found in subset_names' % subset)
                self.groundtruth_subset[subset][image_key][box_idx] = True
        subset_of_current_img = {s: self.groundtruth_subset[s][image_key]
                                 for s in self.subset_names}
        self._update_ground_truth_statistics(groundtruth_class_labels,
                                             subset_of_current_img)

    def add_single_detected_image_info(self, image_key, detected_boxes,
                                       detected_scores, detected_class_labels):
        """Add detected result of a single image into the evaluation database.

        Args:
          image_key: sha256 key of image content
          detected_boxes: A numpy array of shape [N, 4] representing detected
            box coordinates [y_min, x_min, y_max, x_max]
          detected_scores: A 1-d numpy array of length N representing
            classification score
          detected_class_labels: A 1-d numpy array of length N representing
            class labels

        Raises:
          ValueError: if detected_boxes, detected_scores and
            detected_class_labels do not have the same length.
        """
        if (len(detected_boxes) != len(detected_scores) or
                len(detected_boxes) != len(detected_class_labels)):
            raise ValueError('detected_boxes, detected_scores and '
                             'detected_class_labels should all have same lengths. Got'
                             '[%d, %d, %d]' % (len(detected_boxes),
                                               len(detected_scores),
                                               len(detected_class_labels)))
        if image_key in self.detection_keys:
            logging.warning(
                'image %s has already been added to the detection result database',
                image_key)
            return
        self.detection_keys.add(image_key)
        if image_key in self.groundtruth_boxes:
            groundtruth_boxes = self.groundtruth_boxes[image_key]
            groundtruth_class_labels = self.groundtruth_class_labels[image_key]
        else:
            # Detections without ground truth: score against an empty image.
            groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
            groundtruth_class_labels = np.array([], dtype=int)
        is_class_correctly_detected_in_image = 0
        for subset in self.groundtruth_subset:
            if image_key in self.groundtruth_boxes:
                groundtruth_subset = self.groundtruth_subset[subset][image_key]
            else:
                groundtruth_subset = np.array([], dtype=bool)
            # Boxes outside the subset (~groundtruth_subset) are treated as
            # "difficult" and ignored by the per-image evaluation.
            scores, tp_fp_labels, is_class_correctly_detected_in_image = (
                self.per_image_eval.compute_object_detection_metrics(
                    detected_boxes, detected_scores, detected_class_labels,
                    groundtruth_boxes, groundtruth_class_labels,
                    ~groundtruth_subset))
            for i in range(self.num_class):
                self.scores_per_class[subset][i].append(scores[i])
                self.tp_fp_labels_per_class[subset][i].append(tp_fp_labels[i])
            # NOTE(review): accumulating inside the subset loop inflates CorLoc
            # counts when more than one subset is configured -- confirm intended
            # semantics (with the single 'default' subset it is equivalent).
            self.num_images_correctly_detected_per_class \
                += is_class_correctly_detected_in_image

    def _update_ground_truth_statistics(self, groundtruth_class_labels,
                                        is_subset):
        """Update ground truth statistics.

        Boxes outside a subset play the role of Pascal VOC "difficult" boxes:
        they are excluded from the per-subset instance counts but still count
        toward the per-image (CorLoc) statistics.

        Args:
          groundtruth_class_labels: An integer numpy array of length M,
            representing M class labels of object instances in ground truth
          is_subset: A dict of boolean numpy arrays, each denoting whether a
            ground truth box belongs to the corresponding subset. Its inverse
            is considered as the difficult set.
        """
        for class_index in range(self.num_class):
            for subset in self.subset_names:
                num_gt_instances = np.sum(
                    groundtruth_class_labels[is_subset[subset]] == class_index)
                self.num_gt_instances_per_class[subset][class_index] += (
                    num_gt_instances)
            # Image counts (for CorLoc) ignore subset membership.
            if np.any(groundtruth_class_labels == class_index):
                self.num_gt_imgs_per_class[class_index] += 1

    def evaluate(self):
        """Compute evaluation result.

        Returns:
          average_precision_per_class: dict mapping subset name to a float
            numpy array of per-class average precision.
          mean_ap: dict mapping subset name to mean AP over classes.
          precisions_per_class: dict of lists of per-class precision arrays.
          recalls_per_class: dict of lists of per-class recall arrays.
          corloc_per_class: numpy float array of per-class CorLoc.
          mean_corloc: mean CorLoc over classes, float scalar.
        """
        mean_ap = {}
        for subset in self.subset_names:
            if (self.num_gt_instances_per_class[subset] == 0).any():
                # BUG FIX: the original passed the dict itself (not the
                # per-subset array) to argwhere, so the class list was
                # always empty in this warning.
                logging.warning(
                    'The following classes in subset %s have no ground truth '
                    'examples: %s', subset,
                    np.squeeze(np.argwhere(
                        self.num_gt_instances_per_class[subset] == 0)))
            for class_index in range(self.num_class):
                # Classes without ground truth keep a NaN average precision.
                if self.num_gt_instances_per_class[subset][class_index] == 0:
                    continue
                scores = np.concatenate(
                    self.scores_per_class[subset][class_index])
                tp_fp_labels = np.concatenate(
                    self.tp_fp_labels_per_class[subset][class_index])
                precision, recall = metrics.compute_precision_recall(
                    scores, tp_fp_labels,
                    self.num_gt_instances_per_class[subset][class_index])
                self.precisions_per_class[subset].append(precision)
                self.recalls_per_class[subset].append(recall)
                average_precision = metrics.compute_average_precision(
                    precision, recall)
                self.average_precision_per_class[subset][class_index] = (
                    average_precision)
            mean_ap[subset] = np.nanmean(
                self.average_precision_per_class[subset])
        # CorLoc is based on whole-image detection counts, not subsets.
        self.corloc_per_class = metrics.compute_cor_loc(
            self.num_gt_imgs_per_class,
            self.num_images_correctly_detected_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)
        return (self.average_precision_per_class, mean_ap,
                self.precisions_per_class, self.recalls_per_class,
                self.corloc_per_class, mean_corloc)

    def get_eval_result(self):
        """Package the current metrics into an EvalResult.

        EvalResult is defined elsewhere in this module.
        """
        return EvalResult(self.average_precision_per_class,
                          self.precisions_per_class, self.recalls_per_class,
                          self.corloc_per_class)
class CocoEvaluation(ObjectDetectionEvaluation):
def __init__(self,
             num_groundtruth_classes,
             matching_iou_threshold=0.5,
             nms_type='standard',
             nms_iou_threshold=1.0,
             nms_max_output_boxes=256,
             soft_nms_sigma=0.5):
    """Creates a COCO-protocol evaluator on top of ObjectDetectionEvaluation.

    Arguments mirror ObjectDetectionEvaluation; note the smaller default
    NMS output cap (256 vs 10000).
    """
    super(CocoEvaluation, self).__init__(
        num_groundtruth_classes,
        matching_iou_threshold,
        nms_type,
        nms_iou_threshold,
        nms_max_output_boxes,
        soft_nms_sigma)
    # Accumulated detections, one row per detection with 7 columns --
    # presumably (image_id, x, y, w, h, score, category) as consumed by
    # pycocotools COCO.loadRes; confirm against downstream usage.
    self.detection_result = np.zeros((0), dtype=np.float64).reshape((0,7))
    self.gt_result = []
    # COCO's standard protocol scores at most 100 detections per image.
    self.max_detections_per_image = 100
def add_single_detected_image_info(self, image_key, detected_boxes,
detected_scores, detected_class_labels):
"""Add detected result of a single image into the evaluation database.
Args:
image_key: sha256 key of image content
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates[y_min, x_min, y_max, x_max]
detected_scores: A 1-d numpy array of length N representing classification
score
detected_class_labels: A 1-d numpy array of length N representing class
labels
Raises:
ValueError: if detected_boxes, detected_scores and detected_class_labels
do not have the same length.
"""
if (len(detected_boxes) != len(detected_scores) or
len(detected_boxes) != len(detected_class_labels)):
raise ValueError('detected_boxes, detected_scores and '
'detected_class_labels should all have same lengths. Got'
'[%d, %d, %d]' % (len(detected_boxes),
len(detected_scores),
len(detected_class_labels)))
detected_boxes, detected_scores, detected_class_labels = (
self.per_image_eval._remove_invalid_boxes(detected_boxes, detected_scores, detected_class_labels))
category_id_list = np.zeros(0)
bbox_list = np.zeros((0,0)).reshape(0, 4)
score_list = np.zeros(0)
for i in range(self.num_class):
detected_boxes_at_ith_class = detected_boxes[(detected_class_labels == i), :]
detected_scores_at_ith_class = detected_scores[detected_class_labels == i]
if len(detected_scores_at_ith_class) == 0:
continue
detected_boxlist = np_box_list.BoxList(detected_boxes_at_ith_class)
detected_boxlist.add_field('scores', detected_scores_at_ith_class)
kwargs = {}
detected_boxlist = self.per_image_eval.nms_fn(boxlist=detected_boxlist, **kwargs)
boxes = detected_boxlist.get_field('boxes')
scores = detected_boxlist.get_field('scores')
categories = np.array([i+1] * len(boxes), dtype=np.int32)
# [x,y,width,height]
for idx, box in enumerate(boxes):
boxes[idx] = [box[1], box[0], box[3] - box[1], box[2] - box[0]]
category_id_list = np.hstack([category_id_list, categories])
bbox_list = np.vstack([bbox_list, boxes])
score_list = np.hstack([score_list, scores])
dec_index = list(reversed(sorted(range(len(score_list)), key=lambda k: score_list[k])))
category_id_list = [category_id_list[idx] for idx in dec_index]
bbox_list = [bbox_list[idx] for idx in dec_index]
score_list = [score_list[idx] for idx in dec_index]
if len(score_list) > self.max_detections_per_image:
category_id_list = category_id_list[0:100]
bbox_list = bbox_list[0:100]
| |
import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('rumex with leaves', 'rumex with leaves IS', 'rumex stalks only', 'cluster of rumex', 'ignore', 'rumex_generated_med_conf', 'rumex_generated_2_med_conf', 'rumex_generated_high_conf', 'rumex_generated_2_high_conf')
def load_annotations(self, ann_file):
    """Load annotation from COCO style annotation file.

    Args:
        ann_file (str): Path of annotation file.

    Returns:
        list[dict]: Annotation info from COCO api.
    """
    # NOTE(review): lexicographic string comparison against '12.0.2' is how
    # mmdet distinguishes the mmpycocotools fork, but it is fragile for
    # ordinary pycocotools versions such as '2.0.x' -- confirm which fork is
    # installed.
    if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
        raise AssertionError(
            'Incompatible version of pycocotools is installed. '
            'Run pip uninstall pycocotools first. Then run pip '
            'install mmpycocotools to install open-mmlab forked '
            'pycocotools.')

    self.coco = COCO(ann_file)
    # Map dataset category ids to contiguous 0-based training labels.
    self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
    self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
    self.img_ids = self.coco.get_img_ids()
    data_infos = []
    total_ann_ids = []
    for i in self.img_ids:
        info = self.coco.load_imgs([i])[0]
        info['filename'] = info['file_name']
        data_infos.append(info)
        ann_ids = self.coco.get_ann_ids(img_ids=[i])
        total_ann_ids.extend(ann_ids)
    # Duplicate annotation ids indicate a corrupt or merged annotation file.
    assert len(set(total_ann_ids)) == len(
        total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
    return data_infos
def get_ann_info(self, idx):
    """Get COCO annotation by index.

    Args:
        idx (int): Index of data.

    Returns:
        dict: Annotation info of specified index.
    """
    img_info = self.data_infos[idx]
    ann_ids = self.coco.get_ann_ids(img_ids=[img_info['id']])
    return self._parse_ann_info(img_info, self.coco.load_anns(ann_ids))
def get_cat_ids(self, idx):
    """Get COCO category ids by index.

    Args:
        idx (int): Index of data.

    Returns:
        list[int]: All categories in the image of specified index.
    """
    img_id = self.data_infos[idx]['id']
    anns = self.coco.load_anns(self.coco.get_ann_ids(img_ids=[img_id]))
    categories = []
    for ann in anns:
        categories.append(ann['category_id'])
    return categories
def _filter_imgs(self, min_size=32):
    """Filter images too small or without ground truths."""
    valid_inds = []
    # obtain images that contain annotation
    ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
    # obtain images that contain annotations of the required categories
    ids_in_cat = set()
    for i, class_id in enumerate(self.cat_ids):
        ids_in_cat |= set(self.coco.cat_img_map[class_id])
    # merge the image id sets of the two conditions and use the merged set
    # to filter out images if self.filter_empty_gt=True
    ids_in_cat &= ids_with_ann
    valid_img_ids = []
    # data_infos and img_ids are index-aligned (built together in
    # load_annotations), so position i refers to the same image in both.
    for i, img_info in enumerate(self.data_infos):
        img_id = self.img_ids[i]
        if self.filter_empty_gt and img_id not in ids_in_cat:
            continue
        if min(img_info['width'], img_info['height']) >= min_size:
            valid_inds.append(i)
            valid_img_ids.append(img_id)
    # Keep img_ids consistent with the filtered data_infos.
    self.img_ids = valid_img_ids
    return valid_inds
def _parse_ann_info(self, img_info, ann_info):
    """Parse bbox and mask annotation.

    Args:
        img_info (dict): Image info dict with 'width', 'height', 'filename'.
        ann_info (list[dict]): Annotation info of an image.

    Returns:
        dict: A dict containing the following keys: bboxes, bboxes_ignore,\
            labels, masks, seg_map. "masks" are raw annotations and not \
            decoded into binary masks.
    """
    gt_bboxes = []
    gt_labels = []
    gt_bboxes_ignore = []
    gt_masks_ann = []
    for i, ann in enumerate(ann_info):
        if ann.get('ignore', False):
            continue
        x1, y1, w, h = ann['bbox']
        # Intersection of the box with the image canvas; drops boxes that
        # lie entirely outside the image.
        inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
        inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
        if inter_w * inter_h == 0:
            continue
        if ann['area'] <= 0 or w < 1 or h < 1:
            continue
        if ann['category_id'] not in self.cat_ids:
            continue
        bbox = [x1, y1, x1 + w, y1 + h]
        # Crowd regions become ignore boxes rather than training targets.
        if ann.get('iscrowd', False):
            gt_bboxes_ignore.append(bbox)
        else:
            gt_bboxes.append(bbox)
            gt_labels.append(self.cat2label[ann['category_id']])
            gt_masks_ann.append(ann.get('segmentation', None))
    if gt_bboxes:
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
    else:
        gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        gt_labels = np.array([], dtype=np.int64)
    if gt_bboxes_ignore:
        gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
    else:
        gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
    # NOTE(review): str.replace swaps every 'jpg' substring, not just the
    # extension; a filename like 'jpg_cam1.jpg' would be mangled -- confirm
    # filenames before relying on seg_map.
    seg_map = img_info['filename'].replace('jpg', 'png')
    ann = dict(
        bboxes=gt_bboxes,
        labels=gt_labels,
        bboxes_ignore=gt_bboxes_ignore,
        masks=gt_masks_ann,
        seg_map=seg_map)
    return ann
def xyxy2xywh(self, bbox):
    """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
    evaluation.

    Args:
        bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
            ``xyxy`` order.

    Returns:
        list[float]: The converted bounding boxes, in ``xywh`` order.
    """
    x1, y1, x2, y2 = bbox.tolist()
    return [x1, y1, x2 - x1, y2 - y1]
def _proposal2json(self, results):
    """Convert proposal results to COCO json style."""
    json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        # Proposals are class-agnostic, hence the fixed category_id of 1.
        for row in results[idx]:
            json_results.append(
                dict(
                    image_id=img_id,
                    bbox=self.xyxy2xywh(row),
                    score=float(row[4]),
                    category_id=1,
                ))
    return json_results
def _det2json(self, results):
    """Convert detection results to COCO json style."""
    json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        # results[idx] is a per-class list of (N, 5) arrays: x1,y1,x2,y2,score.
        for label, bboxes in enumerate(results[idx]):
            category_id = self.cat_ids[label]
            for row in bboxes:
                json_results.append(
                    dict(
                        image_id=img_id,
                        bbox=self.xyxy2xywh(row),
                        score=float(row[4]),
                        category_id=category_id,
                    ))
    return json_results
def _segm2json(self, results):
    """Convert instance segmentation results to COCO json style.

    Returns:
        tuple[list, list]: (bbox results, segm results), both COCO-json
        dicts; they share image ids, categories and box geometry.
    """
    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            # bbox results
            bboxes = det[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = self.cat_ids[label]
                bbox_json_results.append(data)
            # segm results
            # some detectors use different scores for bbox and mask
            if isinstance(seg, tuple):
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                segms = seg[label]
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(mask_score[i])
                data['category_id'] = self.cat_ids[label]
                # RLE counts may be bytes; JSON needs str.
                if isinstance(segms[i]['counts'], bytes):
                    segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
    """Dump the detection results to a COCO style json file.

    There are 3 types of results: proposals, bbox predictions, mask
    predictions, and they have different data types. This method will
    automatically recognize the type, and dump them to json files.

    Args:
        results (list[list | tuple | ndarray]): Testing results of the
            dataset.
        outfile_prefix (str): The filename prefix of the json files. If the
            prefix is "somepath/xxx", the json files will be named
            "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
            "somepath/xxx.proposal.json".

    Returns:
        dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
            values are corresponding filenames.
    """
    result_files = dict()
    # The element type of results encodes what the model produced:
    # list -> per-class bboxes, tuple -> (bboxes, masks), ndarray -> proposals.
    if isinstance(results[0], list):
        json_results = self._det2json(results)
        result_files['bbox'] = f'{outfile_prefix}.bbox.json'
        # Proposal metrics can be computed from the same bbox file.
        result_files['proposal'] = f'{outfile_prefix}.bbox.json'
        mmcv.dump(json_results, result_files['bbox'])
    elif isinstance(results[0], tuple):
        json_results = self._segm2json(results)
        result_files['bbox'] = f'{outfile_prefix}.bbox.json'
        result_files['proposal'] = f'{outfile_prefix}.bbox.json'
        result_files['segm'] = f'{outfile_prefix}.segm.json'
        mmcv.dump(json_results[0], result_files['bbox'])
        mmcv.dump(json_results[1], result_files['segm'])
    elif isinstance(results[0], np.ndarray):
        json_results = self._proposal2json(results)
        result_files['proposal'] = f'{outfile_prefix}.proposal.json'
        mmcv.dump(json_results, result_files['proposal'])
    else:
        raise TypeError('invalid type of results')
    return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
    """Evaluate proposal recall directly on numpy boxes (no COCOeval).

    Args:
        results (list[ndarray]): Per-image proposal arrays.
        proposal_nums (Sequence[int]): Proposal budgets to evaluate at.
        iou_thrs (Sequence[float] | float): IoU threshold(s).
        logger (logging.Logger | str | None): Passed through to eval_recalls.

    Returns:
        ndarray: Average recall per proposal budget (mean over IoU thresholds).
    """
    gt_bboxes = []
    for i in range(len(self.img_ids)):
        ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
        ann_info = self.coco.load_anns(ann_ids)
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            # Ignored and crowd annotations are excluded from recall.
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            bboxes.append([x1, y1, x1 + w, y1 + h])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)
    recalls = eval_recalls(
        gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
    # Mean over the IoU-threshold axis.
    ar = recalls.mean(axis=1)
    return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
    """Format the results to json (standard format for COCO evaluation).

    Args:
        results (list[tuple | numpy.ndarray]): Testing results of the
            dataset.
        jsonfile_prefix (str | None): The prefix of json files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.

    Returns:
        tuple: (result_files, tmp_dir), result_files is a dict containing \
            the json filepaths, tmp_dir is the temporal directory created \
            for saving json files when jsonfile_prefix is not specified.
            The caller is responsible for cleaning up tmp_dir.
    """
    assert isinstance(results, list), 'results must be a list'
    assert len(results) == len(self), (
        'The length of results is not equal to the dataset len: {} != {}'.
        format(len(results), len(self)))
    if jsonfile_prefix is None:
        # Keep a handle on the TemporaryDirectory so it is not deleted
        # before the caller reads the dumped files.
        tmp_dir = tempfile.TemporaryDirectory()
        jsonfile_prefix = osp.join(tmp_dir.name, 'results')
    else:
        tmp_dir = None
    result_files = self.results2json(results, jsonfile_prefix)
    return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, | |
import cStringIO as StringIO
import logging
# yes, i know this is evil
from spitfire.compiler.ast import *
class CodegenError(Exception):
    """Raised when the code generator cannot emit source for a node."""
    pass
class CodeNode(object):
    """One line of generated source plus its nested child lines.

    A src_line of None marks a pure container node that emits only its
    children. Children are written one indent level deeper than the node.
    """

    def __init__(self, src_line=None):
        self.src_line = src_line
        self.child_nodes = []

    def append_line(self, line):
        # Convenience: wrap a raw source string in a child CodeNode.
        self.append(CodeNode(line))

    def append(self, code_node):
        self.child_nodes.append(code_node)

    def extend(self, code_nodes):
        try:
            self.child_nodes.extend(code_nodes)
        except TypeError:
            # A codegen method returned a bare node instead of a list.
            raise CodegenError("can't add %s" % code_nodes)

    def __repr__(self):
        return '%s:%s' % (self.__class__.__name__, self.src_line)
# perform an in-order traversal of the AST and call the generate methods
# in this case, we are generating python source code that should be somewhat
# human-readable
class CodeGenerator(object):
indent_str = ' '
indent_level = 0
def __init__(self, ast_root, options=None):
    """Create a generator for one parsed template.

    Args:
        ast_root: root TemplateNode of the parsed template.
        options: optional AnalyzerOptions object controlling codegen behavior.
    """
    self.ast_root = ast_root
    self.options = options
    # cStringIO buffer accumulating the generated module source (Python 2).
    self.output = StringIO.StringIO()
def get_code(self):
    """Generate and return the full module source as an encoded byte string."""
    code_root = self.build_code(self.ast_root)[0]
    # The root starts at -1 so its direct children land at indent level 0.
    self.write_python(code_root, indent_level=-1)
    return self.output.getvalue().encode(self.ast_root.encoding)
def generate_python(self, code_node):
    """Return the single source line held by code_node.

    Raises:
        CodegenError: if code_node has no src_line attribute (i.e. it is not
            a CodeNode).
    """
    try:
        return code_node.src_line
    except AttributeError, e:
        raise CodegenError(
            "can't write code_node: %s\n\t%s" % (code_node, e))
def write_python(self, code_node, indent_level):
    """Recursively write code_node and its children to the output buffer.

    Each level of nesting is indented one indent_str deeper than its parent.
    """
    try:
        # Container nodes (src_line is None) emit only their children.
        if code_node.src_line is not None:
            self.output.write(self.indent_str * indent_level)
            self.output.write(code_node.src_line)
            self.output.write('\n')
    except AttributeError:
        raise CodegenError("can't write code_node: %s" % code_node)
    for cn in code_node.child_nodes:
        self.write_python(cn, indent_level + 1)
def build_code(self, ast_node):
    """Dispatch to codegenAST<NodeType>(node), falling back to codegenDefault.

    Returns:
        A list of CodeNode objects for the given AST node.
    """
    method_name = 'codegenAST%s' % ast_node.__class__.__name__
    method = getattr(self, method_name, self.codegenDefault)
    return method(ast_node)
def codegenASTTemplateNode(self, node):
    """Emit the complete generated module for one template.

    Layout: shebang/coding lines, template imports, runtime imports, cached
    identifier globals, then either a template class (attributes, methods,
    optionally a main function) or -- for libraries -- bare module-level
    functions, followed by the runnable __main__ stub.
    """
    module_code = CodeNode()
    module_code.append_line('#!/usr/bin/env python')
    module_code.append_line('# -*- coding: %s -*-' % node.encoding)
    module_code.append_line('')
    if node.import_nodes:
        module_code.append_line('# template imports')
        for n in node.import_nodes:
            module_code.extend(self.build_code(n))
        module_code.append_line('')
    if node.from_nodes:
        module_code.append_line('# template from imports')
        for n in node.from_nodes:
            module_code.extend(self.build_code(n))
        module_code.append_line('')
    extends = []
    for n in node.extends_nodes:
        extends.append(self.generate_python(self.build_code(n)[0]))
    # Default base class when the template does not #extends anything.
    if not extends:
        extends = ['spitfire.runtime.template.SpitfireTemplate']
    extends_clause = ', '.join(extends)
    classname = node.classname
    module_code.append_line('import spitfire.runtime')
    module_code.append_line('import spitfire.runtime.template')
    # Cheetah-compatibility mode swaps in Cheetah's resolver functions.
    if self.options and self.options.cheetah_cheats:
        module_code.append_line('from Cheetah.NameMapper import valueFromSearchList as resolve_placeholder')
        module_code.append_line('from Cheetah.NameMapper import valueForKey as resolve_udn')
    else:
        module_code.append_line('from spitfire.runtime.udn import resolve_placeholder')
        module_code.append_line('from spitfire.runtime.udn import resolve_udn')
    module_code.append_line('from spitfire.runtime.template import template_method')
    module_code.append_line('')
    if node.cached_identifiers:
        module_code.append_line('# cached identifiers')
        for cached_ph in node.cached_identifiers:
            module_code.append_line('%s = None' % cached_ph.name)
        module_code.append_line('')
    if not node.library:
        # The %(name)s template below is filled from locals via vars().
        class_code = CodeNode(
            'class %(classname)s(%(extends_clause)s):' % vars())
        module_code.append(class_code)
        for n in node.attr_nodes:
            class_code.extend(self.build_code(n))
            class_code.append_line('')
        def_parent_code = class_code
    else:
        # Library functions are written to the module directly.
        module_code.append_line('')
        def_parent_code = module_code
    for n in node.child_nodes:
        def_parent_code.extend(self.build_code(n))
        def_parent_code.append_line('')
    # if we aren't extending a template, build out the main function
    if not node.library and (not node.extends_nodes or node.implements):
        def_parent_code.extend(self.build_code(node.main_function))
    # NOTE(msolo): originally, i thought this would be helpful in case a bit of
    # human error - however, a more robust check is necessary here to make the
    # warning less spurious
    # else:
    #   from spitfire.compiler.visitor import flatten_tree
    #   logging.warning("throwing away defined main function because it is not a base class %s %s", self.ast_root.source_path)
    #   logging.warning("%s", flatten_tree(node.main_function))
    # Don't enable psyco for libraries since there is no class. We might want
    # to iterate over the library functions and enable it for them instead.
    if not node.library and self.options and self.options.enable_psyco:
        module_code.append_line('spitfire.runtime.template.enable_psyco(%(classname)s)' % vars())
    # run_tmpl is presumably a module-level template string pulled in by the
    # star-import from spitfire.compiler.ast; it renders the __main__ stub.
    module_code.append_line(run_tmpl % vars(node))
    return [module_code]
def codegenASTExtendsNode(self, node):
    """Emit the dotted base-class path for an extends declaration."""
    return [CodeNode('.'.join([
        self.generate_python(self.build_code(n)[0])
        for n in node.module_name_list]))]
def codegenASTImportNode(self, node):
    """Emit an 'import a.b.c' statement for an import directive."""
    return [CodeNode(
        'import %s' % '.'.join([
            self.generate_python(self.build_code(n)[0])
            for n in node.module_name_list]))]
def codegenASTFromNode(self, node):
    """Emit 'from a.b import c [as alias]' for a from-import directive."""
    from_clause = '.'.join([
        self.generate_python(self.build_code(n)[0])
        for n in node.module_name_list])
    import_clause = self.generate_python(self.build_code(node.identifier)[0])
    # The %(name)s templates below are filled from these locals via vars(),
    # so the local names must match the placeholders exactly.
    if node.alias:
        alias_clause = self.generate_python(self.build_code(node.alias)[0])
        return [CodeNode(
            'from %(from_clause)s import %(import_clause)s as %(alias_clause)s'
            % vars())]
    else:
        return [CodeNode(
            'from %(from_clause)s import %(import_clause)s' % vars())]
def codegenASTPlaceholderSubstitutionNode(self, node):
    """Emit a placeholder substitution using the shared AST template."""
    placeholder = self.generate_python(
        self.build_code(node.expression)[0])
    # ASTPlaceholderSubstitutionNode_tmpl comes from the star-import of
    # spitfire.compiler.ast; the 'placeholder' local is consumed via vars().
    return [CodeNode(ASTPlaceholderSubstitutionNode_tmpl[0] % vars())]
def codegenASTCallFunctionNode(self, node):
    """Emit a function-call expression via the shared AST template."""
    expression = self.generate_python(
        self.build_code(node.expression)[0])
    if node.arg_list:
        arg_list = self.generate_python(
            self.build_code(node.arg_list)[0])
    else:
        arg_list = ''
    # 'expression' and 'arg_list' are consumed by the template via vars().
    return [CodeNode(ASTCallFunctionNode_tmpl[0] % vars())]
def codegenASTForNode(self, node):
    """Emit a for-loop header and nest its body as child code nodes."""
    target_list = self.generate_python(
        self.build_code(node.target_list)[0])
    expression_list = self.generate_python(
        self.build_code(node.expression_list)[0])
    # 'target_list' and 'expression_list' are consumed by the template
    # via vars().
    code_node = CodeNode(ASTForNode_tmpl[0] % vars())
    for n in node.child_nodes:
        code_node.extend(self.build_code(n))
    return [code_node]
def codegenASTIfNode(self, node):
test_expression = self.generate_python(
self.build_code(node.test_expression)[0])
if_code_node = CodeNode("if %(test_expression)s:" % vars())
for n in node.child_nodes:
if_code_node.extend(self.build_code(n))
code_nodes = [if_code_node]
if node.else_.child_nodes:
else_code_node = CodeNode('else:')
for n in node.else_.child_nodes:
else_code_node.extend(self.build_code(n))
code_nodes.append(else_code_node)
return code_nodes
def codegenASTTargetListNode(self, node):
if len(node.child_nodes) == 1:
return self.build_code(node.child_nodes[0])
else:
return [CodeNode('(%s)' % ', '.join(
[self.generate_python(self.build_code(n)[0])
for n in node.child_nodes]))]
codegenASTExpressionListNode = codegenASTTargetListNode
def codegenASTLiteralNode(self, node):
if (self.options and not self.options.generate_unicode and
isinstance(node.value, basestring)):
return [CodeNode(repr(node.value.encode(self.ast_root.encoding)))]
else:
# generate unicode by default
return [CodeNode('%(value)r' % vars(node))]
def codegenASTListLiteralNode(self, node):
return [CodeNode('[%s]' % ', '.join([
self.generate_python(self.build_code(n)[0])
for n in node.child_nodes]))]
def codegenASTTupleLiteralNode(self, node):
return [CodeNode('(%s)' % ', '.join([
self.generate_python(self.build_code(n)[0])
for n in node.child_nodes]))]
def codegenASTDictLiteralNode(self, node):
return [
CodeNode('{%s}' %
', '.join([
'%s: %s' % (self.generate_python(self.build_code(kn)[0]),
self.generate_python(self.build_code(vn)[0]))
for kn, vn in node.child_nodes]))]
def codegenASTParameterNode(self, node):
if node.default:
return [CodeNode('%s=%s' % (node.name, self.generate_python(
self.build_code(node.default)[0])))]
else:
return [CodeNode('%s' % node.name)]
def codegenASTAttributeNode(self, node):
return [CodeNode('%s = %s' % (node.name, self.generate_python(
self.build_code(node.default)[0])))]
def codegenASTFilterAttributeNode(self, node):
return [CodeNode('%s = staticmethod(%s)' % (node.name, self.generate_python(
self.build_code(node.default)[0])))]
def codegenASTParameterListNode(self, node):
if len(node.child_nodes) == 1:
return self.build_code(node.child_nodes[0])
else:
return [CodeNode('%s' % ', '.join(
[self.generate_python(self.build_code(n)[0])
for n in node.child_nodes]))]
codegenASTArgListNode = codegenASTParameterListNode
def codegenASTGetUDNNode(self, node):
#print "codegenASTGetUDNNode", id(node), "name", node.name, "expr", node.expression
expression = self.generate_python(self.build_code(node.expression)[0])
name = node.name
if self.options and self.options.raise_udn_exceptions:
return [CodeNode("resolve_udn(%(expression)s, '%(name)s', raise_exception=True)" % vars())]
else:
return [CodeNode("resolve_udn(%(expression)s, '%(name)s')" % vars())]
def codegenASTPlaceholderNode(self, node):
name = node.name
if name in ('has_var', 'get_var'):
return [CodeNode("self.%(name)s" % vars())]
elif self.options and self.options.cheetah_cheats:
return [CodeNode(
"resolve_placeholder(_self_search_list, '%(name)s')"
% vars())]
elif self.options and self.options.omit_local_scope_search:
return [CodeNode(
"resolve_placeholder('%(name)s', template=self, global_vars=_globals)"
% vars())]
else:
return [CodeNode(
"resolve_placeholder('%(name)s', template=self, local_vars=locals(), global_vars=_globals)"
% vars())]
def codegenASTReturnNode(self, node):
expression = self.generate_python(self.build_code(node.expression)[0])
return [CodeNode("return %(expression)s" % vars())]
  def codegenASTOptionalWhitespaceNode(self, node):
    # Emit the whitespace write; the module-level template pulls its fields
    # straight from the node's attributes via vars(node).
    #if self.ignore_optional_whitespace:
    #  return []
    return [CodeNode(ASTOptionalWhitespaceNode_tmpl[0] % vars(node))]
def codegenASTSliceNode(self, node):
expression = self.generate_python(self.build_code(node.expression)[0])
slice_expression = self.generate_python(
self.build_code(node.slice_expression)[0])
return [CodeNode("%(expression)s[%(slice_expression)s]" % vars())]
def codegenASTBinOpExpressionNode(self, node):
left = self.generate_python(self.build_code(node.left)[0])
right = self.generate_python(self.build_code(node.right)[0])
operator = node.operator
return [CodeNode('(%(left)s %(operator)s %(right)s)' % vars())]
def codegenASTBinOpNode(self, node):
left = self.generate_python(self.build_code(node.left)[0])
right = self.generate_python(self.build_code(node.right)[0])
operator = node.operator
return [CodeNode('%(left)s %(operator)s %(right)s' % vars())]
codegenASTAssignNode = codegenASTBinOpNode
def codegenASTUnaryOpNode(self, node):
expression = self.generate_python(self.build_code(node.expression)[0])
operator = node.operator
return [CodeNode('(%(operator)s %(expression)s)' % vars())]
def codegenASTGetAttrNode(self, node):
expression = self.generate_python(self.build_code(node.expression)[0])
name = node.name
return [CodeNode("%(expression)s.%(name)s" % vars())]
def codegenASTFunctionNode(self, node):
name = node.name
if node.parameter_list:
parameter_list = self.generate_python(
self.build_code(node.parameter_list)[0])
else:
parameter_list = ''
decorator_node = CodeNode('@template_method')
# NOTE: for Cheetah compatibility, we have to handle the case where Cheetah
# tries to pass a 'transaction' object through. hopefully this doesn't have
# some other baggage coming with it.
if self.options and self.options.cheetah_compatibility:
if parameter_list:
code_node = CodeNode('def %(name)s(%(parameter_list)s, **kargs):' % vars())
else:
code_node = CodeNode('def %(name)s(**kargs):' % vars())
else:
code_node = CodeNode('def %(name)s(%(parameter_list)s):' % vars())
if self.options and self.options.cheetah_compatibility:
if_cheetah = CodeNode("if 'trans' in kargs:")
code_node.append(if_cheetah)
if_cheetah.append(CodeNode("_buffer = kargs['trans'].response()"))
else_spitfire = CodeNode('else:')
else_spitfire.append(CodeNode('_buffer = self.new_buffer()'))
code_node.append(else_spitfire)
else:
code_node.append(CodeNode('_buffer = self.new_buffer()'))
code_node.append(CodeNode('_buffer_write = _buffer.write'))
code_node.append(CodeNode('_globals = globals()'))
code_node.append(CodeNode('_self_filter_function = self.filter_function'))
if self.options and self.options.cheetah_cheats:
code_node.append(CodeNode('_self_search_list = self.search_list + [_globals]'))
for n in node.child_nodes:
code_child_nodes = self.build_code(n)
code_node.extend(code_child_nodes)
if self.options.cheetah_compatibility:
if_cheetah = CodeNode("if 'trans' not in kargs:")
if_cheetah.append(CodeNode('return _buffer.getvalue()'))
code_node.append(if_cheetah)
else:
code_node.append(CodeNode('return _buffer.getvalue()'))
return [decorator_node, code_node]
# fixme: don't know if i still need this - a 'template function'
# has an implicit return of the buffer built in - might be simpler
# to code that rather than adding a return node during the analyze
#def codegenASTReturnNode(self, node):
# code_node = self.codegenDefault(node)
def codegenASTBufferWrite(self, node):
expression = self.generate_python(self.build_code(node.expression)[0])
code_node = CodeNode('_buffer_write(%(expression)s)' % vars())
return [code_node]
def codegenASTEchoNode(self, node):
node_list = []
true_expression = self.generate_python(
self.build_code(node.true_expression)[0])
true_code = CodeNode('_buffer_write(%(true_expression)s)' % vars())
if node.test_expression:
test_expression = self.generate_python(
self.build_code(node.test_expression)[0])
if_code = CodeNode('if %(test_expression)s:' % vars())
if_code.append(true_code)
node_list.append(if_code)
else:
node_list.append(true_code)
if node.false_expression:
false_expression = self.generate_python(
self.build_code(node.false_expression)[0])
else_code = CodeNode('else:' % vars())
else_code.append(
CodeNode('_buffer_write(%(false_expression)s)' % vars()))
node_list.append(else_code)
return node_list
def codegenASTCacheNode(self, node):
cached_name = node.name
expression = self.generate_python(self.build_code(node.expression)[0])
# use dictionary syntax to get around coalescing 'global' statements
#globalize_var = CodeNode('global %(cached_name)s' % vars())
if_code = CodeNode("if %(cached_name)s is None:" % vars())
if_code.append(CodeNode("_globals['%(cached_name)s'] = %(expression)s" % vars()))
return [if_code]
def codegenASTFilterNode(self, node):
expression = self.generate_python(self.build_code(node.expression)[0])
if node.filter_function_node == DefaultFilterFunction:
filter_expression = '_self_filter_function'
elif node.filter_function_node:
filter_expression = self.generate_python(
self.build_code(node.filter_function_node)[0])
else:
filter_expression = None
if isinstance(node.expression, CallFunctionNode):
# need the placeholder function expression to make | |
<gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from libtbx.test_utils import show_diff
from libtbx import easy_run
import sys
def run_and_compare(commands, expected_output):
  """Run each shell command and compare its normalized stdout to expected_output.

  Lines starting with "Change of basis: " or "Inverse: " are ignored and the
  remaining lines are sorted, so the comparison is insensitive to those
  details and to line order.
  """
  def digest(lines):
    kept = [line for line in lines
            if not line.strip().startswith("Change of basis: ")
            and not line.strip().startswith("Inverse: ")]
    kept.sort()
    return "\n".join(kept) + "\n"
  expected_digest = digest(expected_output.splitlines())
  for command in commands:
    stdout_lines = easy_run.fully_buffered(
      command=command).raise_if_errors().stdout_lines
    assert not show_diff(digest(stdout_lines), expected_digest)
def exercise(args):
assert len(args) == 0
run_and_compare(
['iotbx.lattice_symmetry'
' --unit_cell="12.7923,12.8923,29.4356,102.846,103.846,22.7475"'],
"""\
Input
=====
Unit cell: (12.7923, 12.8923, 29.4356, 102.846, 103.846, 22.7475)
Space group: P 1 (No. 1)
Angular tolerance: 3.000 degrees
Similar symmetries
==================
Symmetry in minimum-lengths cell: C 1 2/m 1 (z,x-y,2*y) (No. 12)
Input minimum-lengths cell: (5.06616, 12.7923, 29.1526, 78.6285, 87.746, 79.7351)
Symmetry-adapted cell: (5.06616, 12.7923, 29.2944, 77.3884, 87.7702, 79.7351)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (5.06616, 57.1752, 12.8923, 90, 102.483, 90)
Change of basis: -x+1/2*z,-1/2*z,-x-y+1/2*z
Inverse: -x-y,x-z,-2*y
Maximal angular difference: 1.323 degrees
Symmetry in minimum-lengths cell: P -1 (No. 2)
Input minimum-lengths cell: (5.06616, 12.7923, 29.1526, 78.6285, 87.746, 79.7351)
Symmetry-adapted cell: (5.06616, 12.7923, 29.1526, 78.6285, 87.746, 79.7351)
Conventional setting: P -1 (No. 2)
Unit cell: (5.06616, 12.7923, 29.1526, 78.6285, 87.746, 79.7351)
Change of basis: -y,x+y-z,z
Inverse: x+y+z,-x,z
Maximal angular difference: 0.000 degrees
""")
#
run_and_compare(
['iotbx.lattice_symmetry --unit_cell=12,12.2,12.1,89,90,92 F',
'cctbx.lattice_symmetry'],
"""\
Input
=====
Unit cell: (12, 12.2, 12.1, 89, 90, 92)
Space group: P 1 (-a+b+c,a-b+c,a+b-c) (No. 1)
Angular tolerance: 3.000 degrees
Similar symmetries
==================
Symmetry in minimum-lengths cell: F m -3 m (-x+y+z,x-y+z,x+y-z) (No. 225)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.55619, 8.55619, 8.55619, 60, 60, 60)
Conventional setting: F m -3 m (No. 225)
Unit cell: (12.1003, 12.1003, 12.1003, 90, 90, 90)
Change of basis: z,-x,-y
Inverse: -y,-z,x
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: I 4/m m m (y-z,-x+z,x+z) (No. 139)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.55628, 8.55614, 8.55614, 60.0011, 59.9994, 59.9994)
Conventional setting: I 4/m m m (No. 139)
Unit cell: (8.55628, 8.55628, 12.1, 90, 90, 90)
Change of basis: -x+y,-x-y,z
Inverse: -1/2*x-1/2*y,1/2*x-1/2*y,z
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: I 4/m m m (-y+z,x-z,y+z) (No. 139)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.53852, 8.59142, 8.53852, 59.7948, 60.4103, 59.7948)
Conventional setting: I 4/m m m (No. 139)
Unit cell: (8.59142, 8.59142, 12, 90, 90, 90)
Change of basis: -y+z,y+z,-x
Inverse: -z,-1/2*x+1/2*y,1/2*x+1/2*y
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: I 4/m m m (y+z,-y+z,x-z) (No. 139)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.57387, 8.57387, 8.52071, 60.2049, 60.2049, 59.5902)
Conventional setting: I 4/m m m (No. 139)
Unit cell: (8.52071, 8.52071, 12.2, 90, 90, 90)
Change of basis: -x+z,-x-z,-y
Inverse: -1/2*x-1/2*y,-z,1/2*x-1/2*y
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: R -3 m :R (No. 166)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.481, 8.481, 8.481, 61.1714, 61.1714, 61.1714)
Conventional setting: R -3 m :H (No. 166)
Unit cell: (8.63072, 8.63072, 20.5883, 90, 90, 120)
Change of basis: 4/3*x-2/3*y+2/3*z,2/3*x+2/3*y+4/3*z,-1/3*x-1/3*y+1/3*z
Inverse: 1/2*x-z,-1/2*x+1/2*y-z,1/2*y+z
Maximal angular difference: 1.474 degrees
Symmetry in minimum-lengths cell: R -3 m :H (x+z,-y+z,-3*z) (No. 166)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.481, 8.481, 8.63072, 60.5722, 60.5722, 60)
Conventional setting: R -3 m :H (No. 166)
Unit cell: (8.481, 8.481, 21.3218, 90, 90, 120)
Change of basis: -4/3*x-2/3*y-2/3*z,-2/3*x+2/3*y-4/3*z,1/3*x-1/3*y-1/3*z
Inverse: -1/2*x+z,-1/2*x+1/2*y-z,-1/2*y-z
Maximal angular difference: 1.498 degrees
Symmetry in minimum-lengths cell: R -3 m :H (-y+z,-3*z,x+z) (No. 166)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.53148, 8.58082, 8.53148, 60.19, 60, 60.19)
Conventional setting: R -3 m :H (No. 166)
Unit cell: (8.53148, 8.53148, 21.0788, 90, 90, 120)
Change of basis: -2/3*x+2/3*y+4/3*z,2/3*x+4/3*y+2/3*z,-1/3*x+1/3*y-1/3*z
Inverse: -1/2*x+1/2*y-z,1/2*y+z,1/2*x-z
Maximal angular difference: 2.177 degrees
Symmetry in minimum-lengths cell: R -3 m :H (-3*z,x+z,-y+z) (No. 166)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.53148, 8.58082, 8.58082, 60, 59.8085, 59.8085)
Conventional setting: R -3 m :H (No. 166)
Unit cell: (8.58082, 8.58082, 20.8371, 90, 90, 120)
Change of basis: 2/3*x-4/3*y+2/3*z,4/3*x-2/3*y-2/3*z,1/3*x+1/3*y+1/3*z
Inverse: 1/2*y+z,-1/2*x+z,1/2*x-1/2*y+z
Maximal angular difference: 2.177 degrees
Symmetry in minimum-lengths cell: I m m m (y-z,-x+z,x+z) (No. 71)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.40567, 8.55614, 8.55614, 61.1489, 60.58, 60.58)
Conventional setting: I m m m (No. 71)
Unit cell: (8.40567, 8.70429, 12.1, 90, 90, 90)
Change of basis: x+y,-x+y,z
Inverse: 1/2*x-1/2*y,1/2*x+1/2*y,z
Maximal angular difference: 1.187 degrees
Symmetry in minimum-lengths cell: I m m m (-y+z,x-z,y+z) (No. 71)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.53852, 8.51612, 8.53852, 60.0867, 60.9908, 60.0867)
Conventional setting: I m m m (No. 71)
Unit cell: (8.51612, 8.66606, 12, 90, 90, 90)
Change of basis: -y+z,y+z,-x
Inverse: -z,-1/2*x+1/2*y,1/2*x+1/2*y
Maximal angular difference: 2.000 degrees
Symmetry in minimum-lengths cell: I m m m (y+z,-y+z,x-z) (No. 71)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.57387, 8.57387, 8.52071, 60.2049, 60.2049, 59.5902)
Conventional setting: I m m m (No. 71)
Unit cell: (8.52071, 8.52071, 12.2, 90, 90, 90)
Change of basis: -x+z,-x-z,-y
Inverse: -1/2*x-1/2*y,-z,1/2*x-1/2*y
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: F m m m (-x+y+z,x-y+z,x+y-z) (No. 69)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.55628, 8.59142, 8.52071, 60, 60.4101, 59.5899)
Conventional setting: F m m m (No. 69)
Unit cell: (12, 12.1, 12.2, 90, 90, 90)
Change of basis: x,z,-y
Inverse: x,-z,y
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (x+y,-x+y,x-y+z) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.55628, 8.59142, 8.52071, 60, 60.4101, 59.5899)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.52071, 12.2, 8.52071, 90, 90.4755, 90)
Change of basis: x-z,y,x+z
Inverse: 1/2*x+1/2*z,y,-1/2*x+1/2*z
Maximal angular difference: 2.236 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (x-y+z,x+y,-x+y) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.40567, 8.59142, 8.52071, 61.1478, 61.0005, 60.1608)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.40567, 12.1, 8.70429, 90, 90.9476, 90)
Change of basis: x+y,z,x-y
Inverse: 1/2*x+1/2*z,1/2*x-1/2*z,y
Maximal angular difference: 1.001 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (-x+y,x-y+z,x+y) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.55628, 8.51612, 8.52071, 60.2943, 60.9905, 59.8794)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.51612, 12, 8.66606, 90, 90.4716, 90)
Change of basis: y-z,x,-y-z
Inverse: y,1/2*x-1/2*z,-1/2*x-1/2*z
Maximal angular difference: 2.000 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (z,x-y,x+y) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.40567, 8.51842, 8.51842, 61.4489, 61.0277, 61.0277)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.40567, 8.70429, 12.1, 90, 90.7257, 90)
Change of basis: x+y,x-y,-z
Inverse: 1/2*x+1/2*y,1/2*x-1/2*y,-z
Maximal angular difference: 1.172 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (x+y,z,x-y) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.46339, 8.51612, 8.46339, 60.9617, 61.5908, 60.9617)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.51612, 8.66606, 12, 90, 91.4324, 90)
Change of basis: y-z,y+z,x
Inverse: z,1/2*x+1/2*y,-1/2*x+1/2*y
Maximal angular difference: 1.474 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (x-y,x+y,z) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.46108, 8.46108, 8.52071, 61.5187, 61.5187, 60.4668)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.52071, 8.52071, 12.2, 90, 92.1185, 90)
Change of basis: x-z,-x-z,y
Inverse: 1/2*x-1/2*y,z,-1/2*x-1/2*y
Maximal angular difference: 0.860 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (-z,-2*x+z,x+y) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.53686, 8.61072, 8.52071, 60.3452, 60.0626, 59.589)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.52071, 8.52071, 12.2, 90, 90.6981, 90)
Change of basis: x+z,x-z,y
Inverse: 1/2*x+1/2*y,z,1/2*x-1/2*y
Maximal angular difference: 2.177 degrees
Symmetry in minimum-lengths cell: C 1 2/m 1 (-z,-x+y,2*x+z) (No. 12)
Input minimum-lengths cell: (8.40567, 8.51612, 8.52071, 61.4489, 61.5879, 60.4641)
Symmetry-adapted cell: (8.46339, 8.51612, 8.61299, 60.3713, 60.9859, 59.7937)
Conventional setting: I 1 2/m 1 (No. 12)
Unit cell: (8.66606, 8.51612, 12, 90, 91.4076, 90)
Change of basis: -y-z,-y+z,-x
Inverse: -z,-1/2*x-1/2*y,-1/2*x+1/2*y
Maximal angular difference: | |
{'заведение': 3},
{'зависимость': 1},
{'идти': 1},
{'иметься': 2},
{'интересный': 1},
{'конкуренция': 1},
{'купить': 4},
{'магазин': 9},
{'мало': 1},
{'менее': 1},
{'место': 1},
{'многие': 1},
{'наиболее': 2},
{'небольшой': 1},
{'новый': 1},
{'определять': 1},
{'отношение': 1},
{'очень': 2},
{'поверить': 1},
{'позволять': 3},
{'поиск': 1},
{'покупателеи': 1},
{'покупатель': 11},
{'покупка': 1},
{'поэтому': 1},
{'представить': 1},
{'продавать': 1},
{'продавец': 1},
{'работать': 1},
{'рекомендовать': 1},
{'решить': 1},
{'рублеи': 2},
{'сами': 1},
{'самои': 2},
{'свободный': 1},
{'сеичас': 1},
{'слово': 1},
{'современный': 1},
{'сторона': 2},
{'страна': 1},
{'считать': 1},
{'терять': 1},
{'товар': 4},
{'условие': 1},
{'учебный': 1},
{'являться': 4},
{'акция': 2},
{'интернет': 1},
{'каждыи_день': 1},
{'ниже': 1},
{'низкий': 1},
{'нормальный': 1},
{'открывать': 1},
{'открыть': 3},
{'падение': 1},
{'прибыть': 1},
{'сеть': 8},
{'составить': 1},
{'теоретический': 1},
{'город': 1},
{'доход': 1},
{'миллион': 1},
{'партнёр': 1},
{'популярность': 1},
{'посетителеи': 1},
{'приносить': 1},
{'тысяча': 1},
{'франшиза': 11},
{'больший': 2},
{'зависеть': 1},
{'отзыв': 1},
{'поступать': 1},
{'завод': 1},
{'запретить': 1},
{'инвестиция': 1},
{'масса': 1},
{'необходимый': 1},
{'нужный': 1},
{'отдельно': 1},
{'результат': 1},
{'стимул': 1},
{'встречать': 1},
{'необходимость': 1},
{'оплата': 1},
{'определённый': 1},
{'приобретать': 2},
{'зона': 2},
{'отработать': 1},
{'политика': 2},
{'ассортиментный': 2},
{'базовый': 1},
{'запрос': 1},
{'инои': 1},
{'многом': 1},
{'объём': 2},
{'получать': 1},
{'постоянный': 2},
{'потребитель': 2},
{'предложение': 1},
{'продукт': 5},
{'продукция': 3},
{'производитель': 1},
{'работа': 2},
{'развиваться': 1},
{'реализация': 3},
{'ряд': 1},
{'способ': 1},
{'торговый': 4},
{'федеральнои': 1},
{'шаг': 1},
{'поддерживать': 1},
{'свидетельствовать': 2},
{'сильный': 1},
{'закупка': 1},
{'модель': 1},
{'принять': 1},
{'данным': 1},
{'сегмент': 1},
{'цифра': 1},
{'эксперт': 1},
{'требовать': 1},
{'пиво': 11},
{'формат': 5},
{'деиствительно': 1},
{'дом': 2},
{'изучать': 1},
{'которои': 2},
{'предложить': 1},
{'собраться': 1},
{'основать': 1},
{'представитель': 2},
{'увеличить': 1},
{'взнос': 1},
{'вложение': 1},
{'менеджер': 1},
{'сама': 1},
{'тип': 1},
{'значительно': 1},
{'обоидется': 1},
{'правда': 1},
{'улица': 1},
{'ценник': 1},
{'намерен': 1},
{'открыто': 1},
{'кошка': 1},
{'полка': 2},
{'потребительский': 1},
{'предприниматель': 2},
{'прожить': 1},
{'повод': 1},
{'многое': 1},
{'наити': 1},
{'приобретение': 1},
{'конкурент': 1},
{'марка': 2},
{'напиток': 1},
{'творог': 1},
{'удерживать': 1},
{'помещение': 1},
{'привыкнуть': 1},
{'требоваться': 2},
{'обеспечивать': 1},
{'отрицательный': 1},
{'предлагать': 4},
{'приятный': 1},
{'ассортимент': 4},
{'заморачиваться': 1},
{'подрасти': 1},
{'сервис': 1},
{'сетеи': 1},
{'сопутствовать': 1},
{'федеральный': 1},
{'проводиться': 1},
{'вечер': 1},
{'первоначальный': 1},
{'трудный': 1},
{'установить': 1},
{'актуальность': 1},
{'актуальный': 1},
{'первои': 1},
{'выбирать': 1},
{'рыба': 4},
{'уникальный': 1},
{'спрашиваться': 1},
{'получиться': 1},
{'гарантировать': 1},
{'оптимальнои': 1},
{'потребоваться': 1},
{'торговать': 1},
{'хлеб': 4},
{'супермаркет': 1},
{'статистик': 1},
{'точка': 3},
{'адаптированный': 1},
{'базовои': 1},
{'верныи': 1},
{'весовой': 1},
{'вечерний': 1},
{'взлететь': 1},
{'вкусного': 1},
{'вкусный': 6},
{'возражение': 1},
{'выкладка': 1},
{'выстроеннои': 1},
{'грамотно': 1},
{'дегустация': 1},
{'диалог': 1},
{'договариваться': 1},
{'домашнии': 1},
{'дополнение': 1},
{'друзеи': 1},
{'жесточаишую': 1},
{'жёлтый': 1},
{'заведении': 1},
{'завершившиися': 1},
{'завоевавшими': 1},
{'заити': 1},
{'закуска': 2},
{'запретнои': 1},
{'знаковый': 1},
{'изделии': 1},
{'иных': 1},
{'кассиры': 1},
{'колесить': 1},
{'корма': 1},
{'кран': 1},
{'курильщик': 2},
{'курить': 1},
{'мангал': 1},
{'маркетинговый': 1},
{'материнскои': 1},
{'многообещающий': 1},
{'молока': 1},
{'моментально': 1},
{'мощныи': 1},
{'мрц': 1},
{'наличие': 2},
{'напоследок': 1},
{'низкои': 1},
{'нормальнои': 2},
{'обоидет': 1},
{'организм': 1},
{'основнои': 1},
{'особои': 1},
{'островок': 2},
{'отдача': 1},
{'отличныи': 1},
{'отправляться': 1},
{'паушальныи': 1},
{'пекарня': 1},
{'переехать': 1},
{'периодически': 1},
{'пив_amp': 6},
{'пивзавоз': 1},
{'пивко': 5},
{'пивовареннои': 1},
{'пиву': 1},
{'планка': 1},
{'подсадить': 1},
{'поисках': 1},
{'положительный': 2},
{'портал': 1},
{'предлагаться': 2},
{'преимущество': 2},
{'приемлемый': 1},
{'прикассовои': 1},
{'прилавочный': 1},
{'приправа': 1},
{'причинои': 1},
{'приём': 1},
{'продвинутым': 1},
{'пятничныи': 1},
{'пятёрочка': 1},
{'разливной': 1},
{'располагаться': 1},
{'реализациеи': 1},
{'решившиися': 1},
{'решиться': 1},
{'розжиг': 1},
{'россиискои': 1},
{'роялти': 1},
{'рядом': 1},
{'свежий': 1},
{'сигарета': 10},
{'сложныи': 1},
{'снеки': 1},
{'снеков': 1},
{'сорт': 1},
{'способствовать': 1},
{'субботнии': 1},
{'табачный': 1},
{'угли': 1},
{'улыбкои': 1},
{'футбол': 1},
{'хлебобулочный': 1},
{'ход': 1},
{'ходовой': 1},
{'хождение': 1},
{'чемпионат': 1},
{'шашлык': 1},
{'широкии': 1}],
[{'большой': 1},
{'вывести': 1},
{'год': 7},
{'итог': 3},
{'компания': 1},
{'продажа': 5},
{'рынок': 7},
{'снижение': 1},
{'цена': 7},
{'доля': 3},
{'прийтись': 2},
{'вывод': 1},
{'наоборот': 1},
{'новыи': 1},
{'новый': 4},
{'отличие': 1},
{'результате': 1},
{'рублеи': 2},
{'связать': 1},
{'ситуация': 1},
{'стать': 2},
{'низкий': 1},
{'составить': 4},
{'популярность': 1},
{'график': 1},
{'отметить': 1},
{'рост': 1},
{'также': 1},
{'жильё': 3},
{'тыс': 1},
{'выражение': 1},
{'стадия': 1},
{'ценовый': 2},
{'октябрь': 2},
{'основный': 1},
{'сравнение': 1},
{'аналитик': 1},
{'доллар': 1},
{'общеи': 1},
{'объём': 5},
{'первичный': 2},
{'предложение': 9},
{'вырасти': 1},
{'начало': 2},
{'рубль': 1},
{'сентябрь': 1},
{'конец': 1},
{'руб': 1},
{'сегмент': 4},
{'формат': 1},
{'квартира': 2},
{'долларовый': 1},
{'апартамент': 8},
{'сервисный': 1},
{'новостройка': 2},
{'оценка': 1},
{'среднем': 1},
{'столица': 1},
{'элитный': 8},
{'достигнуть': 1},
{'набирать': 1},
{'открытый': 1},
{'жилой': 1},
{'возрасти': 1},
{'увеличиться': 1},
{'следствие': 1},
{'комплекс': 6},
{'динамик': 1},
{'стартовать': 2},
{'готовность': 1},
{'выйти': 1},
{'рублёвый': 1},
{'iii': 2},
{'высшем': 1},
{'кв': 2},
{'квартал': 3},
{'комплексе': 1},
{'космодамианская': 1},
{'лот': 2},
{'набережный': 1},
{'отчасти': 1},
{'палиха': 1},
{'пер': 1},
{'площадеи': 1},
{'площадь': 1},
{'предложении': 1},
{'премиальный': 1},
{'раионам': 1},
{'сомелье': 1},
{'сравнению': 1},
{'старт': 1},
{'стартовый': 1},
{'строительнои': 1},
{'суммарныи': 1},
{'тверской': 1},
{'тишинскии': 2},
{'ул': 2},
{'электрическии': 1},
{'ямской': 1}],
[{'год': 4},
{'компания': 1},
{'проект': 1},
{'развитие': 1},
{'рынок': 1},
{'средство': 1},
{'технология': 2},
{'бизнес': 1},
{'возврат': 1},
{'задолженность': 1},
{'пока': 1},
{'принятие': 1},
{'решение': 1},
{'высокий': 1},
{'давать': 1},
{'закон': 1},
{'новый': 1},
{'сложиться': 1},
{'становиться': 1},
{'план': 2},
{'прибыть': 1},
{'эффективность': 1},
{'больший': 1},
{'прогноз': 1},
{'задача': 1},
{'частный': 1},
{'линия': 1},
{'повысить': 1},
{'данные': 1},
{'показателеи': 1},
{'работа': 1},
{'система': 1},
{'деталь': 1},
{'прежде': 1},
{'ожидание': 2},
{'позволить': 1},
{'судебный': 1},
{'успешно': 1},
{'целом': 1},
{'достигнуть': 1},
{'инвестировать': 1},
{'запуск': 1},
{'дальнеишее': 1},
{'поделиться': 1},
{'рано': 1},
{'следующии': 1},
{'вашеи': 1},
{'зависимый': 1},
{'закладывать': 1},
{'инновационный': 1},
{'каковы': 2},
{'приставах': 1},
{'проблемнои': 1},
{'урегулирование': 1}],
[{'двигаться': 1},
{'компания': 1},
{'мир': 1},
{'назад': 1},
{'начинать': 1},
{'продажа': 2},
{'сегодня': 3},
{'статья': 2},
{'должный': 1},
{'момент': 1},
{'пока': 1},
{'процент': 1},
{'сумма': 1},
{'видеть': 2},
{'возможный': 1},
{'время': 3},
{'делать': 1},
{'думать': 1},
{'качество': 2},
{'небольшой': 1},
{'общий': 1},
{'однако': 1},
{'первый': 2},
{'покупка': 3},
{'посмотреть': 1},
{'продать': 1},
{'рассуждение': 1},
{'рублеи': 1},
{'сеичас': 1},
{'ситуация': 2},
{'слишком': 1},
{'акция': 6},
{'брокер': 1},
{'инвестор': 1},
{'открыть': 1},
{'показывать': 1},
{'пользоваться': 1},
{'составить': 1},
{'составлять': 1},
{'счёт': 5},
{'лично': 1},
{'материал': 1},
{'прибыль': 2},
{'рост': 3},
{'инвестиция': 1},
{'отчёт': 1},
{'результат': 2},
{'смысл': 1},
{'устроить': 2},
{'выражение': 1},
{'скорее': 1},
{'вверх': 1},
{'минимум': 1},
{'мнение': 1},
{'написание': 1},
{'пара': 1},
{'портфель': 2},
{'совпадать': 1},
{'баланс': 1},
{'делиться': 1},
{'маленький': 1},
{'месяц': 2},
{'напомнить': 1},
{'основа': 1},
{'система': 1},
{'шаг': 1},
{'критичный': 1},
{'процентный': 1},
{'фиксировать': 1},
{'январь': 1},
{'перспектива': 1},
{'комиссия': 1},
{'новость': 1},
{'подоидет': 1},
{'рад': 1},
{'вперед': 1},
{'целом': 1},
{'намерен': 1},
{'держать': 2},
{'пойти': 1},
{'обязательно': 1},
{'подробность': 1},
{'выплата': 1},
{'рассматривать': 1},
{'подрасти': 1},
{'замереть': 1},
{'дивиденд': 2},
{'инвестирование': 1},
{'приложение': 1},
{'русгидро': 1},
{'внести': 2},
{'декабрь': 1},
{'инвестиционныи': 1},
{'долгосрочнои': 1},
{'мобильный': 1},
{'ближаишее': 1},
{'получиться': 1},
{'следить': 1},
{'добрый': 1},
{'плюсе': 1},
{'устраивать': 1},
{'анализируите': 1},
{'выплатои': 1},
{'держу': 1},
{'думаите': 1},
{'золоту': 1},
{'идеально': 1},
{'мосбиржи': 2},
{'начинающий': 1},
{'нерешительность': 1},
{'отслеживать': 1},
{'оттолкнуться': 1},
{'поделюсь': 1},
{'портфеле': 1},
{'просадка': 1},
{'пятилетний': 1},
{'сбербанке': 1},
{'северсталь': 1},
{'спекулятивныи': 1},
{'суток': 1},
{'толчок': 1},
{'утраивать': 1}],
[{'год': 9},
{'история': 2},
{'какои': 1},
{'компания': 1},
{'кстати': 1},
{'международный': 2},
{'мир': 2},
{'невозможный': 1},
{'подобный': 1},
{'попытаться': 1},
{'появиться': 2},
{'раз': 1},
{'развитие': 1},
{'рынок': 3},
{'сегодня': 2},
{'создать': 1},
{'средство': 1},
{'цена': 5},
{'вообще': 1},
{'должный': 2},
{'момент': 1},
{'мочь': 1},
{'оставаться': 1},
{'пока': 2},
{'риск': 1},
{'стоять': 2},
{'этои': 1},
{'ближаишие': 1},
{'внимание': 1},
{'возможный': 1},
{'выходить': 1},
{'давать': 2},
{'деньга': 1},
{'дорогой': 1},
{'достаточно': 1},
{'зависимость': 1},
{'категория': 1},
{'качество': 1},
{'количество': 1},
{'конкуренция': 1},
{'кривой': 1},
{'купить': 2},
{'магазин': 8},
{'настоящий': 2},
{'настроение': 1},
{'небольшой': 1},
{'обычный': 1},
{'первый': 2},
{'период': 1},
{'покупка': 2},
{'представить': 2},
{'работать': 1},
{'самое': 1},
{'сначала': 1},
{'стать': 2},
{'стоить': 1},
{'страна': 3},
{'товар': 1},
{'тои': 1},
{'торговля': 1},
{'увеличиваться': 1},
{'хотеть': 1},
{'число': 2},
{'являться': 1},
| |
"""
functions to handle data loading
TO DO:
data augmentation for xyz coordinates
data augmentation by adding gaussian noise
"""
import numpy as np
import copy
import tensorflow as tf
from collections import Counter
from numpy.linalg import norm
class Data_Loader:
    """Load a .npz dataset and expose it as batched tf.data datasets.

    Inputs are assembled from the `input_label` arrays, targets from
    `target_label`; per-sample weights are derived from class labels
    and/or the potential-energy histogram.

    NOTE(review): `to_tfdataset` calls `make_initializable_iterator`,
    which only exists in TF1.x graph mode (or tf.compat.v1) -- confirm
    the TensorFlow version this is meant to run with.
    """
    # Normalization statistics are *class* attributes: computed once by the
    # first instance and reused by every later instance.
    input_mean = None
    input_std = None
    output_mean = None
    output_std = None
    # dtype every input/target array is cast to
    datatype = np.float32
    def __init__(
        self,
        filename="data.npz",
        shuffle=True,
        input_label=["xyz"],
        target_label=["pe"],
        n_sample=10000,
        test_sample=1000,
        batch_size=100,
        num_epoch=10,
        weight_by_pe=True,
        weight_by_label=True,
        ngrid=100,
        test_only=False,
        input_norm=True,
        output_norm=True,
    ):
        """load the data from npz file
        :param filename: str, root dir of .npy data
        :param shuffle, boolean,
            whether or not to shuffle training data
        :param input_label: list of str that define the input
        :param target_label: list of str that define the output
        :param n_sample: int, number of training samples to use
        :param test_sample: int, number of test samples to use
        :param batch_size: int, batch size of the training dataset
        :param num_epoch: int, stored on the instance; not used in this class
        :param weight_by_pe: bool, weight samples by inverse pe-histogram bin
        :param weight_by_label: bool, weight samples by inverse class count
        :param ngrid: int, number of bins for the pe histogram
        :param test_only: bool, build one dataset instead of a train/test pair
        :param input_norm: bool, compute input normalization constants
        :param output_norm: bool, compute output normalization constants

        Note: the list defaults (input_label, target_label) are shared
        mutable defaults; they are only read here, never mutated.
        """
        # load data from data.npz
        data = dict(np.load(filename))
        for k in input_label + target_label:
            data[k] = data[k].astype(self.datatype)
        # number of configurations available in the file
        n_config = data[input_label[0]].shape[0]
        # if n_sample is too big, shrink the requested split to fit
        if (n_sample+test_sample) > n_config:
            if test_sample < n_config:
                n_sample = n_config - test_sample
            else:
                # even test_sample alone does not fit: split half/half
                n_sample = n_config //2
                test_sample = n_config - n_sample
        # shuffle data and target with the same permutation
        if shuffle:
            r = np.random.permutation(n_config)
        else:
            r = np.arange(n_config)
        for k in data.keys():
            # permute in place, then keep only the first
            # n_sample+test_sample configurations
            np.take(data[k], r, axis=0, out=data[k])
            data[k] = data[k][:n_sample+test_sample]
        if "label" in data.keys():
            # shift class labels so the smallest one is 0
            minlabel = np.min(data["label"])
            if minlabel != 0:
                print(f"WARNING, the data label will be shifted {minlabel}")
                data["label"] = np.int32(data["label"] - minlabel)
            print("type of labels", Counter(data["label"]))
        if "pe" in data.keys():
            # report the finite potential-energy range (inf marks invalid)
            values = data["pe"][np.where(data["pe"]!=np.inf)[0]]
            print("potential energy", np.min(values), np.max(values))
        # assemble the input
        if len(input_label) > 1:
            x = []
            for label_id in input_label:
                # # cheating for Alanine dipeptide
                # if (label_id == "xyz" and data[label_id].shape[1]==66):
                #     x += [np.vstack(data[label_id])[:, [3, 4, 5, 12, 13, 14, 15, 16, 17, 18, 19, 20, 24, 25, 26, 30, 31, 32, 42, 43, 44, 45, 46, 47, 48, 49, 50, 54, 55, 56]]]
                if label_id == "colvar" and ("colvar" not in data.keys()):
                    # no precomputed collective variables: fall back to
                    # internal coordinates 1 and 2
                    x += [np.vstack(data["intc"])[:, [1, 2]]]
                else:
                    x += [np.vstack(data[label_id])]
                print("x added", label_id)
            x = np.hstack(x)
        else:
            # # cheating for Alanine dipeptide
            # if (input_label[0] == "xyz" and data[input_label[0]].shape[1]==66):
            #     x = np.vstack(data[input_label[0]])[:, [3, 4, 5, 12, 13, 14, 15, 16, 17, 18, 19, 20, 24, 25, 26, 30, 31, 32, 42, 43, 44, 45, 46, 47, 48, 49, 50, 54, 55, 56]]
            if input_label[0] == "colvar" and ("colvar" not in data.keys()):
                x = np.vstack(data["intc"])[:, [1, 2]]
            else:
                x = np.vstack(data[input_label[0]])
        # prepare the normalization mean and std for input
        if (self.input_mean is None) and (input_norm is True):
            xtemp, input_mean, input_std = normalize_data_bound(x, with_inf=False)
            if x.shape[1] == 66:
                # presumably 66 = 3*22 xyz coordinates: recompute the
                # constants in a rotated frame. NOTE(review): rotation()
                # is not defined in this module -- confirm it is imported.
                newx = rotation(x)
                xtemp, input_mean, input_std = normalize_data_bound(newx, with_inf=False)
                del newx
            self.input_mean = input_mean.astype(self.datatype)
            self.input_std = input_std.astype(self.datatype)
            del xtemp
        # assemble all data
        alldata = {"x": x}
        # for debug. TO DO: remove or add a flag for this
        # NOTE(review): raises KeyError if the file has no "xyz" record
        alldata["xyz"] = data["xyz"]
        if "intc" in data.keys():
            alldata["intc"] = data["intc"]
        # assemble the output
        for label_id in target_label:
            ori_data = data[label_id]
            if len(ori_data.shape) == 1:
                ori_data = np.hstack(ori_data)
            else:
                ori_data = np.vstack(ori_data)
            alldata[label_id] = ori_data
        for k in alldata:
            # make every record 2D: [nsample, nfeature]
            if len(alldata[k].shape) == 1:
                alldata[k] = alldata[k].reshape([-1, 1])
            print("record added", k, "shape", alldata[k].shape)
        # prepare the normalization mean and std for output
        if (
            "pe" in target_label
            and (self.output_mean is None)
            and (output_norm is True)
        ):
            petemp, output_mean, output_std = normalize_data_bound(data["pe"], with_inf=True)
            print("!!!!", output_mean, output_std)
            self.output_mean = output_mean.astype(self.datatype)
            self.output_std = output_std.astype(self.datatype)
            del petemp
        self.input_dim = x.shape[1]
        self.output_dim = 0
        # per-sample weights from class labels and/or the pe histogram
        if "label" in data.keys() and weight_by_label is True:
            l = data["label"]
        else:
            l = None
        if "pe" in data.keys() and weight_by_pe is True:
            p = data["pe"]
        else:
            p = None
        weight = define_weight(label=l, pe=p, ngrid=ngrid)
        avg = np.average(weight)
        weight = self.datatype(weight / avg)
        if "pe" in alldata:
            # mask samples with pe == inf: prefactor 0 removes them from the
            # loss, and their pe is zeroed so inf does not propagate
            alldata["pe_prefactor"] = np.array(alldata["pe"] != np.inf, dtype=np.float32)
            ids = np.where(alldata["pe"] == np.inf)[0]
            alldata["pe"][ids] = 0
        self.alldata = alldata
        self.alldata["w"] = weight.reshape([-1, 1])
        self.n_sample = n_sample
        self.test_sample = test_sample
        self.batch_size = batch_size
        self.num_epoch = num_epoch
        self.test_only = test_only
        self.to_tfdataset()
    def to_tfdataset(self):
        """Convert self.alldata into batched tf.data datasets.

        Builds train/test datasets plus TF1 initializable iterators, or a
        single dataset when test_only is True. Consumes (deletes)
        self.alldata.
        """
        alldata = self.alldata
        n_sample = self.n_sample
        test_sample = self.test_sample
        if not self.test_only:
            train_dict = {}
            test_dict = {}
            for k in alldata.keys():
                train_dict[k] = alldata[k][:n_sample]
                test_dict[k] = alldata[k][n_sample : n_sample + test_sample]
            del self.alldata
            self.train_dict = train_dict
            self.test_dict = test_dict
            self.train_dataset = tf.data.Dataset.from_tensor_slices(train_dict)
            self.train_dataset = self.train_dataset.batch(self.batch_size)
            self.test_dataset = tf.data.Dataset.from_tensor_slices(test_dict)
            # larger batches are fine for evaluation (no gradients kept)
            self.test_dataset = self.test_dataset.batch(self.batch_size * 10)
            self.iterator = self.train_dataset.make_initializable_iterator()
            self.test_iterator = self.test_dataset.make_initializable_iterator()
        else:
            datadict = {}
            for k in alldata.keys():
                # NOTE(review): this keeps the *first* test_sample rows,
                # which are the training rows of the non-test_only split --
                # confirm this overlap is intended.
                datadict[k] = alldata[k]
                datadict[k] = datadict[k][:test_sample]
            del self.alldata
            self.datadict = datadict
            self.dataset = tf.data.Dataset.from_tensor_slices(datadict)
            self.dataset = self.dataset.batch(self.batch_size)
            self.iterator = self.dataset.make_initializable_iterator()
            self.test_iterator = None
def normalize_data_std(data):
    """
    Normalize data so that its mean is 0 and its std is 1.

    Entries equal to +inf are treated as invalid: rows containing them are
    dropped from the returned array and excluded from the statistics. (The
    previous version filtered the returned rows but still computed mean/std
    over the raw data, yielding inf/NaN statistics whenever an inf was
    present; for 2D input it also duplicated rows when indexing.)

    :param data: np.ndarray, shape [nsample] or [nsample, nfeature]
    :return: np.ndarray, normalized data (invalid rows removed)
    :return: float or np.ndarray, mean value (per feature for 2D input)
    :return: float or np.ndarray, std value (per feature for 2D input)
    """
    data = np.asarray(data)
    if data.ndim == 1:
        values = data[data != np.inf]
        mean = np.mean(values)
        # eps guards against division by zero for constant data
        std = np.std(values) + np.finfo(float).eps
    else:
        # keep only rows where every feature is finite
        values = data[~np.any(data == np.inf, axis=1)]
        mean = np.mean(values, axis=0)
        std = np.std(values, axis=0) + np.finfo(float).eps
    return (values - mean) / std, mean, std
def normalize_data_bound(data, with_inf = True):
    """
    Rescale data by its range: subtract the mid-point of [min, max] and
    divide by (max - min), so finite values end up within [-0.5, 0.5].

    :param data: np.ndarray, shape [nsample, nfeature]
    :param with_inf: bool, when True drop +inf entries before rescaling
    :return: np.ndarray, normalized data
    :return: float, mid-point of the data range
    :return: float, width of the range (plus eps to avoid division by zero)
    """
    values = copy.deepcopy(data)
    if with_inf:
        keep = np.where(values != np.inf)[0]
        values = values[keep]
    # reduce over axis 0 for 2D data, over everything for 1D data
    axis_kw = {} if len(values.shape) == 1 else {"axis": 0}
    low = np.min(values, **axis_kw)
    high = np.max(values, **axis_kw)
    center = (low + high) * 0.5
    width = high - low + np.finfo(float).eps
    return (values - center) / width, center, width
def define_weight(label=None, pe=None, ngrid=100):
    """
    Give a weight to each sample based on its label and potential energy.

    Label weights are inversely proportional to the class population; pe
    weights are inversely proportional to the occupancy of the pe histogram
    bin (computed per class when labels are available).

    :param label: 1D np.array, labels of the data (or None)
    :param pe: 1D np.array of float, potential energies (or None)
    :param ngrid: int, number of histogram bins for the pe weights
    :raises ValueError: if both label and pe are None -- the sample count
        cannot be inferred. (The previous version referenced the undefined
        names `n_sample`/`nsample` on these paths and died with NameError.)
    :return: np.ndarray, product of the label and pe weights
    """
    if label is None and pe is None:
        raise ValueError("define_weight needs at least one of label or pe")
    if label is not None:
        label_weight, tlabel = weight_by_label(label)
        nsample = len(label)
        if pe is not None:
            nclass = len(tlabel)
            label_dict = {t: i for (i, t) in enumerate(tlabel)}
            print("label_dict", label_dict)
            # sort out the pe that belong to each class
            sorted_pe = [[] for i in range(nclass)]
            count_w_label = np.zeros(nclass)
            # new_pe_id[idx] = position of sample idx within its class list
            new_pe_id = np.zeros(nsample)
            for idx in range(nsample):
                label_id = label_dict[label[idx]]
                sorted_pe[label_id] += [pe[idx]]
                new_pe_id[idx] = count_w_label[label_id]
                count_w_label[label_id] += 1
            new_pe_id = list(map(int, new_pe_id))
            # histogram-based weights computed independently per class
            sorted_weight = [[] for i in range(nclass)]
            for idc in range(nclass):
                sorted_weight[idc] = weight_by_hist(sorted_pe[idc], ngrid)
            # scatter the per-class weights back to the original sample order
            pe_weight = np.zeros(nsample)
            for idx in range(nsample):
                label_id = label_dict[label[idx]]
                pe_weight[idx] = sorted_weight[label_id][new_pe_id[idx]]
        else:
            pe_weight = np.ones(nsample)
    else:
        # no labels: uniform label weight, single global pe histogram
        nsample = len(pe)
        label_weight = np.ones(nsample)
        pe_weight = weight_by_hist(pe, ngrid)
    return label_weight * pe_weight
def weight_by_label(label):
    """
    Weight each sample by the inverse of its class population, so every
    class contributes the same total weight. Labels can be anything
    accepted by collections.Counter (str, int, float, ...).

    :param label: np.array of labels (flattened internally for counting)
    :return: np.ndarray, per-sample weight (1 / class count)
    :return: dict_keys, the unique classes found
    """
    counts = Counter(label.reshape([-1]))
    # print("count label", counts)
    inverse = {cls: 1.0 / float(num) for cls, num in counts.items()}
    per_sample = np.array([inverse[cls] for cls in label])
    return per_sample, counts.keys()
def weight_by_hist(values, ngrid):
"""
give weight to sample based on the inverse of values histogram
:param values: 1D np.array, value to construct historgram
:param ngrid: number of bins for the hisgotram
:return: np.ndarray, weight
"""
# get
ori_n = len(values)
values = np.array(values)
print(values[:10])
ids = np.where(np.array(values) != np.inf )[0]
if len(ids) == 0:
return np.ones(ori_n)/ori_n
if len(ids) < ori_n:
values = values[ids]
# build an energy histogram
vmin = np.min(values)
vmax = np.max(values)
vmax_id = np.argmax(values)
dv = (vmax - vmin) / float(ngrid)
values_id = map(int, np.floor((values - vmin) / dv))
values_id = list(values_id)
values_id[vmax_id] -= 1
count = Counter(values_id)
# print("dv", vmax, dv, count.keys())
# for i in range(len(values_id)):
# print(i, values_id[i], values[i])
# print(count)
weight = dict(count)
for t in count.keys():
weight[t] = 1.0 / float(count[t])
non_zero_weights = np.array([weight[t] for t in values_id])
if len(ids) != ori_n:
hist_weights = np.ones(ori_n) / (ori_n | |
import sys
from basic import *
import tcr_distances
import parse_tsv
import numpy as np
from scipy.cluster import hierarchy
from scipy.spatial import distance
import util
import html_colors
from all_genes import all_genes
## Command-line options: Parser injects each declared option into locals().
## NOTE(review): Python 2 script (print statements, backticks and
## dict.keys() slicing appear further down).
with Parser(locals()) as p:
    #p.str('args').unspecified_default().multiple().required()
    p.str('organism').required()
    p.str('clones_file').required()
    p.int('nrepeat').default(100)
    p.int('num_random_trees').default(3)
    p.int('nbrdist_percentile').default(25)
    p.float('tree_height_inches').default(3.0)
    p.float('max_leaf_font_size').default(10.0)
    p.float('min_leaf_font_size').default(6.0)
    p.multiword('epitopes').cast(lambda x:x.split())
    p.multiword('all_chains').cast(lambda x:x.split()).default("A B AB")
    p.flag('scale_bars_in_key') # --flag_arg (no argument passed)
    p.flag('no_mouse_labels') # --flag_arg (no argument passed)
    p.flag('paper_figs') # --flag_arg (no argument passed)
    p.flag('constant_seed') # --flag_arg (no argument passed)
    p.str('distance_params')
    p.str('outfile_prefix')
## reproducible shuffles for the random null models below
if constant_seed: random.seed(1)
distance_params = tcr_distances.DistanceParams( config_string = distance_params )
if outfile_prefix is None:
    outfile_prefix = clones_file[:-4]
## select a non-interactive backend before importing pyplot
import matplotlib
matplotlib.rcParams['mathtext.default'] = 'regular'
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#recompute_nbrdists = True
## precomputed nbrdist scores must already exist next to the clones file
clones_file_with_nbrdists = '{}_nbrdists.tsv'.format(clones_file[:-4])
assert exists( clones_file_with_nbrdists )
## read the epitope-specific TCRs
all_tcrs = parse_tsv.parse_tsv_file( clones_file, ['epitope','subject'], ['va_genes','vb_genes','cdr3a','cdr3b'],
                                     save_l = True ) ## last element will be the full parse_tsv_line dictionary
if not epitopes:
    ## py2 idiom: keys() is a list; copy and sort it
    epitopes = all_tcrs.keys()[:]
    epitopes.sort()
Log('reading {}'.format(clones_file_with_nbrdists))
nbrdist_tag_suffix = '_wtd_nbrdist{}'.format(nbrdist_percentile)
nbrdist_tags = [ x+'_'+y+nbrdist_tag_suffix for x in epitopes for y in all_chains ]
all_nbrdists = parse_tsv.parse_tsv_file( clones_file_with_nbrdists, ['epitope','subject'], nbrdist_tags )
## BEGIN stolen from compute_distances.py
Log( 'precomputing v-region distances')
rep_dists = tcr_distances.compute_all_v_region_distances( organism, distance_params )
Log( 'done precomputing v-region distances' )
def compute_mouse_distances_fast( reorder, mice, mouse_indices, D ):
    """Collect TCR-TCR distances within and between mice.

    reorder maps TCR slots to rows/cols of the distance matrix D, which
    lets the caller evaluate shuffled null models with the same D.
    Returns four lists: all intra-mouse distances, all inter-mouse
    distances, and the per-(m1<=m2)-pair intra/inter averages, in the
    order the mouse pairs are visited. Pairs with no eligible distance
    get an average of 0.0.
    """
    intra_dists = []
    inter_dists = []
    intra_avgs = []
    inter_avgs = []
    for m1 in mice:
        for m2 in mice:
            if m1 > m2:
                continue
            same_mouse = ( m1 == m2 )
            bucket = intra_dists if same_mouse else inter_dists
            total = 0.0
            num_pairs = 0
            for i1 in mouse_indices[m1]:
                for i2 in mouse_indices[m2]:
                    # within one mouse, count each unordered pair once
                    if same_mouse and i1 >= i2:
                        continue
                    d = D[ reorder[i1], reorder[i2] ]
                    bucket.append( d )
                    total += d
                    num_pairs += 1
            avg = total / num_pairs if num_pairs else 0.0
            if same_mouse:
                intra_avgs.append( avg )
            else:
                inter_avgs.append( avg )
    return intra_dists, inter_dists, intra_avgs, inter_avgs
def get_mouse_average_nbrdists( reorder, mice, mouse_indices, nbrdists ): ## returns a list ordered same as mice
    """Average nbrdist of each mouse's TCRs, honoring the reorder mapping."""
    avgs = []
    for mouse in mice:
        indices = mouse_indices[mouse]
        total = 0
        for idx in indices:
            total += nbrdists[ reorder[ idx ] ]
        avgs.append( total / len( indices ) )
    return avgs
def get_Z( val, mean, sdev ):
    """Z-score of val; 0.0 when sdev is zero/falsy, avoiding division by zero."""
    if not sdev:
        return 0.0
    return ( val - mean ) / sdev
## figure layout: one row per epitope; one column for the real-data tree
## plus one per random (shuffled) control tree
nrows = len(epitopes)
ncols = 1+num_random_trees
## leave ~1.5in of total vertical margin around the stacked trees
fig_height = nrows * tree_height_inches + 1.5
fig_width = 12
bottom_margin = 0.5 / fig_height
top_margin = ( fig_height-1.0 ) / fig_height
#print 'fig_height:',fig_height
plt.figure(1,figsize=(fig_width,fig_height))
plt.subplots_adjust(left=0.4,top=top_margin, bottom=bottom_margin,hspace=0.3)
plotno=0
figcounter = 3 ## reserve figures 1 and 2 for the old plots
## per-epitope (Z, p) results for the 'AB' chains, filled in the loop below
epitope_zps = {}
epitope_zp2s = {}
for epitope in epitopes:
tcrs = []
tcr_infos = []
mouse_indices = {}
num_mouse_tcrs = {}
mice = all_tcrs[epitope].keys()[:]
mice.sort()
for mouse in mice:
mouse_indices[ mouse ] = []
num_mouse_tcrs[ mouse ] = len(all_tcrs[epitope][mouse] )
assert num_mouse_tcrs[ mouse ] == len( all_nbrdists[epitope][mouse] )
for l in all_tcrs[epitope][mouse]:
## ( va_reps, vb_reps, cdr3a, cdr3b )
index = len( tcrs )
mouse_indices[mouse].append( index )
va_reps = frozenset( ( all_genes[organism][x].rep for x in l[0].split(';') ) )
vb_reps = frozenset( ( all_genes[organism][x].rep for x in l[1].split(';') ) )
tcrs.append( ( va_reps, vb_reps, l[2], l[3] ) )
assert len(l) == 5
dict_from_parse_tsv_line = l[4]
tcr_infos.append( dict_from_parse_tsv_line )
util.assign_label_reps_and_colors_based_on_most_common_genes_in_repertoire( tcr_infos, organism )
## now stored as va_label_rep, jb_label_rep
num_tcrs = len( tcrs )
num_mice = len( mice )
if num_mice<=1:
Log( `( 'only one mouse?',epitope,mouse_indices.keys() )` )
continue
for chains in all_chains:
my_nbrdist_index = nbrdist_tags.index( epitope+'_'+chains+nbrdist_tag_suffix )
nbrdists = []
for mouse in mice:
for nbrdistl in all_nbrdists[epitope][mouse]:
nbrdists.append( float( nbrdistl[ my_nbrdist_index ] ) )
Log('START filling distance matrix {} {}'.format(epitope,chains))
D = np.zeros( ( num_tcrs, num_tcrs ) )
for i1,t1 in enumerate( tcrs ):
for i2,t2 in enumerate( tcrs ):
if i2<=i1: continue
dist = tcr_distances.compute_distance( tcrs[ i1 ], tcrs[ i2 ], chains, rep_dists, distance_params )
D[i1,i2]= dist
D[i2,i1]= dist
Log('DONE filling distance matrix {} {}'.format(epitope,chains))
reorder = range(len(tcrs))
intras,inters,real_intra_avs,real_inter_avs = compute_mouse_distances_fast( reorder, mice, mouse_indices, D )
save_inters = [ real_inter_avs ]
assert len(intras) + len(inters) == ( num_tcrs * (num_tcrs-1) ) /2
avg_inter = sum(inters)/len(inters)
avg_intra = sum(intras)/len(intras)
rand_inters = []
rand_intras = []
rand_inter_avs = []
rand_intra_avs = []
save_rand_inters = []
real_avg_nbrdists = get_mouse_average_nbrdists( reorder, mice, mouse_indices, nbrdists )
all_rand_avg_nbrdists = []
for r in range(nrepeat):
random.shuffle( reorder )
intras,inters,intra_avs,inter_avs = compute_mouse_distances_fast( reorder, mice, mouse_indices, D )
if r<num_random_trees:
save_inters.append( inter_avs )
rand_intras.append( sum(intras)/len(intras))
rand_inters.append( sum(inters)/len(inters))
rand_intra_avs.append( intra_avs )
rand_inter_avs.append( inter_avs )
all_rand_avg_nbrdists.append( get_mouse_average_nbrdists( reorder, mice, mouse_indices, nbrdists ) )
mean_intra,sdev_intra = get_mean_and_sdev( rand_intras )
mean_inter,sdev_inter = get_mean_and_sdev( rand_inters )
nlower1 = len( [ x for x in rand_intras if x < avg_intra ] )
nlower2 = len( [ x for x in rand_inters if x < avg_inter ] )
z_intra = ( avg_intra-mean_intra)/sdev_intra
z_inter = ( avg_inter-mean_inter)/sdev_inter
assert abs( z_intra + z_inter )<1e-3
p_intra = (1.0*nlower1)/nrepeat
print 'rep Z: {:9.3f} P: {:9.6f} intra {:9.3f} inter {:9.3f} ntcrs: {:3d} nmice: {:3d} {} {}'\
.format( z_intra, p_intra, avg_intra, avg_inter,
num_tcrs, num_mice,
chains, epitope )
if chains == 'AB':
epitope_zps[epitope] = ( z_intra, p_intra )
## look for mice with surprisingly low intras
mouse_zp = {}
for ii,real_av in enumerate( real_intra_avs ):
rand_avs = [ x[ii] for x in rand_intra_avs ]
mean,sdev = get_mean_and_sdev( rand_avs )
nlower = len( [ x for x in rand_avs if x < real_av ] )
Z = get_Z( real_av, mean, sdev )
p = ( 1.0 * nlower ) / nrepeat if sdev else .5
mouse_zp[ mice[ii] ] = (Z,p)
print 'mouse Z: {:9.3f} P: {:9.6f} {:2d} {} {} {}'\
.format( Z, p,
len(mouse_indices[mice[ii]]), mice[ii] , chains, epitope )
## look at mouse nbrdists
rand_Zsums = [0.0]*nrepeat
Zsum = 0.0
mouse_zp2 = {}
for ii, mouse in enumerate(mice):
real_av = real_avg_nbrdists[ii]
rand_avs = [x[ii] for x in all_rand_avg_nbrdists ]
mean,sdev = get_mean_and_sdev( rand_avs )
nlower = len( [ x for x in rand_avs if x < real_av ] )
nhigher = len( [ x for x in rand_avs if x > real_av ] )
Z = get_Z( real_av, mean, sdev )
plower = ( 1.0 * nlower ) / nrepeat if sdev else .5
phigher = ( 1.0 * nhigher ) / nrepeat if sdev else .5
mouse_zp2[ mouse ] = (Z,min(plower,phigher))
print 'mouse-nbrdist Z: {:9.3f} P: {:9.6f} {:2d} {} {} {}'\
.format( Z, min( plower, phigher ),
len(mouse_indices[mice[ii]]), mice[ii] , chains, epitope )
Zsum += abs(Z)
for jj,rand_av in enumerate( rand_avs ):
randZ = get_Z( rand_av, mean, sdev )
rand_Zsums[jj] += abs(randZ)
## compare Zsum to rand_Zsums
mean,sdev = get_mean_and_sdev( rand_Zsums )
ZZ = get_Z( Zsum, mean, sdev )
nhigher = len( [ x for x in rand_Zsums if x > Zsum ] )
Zp = ( 1.0 * nhigher ) / nrepeat if sdev else .5
if chains == 'AB':
epitope_zp2s[epitope] = ( ZZ, Zp )
print 'rep-nbrdist Z: {:9.3f} P: {:9.6f} ntcrs: {:3d} nmice: {:3d} {} {}'\
.format( ZZ, Zp, num_tcrs, num_mice, chains, epitope )
mouse_pairs = []
for m1 in mice:
for m2 in mice:
if m1>=m2: continue
mouse_pairs.append( ( m1,m2) )
assert len(mouse_pairs) == len( real_inter_avs )
for ii,real_av in enumerate( real_inter_avs ):
rand_avs = [ x[ii] for x in rand_inter_avs ]
mean,sdev = get_mean_and_sdev( rand_avs )
nlower = len( [ x for x in rand_avs if x < real_av ] )
mouse1,mouse2 = mouse_pairs[ii]
print 'mice Z: {:9.3f} P: {:9.6f} {:2d} {:2d} {} {} {} {}'\
.format( get_Z( real_av, mean, sdev ), ( 1.0 * nlower ) / nrepeat if sdev else .5,
len(mouse_indices[mouse1]), len(mouse_indices[mouse2]),
mouse1, mouse2, chains, epitope )
if chains == 'AB': ## make some plots
label_height_inches = float( tree_height_inches ) / num_mice
leaf_font_size = max( min_leaf_font_size, min( max_leaf_font_size,
int( floor( 0.5+label_height_inches * 72.0 * 0.85 ) ) ) )
for ii in range(1+num_random_trees):
inters = save_inters[ii]
mouse_D = np.zeros( ( num_mice,num_mice ) )
#print len(inters), len(mouse_pairs)
assert len(inters) == len(mouse_pairs)
for i1,m1 in enumerate(mice):
for i2,m2 in enumerate(mice):
if m1>=m2: continue
mouse_D[i1,i2] = inters[0]
mouse_D[i2,i1] = inters[0]
del inters[0]
assert not inters
y = distance.squareform( mouse_D, checks=True )
assert len(y) == ( num_mice*(num_mice-1) )/2
Z = hierarchy.average( y )
#Z = hierarchy.average( mouse_D )
c,coph_dists = hierarchy.cophenet(Z,y)
# leaves = hierarchy.leaves_list( Z )
# tree_ordered_mice = [ mice[x] for x in leaves ]
# #print 'leaves:',leaves
print 'coph:',epitope,ii,c
#print Z[:3]
plotno += 1
ax = plt.subplot( nrows, ncols, plotno )
def get_stars_from_pvalue( p ):
if p<=0.001:
| |
"""Beam lifetime calculation."""
import os as _os
import importlib as _implib
from copy import deepcopy as _dcopy
import numpy as _np
from mathphys.functions import get_namedtuple as _get_namedtuple
from mathphys import constants as _cst, units as _u, \
beam_optics as _beam
from . import optics as _optics
if _implib.util.find_spec('scipy'):
import scipy.integrate as _integrate
import scipy.special as _special
else:
_integrate = None
_special = None
class Lifetime:
"""Class which calculates the lifetime for a given accelerator."""
# Constant factors
_MBAR_2_PASCAL = 1.0e-3 / _u.pascal_2_bar
_D_TOUSCHEK_FILE = _os.path.join(
_os.path.dirname(__file__), 'data', 'd_touschek.npz')
_KSI_TABLE = None
_D_TABLE = None
OPTICS = _get_namedtuple('Optics', ['EdwardsTeng', 'Twiss'])
EQPARAMS = _get_namedtuple('EqParams', ['BeamEnvelope', 'RadIntegrals'])
TOUSCHEKMODEL = _get_namedtuple('TouschekModel', ['Piwinski', 'FlatBeam'])
    def __init__(self, accelerator, touschek_model=None,
                 type_eqparams=None, type_optics=None):
        """Initialize the lifetime calculator.

        Args:
            accelerator: accelerator model used for all calculations.
            touschek_model: TOUSCHEKMODEL member or name (default Piwinski).
            type_eqparams: EQPARAMS member or name (default BeamEnvelope).
            type_optics: OPTICS member or name (default EdwardsTeng).
        """
        self._acc = accelerator
        # defaults first; the property setters below only overwrite them
        # when the corresponding argument is not None
        self._type_eqparams = Lifetime.EQPARAMS.BeamEnvelope
        self._type_optics = Lifetime.OPTICS.EdwardsTeng
        self._touschek_model = Lifetime.TOUSCHEKMODEL.Piwinski
        self.type_eqparams = type_eqparams
        self.type_optics = type_optics
        self.touschek_model = touschek_model
        # select the implementation functions matching the chosen types
        if self.type_eqparams == self.EQPARAMS.BeamEnvelope:
            self._eqparams_func = _optics.EqParamsFromBeamEnvelope
        elif self.type_eqparams == self.EQPARAMS.RadIntegrals:
            self._eqparams_func = _optics.EqParamsFromRadIntegrals
        if self.type_optics == self.OPTICS.EdwardsTeng:
            self._optics_func = _optics.calc_edwards_teng
        elif self._type_optics == self.OPTICS.Twiss:
            self._optics_func = _optics.calc_twiss
        self._eqpar = self._eqparams_func(self._acc)
        self._optics_data, *_ = self._optics_func(self._acc, indices='closed')
        _twiss = self._optics_data
        if self.type_optics != self.OPTICS.Twiss:
            # the transverse-acceptance calculation needs Twiss data proper
            _twiss, *_ = _optics.calc_twiss(self._acc, indices='closed')
        res = _optics.calc_transverse_acceptance(self._acc, _twiss)
        self._accepx_nom = _np.min(res[0])
        self._accepy_nom = _np.min(res[1])
        # default machine/residual-gas parameters (overridable via properties)
        self._curr_per_bun = 100/864  # [mA]
        self._avg_pressure = 1e-9  # [mbar]
        self._atomic_number = 7
        self._temperature = 300  # [K]
        # user overrides; None means "use the equilibrium-parameter value"
        self._tau1 = self._tau2 = self._tau3 = None
        self._emit1 = self._emit2 = self._espread0 = self._bunlen = None
        self._accepx = self._accepy = self._accepen = None
    @property
    def type_eqparams_str(self):
        """Name of the selected equilibrium-parameters method (str)."""
        return Lifetime.EQPARAMS._fields[self._type_eqparams]
    @property
    def type_eqparams(self):
        """Selected equilibrium-parameters method (EQPARAMS index, int)."""
        return self._type_eqparams
@type_eqparams.setter
def type_eqparams(self, value):
if value is None:
return
if isinstance(value, str):
self._type_eqparams = int(value in Lifetime.EQPARAMS._fields[1])
elif int(value) in Lifetime.EQPARAMS:
self._type_eqparams = int(value)
    @property
    def type_optics_str(self):
        """Name of the selected optics method (str)."""
        return Lifetime.OPTICS._fields[self._type_optics]
    @property
    def type_optics(self):
        """Selected optics method (OPTICS index, int)."""
        return self._type_optics
@type_optics.setter
def type_optics(self, value):
if value is None:
return
if isinstance(value, str):
self._type_optics = int(value in Lifetime.OPTICS._fields[1])
elif int(value) in Lifetime.OPTICS:
self._type_optics = int(value)
    @property
    def touschek_model_str(self):
        """Name of the selected Touschek model (str)."""
        return Lifetime.TOUSCHEKMODEL._fields[self._touschek_model]
    @property
    def touschek_model(self):
        """Selected Touschek model (TOUSCHEKMODEL index, int)."""
        return self._touschek_model
@touschek_model.setter
def touschek_model(self, value):
if value is None:
return
if isinstance(value, str):
self._touschek_model = int(
value in Lifetime.TOUSCHEKMODEL._fields[1])
elif int(value) in Lifetime.TOUSCHEKMODEL:
self._touschek_model = int(value)
    @property
    def accelerator(self):
        """Accelerator model used in the lifetime calculations."""
        return self._acc
    @accelerator.setter
    def accelerator(self, val):
        # Recompute equilibrium parameters, optics data and the nominal
        # transverse acceptances before storing the new model (mirrors the
        # corresponding steps in __init__).
        self._eqpar = self._eqparams_func(val)
        self._optics_data, *_ = self._optics_func(val, indices='closed')
        _twiss = self._optics_data
        if self.type_optics != self.OPTICS.Twiss:
            # the transverse-acceptance calculation needs Twiss data proper
            _twiss, *_ = _optics.calc_twiss(val, indices='closed')
        res = _optics.calc_transverse_acceptance(val, _twiss)
        self._accepx_nom = _np.min(res[0])
        self._accepy_nom = _np.min(res[1])
        self._acc = val
    @property
    def equi_params(self):
        """Equilibrium parameters object (implementation depends on type_eqparams)."""
        return self._eqpar
    @property
    def optics_data(self):
        """Optics data along the ring (EdwardsTeng or Twiss arrays)."""
        return self._optics_data
    @property
    def curr_per_bunch(self):
        """Return current per bunch [mA]."""
        return self._curr_per_bun
    @curr_per_bunch.setter
    def curr_per_bunch(self, val):
        self._curr_per_bun = float(val)
    @property
    def particles_per_bunch(self):
        """Number of particles per bunch, derived from curr_per_bunch."""
        return int(_beam.calc_number_of_electrons(
            self._acc.energy * _u.eV_2_GeV, self.curr_per_bunch,
            self._acc.length))
    @property
    def avg_pressure(self):
        """Average residual-gas pressure [mbar]."""
        return self._avg_pressure
    @avg_pressure.setter
    def avg_pressure(self, val):
        self._avg_pressure = float(val)
    @property
    def atomic_number(self):
        """Atomic number of the residual gas."""
        return self._atomic_number
    @atomic_number.setter
    def atomic_number(self, val):
        self._atomic_number = int(val)
    @property
    def temperature(self):
        """Average temperature of the residual gas [K]."""
        return self._temperature
    @temperature.setter
    def temperature(self, val):
        self._temperature = float(val)
    @property
    def emit1(self):
        """Stationary emittance of mode 1 [m.rad].

        Returns the user override if one was set; otherwise reads the
        equilibrium parameters ('emitx' for RadIntegrals, 'emit1' for
        BeamEnvelope).
        """
        if self._emit1 is not None:
            return self._emit1
        attr = 'emitx' if \
            self.type_eqparams == self.EQPARAMS.RadIntegrals else 'emit1'
        return getattr(self._eqpar, attr)
    @emit1.setter
    def emit1(self, val):
        self._emit1 = float(val)
    @property
    def emit2(self):
        """Stationary emittance of mode 2 [m.rad] (override or eq-params value)."""
        if self._emit2 is not None:
            return self._emit2
        attr = 'emity' if \
            self.type_eqparams == self.EQPARAMS.RadIntegrals else 'emit2'
        return getattr(self._eqpar, attr)
    @emit2.setter
    def emit2(self, val):
        self._emit2 = float(val)
    @property
    def espread0(self):
        """Relative energy spread (override or eq-params value)."""
        if self._espread0 is not None:
            return self._espread0
        return self._eqpar.espread0
    @espread0.setter
    def espread0(self, val):
        self._espread0 = float(val)
    @property
    def bunlen(self):
        """Bunch length [m] (override or eq-params value)."""
        if self._bunlen is not None:
            return self._bunlen
        return self._eqpar.bunlen
    @bunlen.setter
    def bunlen(self, val):
        self._bunlen = float(val)
    @property
    def tau1(self):
        """Mode 1 damping time [s].

        Returns the user override if one was set; otherwise reads the
        equilibrium parameters ('taux' for RadIntegrals, 'tau1' for
        BeamEnvelope).
        """
        if self._tau1 is not None:
            return self._tau1
        attr = 'taux' if \
            self.type_eqparams == self.EQPARAMS.RadIntegrals else 'tau1'
        return getattr(self._eqpar, attr)
    @tau1.setter
    def tau1(self, val):
        self._tau1 = float(val)
    @property
    def tau2(self):
        """Mode 2 damping time [s] (override or eq-params value)."""
        if self._tau2 is not None:
            return self._tau2
        attr = 'tauy' if \
            self.type_eqparams == self.EQPARAMS.RadIntegrals else 'tau2'
        return getattr(self._eqpar, attr)
    @tau2.setter
    def tau2(self, val):
        self._tau2 = float(val)
    @property
    def tau3(self):
        """Mode 3 damping time [s] (override or eq-params value)."""
        if self._tau3 is not None:
            return self._tau3
        attr = 'taue' if \
            self.type_eqparams == self.EQPARAMS.RadIntegrals else 'tau3'
        return getattr(self._eqpar, attr)
    @tau3.setter
    def tau3(self, val):
        self._tau3 = float(val)
    @property
    def accepen(self):
        """Longitudinal (energy) acceptance.

        Returns the user-set acceptance if any; otherwise a uniform
        +/- RF acceptance along the ring.

        Returns:
            dict with keys 'spos', 'accp' (positive side) and 'accn'
            (negative side).
        """
        if self._accepen is not None:
            return self._accepen
        dic = dict()
        rf_accep = self._eqpar.rf_acceptance
        dic['spos'] = self._optics_data.spos
        dic['accp'] = dic['spos']*0 + rf_accep
        dic['accn'] = dic['spos']*0 - rf_accep
        return dic
@accepen.setter
def accepen(self, val):
if isinstance(val, dict):
if {'spos', 'accp', 'accn'} - val.keys():
raise KeyError(
"Dictionary must contain keys 'spos', 'accp', 'accn'")
spos = val['spos']
accp = val['accp']
accn = val['accn']
elif isinstance(val, (list, tuple, _np.ndarray)):
spos = self._optics_data.spos
accp = spos*0.0 + val[1]
accn = spos*0.0 + val[0]
elif isinstance(val, (int, _np.int, float, _np.float)):
spos = self._optics_data.spos
accp = spos*0.0 + val
accn = spos*0.0 - val
else:
raise TypeError('Wrong value for energy acceptance')
self._accepen = _dcopy(dict(spos=spos, accp=accp, accn=accn))
    @property
    def accepx(self):
        """Horizontal acceptance.

        Returns the user-set acceptance if any; otherwise the nominal value
        computed from the optics, uniform along the ring.

        Returns:
            dict with keys 'spos' and 'acc'.
        """
        if self._accepx is not None:
            return self._accepx
        dic = dict()
        dic['spos'] = self._optics_data.spos
        dic['acc'] = dic['spos']*0 + self._accepx_nom
        return dic
@accepx.setter
def accepx(self, val):
if isinstance(val, dict):
if {'spos', 'acc'} - val.keys():
raise KeyError(
"Dictionary must contain keys 'spos', 'acc'")
spos = val['spos']
acc = val['acc']
elif isinstance(val, (int, _np.int, float, _np.float)):
spos = self._optics_data.spos
acc = spos*0.0 + val
else:
raise TypeError('Wrong value for energy acceptance')
self._accepx = _dcopy(dict(spos=spos, acc=acc))
    @property
    def accepy(self):
        """Vertical acceptance.

        Returns the user-set acceptance if any; otherwise the nominal value
        computed from the optics, uniform along the ring.

        Returns:
            dict with keys 'spos' and 'acc'.
        """
        if self._accepy is not None:
            return self._accepy
        dic = dict()
        dic['spos'] = self._optics_data.spos
        dic['acc'] = dic['spos']*0 + self._accepy_nom
        return dic
@accepy.setter
def accepy(self, val):
if isinstance(val, dict):
if {'spos', 'acc'} - val.keys():
raise KeyError(
"Dictionary must contain keys 'spos', 'acc'")
spos = val['spos']
acc = val['acc']
elif isinstance(val, (int, _np.int, float, _np.float)):
spos = self._optics_data.spos
acc = spos*0.0 + val
else:
raise TypeError('Wrong value for energy acceptance')
self._accepy = _dcopy(dict(spos=spos, acc=acc))
@property
def touschek_data(self):
"""Calculate loss rate due to Touschek beam lifetime.
If touschek_model = 'FlatBeam', the calculation follows the formulas
presented in Ref. [1], where the vertical betatron beam size and
vertical dispersion are not taken into account
If touschek_model = 'Piwinski', the calculation follows the formulas
presented in Ref. [2], Eqs. 32-42. This formalism describes the
most general case with respect to the horizontal and vertical
betatron oscillation, the horizontal and vertical dispersion, and
the derivatives of the amplitude functions and dispersions.
References:
[1] <NAME>. (1988). Single and multiple Touschek effects. In
CERN Acccelerator School: Accelerator Physics (pp. 114–130).
[2] <NAME>. (1999). The Touschek Effect in Strong Focusing
Storage Rings. November. http://arxiv.org/abs/physics/9903034
parameters used in calculation:
emit1 = Mode 1 emittance [m.rad]
emit2 = Mode 2 emittance [m.rad]
energy = Bunch energy [GeV]
nr_part = Number of electrons ber bunch
espread = relative energy spread,
bunlen = bunch length [m]
accepen = relative energy acceptance of the machine.
optics = pyaccel.TwissArray object or similar object with fields:
spos, betax, betay, etax, etay, alphax, alphay, etapx, etapy
or
pyaccel.EdwardsTengArray object or similar object with fields:
spos, beta1, beta2, eta1, eta2, alpha1, alpha2, etap1, etap2
output:
dictionary with fields:
rate = loss rate along the ring [1/s]
avg_rate = average loss rate along the ring [1/s]
pos = longitudinal position where loss rate was calculated [m]
volume = volume of the beam along the ring [m^3]
touschek_coeffs = dict with coefficients for corresponding
formalism
"""
self._load_touschek_integration_table()
gamma = self._acc.gamma_factor
beta = self._acc.beta_factor
en_accep = self.accepen
optics = self._optics_data
emit1, emit2 = self.emit1, self.emit2
espread = self.espread0
bunlen = self.bunlen
nr_part = self.particles_per_bunch
_, ind = _np.unique(optics.spos, return_index=True)
spos = en_accep['spos']
accp = en_accep['accp']
accn = en_accep['accn']
# calculate lifetime for each 10cm of the ring
npoints = int((spos[-1] - spos[0])/0.1)
s_calc = _np.linspace(spos[0], spos[-1], npoints)
d_accp = _np.interp(s_calc, spos, accp)
d_accn = _np.interp(s_calc, spos, -accn)
# if momentum aperture is 0, set it | |
import os
import h5py
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
def put_static_first(data, col, static_col):
    """Reorder the columns of every split so that static columns come first.

    Static columns keep their relative order of appearance in ``col`` (not the
    order of ``static_col``), followed by all remaining columns.

    Args:
        data: Dict mapping each split name ('train'/'test'/'val') to a 2D array.
        col: Ordered list of the column names in the data.
        static_col: List of static column names.

    Returns:
        reordered_data: Analog to ``data`` with columns permuted in each split.
        reordered_col: Column names permuted the same way.
    """
    static_index = [i for i, name in enumerate(col) if name in static_col]
    dynamic_index = [i for i in range(len(col)) if i not in static_index]
    permutation = static_index + dynamic_index
    reordered_data = {split: data[split][:, permutation]
                      for split in ('train', 'test', 'val')}
    reordered_col = list(np.array(col)[permutation])
    return reordered_data, reordered_col
def clip_dataset(var_range, data, columns):
    """Set every value lying outside a predefined per-column range to NaN.

    Args:
        var_range: Dict mapping a variable name to its valid range [min, max].
            Columns without an entry are left untouched.
        data: Dict mapping each split name ('train'/'test'/'val') to a 2D array.
        columns: Ordered list of the column names in the data.

    Returns:
        new_data: Dict with a clipped *copy* of each split; values outside the
            range are replaced by NaN, existing NaN entries are preserved.
    """
    new_data = {}
    for split in ['train', 'test', 'val']:
        # np.array(...) forces a true copy: the previous `data[split][:]`
        # returns a *view* for in-memory numpy arrays, so the clipping
        # silently mutated the caller's data. (For h5py datasets both
        # spellings read a fresh array.)
        clipped_data = np.array(data[split])
        for i, col in enumerate(columns):
            limits = var_range.get(col)
            if limits:
                # NaN compares False on both sides, so NaNs stay untouched.
                out_of_range = ((clipped_data[:, i] < limits[0])
                                | (clipped_data[:, i] > limits[1]))
                clipped_data[out_of_range, i] = np.nan
        new_data[split] = clipped_data
    return new_data
def finding_cat_features(rep_data, threshold):
    """Identify the categorical features of a pre-built dataset.

    A column counts as categorical when its non-NaN training values are all
    integer-valued and take at most ``threshold`` distinct values.

    Args:
        rep_data: Pre-built dataset opened as h5py.File(..., 'r').
        threshold: Maximum number of unique values for an integer-valued
            column to be treated as categorical.

    Returns:
        categorical: List of categorical feature names.
        categorical_idx: Sorted array of the matching column indexes.
    """
    columns = rep_data['data'].attrs['columns']
    categorical = []
    for idx, name in enumerate(columns):
        observed = rep_data['data']['train'][:, idx]
        observed = observed[~np.isnan(observed)]
        integer_valued = np.all(observed == observed.astype(int))
        if integer_valued and len(np.unique(observed)) <= threshold:
            categorical.append(name)
    categorical_idx = np.sort([np.argwhere(columns == feat)[0, 0] for feat in categorical])
    return categorical, categorical_idx
def finding_cat_features_fom_file(rep_data, info_df):
    """Identify categorical features using an external variable-info table.

    Only columns whose name has the 'plain' prefix are considered; the
    variable id is the suffix after the last underscore and is looked up in
    ``info_df`` to decide whether the column is categorical.

    Args:
        rep_data: Pre-built dataset opened as h5py.File(..., 'r').
        info_df: Dataframe with a 'VariableID' and a 'Datatype' column.

    Returns:
        categorical: List of categorical feature names.
        categorical_idx: Sorted array of the matching column indexes.
    """
    columns = rep_data['data'].attrs['columns']
    categorical = []
    for name in columns:
        if name.split('_')[0] != 'plain':
            continue
        var_id = name.split('_')[-1]
        datatype = info_df[info_df['VariableID'] == var_id]['Datatype'].values
        if datatype == 'Categorical':
            categorical.append(name)
    categorical_idx = np.sort([np.argwhere(columns == feat)[0, 0] for feat in categorical])
    return categorical, categorical_idx
def get_one_hot(rep_data, cat_names, cat_idx):
    """
    One-hots the categorical features in a given pre-built dataset.
    Args:
        rep_data: Pre-built dataset as a h5py.File(...., 'r').
        cat_names: List of names containing categorical features.
        cat_idx: List of matching column indexes.
    Returns:
        all_categorical_data: Dict with each split's one-hotted categorical columns as a big array.
        col_name: List of names of the matching one-hot columns.
    """
    # Stack the splits (order: train, test, val) so get_dummies sees every
    # category that occurs anywhere in the dataset, keeping encodings aligned.
    all_categorical_data = np.concatenate([rep_data['data']['train'][:, cat_idx],
                                           rep_data['data']['test'][:, cat_idx],
                                           rep_data['data']['val'][:, cat_idx]], axis=0)
    cat_dict = {}
    col_name = []
    for i, cat in enumerate(cat_idx):
        dum = np.array(pd.get_dummies(all_categorical_data[:, i]))
        if dum.shape[-1] <= 2:
            # Binary (or constant) feature: keep a single 0/1 column only.
            dum = dum[:, -1:]
            # Column is renamed "<variable-suffix>_cat".
            col_name += [cat_names[i].split('_')[-1] + '_cat']
        else:
            # Multi-valued feature: one "<variable-suffix>_cat_<k>" column per category.
            col_name += [cat_names[i].split('_')[-1] + '_cat_' + str(k) for k in range(dum.shape[-1])]
        cat_dict[cat] = dum
    # dict preserves insertion order, so the concatenated columns stay aligned with col_name.
    all_categorical_data_one_h = np.concatenate(list(cat_dict.values()), axis=1)
    # Undo the stacking: slice back along the original split boundaries.
    all_categorical_data = {}
    all_categorical_data['train'] = all_categorical_data_one_h[:rep_data['data']['train'].shape[0]]
    all_categorical_data['test'] = all_categorical_data_one_h[
                                   rep_data['data']['train'].shape[0]:rep_data['data']['train'].shape[0] +
                                                                      rep_data['data']['test'].shape[0]]
    all_categorical_data['val'] = all_categorical_data_one_h[-rep_data['data']['val'].shape[0]:]
    return all_categorical_data, col_name
def scaling_data_common(data_path, threshold=25, scaler=None, static_idx=None, df_ref=None):
    """
    Wrapper which one-hots and scales a pre-built dataset.
    Args:
        data_path: String with the path to the pre-built non scaled dataset.
        threshold: Int below which we consider a variable as categorical.
        scaler: sklearn Scaler to use; a fresh StandardScaler is built when None.
        static_idx: List of indexes containing static columns.
        df_ref: Reference dataset containing supplementary information on the columns.
    Returns:
        data_dic: dict with each split as a big array.
        label_dic: dict with each split and labels array in same order as lookup_table.
        patient_dic: dict containing a array for each split such that each row of the array is of the type
                     [start_index, stop_index, patient_id].
        col: list of the variables names corresponding to each column.
        labels_name: list of the tasks name corresponding to labels columns.
    """
    # Build a fresh scaler per call: the previous `scaler=StandardScaler()`
    # default was a mutable instance shared across calls, so a second call
    # silently reused the scaler fitted on the first dataset.
    if scaler is None:
        scaler = StandardScaler()
    rep_data = h5py.File(data_path, 'r')
    columns = rep_data['data'].attrs['columns']
    train_data = rep_data['data']['train'][:]
    test_data = rep_data['data']['test'][:]
    val_data = rep_data['data']['val'][:]
    # We just extract tasks name to propagate
    if 'tasks' in list(rep_data['labels'].attrs.keys()):
        labels_name = rep_data['labels'].attrs['tasks']
    else:
        labels_name = None
    # We treat np.inf and np.nan as the same
    np.place(train_data, mask=np.isinf(train_data), vals=np.nan)
    np.place(test_data, mask=np.isinf(test_data), vals=np.nan)
    np.place(val_data, mask=np.isinf(val_data), vals=np.nan)
    # Fit on train only; transform held-out splits with the same statistics.
    train_data_scaled = scaler.fit_transform(train_data)
    val_data_scaled = scaler.transform(val_data)
    test_data_scaled = scaler.transform(test_data)
    # We pad after scaling, thus zero is equivalent to padding with the mean value across patients.
    np.place(train_data_scaled, mask=np.isnan(train_data_scaled), vals=0.0)
    np.place(test_data_scaled, mask=np.isnan(test_data_scaled), vals=0.0)
    np.place(val_data_scaled, mask=np.isnan(val_data_scaled), vals=0.0)
    # If we have static values we take one per patient stay (first row of each window).
    if static_idx:
        train_static_values = train_data[rep_data['patient_windows']['train'][:][:, 0]][:, static_idx]
        static_scaler = StandardScaler()
        static_scaler.fit(train_static_values)
        # Scale all entries
        train_data_static_scaled = static_scaler.transform(train_data[:, static_idx])
        val_data_static_scaled = static_scaler.transform(val_data[:, static_idx])
        test_data_static_scaled = static_scaler.transform(test_data[:, static_idx])
        # Replace NaNs
        np.place(train_data_static_scaled, mask=np.isnan(train_data_static_scaled), vals=0.0)
        np.place(val_data_static_scaled, mask=np.isnan(val_data_static_scaled), vals=0.0)
        np.place(test_data_static_scaled, mask=np.isnan(test_data_static_scaled), vals=0.0)
        # Insert in the scaled dataset
        train_data_scaled[:, static_idx] = train_data_static_scaled
        test_data_scaled[:, static_idx] = test_data_static_scaled
        val_data_scaled[:, static_idx] = val_data_static_scaled
    # We deal with the categorical features.
    if df_ref is None:
        cat_names, cat_idx = finding_cat_features(rep_data, threshold)
    else:
        cat_names, cat_idx = finding_cat_features_fom_file(rep_data, df_ref)
    # We check for columns that are both categorical and static.
    # common_name is initialized unconditionally: it is read below whenever
    # categorical features exist, even when static_idx is None (previously a
    # NameError in that case).
    common_name = None
    if static_idx:
        common_idx = [idx for idx in cat_idx if idx in static_idx]
        if common_idx:
            common_name = columns[common_idx]
    if len(cat_names) > 0:
        # We one-hot categorical features with more than two possible values
        all_categorical_data, oh_cat_name = get_one_hot(rep_data, cat_names, cat_idx)
        if common_name is not None:
            common_cat_name = [c for c in oh_cat_name if c.split('_')[0] in common_name]
        # We replace them at the end of the features
        train_data_scaled = np.concatenate([np.delete(train_data_scaled, cat_idx, axis=1),
                                            all_categorical_data['train']], axis=-1)
        test_data_scaled = np.concatenate([np.delete(test_data_scaled, cat_idx, axis=1),
                                           all_categorical_data['test']], axis=-1)
        val_data_scaled = np.concatenate([np.delete(val_data_scaled, cat_idx, axis=1),
                                          all_categorical_data['val']], axis=-1)
        columns = np.concatenate([np.delete(columns, cat_idx, axis=0), oh_cat_name], axis=0)
        # We ensure that static categorical features are also among the first features with other static ones.
        if common_name is not None:
            common_current_idx = [i for i, n in enumerate(columns) if n in common_cat_name]
            new_idx = common_current_idx + [k for k in range(len(columns)) if k not in common_current_idx]
            columns = columns[new_idx]
            train_data_scaled = train_data_scaled[:, new_idx]
            test_data_scaled = test_data_scaled[:, new_idx]
            val_data_scaled = val_data_scaled[:, new_idx]
    data_dic = {'train': train_data_scaled,
                'test': test_data_scaled,
                'val': val_data_scaled}
    # The returned label/patient handles are live h5py groups, so the file is
    # deliberately left open for the caller to consume them.
    if 'labels' in rep_data.keys():
        label_dic = rep_data['labels']
    else:
        label_dic = None
    if 'patient_windows' in rep_data.keys():
        patient_dic = rep_data['patient_windows']
    else:
        patient_dic = None
    return data_dic, label_dic, patient_dic, columns, labels_name
def save_to_h5_with_tasks(save_path, col_names, task_names, data_dict, label_dict, patient_windows_dict):
    """
    Save a dataset with the desired format as h5.
    Args:
        save_path: Path to save the dataset to.
        col_names: List of names of the variables in the dataset.
        task_names: List of task names matching the label columns; required when
            label_dict is provided.
        data_dict: Dict with an array for each split of the data.
        label_dict: (Optional) Dict with each split and and labels array in same order as lookup_table.
        patient_windows_dict: (Optional) Dict containing a array for each split such that each row of the array
            is of the type [start_index, stop_index, patient_id].
    Raises:
        Exception: If the number of column names does not match the number of features.
    Returns:
    """
    # Validate before touching the filesystem: previously the check ran after
    # the file was already written, leaving a corrupt file behind on failure.
    if not len(col_names) == data_dict['train'].shape[-1]:
        raise Exception(
            "We saved to data but the number of columns ({}) didn't match the number of features {} ".format(
                len(col_names), data_dict['train'].shape[-1]))
    with h5py.File(save_path, "w") as f:
        n_data = f.create_group('data')
        n_data.create_dataset('train', data=data_dict['train'].astype(float), dtype=np.float32)
        n_data.create_dataset('test', data=data_dict['test'].astype(float), dtype=np.float32)
        n_data.create_dataset('val', data=data_dict['val'].astype(float), dtype=np.float32)
        n_data.attrs['columns'] = list(col_names)
        if label_dict is not None:
            labels = f.create_group('labels')
            labels.create_dataset('train', data=label_dict['train'], dtype=np.float32)
            labels.create_dataset('test', data=label_dict['test'], dtype=np.float32)
            labels.create_dataset('val', data=label_dict['val'], dtype=np.float32)
            labels.attrs['tasks'] = list(task_names)
        if patient_windows_dict is not None:
            p_windows = f.create_group('patient_windows')
            p_windows.create_dataset('train', data=patient_windows_dict['train'], dtype=np.int32)
            p_windows.create_dataset('test', data=patient_windows_dict['test'], dtype=np.int32)
            p_windows.create_dataset('val', data=patient_windows_dict['val'], dtype=np.int32)
def build_few_label_dataset(path_to_data, path_to_save, percentage=10, seed=1234, task=None, overwrite=False):
"""Builds a dataset with reduced amounts of labeled training data.
Stratification is made at a patient level to ensure quicker decrease in labeled data diversity.
Args:
path_to_data: String with path to the initial h5 file containing full dataset.
path_to_save: String with path where to save the future dataset.
percentage: Integer with percentage of the labeled data.
seed: Integer with seed to split on.
task: String with task name to stratify on.
overwrite:
Returns:
Path | |
# Repository: djhohnstein/Mythic
# -*- coding: utf-8 -*-
import peewee as p
import datetime
from app import mythic_db
import app.crypto as crypto
import json
from uuid import uuid4
def gen_uuid():
    """Generate a random version-4 UUID rendered as a string."""
    new_id = uuid4()
    return str(new_id)
class Operator(p.Model):
    """A Mythic user account.

    Stores authentication data (SHA512-hashed password), account status flags
    and per-user UI configuration. ``light_config`` and ``dark_config`` are
    JSON strings holding the theme values used by the web UI; the dark theme
    is the default ``ui_config``.
    """
    # Default light UI theme, serialized once at class-definition time.
    light_config = json.dumps(
        {
            "background-color": "white",
            "text-color": "black",
            "hover": "#2B4978",
            "highlight": "#2B4978",
            "autocomplete": "#B7C8FA",
            "highlight-text": "#ECEDF0",
            "timestamp": "black",
            "operator": "#7E8BD9",
            "display": "#FF4D4D",
            "is-background-dark": "false",
            "new-callback-color": "#829BC4",
            "table-headers": "#F1F1F1",
            "operation-color": "#b366ff",
            "success_highlight": "#340080",
            "failure_highlight": "#f68d8d",
            "code-theme": "xcode",
            "table-color": "",
            "response-background": "#e8e8e8",
            "outline-buttons": "-",
            "bg-header": "hsl(225, 6%, 18%)",
            "bg-header-dark": "#c8c8c8",
            "bg-card-body": "#e8e8e8",
            "bg-card-body-l1": "hsl(225, 6%, 23%)",
            "bg-card-body-l2": "hsl(225, 6%, 80%)",
            "bg-card-footer": "#c8c8c8",
            "bg-body": "hsl(225, 6%, 18%)",
            "th": "#adadad",
            "font-size": "14",
            "top-bar": "#182842",
            "row-highlight": "#B7C8FA",
            "link": "#192A45",
            "link-visited": "#192A45",
        }
    )
    # Default dark UI theme; used as the default value of ui_config below.
    dark_config = json.dumps(
        {
            "background-color": "rgb(21,22,25)",
            "text-color": "#ECEDF0",
            "hover": "hsl(225, 6%, 12%)",
            "highlight": "#2C314D",
            "autocomplete": "#1E2133",
            "highlight-text": "#ECEDF0",
            "timestamp": "#24E0FF",
            "operator": "#7E8BD9",
            "display": "#FF4D4D",
            "is-background-dark": "true",
            "new-callback-color": "#515A8C",
            "table-headers": "#F1F1F1",
            "operation-color": "#b366ff",
            "success_highlight": "#340080",
            "failure_highlight": "#f68d8d",
            "code-theme": "monokai",
            "table-color": "table-dark",
            "response-background": "hsl(225, 6%, 23%)",
            "outline-buttons": "-outline-",
            "bg-header": "hsl(225, 6%, 18%)",
            "bg-header-dark": "hsl(225, 6%, 13%)",
            "bg-card-body": "hsl(225, 6%, 22%)",
            "bg-card-body-l1": "hsl(225, 6%, 23%)",
            "bg-card-body-l2": "hsl(225, 6%, 27%)",
            "bg-card-footer": "hsl(225, 6%, 23%)",
            "bg-body": "rgb(35,35,35)",
            "th": "hsl(225, 6%, 20%)",
            "font-size": "14",
            "top-bar": "#1E2133",
            "row-highlight": "#2C314D",
            "link": "",
            "link-visited": "",
        }
    )
    username = p.TextField(unique=True, null=False)
    # SHA512 hex digest of the password (see hash_password / check_password).
    password = p.TextField(null=False)
    admin = p.BooleanField(null=True, default=False)
    creation_time = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    last_login = p.DateTimeField(default=None, null=True)
    # option to simply de-activate an account instead of delete it so you keep all your relational data intact
    active = p.BooleanField(null=False, default=True)
    # Deferred FK because Operation itself references Operator (circular relation).
    current_operation = p.DeferredForeignKey("Operation", null=True)
    ui_config = p.TextField(null=False, default=dark_config)
    view_utc_time = p.BooleanField(null=False, default=False)
    deleted = p.BooleanField(null=False, default=False)
    class Meta:
        # NOTE(review): 'ordering' is a Django-style Meta option; confirm that
        # peewee actually honors it here.
        ordering = [
            "-id",
        ]
        database = mythic_db
    def to_json(self):
        """Serialize the operator for API responses; the password hash is excluded."""
        r = {
            "id": getattr(self, "id"),
            "username": self.username,
            "admin": self.admin,
            "creation_time": self.creation_time.strftime("%m/%d/%Y %H:%M:%S"),
            "last_login": self.last_login.strftime("%m/%d/%Y %H:%M:%S") if self.last_login is not None else "",
            "active": self.active,
            "current_operation": self.current_operation.name if self.current_operation is not None else None,
            "current_operation_id": self.current_operation.id if self.current_operation is not None else None,
            "ui_config": self.ui_config,
            "view_utc_time": self.view_utc_time,
            "deleted": self.deleted
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
    async def check_password(self, password):
        """Return True when the SHA512 hash of ``password`` matches the stored hash."""
        temp_pass = await crypto.hash_SHA512(password)
        return self.password.lower() == temp_pass.lower()
    async def hash_password(self, password):
        """Return the SHA512 hash of ``password`` suitable for storage."""
        return await crypto.hash_SHA512(password)
# This is information about a class of payloads (like Apfell-jxa)
# This will have multiple Command class objects associated with it
# Users can create their own commands and payload types as well
class PayloadType(p.Model):
    """A class of agent payloads (e.g. apfell-jxa).

    Commands are associated with a PayloadType; building/loading/transforming
    is delegated to an external container whose liveness is tracked through
    ``last_heartbeat`` and ``container_running``.
    """
    ptype = p.TextField(null=False, unique=True)
    creation_time = p.DateTimeField(null=False, default=datetime.datetime.utcnow)
    file_extension = p.CharField(null=True)
    # if this type requires another payload to be already created
    wrapper = p.BooleanField(default=False, null=False)
    # indicate which OS/versions this payload works for
    supported_os = p.TextField(null=False, default="")
    # information about getting information to/from another container or machine for building/loading/transforming
    last_heartbeat = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    container_running = p.BooleanField(null=False, default=False)
    service = p.TextField(null=False, default="rabbitmq")
    # who created the code for the payload type, not just who imported it
    author = p.TextField(null=False, default="")
    note = p.TextField(null=False, default="")
    supports_dynamic_loading = p.BooleanField(null=False, default=False)
    deleted = p.BooleanField(null=False, default=False)
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize this payload type, including its non-deleted build parameters."""
        r = {
            "id": getattr(self, "id"),
            "ptype": self.ptype,
            "creation_time": self.creation_time.strftime("%m/%d/%Y %H:%M:%S"),
            "file_extension": self.file_extension,
            "wrapper": self.wrapper,
            "supported_os": self.supported_os,
            "last_heartbeat": self.last_heartbeat.strftime("%m/%d/%Y %H:%M:%S"),
            "container_running": self.container_running,
            "service": self.service,
            "author": self.author,
            "note": self.note,
            "supports_dynamic_loading": self.supports_dynamic_loading,
            "deleted": self.deleted
        }
        # "build_parameters" is the BuildParameter backref defined below.
        if getattr(self, "build_parameters") is not None:
            r["build_parameters"] = [
                x.to_json()
                for x in getattr(self, "build_parameters")
                if x.deleted is False
            ]
        else:
            r["build_parameters"] = []
        return r
    def __str__(self):
        return json.dumps(self.to_json())
class WrappedPayloadTypes(p.Model):
    """Associates a wrapper payload type with the payload type it wraps."""
    # which payload type does the wrapping
    wrapper = p.ForeignKeyField(PayloadType, null=False)
    # which payload type is wrapped
    wrapped = p.ForeignKeyField(PayloadType, backref="wrapped", null=False)
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize both sides of the relation by their ptype names."""
        r = {
            "id": getattr(self, "id"),
            "wrapper": self.wrapper.ptype,
            "wrapped": self.wrapped.ptype
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
class BuildParameter(p.Model):
    """A user-configurable parameter used when building a PayloadType.

    Unique per (name, payload_type) pair; exposed to the payload container
    via the ``build_parameters`` backref on PayloadType.
    """
    # name presented to the user
    name = p.TextField(null=False, default="")
    # what kind of parameter should be shown in the UI? String or ChooseOne
    parameter_type = p.TextField(null=False, default="None")
    description = p.TextField(null=False, default="")
    # associated payload type
    payload_type = p.ForeignKeyField(PayloadType, backref="build_parameters")
    required = p.BooleanField(default=True)
    # regex used by the UI to validate the supplied value
    verifier_regex = p.TextField(default="", null=False)
    deleted = p.BooleanField(default=False)
    # the actual value / choices for the parameter
    parameter = p.TextField(null=False, default="")
    class Meta:
        # (name, payload_type) must be unique together.
        indexes = ((("name", "payload_type"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize the build parameter for API responses."""
        r = {
            "id": getattr(self, "id"),
            "name": self.name,
            "parameter_type": self.parameter_type,
            "description": self.description,
            "payload_type": self.payload_type.ptype,
            "required": self.required,
            "verifier_regex": self.verifier_regex,
            "deleted": self.deleted,
            "parameter": self.parameter
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
# This has information about a specific command that can be executed by a PayloadType
# Custom commands can be created by users
# There will be a new Command instance for every cmd+payload_type combination
# (each payload_type needs its own 'shell' command because they might be implemented differently)
class Command(p.Model):
    """A command executable by a PayloadType.

    There is one Command row per (cmd, payload_type) combination since each
    payload type may implement the same command differently. The ``is_*``
    flags mark which command fulfils each special agent capability.
    """
    needs_admin = p.BooleanField(null=False, default=False)
    # generates get-help info on the command
    help_cmd = p.TextField(null=False, default="")
    description = p.TextField(null=False)
    cmd = p.CharField(null=False)
    # this command applies to what payload types
    payload_type = p.ForeignKeyField(PayloadType, null=False)
    creation_time = p.DateTimeField(null=False, default=datetime.datetime.utcnow)
    # what version, so we can know if loaded commands are out of date
    version = p.IntegerField(null=False, default=1)
    # indicate if this command is the exit command for a payload type
    is_exit = p.BooleanField(null=False, default=False)
    # indicate if this is the command used for browsing files
    is_file_browse = p.BooleanField(null=False, default=False)
    # indicate if this is the command used for listing processes
    is_process_list = p.BooleanField(null=False, default=False)
    # indicate if this is the command used for downloading files
    is_download_file = p.BooleanField(null=False, default=False)
    # indicate if this is the command used for removing files
    is_remove_file = p.BooleanField(null=False, default=False)
    # indicate if this is the command used to upload files
    is_upload_file = p.BooleanField(null=False, default=False)
    author = p.TextField(null=False, default="")
    deleted = p.BooleanField(null=False, default=False)
    class Meta:
        # (cmd, payload_type) must be unique together.
        indexes = ((("cmd", "payload_type"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize the command for API responses."""
        r = {
            "id": getattr(self, "id"),
            "needs_admin": self.needs_admin,
            "help_cmd": self.help_cmd,
            "description": self.description,
            "cmd": self.cmd,
            "payload_type": self.payload_type.ptype,
            "creation_time": self.creation_time.strftime("%m/%d/%Y %H:%M:%S"),
            "version": self.version,
            "is_exit": self.is_exit,
            "is_file_browse": self.is_file_browse,
            "is_process_list": self.is_process_list,
            "is_download_file": self.is_download_file,
            "is_remove_file": self.is_remove_file,
            "is_upload_file": self.is_upload_file,
            "author": self.author,
            "deleted": self.deleted
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
# these parameters are used to create an easily parsible JSON 'params' field for the agent to utilize
class CommandParameters(p.Model):
    """A named parameter of a Command, unique per (command, name).

    Parameters are combined into an easily parsable JSON 'params' field for
    the agent to utilize.
    """
    command = p.ForeignKeyField(Command, null=False)
    # what is the name of the parameter (what is displayed in the UI and becomes dictionary key)
    name = p.TextField(null=False)
    # String, Boolean, Number, Array, Choice, ChoiceMultiple, Credential, File, PayloadList, AgentConnect
    type = p.CharField(null=False, default="String")
    default_value = p.TextField(null=False, default="")
    # \n separated list of possible choices
    choices = p.TextField(null=False, default="")
    required = p.BooleanField(null=False, default=False)
    description = p.TextField(null=False, default="")
    # if the action is related to payloads or linking agents, you can limit the options to only agents you want
    supported_agents = p.TextField(null=False, default="")
    class Meta:
        # (command, name) must be unique together.
        indexes = ((("command", "name"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize the parameter together with its owning command's identity."""
        r = {
            "id": getattr(self, "id"),
            "command": self.command.id,
            "cmd": self.command.cmd,
            "payload_type": self.command.payload_type.ptype,
            "name": self.name,
            "type": self.type,
            "default_value": self.default_value,
            "choices": self.choices,
            "required": self.required,
            "description": self.description,
            "supported_agents": self.supported_agents
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
# users will be associated with operations
# payload_types and commands are associated with all operations
# when creating a new operation, associate all the default c2profiles with it
class Operation(p.Model):
    """An operation that operators are associated with.

    Each operation has a single admin operator and a unique AES pre-shared
    key used for things like PFS with EKE.
    """
    name = p.TextField(null=False, unique=True)
    admin = p.ForeignKeyField(Operator, null=False)  # who is an admin of this operation
    complete = p.BooleanField(null=False, default=False)
    # auto create an AES PSK key when the operation is created for things like PFS with EKE
    AESPSK = p.TextField(null=False, unique=True)
    # optional URL used to push operation notifications
    webhook = p.TextField(null=True)
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize the operation; note the AES PSK is included in the output."""
        r = {
            "id": getattr(self, "id"),
            "name": self.name,
            "admin": self.admin.username,
            "complete": self.complete,
            "AESPSK": self.AESPSK,
            "webhook": self.webhook
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
class DisabledCommandsProfile(p.Model):
    """A named group of commands disabled for an operation (OPSEC concerns)."""
    # A set of commands that are disabled for an operation due to OPSEC concerns
    # only the lead of an operation will be able to set this for other operators on that operation
    # name to group a bunch of disabled commands together for an operator
    name = p.TextField(null=False)
    command = p.ForeignKeyField(Command, null=False)
    class Meta:
        # (command, name) must be unique together.
        indexes = ((("command", "name"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize the profile entry with the disabled command's identity."""
        r = {
            "id": getattr(self, "id"),
            "name": self.name,
            "command": self.command.cmd,
            "command_id": self.command.id,
            "payload_type": self.command.payload_type.ptype
        }
        return r
    def __str__(self):
        return json.dumps(self.to_json())
class DisabledCommands(p.Model):
command = p.ForeignKeyField(Command, null=False)
operator = p.ForeignKeyField(Operator, null=False)
operation = | |
# -*- coding: utf-8 -*-
from collections import Sequence
from datetime import datetime
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # NOQA
from xively.models import (
Datapoint,
Datastream,
Feed,
Key,
Location,
Permission,
Resource,
Trigger,
Unit,
Waypoint,
)
class ManagerBase(object):
    """Abstract base class shared by all of our manager classes."""

    @property
    def base_url(self):
        """Return the explicit base URL, or derive one from the parent resource."""
        explicit = getattr(self, '_base_url', None)
        if explicit is not None:
            return explicit
        parent = getattr(self, 'parent', None)
        if parent is not None:
            manager = getattr(parent, '_manager', None)
            if manager is not None:
                return manager.url(parent.id) + '/' + self.resource
        return None

    @base_url.setter  # NOQA
    def base_url(self, base_url):
        self._base_url = base_url

    def url(self, id_or_url=None):
        """Return a url relative to the base url."""
        if not id_or_url:
            return self.base_url
        return urljoin(self.base_url + '/', str(id_or_url))

    def _parse_datetime(self, value):
        """Parse and return a datetime string from the Xively API."""
        return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%fZ")

    def _prepare_params(self, params):
        """Prepare parameters to be passed in query strings to the Xively API."""
        prepared = dict(params)
        for key in prepared:
            if isinstance(prepared[key], datetime):
                prepared[key] = prepared[key].isoformat() + 'Z'
        return prepared
class FeedsManager(ManagerBase):
    """Create, update and return Feed objects.
    .. note:: This manager should live on a :class:`.XivelyAPIClient` instance
        and not instantiated directly.
    :param client: Low level :class:`.Client` instance
    Usage::
        >>> import xively
        >>> api = xively.XivelyAPIClient("API_KEY")
        >>> api.feeds.create(title="Xively Office environment")
        <xively.Feed(7021)>
        >>> api.feeds.get(7021)
        <xively.Feed(7021)>
        >>> api.feeds.update(7021, private=True)
        >>> api.feeds.delete(7021)
    """
    # Resource segment appended to the client's base URL.
    resource = 'feeds'
    # List of fields that can be returned from the API but not directly set.
    _readonly_fields = (
        'id',
        'feed',
        'status',
        'creator',
        'created',
        'updated',
        'version',
        'auto_feed_url',
        'product_id',
        'device_serial',
    )
    def __init__(self, client):
        # Store the low-level client and compute this manager's base URL.
        self.client = client
        self.base_url = client.base_url + self.resource
def create(self, title, description=None, website=None, email=None,
tags=None, location=None, private=None, datastreams=None):
"""Creates a new Feed.
:param title: A descriptive name for the feed
:param description: A longer text description of the feed
:param website: The URL of a website which is relevant to this feed
e.g. home page
:param email: A public contact email address for the creator of this
feed
:param tags: Tagged metadata about the environment (characters ' " and
commas will be stripped out)
:param location: :class:`.Location` object for this feed
:param private: Whether the environment is private or not. Can be
either True or False
"""
data = {
'version': Feed.VERSION,
'title': title,
'description': description,
'website': website,
'email': email,
'tags': tags,
'location': location,
'private': private,
'datastreams': datastreams,
}
feed = self._coerce_feed(data)
response = self.client.post(self.url(), data=feed)
response.raise_for_status()
location = response.headers['location']
feed.feed = location
feed.id = _id_from_url(location)
return feed
def update(self, id_or_url, **kwargs):
"""Updates an existing feed by its id or url.
:param id_or_url: The id of a :class:`.Feed` or its URL
:param kwargs: The fields to be updated
"""
url = self.url(id_or_url)
response = self.client.put(url, data=kwargs)
response.raise_for_status()
def list(self, page=None, per_page=None, content=None, q=None, tag=None,
user=None, units=None, status=None, order=None, show_user=None,
lat=None, lon=None, distance=None, distance_units=None):
"""Returns a paged list of feeds.
Only feeds that are viewable by the authenticated account will be
returned. The following parameters can be applied to limit or refine
the returned feeds:
:param page: Integer indicating which page of results you are
requesting. Starts from 1.
:param per_page: Integer defining how many results to return per page
(1 to 1000)
:param content: String parameter ('full' or 'summary') describing
whether we want full or summary results. Full results means all
datastream values are returned, summary just returns the
environment meta data for each feed
:param q: Full text search parameter. Should return any feeds matching
this string
:param tag: Returns feeds containing datastreams tagged with the search
query
:param user: Returns feeds created by the user specified
:param units: Returns feeds containing datastreams with units specified
by the search query
:param status: Possible values ('live', 'frozen', or 'all'). Whether to
search for only live feeds, only frozen feeds, or all feeds.
Defaults to all
:param order: Order of returned feeds. Possible values ('created_at',
'retrieved_at', or 'relevance')
:param show_user: Include user login and user level for each feed.
Possible values: true, false (default)
The following additional parameters are available which allow location
based searching of feeds:
:param lat: Used to find feeds located around this latitude
:param lon: Used to find feeds located around this longitude
:param distance: search radius
:param distance_units: miles or kms (default)
"""
url = self.url()
params = {k: v for k, v in (
('page', page),
('per_page', per_page),
('content', content),
('q', q),
('tag', tag),
('user', user),
('units', units),
('status', status),
('order', order),
('show_user', show_user),
('lat', lat),
('lon', lon),
('distance', distance),
('distance_units', distance_units),
) if v is not None}
response = self.client.get(url, params=params)
response.raise_for_status()
json = response.json()
feeds = [self._coerce_feed(feed_data) for feed_data in json['results']]
return feeds
def get(self, id_or_url, datastreams=None, show_user=None, start=None,
end=None, duration=None, find_previous=None, limit=None,
interval_type=None, interval=None):
"""Fetches and returns a feed by id or url.
By default the most recent datastreams are returned. It is also
possible to filter the datastreams returned with the feed by using the
"datastreams" parameter and a list of datastream IDs.
:param datastreams: Filter the returned datastreams
:type datastreams: list of datastream IDs
:param show_user: Include user login for each feed. (default: False)
:type show_user: bool
:param start: Defines the starting point of the query
:param end: Defines the end point of the data returned
:param duration: Specifies the duration of the query
:param find_previous:
Will also return the previous value to the date range being
requested.
:param limit:
Limits the number of results to the number specified. Defaults to
100 and has a maximum of 1000.
:param interval_type:
If set to "discrete" the data will be returned in fixed time
interval format according to the inverval value supplied. If this
is not set, the raw datapoints will be returned.
:param interval:
Determines what interval of data is requested and is defined in
seconds between the datapoints. If a value is passed in which does
not match one of these values, it is rounded up to the next value.
See :meth:`~.DatapointsManager.history` for details.
"""
url = self.url(id_or_url)
if isinstance(datastreams, Sequence):
datastreams = ','.join(datastreams)
params = {k: v for k, v in (
('datastreams', datastreams),
('show_user', show_user),
('start', start),
('end', end),
('duration', duration),
('find_previous', find_previous),
('limit', limit),
('interval_type', interval_type),
('interval', interval),
) if v is not None}
params = self._prepare_params(params)
response = self.client.get(url, params=params)
response.raise_for_status()
data = response.json()
feed = self._coerce_feed(data)
return feed
def delete(self, id_or_url):
    """Delete a feed by id or url.

    .. WARNING:: This is final and cannot be undone.

    :param id_or_url: The feed ID or its URL
    """
    feed_url = self.url(id_or_url)
    resp = self.client.delete(feed_url)
    resp.raise_for_status()
def _coerce_feed(self, feed_data):
    """Return a Feed object built from a mapping object (dict).

    Nested structures ('datastreams', 'location') and readonly fields are
    popped out first so the remaining keys can be passed straight to the
    Feed constructor; the readonly values are then restored by attribute
    assignment.  NOTE: mutates the feed_data mapping in place (pops keys).
    """
    datastreams_data = feed_data.pop('datastreams', None)
    location_data = feed_data.pop('location', None)
    # Strip out the readonly fields and manually set later.
    readonly = {f: feed_data.pop(f)
                for f in self._readonly_fields
                if f in feed_data}
    feed = Feed(**feed_data)
    feed._manager = self
    feed.id = readonly.pop('id', None)
    # Fall back to a URL derived from the id when the payload has none.
    feed.feed = readonly.pop('feed', None) or self.url(feed.id)
    # Explicitely set the readonly fields we stripped out earlier.
    for name, value in readonly.items():
        setattr(feed, name, value)
    if datastreams_data:
        feed._datastreams_manager = DatastreamsManager(feed)
        feed.datastreams = self._coerce_datastreams(
            datastreams_data, feed._datastreams_manager)
    # An absent location is normalised to an empty Location object.
    if location_data:
        location = self._coerce_location(location_data)
    else:
        location = Location()
    feed._data['location'] = location
    return feed
def _coerce_datastreams(self, datastreams_data, datastreams_manager):
    """Return a list of Datastream objects coerced from raw data items."""
    return [datastreams_manager._coerce_datastream(item)
            for item in datastreams_data]
def _coerce_location(self, instance):
    """Return a Location object, converting from a mapping if required."""
    if isinstance(instance, Location):
        return instance
    data = dict(**instance)
    raw_waypoints = data.pop('waypoints', None)
    if raw_waypoints is not None:
        data['waypoints'] = self._coerce_waypoints(raw_waypoints)
    return Location(**data)
def _coerce_waypoints(self, waypoints_data):
    """Return a list of Waypoint objects from the given waypoint data."""
    coerced = []
    for raw in waypoints_data:
        timestamp = self._parse_datetime(raw['at'])
        remainder = {key: value for key, value in raw.items() if key != 'at'}
        coerced.append(Waypoint(at=timestamp, **remainder))
    return coerced
class DatastreamsManager(Sequence, ManagerBase):
"""Create, update and return Datastream objects.
Instances of | |
# if we don't have urls, but the service type contains OGC: .. , download or website, then we have an error
# TODO: document
if (protocolstxt.find("OGC:") > -1 or protocolstxt.find("download") > -1 or protocolstxt.find("website") > -1) and nrurls == 0:
score = 0
# checkid = 6, so the index in the matrix is: 5
result = checksdatasets[5][2][score]
else:
# there must be a URL as well, so check this
if errors > 0 or nrurls == 0:
score = 0
else:
score = 2
result = checksservices[5][2][score]
return MkmScore(urlstxt, score, result)
# protocoltxt analyses the protocols by looking if they are in the codelist;
# the mdtype determines which result matrix (datasets vs services) to use.
def checkprotocol(protocolstxt, mdtype):
    """ Check the protocol
    Datasets: for Check 5
    Services: for Check 5, servicetype
    Logic: each listed protocol must be in the codelist for servicetypes for a score = 2. If this is not the case, the score = 1. If no protocol is provided but a URL is (for check 6), then the score is 0.
    """
    # protocolstxt consists of a list of protocols, split by the configured
    # value separator; each one is looked up in codelistServiceTypes.
    score = 2
    protocols = protocolstxt.split(valuesep)  # TODO: make configurable?
    notinlist = 0
    missing = False
    for p in protocols:
        if p is not None:
            try:
                if p == "--geen protocol bij url--":  # TODO: configurable?
                    score = 0
                    # BUGFIX: was `missing == True` (a no-op comparison), so
                    # the missing-protocol marker never forced score 0 below.
                    missing = True
                elif len(p) > 0:
                    # For serviceTypes: use the values from the codelist, since
                    # this one contains an array of (single valued) arrays.
                    found = False
                    for st in codelistServiceTypes:
                        if p == st[0]:
                            found = True
                    if not found:
                        notinlist = notinlist + 1
            except Exception as e:
                logging.debug("Protocol not in codelist, protocol: " + p)
                logging.debug(str(e))
                notinlist = notinlist + 1
    if missing:
        score = 0
    elif notinlist > 0:
        score = 1
    if mdtype == "dataset" or mdtype == "series":
        # checkid = 5, so the index in the matrix is: 4
        result = checksdatasets[4][2][score]
    else:
        result = checksservices[13][2][score]
    return MkmScore(protocolstxt, score, result)
def checkjuridischegebruiksrestricties(restrictionsarr, mdtype):
    """ Check the juridische gebruikssrestricties
    Datasets: for Check 4
    Services: for Check 4
    Logic: otherRestrictions must be present or no value is provided for a score = 2. Other values and more than 1 value are not allowed.
    For logic / remarks, also see issue https://bitbucket.org/thijsbrentjens/metadatakwaliteit/issue/27
    """
    # An absent array is acceptable, so start from the maximum score.
    score = 2
    restrictionstxt = ""
    if restrictionsarr is not None:
        # restrictionsarr could contain multiple values
        logging.debug("Restrictions array (nr of objects: " +
                      str(len(restrictionsarr)) + "): " + str(restrictionsarr))
        has_values = len(restrictionsarr) != 0
        first_is_wrong = has_values and restrictionsarr[0] != "otherRestrictions"
        too_many = len(restrictionsarr) > 1
        if first_is_wrong or too_many:
            score = 0
        restrictionstxt = valuesep.join(restrictionsarr)
    if mdtype in ("dataset", "series"):
        # checkid = 4, so the index in the matrix is: 3
        result = checksdatasets[3][2][score]
    else:
        result = checksservices[3][2][score]
    return MkmScore(restrictionstxt, score, result)
def checkjuridischetoegangsrestricties(restrictionsarr, mdtype):
    """ Check the juridische toegangsrestricties
    Datasets: for Check 3
    Services: for Check 3
    Logic: otherRestrictions must be present (1 or 2 times) for a score = 2 and must be the only value provided, otherwise the score is 0.
    For logic / remarks, also see issue https://bitbucket.org/thijsbrentjens/metadatakwaliteit/issue/27
    """
    # The value is mandatory, so start pessimistically from score 0.
    score = 0
    restrictionstxt = ""
    errorfound = False
    try:
        if restrictionsarr is None:
            errorfound = True
        else:
            # Every entry must literally be "otherRestrictions" ...
            for entry in restrictionsarr:
                if entry != "otherRestrictions":
                    errorfound = True
            # ... and it may appear at most twice.
            if len(restrictionsarr) > 2:
                errorfound = True
                logging.debug("Too much occurrences of toegangsrestricties, found: " +
                              str(len(restrictionsarr)) + ", values: " + str(restrictionsarr))
            restrictionstxt = valuesep.join(restrictionsarr)
    except Exception as e:
        logging.info('Error in toegangsrestricties, score 0.')
        logging.debug(str(e))
        errorfound = True
    if not errorfound and restrictionstxt != "":
        score = 2
    if mdtype in ("dataset", "series"):
        # checkid = 4, so the 3rd value in the matrix (index 2).
        result = checksdatasets[2][2][score]
    else:
        result = checksservices[2][2][score]
    return MkmScore(restrictionstxt, score, result)
def checkbbox(boundingbox, mdtype):
    """ Check the boundingbox / extent in Netherlands
    Datasets: for Check 10
    Logic: the extent must be in the area of NL + NCP (3.047,50.670,7.276,53.612). If no extent is provided, the score is 0.
    """
    score = 0
    result = 0
    bboxstr = ""
    try:
        bboxstr = ",".join([boundingbox.minx, boundingbox.miny,
                            boundingbox.maxx, boundingbox.maxy])
        # TODO: make values bbox configurable?
        # NOTE(review): the accepted window here (2.0, 50.0, 8.0, 57.0) is
        # wider than the NL+NCP box in the docstring — confirm intended.
        within_nl = (float(boundingbox.minx) >= 2.0
                     and float(boundingbox.miny) >= 50.0
                     and float(boundingbox.maxx) <= 8.0
                     and float(boundingbox.maxy) <= 57.0)
        if within_nl:
            score = 2
            logging.debug('Boudingbox ' + bboxstr + ' is in NL area')
        else:
            score = 1
            logging.debug('Boudingbox ' + bboxstr + ' is NOT in NL area')
    except Exception as e:
        logging.info('Error in boundinbox extent.')
        logging.info(str(e))
        # Still try to report the raw coordinates for the score record.
        try:
            bboxstr = ",".join([boundingbox.minx, boundingbox.miny,
                                boundingbox.maxx, boundingbox.maxy])
        except Exception as e:
            logging.debug(
                'Error in boundinbox extent, bboxstr cannot be constructed')
            logging.debug(str(e))
    if mdtype in ("dataset", "series"):
        # checkid = 10, so the index in the matrix is: 9
        result = checksdatasets[9][2][score]
    return MkmScore(bboxstr, score, result)
def checkoverigebeperkingen(beperkingenarr, mdtype):
    """ Check both the URL and description of the Beperkingen.
    Datasets: for Check 1 and 2
    Services: for Check 1 and 2
    Logic: for Check 1: the combination of description and URL must be in the codelist for limitations, then score = 2, otherwise 0
    Logic: for Check 2: the combination of description and URL must be in the codelist for limitations, then score = 2, otherwise 0
    Returns a list of two MkmScore objects: [url_score, description_score].
    """
    beperkingurltxt = ""
    beschrijvingtxt = ""
    score1 = 0
    score2 = 0
    try:
        # maybe one element, then this must be a url
        if len(beperkingenarr) > 0:
            beperkingurltxt = beperkingenarr[0]
            # A single non-http value is treated as the description instead.
            if beperkingurltxt.lower().startswith("http") == False:
                beschrijvingtxt = beperkingenarr[0]
                beperkingurltxt = ""
        if len(beperkingenarr) >= 2:
            beschrijvingtxt = beperkingenarr[1]
            # elements may be turned around
            if beperkingurltxt.lower().startswith("http") == False:
                beperkingurltxt = beperkingenarr[1]
                beschrijvingtxt = beperkingenarr[0]
        # now loop over the codelist for limiations
        # Newlines are flattened so multi-line metadata still matches.
        beschrijvingtxt = beschrijvingtxt.replace("\n", " ")
        beperkingurltxt = beperkingurltxt.replace("\n", " ")
        for codes in codelistLimitations:
            # Score if the value is in the second column of the codelist.
            # this makes sure that the URL matches with the textual code
            # Score if the value is in the first column of the codelist and a
            # valid URL is provided. This is the same logic for check 1 and 2
            if beperkingurltxt.lower().startswith(codes[1].lower()) and beschrijvingtxt.lower().startswith(codes[0].lower()):
                # if codes[1].lower().startswith(beperkingurltxt.lower()) and
                # codes[0].lower().startswith(beschrijvingtxt.lower()):
                score1 = 2
                score2 = 2
                # if beperkingurltxt.startswith(codes[1]) and codes[0].lower() == beschrijvingtxt.lower():
                #     score2 = 2
    except Exception as e:
        # Any failure (e.g. beperkingenarr is None) yields score 0 for both.
        logging.debug(str(e))
        score1 = 0
        score2 = 0
    if mdtype == "dataset" or mdtype == "series":
        # checkid = 11, so the index in the matrix is: 11-1=10
        result1 = checksdatasets[0][2][score1]
        result2 = checksdatasets[1][2][score2]
    else:
        result1 = checksservices[0][2][score1]
        result2 = checksservices[1][2][score2]
    results = [MkmScore(beperkingurltxt, score1, result1),
               MkmScore(beschrijvingtxt, score2, result2)]
    return results
# the abstract has checkid = 11 for datasets, 10 for services
def checkabstract(abstracttxt, mdtype):
""" Check the abstract
Datasets: for Check 11
Services: for Check 10
Logic: the abstract must be at least 25 characters and at max 2000 for a score = 2.
"""
ascore = 0
if abstracttxt != None:
abstracttxt = abstracttxt.replace("\n", " ")
# TODO: make min and max abstract length configurable?
if len(abstracttxt) >= 25 and len(abstracttxt) <= 4000:
ascore = 2
else:
abstracttxt = ""
# Now fetch the result
if mdtype == "dataset" or mdtype == "series":
# checkid = 11, so | |
[mRNA_AC,protein_id,protein_seq]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(mRNA_AC+'\t'+string.replace(cds_location,'..','\t')+'\n')
translated_mRNAs[mRNA_AC]=[]
### Parse new line
#>ENST00000400685 cdna:known supercontig::NT_113903:9607:12778:1 gene:ENSG00000215618
t= string.split(data[1:],':'); sequence=''
transid_data = string.split(t[0],' '); transid = transid_data[0]
if '.' in transid:
transid = string.split(transid,'.')[0]
#try: ensembl_id,chr,strand,transid,prot_id = t
#except ValueError: ensembl_id,chr,strand,transid = t
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
#### Add the last entry
if len(sequence) > 0:
if transid in missing_protein_ACs:
### Perform in silico translation
mRNA_db = {}; mRNA_db[transid] = '',sequence[1:]
translation_db = BuildInSilicoTranslations(mRNA_db)
for mRNA_AC in translation_db: ### Export in silico protein predictions
protein_id, protein_seq, cds_location = translation_db[mRNA_AC]
values = [mRNA_AC,protein_id,protein_seq]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(mRNA_AC+'\t'+string.replace(cds_location,'..','\t')+'\n')
translated_mRNAs[mRNA_AC]=[]
datar.close()
datad.close()
end_time = time.time(); time_diff = int(end_time-start_time)
print "Ensembl transcript sequences analyzed in %d seconds" % time_diff
missing_protein_ACs_inSilico=[]
for mRNA_AC in missing_protein_ACs:
if mRNA_AC not in translated_mRNAs:
missing_protein_ACs_inSilico.append(mRNA_AC)
print len(missing_protein_ACs_inSilico), 'Ensembl mRNAs without mRNA sequence NOT in silico translated (e.g., lncRNAs)', missing_protein_ACs_inSilico[:10]
def importEnsemblProteinSeqData(species,unique_ens_transcripts):
    """Link Ensembl transcripts to their protein sequences.

    Builds a protein_id -> transcript map restricted to the transcripts of
    interest, imports the matching protein sequences from the Ensembl FASTA
    and reports which transcripts have no protein sequence.

    Returns (ensembl_protein_seq_db, missing_ens_proteins).
    """
    from build_scripts import FeatureAlignment
    protein_relationship_file,protein_feature_file,protein_seq_fasta,null = FeatureAlignment.getEnsemblRelationshipDirs(species)
    ens_transcript_protein_db = FeatureAlignment.importEnsemblRelationships(protein_relationship_file,'transcript')
    # Invert the transcript->protein map, keeping only requested transcripts.
    unique_ens_proteins = {}
    for transcript in ens_transcript_protein_db:
        if transcript in unique_ens_transcripts:
            protein_id = ens_transcript_protein_db[transcript]
            # Skip empty/placeholder IDs; presumably protein_id is a string
            # here and len>1 filters blank entries -- TODO confirm.
            if len(protein_id)>1:
                unique_ens_proteins[protein_id] = transcript
    ensembl_protein_seq_db = importEnsemblProtSeq(protein_seq_fasta,unique_ens_proteins)
    # Mark every transcript for which a protein sequence was found.
    transcript_with_prot_seq = {}
    for protein_id in ensembl_protein_seq_db:
        if protein_id in unique_ens_proteins:
            transcript = unique_ens_proteins[protein_id]
            transcript_with_prot_seq[transcript]=[]
    # Everything else is reported as missing.
    missing_ens_proteins={}
    for transcript in unique_ens_transcripts:
        if transcript not in transcript_with_prot_seq: missing_ens_proteins[transcript]=[]
    print len(ensembl_protein_seq_db),'Ensembl transcripts linked to protein sequence and',len(missing_ens_proteins), 'transcripts missing protein sequence.'
    return ensembl_protein_seq_db, missing_ens_proteins
def importEnsemblProtSeq(filename,unique_ens_proteins):
    """Parse an Ensembl protein FASTA and export transcript/protein/sequence rows.

    Streams the FASTA line by line, accumulating the sequence of the current
    record; on each new '>' header the previous record is flushed if its
    protein ID is in unique_ens_proteins (protein_id -> mRNA accession).
    Writes a tab-separated export file and returns a dict of exported
    protein IDs (values are empty lists, used as a set).

    NOTE(review): relies on the module-level globals `species`, `export`,
    `filepath` and `string` -- confirm they are defined at import time.
    """
    export_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-EnsProt_sequences.txt'
    export_data = export.ExportFile(export_file)
    fn=filepath(filename); ensembl_protein_seq_db={}; sequence = ''; y=0
    for line in open(fn,'r').xreadlines():
        try: data, newline= string.split(line,'\n'); y+=1
        except ValueError: continue
        try:
            if data[0] == '>':
                # Flush the record accumulated for the previous header.
                if len(sequence) > 0:
                    # Deliberate debug trap: if no header was ever seen,
                    # `ensembl_prot` (and `t`) are unbound and the undefined
                    # name `kill` aborts the run with a NameError.
                    try: ensembl_prot = ensembl_prot
                    except Exception: print data,y,t;kill
                    if ensembl_prot in unique_ens_proteins:
                        mRNA_AC = unique_ens_proteins[ensembl_prot]
                        values = string.join([mRNA_AC,ensembl_prot,sequence],'\t')+'\n'
                        export_data.write(values); ensembl_protein_seq_db[ensembl_prot] = []
                ### Parse new line
                t= string.split(data[1:],' '); sequence=''
                ensembl_prot = t[0]
                # Strip the version suffix (e.g. ENSP00000354587.3).
                if '.' in ensembl_prot:
                    ensembl_prot = string.split(ensembl_prot,'.')[0] ### Added to Ensembl after version 77
        except IndexError: continue
        try:
            # Non-header lines extend the current sequence.
            if data[0] != '>': sequence = sequence + data
        except IndexError: continue
    # Flush the final record, which has no trailing '>' to trigger it.
    if ensembl_prot in unique_ens_proteins:
        mRNA_AC = unique_ens_proteins[ensembl_prot]
        values = string.join([mRNA_AC,ensembl_prot,sequence],'\t')+'\n'
        export_data.write(values); ensembl_protein_seq_db[ensembl_prot] = []
    export_data.close()
    return ensembl_protein_seq_db
def importUniProtSeqeunces(species,transcripts_with_uniprots,transcripts_to_analyze):
    """Import UniProt sequences and associate them with mRNA accessions.

    Reads the per-species uniprot_sequence.txt table, exports
    mRNA/protein/sequence rows for every association found, and fills the
    module globals n_terminal_seq / c_terminal_seq with the first and last
    five residues of each sequence (used later to pick between in silico
    translation candidates).

    Returns a dict of mRNA accessions for which no protein was found.
    """
    global n_terminal_seq; global c_terminal_seq
    n_terminal_seq={}; c_terminal_seq={}
    export_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-UniProt_sequences.txt'
    export_data = export.ExportFile(export_file)
    #filename = 'AltDatabase/'+species+'/SequenceData/'+'uniprot_trembl_sequence.txt'
    filename = 'AltDatabase/uniprot/'+species+'/uniprot_sequence.txt'
    fn=filepath(filename); transcript_to_uniprot={}
    unigene_ensembl_up = {}
    for line in open(fn,'r').readlines():
        data, newline= string.split(line,'\n')
        t = string.split(data,'\t')
        # Fixed-column table: 0=id, 1=accessions, 2=sequence, 4=ensembls,
        # 6=type, 7=unigenes, 9=EMBL mRNA accessions.
        id=t[0];ac=t[1];ensembls=t[4];seq=t[2];type=t[6];unigenes=t[7];embls=t[9]
        ac=string.split(ac,','); embls=string.split(embls,',') #; ensembls=string.split(ensembls,','); unigenes=string.split(unigenes,',')
        if type != 'swissprot1': ### unclear why this condition was excluding swissprot so added 1 - version 2.1.1
            ### Note: These associations are based on of UCSC, which in some cases don't look correct: see AY429540 and Q75N08 from the KgXref file.
            ### Possibly exclude
            # Only the primary accession is used for the direct lookup.
            ac = ac[0]
            if ac in transcripts_with_uniprots:
                mRNA_ACs = transcripts_with_uniprots[ac]
                for mRNA_AC in mRNA_ACs:
                    transcript_to_uniprot[mRNA_AC] = []
                    values = string.join([mRNA_AC,ac,seq],'\t')+'\n'; export_data.write(values)
            for embl in embls:
                proceed = 'no'
                if (len(embl)>0) and type == 'fragment': ###NOTE: Fragment annotated seem to be the only protein IDs that contain direct references to a specific mRNA rather than promiscous (as opposed to Swissprot and Variant)
                    if embl in transcripts_to_analyze: proceed = 'yes'
                    elif embl in transcripts_with_uniprots: proceed = 'yes'
                if proceed == 'yes':
                    if embl not in transcript_to_uniprot:
                        transcript_to_uniprot[embl] = []
                        values = string.join([embl,id,seq],'\t')+'\n'; export_data.write(values)
        # Record terminal 5-mers for the frame-shift heuristic in
        # BuildInSilicoTranslations.
        n_terminal_seq[seq[:5]] = []
        c_terminal_seq[seq[-5:]] = []
    export_data.close()
    # Report every requested mRNA accession that found no UniProt partner.
    missing_protein_ACs={}
    for mRNA_AC in transcripts_to_analyze:
        if mRNA_AC not in transcript_to_uniprot: missing_protein_ACs[mRNA_AC]=[]
    for protein_AC in transcripts_with_uniprots:
        mRNA_ACs = transcripts_with_uniprots[protein_AC]
        for mRNA_AC in mRNA_ACs:
            if mRNA_AC not in transcript_to_uniprot: missing_protein_ACs[mRNA_AC]=[]
    if len(transcripts_to_analyze)>0: ### Have to submitt ACs to report them
        print len(missing_protein_ACs), 'missing protein ACs for associated UniProt mRNAs and', len(transcript_to_uniprot), 'found.'
    print len(n_terminal_seq),len(c_terminal_seq),'N and C terminal, respectively...'
    return missing_protein_ACs
def BuildInSilicoTranslations(mRNA_db):
    """Translate mRNA sequences to protein in silico.

    For each entry of mRNA_db (mRNA_AC -> (protein_id, sequence)) every ATG
    is tried as a start codon; the longest ORF is selected, with a
    frame-shift heuristic that can prefer a slightly shorter candidate whose
    terminal residues match known UniProt termini (module globals
    n_terminal_seq / c_terminal_seq).

    Returns translation_db: mRNA_AC -> (protein_id, protein_seq, cds_location).

    Example output (kept from the original author):
    BI517798
    Seq('ATGTGGCCAGGAGACGCCACTGGAGAACATGCTGTTCGCCTCCTTCTACCTTCTGGATTT ...', IUPACUnambiguousDNA())
    213 MWPGDATGEHAVRLLLPSGFYPGFSWQYPGSVAFHPRPQVRDPGQRVPDASGRGRLVVRAGPAHPPGLPLLWEPLAIWGNRMPSHRLPLLPQHVRQHLLPHLHQRRPFPGHCAPGQVPQAPQAPLRTPGLCLPVGGGGCGHGPAAGEPTDRADKHTVGLPAAVPGEGSNMPGVPWQWPSLPVHHQVTCTVIIRSCGRPRVEKALRTRQGHESP
    211 MNGLEVAPPGLITNFSLATAEQCGQETPLENMLFASFYLLDFILALVGNTLALWLFIRDHKSGTPANVFLMHLAVADLSCVLVLPTRLVYHFSGNHWPFGEIACRLTGFLFYLNMYASIYFLTCISADRFLAIVHPVKSLKLRRPLYAHLACAFLWVVVAVAMAPLLVSPQTVQTNTRWVCLQLYREKAPTCLVSLGSGLHFPFITRSRVL
    """
    translation_db={}
    from Bio.Seq import Seq
    ### New Biopython methods - http://biopython.org/wiki/Seq
    from Bio.Alphabet import generic_dna
    ### Deprecated
    #from Bio.Alphabet import IUPAC
    #from Bio import Translate ### deprecated
    #print 'Begining in silco translation for',len(mRNA_db),'sequences.'
    def cleanSeq(input_seq):
        """Wrapper for Biopython translate function. Bio.Seq.translate will complain if input sequence is
        not a mulitple of 3. This wrapper function passes an acceptable input to Bio.Seq.translate in order to
        avoid this warning."""
        #https://github.com/broadinstitute/oncotator/pull/265/commits/94b20aabff48741a92b3f9e608e159957af6af30
        # Pad with N so the length becomes a multiple of 3.
        trailing_bases = len(input_seq) % 3
        if trailing_bases:
            input_seq = ''.join([input_seq, 'NN']) if trailing_bases == 1 else ''.join([input_seq, 'N'])
        return input_seq
    # NOTE(review): first_time starts at 1, so the frame-shift debug dump
    # below (guarded by `first_time == 0`) is deliberately disabled.
    first_time = 1
    for mRNA_AC in mRNA_db:
        # Hard-coded debug trace for one problematic accession.
        if mRNA_AC == 'AK025306': print '@@@@@@@@@@^^^^AK025306...attempting in silico translation'
        temp_protein_list=[]; y=0
        protein_id,sequence = mRNA_db[mRNA_AC]
        if protein_id == '': protein_id = mRNA_AC+'-PEP'
        original_seqeunce = sequence
        sequence = string.upper(sequence)
        loop=0
        while (string.find(sequence,'ATG')) != -1: #while there is still a methionine in the DNA sequence, reload this DNA sequence for translation: find the longest ORF
            x = string.find(sequence,'ATG') #before doing this, need to find the start codon ourselves
            y += x #maintain a running count of the sequence position
            if loop!=0: y+=3 ### This accounts for the loss in sequence_met
            #if y<300: print original_seqeunce[:y+2], x
            sequence_met = sequence[x:] #x gives us the position where the first Met* is.
            ### New Biopython methods - http://biopython.org/wiki/Seq
            dna_clean = cleanSeq(sequence_met)
            dna_seq = Seq(dna_clean, generic_dna)
            # Translate from this ATG up to (not including) the first stop.
            prot_seq = dna_seq.translate(to_stop=True)
            ### Deprecated code
            #seq_type = IUPAC.unambiguous_dna
            #dna_seq = Seq(sequence_met,seq_type)
            #standard_translator = Translate.unambiguous_dna_by_id[1]
            #prot_seq = standard_translator.translate_to_stop(dna_seq) #convert the dna to protein sequence
            #prot_seq_string = prot_seq.tostring()
            prot_seq_string = str(prot_seq)
            # Tuple ordering (length first) lets a plain sort rank by ORF length.
            prot_seq_tuple = len(prot_seq_string),y,prot_seq_string,dna_seq #added DNA sequence to determine which exon we're in later
            temp_protein_list.append(prot_seq_tuple) #create a list of protein sequences to select the longest one
            sequence = sequence_met[3:] # sequence_met is the sequence after the first or proceeduring methionine, reset the sequence for the next loop
            loop+=1
        if len(temp_protein_list) == 0:
            continue
        else:
            #temp_protein_list = pick_optimal_peptide_seq(temp_protein_list) ###Used a more complex method in the original code to determine the best selection
            # Longest ORF first.
            temp_protein_list.sort(); temp_protein_list.reverse()
            peptide_len1 = temp_protein_list[0][0]
            prot_seq_string = temp_protein_list[0][2] #extract out the protein sequence string portion of the tuple
            coding_dna_seq_string = temp_protein_list[0][3]
            pos1 = temp_protein_list[0][1] ###position in DNA sequence where the translation starts
            n_term1 = prot_seq_string[:5]; c_term1 = prot_seq_string[-5:]
            ###Check the top protein sequences and see if there are frame differences
            choose = 0
            for protein_data in temp_protein_list[1:]: ###exlcude the first entry
                peptide_len2 = protein_data[0]; pos2= protein_data[1]
                percent_of_top = (float(peptide_len1)/peptide_len2)*100
                # Only near-length competitors (>70% of the top, >20 aa) are
                # considered as alternatives.
                if (percent_of_top>70) and (peptide_len2>20):
                    prot_seq_string2 = protein_data[2]
                    n_term2 = prot_seq_string2[:5]; c_term2 = prot_seq_string2[-5:]
                    frame_shift = check4FrameShifts(pos1,pos2)
                    if frame_shift == 'yes':
                        ###determine which prediction is better to use
                        # Prefer the candidate whose termini match known
                        # UniProt N-/C-terminal 5-mers.
                        if n_term1 in n_terminal_seq: choose = 1
                        elif n_term2 in n_terminal_seq: choose = 2
                        elif c_term1 in c_terminal_seq: choose = 1
                        elif c_term2 in c_terminal_seq: choose = 2
                        if choose == 2:
                            prot_seq_string = protein_data[2]
                            coding_dna_seq_string = protein_data[3]
                            alt_prot_seq_string = temp_protein_list[0][2]
                            alt_coding_dna_seq_string = temp_protein_list[0][3]
                            pos1 = protein_data[1]
                            if first_time == 0:
                                print mRNA_AC
                                print coding_dna_seq_string
                                print len(prot_seq_string),prot_seq_string
                                print alt_coding_dna_seq_string
                                print len(alt_prot_seq_string),alt_prot_seq_string
                                first_time = 1
                            ###write this data out in the future
                    else: break
                else: break ###do not need to keep looking
            dl = (len(prot_seq_string))*3 #figure out what the DNA coding sequence length is
            #dna_seq_string_coding_to_end = coding_dna_seq_string.tostring()
            dna_seq_string_coding_to_end = str(coding_dna_seq_string)
            coding_dna_seq_string = dna_seq_string_coding_to_end[0:dl]
            # 1-based inclusive CDS span in GenBank '..' notation.
            cds_location = str(pos1+1)+'..'+str(pos1+len(prot_seq_string)*3+3)
            ### Determine if a stop codon is in the sequence or there's a premature end
            coding_diff = len(dna_seq_string_coding_to_end) - len(coding_dna_seq_string)
            if coding_diff > 4: stop_found = 'stop-codon-present'
            else: stop_found = 'stop-codon-absent'
            #print [mRNA_AC],[protein_id],prot_seq_string[0:10]
            if mRNA_AC == 'AK025306': print '*********AK025306',[protein_id],prot_seq_string[0:10]
            translation_db[mRNA_AC] = protein_id,prot_seq_string,cds_location
    return translation_db
def check4FrameShifts(pos1,pos2):
    """Return 'yes' when the two translation start positions lie in
    different reading frames (their distance is not a whole number of
    codons), otherwise 'no'.

    The original implementation formatted float(pos_diff)/3 as a string and
    split on the decimal point; that is fragile (str() may switch to
    scientific notation for large values) where integer modulo is exact.
    """
    pos_diff = abs(pos2 - pos1)
    if pos_diff % 3 == 0:
        frame_shift = 'no'
    else:
        frame_shift = 'yes'
    return frame_shift
def convertListsToTuple(list_of_lists):
    """Return a new list with each inner sequence converted to a tuple."""
    return [tuple(inner) for inner in list_of_lists]
def compareProteinFeaturesForPairwiseComps(probeset_protein_db,protein_seq_db,probeset_gene_db,species,array_type):
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-protein-dbase_seqcomp.txt'
else: export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-dbase_seqcomp.txt'
fn=filepath(export_file); export_data1 = open(fn,'w')
title_row = 'Probeset\tAligned protein_id\tNon-aligned protein_id\n'; export_data1.write(title_row)
minimal_effect_db={}; accession_seq_db={};start_time = time.time()
print "Comparing protein features for all pair-wise comparisons"
for probeset in probeset_protein_db:
geneid = probeset_gene_db[probeset][0] ###If one probeset
match_list,null_list = probeset_protein_db[probeset]
prot_match_list=[]; prot_null_list=[]
for protein_ac in match_list:
protein_seq = protein_seq_db[protein_ac][0]
prot_match_list.append([protein_ac,protein_seq])
for protein_ac in null_list:
protein_seq = protein_seq_db[protein_ac][0]
prot_null_list.append([protein_ac,protein_seq])
### Compare all | |
/ 2
phi["C"] = - np.pi / 2
phi["O"] = - np.pi / 2
# Define init_values
# Estimate the mean helix axis...
nv_dict = {}
for i in xyzs_dict.keys(): nv_dict[i] = estimate_axis(xyzs_dict[i])
nv_array = np.array( [ v for v in nv_dict.values() ] )
nv = np.nanmean(nv_array, axis = 0)
nv /= np.linalg.norm(nv)
# Calculate direction cosine angle
# nx = cos(alpha) = a1/|a|
# ny = cos(beta ) = a2/|a|
# nz = cos(gamma) = a3/|a|
nx, ny, nz = np.arccos(nv)
# Estimate the mean position that the axis of helix passes through...
pv_dict = {}
for i in xyzs_dict.keys(): pv_dict[i] = np.nanmean(xyzs_dict[i], axis = 0)
pv_array = np.array( [ v for v in pv_dict.values() ], dtype = np.float64)
pv = np.nanmean(pv_array, axis = 0)
px, py, pz = pv
# Record the init position that an axial will pass through
pa0 = pv.copy()
# Predefine axial translational offset
t = {}
xyzs_nonan_dict = {}
for i in xyzs_dict.keys():
xyzs_nonan_dict[i] = xyzs_dict[i][~np.isnan(xyzs_dict[i]).any(axis = 1)]
## t[i] = - np.linalg.norm(pv - xyzs_nonan_dict[i][0])
# The projection along axis is equivalent to translation...
pv_to_firstatom = xyzs_nonan_dict[i][0] - pv
t[i] = np.dot( pv_to_firstatom, nv )
# Init params...
params = init_params()
# Load init values
params.add("px" , value = px)
params.add("py" , value = py)
params.add("pz" , value = pz)
params.add("nx" , value = nx)
params.add("ny" , value = ny)
params.add("nz" , value = nz)
params.add("s" , value = s)
params.add("omega", value = omega)
params.add("rN" , value = r["N"])
params.add("rCA" , value = r["CA"])
params.add("rC" , value = r["C"])
params.add("rO" , value = r["C"])
params.add("phiN" , value = phi["N"])
params.add("phiCA", value = phi["CA"])
params.add("phiC" , value = phi["C"])
params.add("phiO" , value = phi["O"])
params.add("tN" , value = t["N"])
params.add("tCA" , value = t["CA"])
params.add("tC" , value = t["C"])
params.add("tO" , value = t["O"])
if report: report_params_helix(params, title = f"Init report")
# Fitting process...
# Set constraints...
for i in params.keys(): params[i].set(vary = False)
for i in ["px", "py", "pz"]: params[i].set(vary = True)
result = fit_helix(params, xyzs_dict, pa0, lam)
if report:
report_params_helix(params, title = f"px, py, pz: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
for i in ["nx", "ny", "nz"]: params[i].set(vary = True)
result = fit_helix(params, xyzs_dict, pa0, lam)
if report:
report_params_helix(params, title = f"nx, ny, nz: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
for i in ["phiN", "phiCA", "phiC", "phiO"]: params[i].set(vary = True)
result = fit_helix(params, xyzs_dict, pa0, lam)
if report:
report_params_helix(params, title = f"phi: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
for i in ["s", "omega"]: params[i].set(vary = True)
result = fit_helix(params, xyzs_dict, pa0, lam)
if report:
report_params_helix(params, title = f"s, omega: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
for i in ["tN", "tCA", "tC", "tO"]: params[i].set(vary = True)
result = fit_helix(params, xyzs_dict, pa0, lam)
if report:
report_params_helix(params, title = f"t: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
for i in ["rN", "rCA", "rC", "rO"]: params[i].set(vary = True)
result = fit_helix(params, xyzs_dict, pa0, lam)
if report:
report_params_helix(params, title = f"r: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
for i in range(5):
result = fit_helix(params, xyzs_dict, pa0, lam, ftol = 1e-9)
if report:
report_params_helix(params, title = f"All params: " + \
f"success = {result.success}, " + \
f"rmsd = {calc_rmsd(result.residual)}")
params = result.params
return result
def check_fit_helix(params, xyzs_dict, pv0, nv0, nterm, atom_to_check):
    """Visually validate the helix fit for each requested backbone atom type."""
    # Shared axis/pitch parameters followed by per-atom radius/phase/offset.
    parvals = unpack_params(params)
    core = tuple(parvals[:8])        # px, py, pz, nx, ny, nz, s, omega
    radii = parvals[8:12]            # rN, rCA, rC, rO
    phases = parvals[12:16]          # phiN, phiCA, phiC, phiO
    offsets = parvals[16:20]         # tN, tCA, tC, tO
    parval_dict = {atom: core + (radii[k], phases[k], offsets[k])
                   for k, atom in enumerate(("N", "CA", "C", "O"))}
    for i in atom_to_check:
        print(f"Check {i}")
        check_fit_purehelix(parval_dict[i], xyzs_dict[i], pv0, nv0, nterm)
    return None
def fit_helix_by_length(xyzs_dict, helixlen):
    ''' Go through whole data and return the helix segment position that fits the best.
    The best fit is found using brute-force over every window of length
    helixlen; returns [start_index, fit_result] of the lowest-RMSD window.
    '''
    total = len(xyzs_dict["N"])
    assert total >= helixlen, "helixlen should be smaller than the total length."
    results = []
    # BUGFIX: range(total - helixlen + 1) -- the original dropped the final
    # window, and when total == helixlen (allowed by the assert) it produced
    # no candidates at all, so sorted_results[0] raised IndexError.
    for i in range(total - helixlen + 1):
        # Build a fresh slice dict per window so results are independent.
        window = {k: v[i:i + helixlen] for k, v in xyzs_dict.items()}
        results.append([i, helix(window)])
    return min(results, key=lambda entry: calc_rmsd(entry[1].residual))
def fit_purehelix_by_length(xyzs, helixlen):
    ''' Go through whole data and return the helix segment position that fits the best.
    The best fit is found using brute-force over every window of length
    helixlen; returns [start_index, fit_result] of the lowest-RMSD window.
    '''
    assert len(xyzs) >= helixlen, "helixlen should be smaller than the total length."
    results = []
    # BUGFIX: + 1 so the last window (and the helixlen == len(xyzs) case)
    # is fitted too; the original range skipped it and could leave
    # `results` empty, crashing on indexing the sorted list.
    for i in range(len(xyzs) - helixlen + 1):
        results.append([i, purehelix(xyzs[i:i + helixlen])])
    return min(results, key=lambda entry: calc_rmsd(entry[1].residual))
def check_fit_purehelix(parvals, xyzs, pv0, nv0, nterm):
    """Interactively plot measured coordinates against the fitted helix model.

    Draws the data points (black), the fitted model (red) and both the
    initial (pv0/nv0) and fitted helix axes as arrows using gnuplot.
    Blocks on user input before returning.  For visual inspection only.
    """
    # Generate the helix...
    xyzs_nonan = xyzs[~np.isnan(xyzs).any(axis = 1)]
    q = helixmodel(parvals, xyzs.shape[0], xyzs_nonan[0])
    # Unpack parameters...
    px, py, pz, nx, ny, nz, s, omega, r, phi, t = parvals
    pv = np.array([px, py, pz])
    nv = np.array([nx, ny, nz])
    # nx, ny, nz are direction-cosine angles; cos() recovers the unit axis.
    nv = np.cos(nv)
    import GnuplotPy3
    gp = GnuplotPy3.GnuplotPy3()
    gp("set view equal xyz")
    gp("set xlabel 'x'")
    gp("set ylabel 'y'")
    gp("set key")
    # Initial axis estimate in black, fitted axis in red.
    gp(f"set arrow front from {pv0[0]},{pv0[1]},{pv0[2]} \
                          to {pv0[0] + nv0[0]}, \
                             {pv0[1] + nv0[1]}, \
                             {pv0[2] + nv0[2]} \
                          linecolor rgb 'black'")
    gp(f"set arrow front from {pv[0]},{pv[1]},{pv[2]} \
                          to {pv[0] + nv[0]}, \
                             {pv[1] + nv[1]}, \
                             {pv[2] + nv[2]} \
                          linecolor rgb 'red'")
    gp(f"splot '-' using 1:2:3 with linespoints pointtype 6 linecolor rgb 'black' title 'data', \\")
    gp(f"      '-' using 1:2:3:4 with labels notitle, \\")
    gp(f"      '-' using 1:2:3 with points pointtype 6 linecolor rgb 'black'notitle, \\")
    gp(f"      '-' using 1:2:3 with points pointtype 6 linecolor rgb 'red'notitle, \\")
    gp(f"      '-' using 1:2:3 with linespoints pointtype 6 linecolor rgb 'red' title 'model', \\")
    gp(f"")
    # NOTE(review): `np.nan in (x, y, z)` tests identity before equality
    # (nan != nan), so it only skips rows holding the same np.nan object as
    # numpy exposes -- confirm NaN rows are actually filtered as intended.
    for i, (x, y, z) in enumerate(xyzs):
        if np.nan in (x, y, z): continue
        gp(f"{x} {y} {z}")
    gp( "e")
    # Same points again, labelled with their residue number (offset nterm).
    for i, (x, y, z) in enumerate(xyzs):
        if np.nan in (x, y, z): continue
        gp(f"{x} {y} {z} {i + nterm}")
    gp( "e")
    gp(f"{pv0[0]} {pv0[1]} {pv0[2]}")
    gp( "e")
    gp(f"{pv[0]} {pv[1]} {pv[2]}")
    gp( "e")
    # Model curve points.
    for i, (x, y, z) in enumerate(q):
        if np.nan in (x, y, z): continue
        gp(f"{x} {y} {z}")
    gp( "e")
    input("Press enter to exit...")
    return None
def check_select_helix(parvals, xyzs_dict, pv0, nv0, nterm, bindex, helixlen):
    ''' Visualize the selected helix fit for every atom type in xyzs_dict.

    parvals packs 8 shared helix parameters followed by per-atom radius,
    phase and offset values in atom-major order (N, CA, C, O).  Each atom's
    coordinate trace is checked with check_select_purehelix.  Returns None.
    '''
    # Shared parameters: position (px,py,pz), axis (nx,ny,nz), rise s, turn omega.
    shared  = parvals[:8]
    radii   = parvals[8:12]
    phases  = parvals[12:16]
    offsets = parvals[16:20]
    atoms   = ("N", "CA", "C", "O")
    # Per-atom parameter tuples: shared block + (r, phi, t).
    parval_dict = { atom : tuple(shared) + (r, phi, t)
                    for atom, r, phi, t in zip(atoms, radii, phases, offsets) }

    for atom in xyzs_dict.keys():
        print(f"Check {atom}")
        check_select_purehelix(parval_dict[atom], xyzs_dict[atom], pv0, nv0, nterm, bindex, helixlen)

    return None
def check_select_purehelix(parvals, xyzs, pv0, nv0, nterm, bindex, helixlen):
# Generate the helix...
xyzs_sel = xyzs[bindex:bindex+helixlen]
xyzs_sel_nonan = xyzs_sel[~np.isnan(xyzs_sel).any(axis = 1)]
## parvals = unpack_params(params)
q = helixmodel(parvals, xyzs_sel.shape[0], xyzs_sel_nonan[0])
# Unpack parameters...
px, py, pz, nx, ny, nz, s, omega, r, phi, t = parvals
pv = np.array([px, py, pz])
nv = np.array([nx, ny, nz])
nv = np.cos(nv)
import GnuplotPy3
gp = GnuplotPy3.GnuplotPy3()
gp("set view equal xyz")
gp("set xlabel 'x'")
gp("set ylabel 'y'")
gp("unset key")
gp(f"set arrow front from {pv0[0]},{pv0[1]},{pv0[2]} \
to {pv0[0] + nv0[0]}, \
{pv0[1] + | |
from utils import UtilMethods as Utils
from pipeprediction import Extractor, DimensionHandler, Loader
import re, os
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV, train_test_split, StratifiedKFold
from sklearn import svm
from sklearn.gaussian_process import GaussianProcessClassifier as GP
from sklearn.ensemble import RandomForestClassifier as RF, IsolationForest as IF
from sklearn.linear_model import LogisticRegression as Logit
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.svm import OneClassSVM as OCSVM
from sklearn.covariance import EllipticEnvelope as RC #RC = Robust Covariance
from sklearn.neighbors import LocalOutlierFactor as LOF
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support as pfrs, classification_report
from sklearn.externals import joblib
from sklearn.utils.validation import check_is_fitted
from pyspark import SparkContext
###############
# Manages machine learning pipeline:
# train, validation, test, model loading
# and saving, classifiers, performance output
###############
class ML:
def __init__(self):
    """Build the prediction pipeline from the application configuration.

    Resolves the input/output directory layout, instantiates the feature
    extractor, loader and dimensionality-reduction handler, derives the
    output/model file names from task + classifier + feature settings, and
    constructs the configured classifier.
    """
    # read application configuration props
    self.config = Utils.loadConfig()
    self.path = self.config.get('prediction', 'source.path')
    self.path = Utils.normalizePath(self.path)
    # Directory layout: separate splits, plus a merged train+validation set
    # used for grid search / cross validation.
    self.trainPath = self.path + 'train/'
    self.validPath = self.path + 'validation/'
    self.gridCVPath = self.path + 'train_validation/'
    self.testPath = self.path + 'test/'
    self.outputPath = self.path + 'metrics/cv_gridsearchparams/'
    self.task = self.config.get('prediction', 'task')
    # Percentage of positive instances -- used later to derive NuSVC's `nu`.
    self.posPerc = int(self.config.get('prediction', 'pos.perc'))
    self.classif = self.config.get('prediction', 'classifier')
    os.makedirs(os.path.dirname(self.outputPath), exist_ok=True)
    self.extractor = Extractor.Extractor(self.config, self.outputPath)
    self.loader = Loader.Loader(self.config, self.outputPath)
    self.dimHandler = DimensionHandler.DimensionHandler(self.config, self.outputPath)
    self.outFile = ''
    self.useEmbeddings = self.config.getboolean('prediction', 'use.embeddings')
    self.cv = self.config.getboolean('prediction', 'use.crossvalid')
    # A 'cross*' task always implies cross validation.
    if('cross' in self.task):
        self.cv = True
    # The output file name encodes dim-reduction method / classifier / features
    # so runs with different settings do not overwrite each other.
    if (not 'none' in self.dimHandler.name.lower()):
        self.outFile = self.dimHandler.getOutFile(self.classif)
        self.outFile = self.outFile + '_embeddings' if self.useEmbeddings else self.outFile
    else:
        self.outFile = self.outputPath + self.classif + '_' + self.extractor.featType
        if('kmers' in self.extractor.featType):
            # e.g. 'kmers' -> 'kmers3_minOcc5'.
            kmerfeats = 'kmers' + str(self.extractor.size) + '_minOcc' + str(self.extractor.minOcc)
            self.outFile = self.outFile.replace('kmers', kmerfeats)
            #self.outFile += str(self.extractor.size) + '_minOcc' + str(self.extractor.minOcc)
    if('cross' in self.task or 'grid' in self.task or self.cv):
        # Grid search consumes features extracted from the full train+validation set.
        self.extractor.featFile = self.extractor.featFile.replace('.feat', '.complete.feat') if 'grid' in self.task else self.extractor.featFile
        if('cross' in self.task or self.cv):
            self.outFile += '_cv05'
    self.modelFile = self.outFile + '.model.pkl'
    self.classifier = self.setUpClassifier()
def main(self):
    """Entry point: dispatch on the configured task.

    'cross'/'grid' run cross validation or grid search on the merged
    train+validation set; 'train' fits and persists a model (or loads an
    existing one); 'validation'/'test' score the corresponding split and
    write metric + per-ID prediction files.
    """
    sparkContext = SparkContext(conf=self.extractor.initSpark())
    # performs gridsearch or cross validation, extract features from entire train set
    if ('cross' in self.task or 'grid' in self.task):
        self.extractor.extractFeatures(self.gridCVPath, sparkContext, featPerInst=False)
        IDs, x_occ, y_labels, parentDir = self.extractor.countOccurrence(self.gridCVPath, sparkContext)
        if ('cross' in self.task):
            self.runCrossValid(x_occ, y_labels, IDs)
        elif ('grid' in self.task):
            self.runGridSearch(x_occ, y_labels)
    # performs training, extracts features from split train / valid
    elif ('train' in self.task and not os.path.isfile(self.modelFile)):
        if (not os.path.isfile(self.extractor.featFile)):
            self.extractor.extractFeatures(self.trainPath, sparkContext, featPerInst=False)
        print('Training...')
        IDs, x_occ, y_labels, parentDir = self.extractor.countOccurrence(self.trainPath, sparkContext)
        if(not 'none' in self.dimHandler.name.lower()):
            x_occ = self.dimHandler.trainMethod(x_occ, y_labels)
        self.classifier.fit(x_occ, y_labels)
        joblib.dump(self.classifier, self.modelFile)
        print('Model saved. \nPredicting...')
    else:
        # Reuse a previously trained model from disk when available.
        try:
            self.classifier = joblib.load(self.modelFile)
            print('Model loaded. \nPredicting...')
        except FileNotFoundError:
            print('Model', self.modelFile, 'does not exist. Generate it by training (task = train).')
    # Dump random forest feature importances once per feature file.
    if('cross' not in self.task and 'randomforest' in self.classif.lower() and not os.path.isfile(self.extractor.featFile + 'importance')):
        self.getRFImportance()
    # performs validation, loads features from split train /valid
    if('validation' in self.task):
        # NOTE(review): unlike the test branch below, validation data is NOT
        # passed through self.dimHandler.testMethod() -- confirm whether
        # dimensionality reduction should be applied here as well.
        IDs, x_occ_val, y_labels_val, parentDir = self.extractor.countOccurrence(self.validPath, sparkContext)
        output, IDoutput = self.getPredictions(IDs, x_occ_val, y_labels_val)
        Utils.writeFile(self.outFile + '.valid', output)
        Utils.writeFile(self.outFile + '.IDs.valid', IDoutput)
    # performs testing, loads features from the held-out test split
    if ('test' in self.task):
        IDs, x_occ_test, y_labels_test, parentDir = self.extractor.countOccurrence(self.testPath, sparkContext)
        if('none' not in self.dimHandler.name.lower()):
            x_occ_test = self.dimHandler.testMethod(x_occ_test)
        output, IDoutput = self.getPredictions(IDs, x_occ_test, y_labels_test)
        Utils.writeFile(self.outFile + '_' + parentDir + '.test', output)
        Utils.writeFile(self.outFile + '_' + parentDir + '.IDs.test', IDoutput)
    print('Done!')
def getPredictions(self, IDs, occ, labels):
    """Predict labels for an occurrence matrix and build the report strings.

    Returns (output, IDoutput): `output` is the formatted metrics report from
    getMetrics, `IDoutput` has one "ID<TAB>predicted_label" line per instance.
    """
    classifName = self.classif.lower()
    # Outlier detectors (LOF) have no separate fit/predict split.
    if ('outlier' in classifName):
        predLabels = self.classifier.fit_predict(occ)
    else:
        predLabels = self.classifier.predict(occ)
    # BUG FIX: the original condition `('one' or 'isolation' in ...)` is a
    # constant truthy expression ('one'), so `score` was ALWAYS
    # 'one class case'.  One-class / anomaly detectors (OneClassSVM,
    # IsolationForest, LOF) have no labeled accuracy; every other classifier
    # reports its mean accuracy.
    if 'one' in classifName or 'isolation' in classifName or 'outlier' in classifName:
        score = 'one class case'
    else:
        score = self.classifier.score(occ, labels)
    IDoutput = ''
    # NOTE(review): `confidence` is computed but unused in this method --
    # presumably intended for getConfidencePerID; confirm before removing.
    confidence = [None] * len(IDs)
    if ('svc' in classifName):
        confidence = self.classifier.decision_function(occ)
    for i in range(0, len(predLabels)):
        IDoutput += str(IDs[i]) + '\t' + str(predLabels[i]) + '\n'
    output = self.getMetrics(score, predLabels, labels)
    return output, IDoutput
##############################
# output RF feature importance
##############################
def getRFImportance(self):
    """Rank features by the trained random forest's importances and write
    the sorted table to '<featFile>importance'."""
    pd.options.display.float_format = '{:,.8f}'.format
    ranking = pd.DataFrame(self.classifier.feature_importances_,
                           index=self.extractor.loadFeatures(),
                           columns=['importance']).sort_values('importance', ascending=False)
    Utils.writeFile(self.extractor.featFile + 'importance', ranking.to_string())
def getMaxLen(self):
if('mlp' in self.classif.lower()):
return len(self.classifier.coefs_[0])
elif('logit' in self.classif.lower() or 'linearsvc' in self.classif.lower()):
return self.classifier.coef_.size
elif('randomforest' in self.classif.lower()):
return self.classifier.n_features_
elif('nusvc' in self.classif.lower() or self.classif.lower() in 'svc'):
return self.classifier.shape_fit_[1]
def setUpClassifier(self):
    """Instantiate the classifier selected by the 'classifier' config entry.

    Matching is by substring on the lowercased name, so branch ORDER matters:
    'linearsvc' and 'nusvc' must be tested before the bare 'svc' case.
    For grid-search tasks each estimator is returned with default parameters
    (so GridSearchCV can explore the grid); otherwise previously tuned
    hyper-parameters are hard-coded.
    """
    if('linearsvc' in self.classif.lower()):
        if (not 'grid' in self.task):
            return svm.LinearSVC(C=0.01, loss='squared_hinge', penalty='l2')
        else:
            return svm.LinearSVC()
    # in case positive perc is low,
    # NuSVC nu param has to be adjusted
    elif('nusvc' in self.classif.lower()):
        thisNu = self.posPerc / 100
        if(not 'grid' in self.task):
            return svm.NuSVC(nu=thisNu, coef0=0.01, gamma=0.01, kernel='sigmoid')
        else:
            return svm.NuSVC(nu=thisNu)
    # NOTE: `self.classif.lower() in 'svc'` is a reversed substring test --
    # it matches when the config value is a substring of 'svc' (e.g. 'svc').
    elif(self.classif.lower() in 'svc'):
        # pass 'probability=True' if confidence values must be computed
        if (not 'grid' in self.task):
            return svm.SVC(C=100, gamma=0.001, kernel='rbf')
        else:
            return svm.SVC()
    elif ('svr' in self.classif.lower()):
        return svm.SVR()
    elif ('mlp' in self.classif.lower()):
        if (not 'grid' in self.task):
            return MLP(activation='relu', batch_size=256, hidden_layer_sizes=256, learning_rate='adaptive', solver='adam')
        else:
            return MLP()
    elif ('gaussian' in self.classif.lower()):
        return GP()
    elif('randomforest' in self.classif.lower()):
        if (not 'grid' in self.task):
            return RF(bootstrap=False, criterion='entropy', max_features='log2', n_estimators=1000)
        else:
            return RF()
    elif('logit' in self.classif.lower()):
        if (not 'grid' in self.task):
            return Logit(penalty='l1', C=10, solver='saga')
        else:
            return Logit()
    # One-class / anomaly detectors (no labeled accuracy downstream).
    elif('one' in self.classif.lower()):
        return OCSVM(kernel='rbf')
    elif('isolation' in self.classif.lower()):
        return IF()
    elif('covariance' in self.classif.lower()):
        return RC()
    elif('outlier' in self.classif.lower()):
        return LOF()
def getMetrics(self, score, pLabels, rLabels):
    """Format an evaluation report as a tab-separated human-readable string:
    mean accuracy, per-class precision/recall/F1, and the confusion matrix.
    """
    output = 'Mean accuracy:\t' + str(score) + '\n'
    prfscore = pfrs(rLabels, pLabels)
    # Render the numpy vectors without brackets, one column per class.
    precision = np.array2string(prfscore[0]).replace('[', '').replace(']', '')
    recall = np.array2string(prfscore[1]).replace('[', '').replace(']', '')
    fbeta = np.array2string(prfscore[2]).replace('[', '').replace(']', '')
    # FIX: raw strings for the regex patterns -- '\s' in a plain string is an
    # invalid escape (DeprecationWarning today, SyntaxError in future Python).
    precision = re.sub(r'\s+', '\t', precision)
    recall = re.sub(r'\s+', '\t', recall)
    fbeta = re.sub(r'\s+', '\t', fbeta)
    output += '\tneg\tpos\n'
    output += 'P:' + precision + '\n'
    output += 'R:' + recall + '\n'
    output += 'F1:' + fbeta + '\n'
    # Reshape numpy's "[[a b]\n [c d]]" rendering into a labelled 2x2 table.
    cf = np.array2string(confusion_matrix(rLabels, pLabels))
    cf = 'neg\t' + cf.replace('[[', '').replace(']]', '')
    cf = cf.replace(']\n [', '\npos\t')
    output += 'Confusion matrix: ' + '\n'
    output += '\tpNeg\tpPos' + '\n'
    output += cf
    return output
def getConfidencePerID(self, IDs, pLabels, rLabels, confidence):
print('ID\tPredicted\tTrue class\tConfidence')
for i in range(len(IDs)):
print(str(IDs[i]) + '\t'
+ str(pLabels[i]) + '\t'
+ str(rLabels[i]) + '\t'
+ str(confidence[i]))
# perform cross validation on dataset
def runCrossValid(self, x_occ, y_labels, IDs):
    """5-fold stratified cross validation.

    For each fold: drop features absent from the fold's training columns,
    fit a fresh classifier, and write per-fold metric and per-ID prediction
    files ('<outFile>f<i>.valid' / '.IDs.valid').  Afterwards retrain on the
    full dataset and persist the model to self.modelFile.
    """
    seed = 5
    np.random.seed(seed)
    i = 1
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
    for train, valid in kfold.split(x_occ, y_labels):
        # get position of features inexistent on train, remove such feats from valid
        # gives which indexes are greater than 0
        filter = np.where(np.sum(x_occ[train], axis=0) > 0)[0]
        # takes only column indices from *filter*
        x_occT = np.take(x_occ[train], filter, axis=1)
        x_occV = np.take(x_occ[valid], filter, axis=1)
        self.classifier.fit(x_occT, y_labels[train])
        output, IDoutput = self.getPredictions(IDs[valid], x_occV, y_labels[valid])
        Utils.writeFile(self.outFile + 'f'+ str(i) + '.valid', output)
        Utils.writeFile(self.outFile + 'f'+ str(i) + '.IDs.valid', IDoutput)
        i += 1
        # Fresh estimator so folds do not share fitted state.
        self.classifier = self.setUpClassifier()
    # Finally retrain on the full dataset and persist the model.
    self.classifier.fit(x_occ, y_labels)
    joblib.dump(self.classifier, self.modelFile)
    print('Model saved.')
# performs grid search on training and validation data to find the best hyper-parameters
def runGridSearch(self, x_occ, y_labels):
    """Run GridSearchCV over this classifier's parameter grid, once per
    scoring metric (f1, precision, recall), and write the aggregated report
    to '<outputPath><classifier>.gridSearch'."""
    output = 'Running grid search for ' + self.classif + ' in ' + str(len(x_occ)) + ' instances ...\n'
    print('Running grid search for', self.classif, 'in', str(len(x_occ)), 'instances ...\n')
    scores = ['f1', 'precision', 'recall']
    for score in scores:
        output += 'Grid search for score: ---> ' + score + ' <---\n'
        # Fresh default-parameter estimator each round; 5-fold CV, 60 workers.
        classif = GridSearchCV(estimator=self.setUpClassifier(), param_grid=self.getGridParams(), scoring=score,
                               cv=5, n_jobs=60)
        classif.fit(x_occ, y_labels)
        output += 'Best parameters in train set:\n'
        output += str(classif.best_params_) + '\n'
        output += 'Grid scores in train set:\n'
        means = classif.cv_results_['mean_test_score']
        stds = classif.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, classif.cv_results_['params']):
            params = str(params).replace('{', '').replace('}', '')
            # +/- spans two standard deviations.
            output += ("%0.3f (+/-%0.03f) |\t params %r" % (mean, std * 2, params)) + '\n'
        output += "\n--------------------------------------------------\n"
        print('Done with', score, '.')
    Utils.writeFile(self.outputPath + self.classif + '.gridSearch', output)
    print(output)
def getGridParams(self):
Cvalues = [0.01,0.1, 1,10,100]
gammaValues = [0.01,0.001,0.0001]
lossValues = ['hinge', 'squared_hinge']
estimatorValues = [10,100,1000]
maxFeatValues = ['', 'auto', 'sqrt', 'log2']
penalties = ['l1', 'l2']
hiddenLayers = [(64,), (128,), (256,), (512,), (1024,)]
activationValues = ['identity', 'logistic', 'tanh', 'relu']
learningRate = ['constant', 'invscaling', 'adaptive']
batchSizes = [32,64,128,256]
nu = self.posPerc / 100
if ('linearsvc' in self.classif.lower()):
return [{'penalty': ['l1'], 'loss': ['squared_hinge'],'C': Cvalues, 'dual': [False]},
{'penalty': ['l2'], 'loss': lossValues,'C': Cvalues}]
elif ('nusvc' in self.classif.lower()):
return [{'nu': [nu], 'kernel': ['rbf'], 'gamma': gammaValues},
{'nu': [nu], 'kernel': ['linear']},
{'nu': [nu], 'kernel': ['sigmoid'], 'gamma': gammaValues},
{'nu': [nu], 'kernel': ['poly'], 'gamma': gammaValues}]
elif (self.classif.lower() in 'svc'):
return [{'kernel': ['rbf'], 'gamma': gammaValues, 'C': Cvalues},
{'kernel': ['linear'], 'C': Cvalues},
{'kernel': ['sigmoid'], 'gamma': gammaValues, 'C': Cvalues},
{'kernel': ['poly'], 'gamma': gammaValues, 'C': Cvalues}]
elif ('mlp' in self.classif.lower()):
return [{'solver': ['lbfgs'], 'activation': activationValues, 'hidden_layer_sizes': hiddenLayers, 'learning_rate': learningRate},
{'solver': ['sgd'], 'activation': | |
retcode
self.signal_message = message
return retcode, message
# parse process log
def load_process_log( self ):
    """Parse this task's process log line by line, then synthesize a failure
    result for any command that has no matching result record (i.e. the
    process died before the result line was written)."""
    Msg.fout( self.process_log, "dbg" )
    with open( self.process_log , "r" ) as my_flog:
        try:
            for my_line in my_flog:
                Msg.user("Load: %s" % my_line , "PROCESS-LINE")
                self.load_process_line( my_line )
        except Exception as arg_ex:
            Msg.error_trace( arg_ex )
            Msg.err( str( arg_ex ))
        finally:
            # NOTE(review): redundant -- the `with` statement already closes
            # the file on exit.
            my_flog.close()
    # here is the lowdown: if a command line exists then a result line must also exist,
    # check for generate pair
    if ( self.detail_flags & SummaryDetail.GenCmd ) and not ( self.detail_flags & SummaryDetail.GenResult ):
        retcode, message = self.check_missing_result(self.process_result, "generator")
        self.load_gen_result( { "retcode": retcode
                              , "stdout" : self.process_result[1]
                              , "stderr" : self.process_result[2]
                              , "start"  : self.process_result[3]
                              , "end"    : self.process_result[4]
                              , "message": message
                              } )
    # check for summary pair
    elif ( self.detail_flags & SummaryDetail.IssCmd ) and not ( self.detail_flags & SummaryDetail.IssResult ):
        retcode, message = self.check_missing_result(self.process_result, "iss")
        self.load_iss_result( { "retcode" : retcode
                              , "log"    : None
                              , "message": message
                              } )
    # check for compare pair
    elif ( self.detail_flags & SummaryDetail.TraceCmpCmd ) and not ( self.detail_flags & SummaryDetail.TraceCmpResult ):
        retcode, message = self.check_missing_result(self.process_result, "trace-cmp")
        self.load_trace_cmp_result( { "trace-cmp-retcode" : retcode
                                    , "trace-cmp-log" : None
                                    , "message": message
                                    } )
    # check for RTL pair
    elif ( self.detail_flags & SummaryDetail.RtlCmd ) and not ( self.detail_flags & SummaryDetail.RtlResult ):
        retcode, message = self.check_missing_result(self.process_result, "rtl")
        self.load_rtl_result( { "retcode" : retcode
                              , "log"     : None
                              , "message" : message
                              } )
def load_process_line( self, arg_line ):
    """Evaluate one process-log line and dispatch its payload to the matching
    load_* handler registered in self.sum_tups.

    Returns True once a line has been dispatched; returns None (falls
    through) for marker lines and lines that are not evaluable tuples.
    """
    Msg.user( "Process Result Line: %s" % (str( arg_line)), "SUM-TUPLE" )
    # Lines starting with '[' are timestamp/marker lines, not result tuples.
    if arg_line[0] == '[':
        return
    # NOTE(review): my_val is never used.
    my_val = None
    try:
        # NOTE(review): exec-based parsing of the log line -- the process log
        # must be trusted input.
        my_glb, my_loc = SysUtils.exec_content( arg_line, True )
    except (SyntaxError, TypeError) as arg_ex:
        # Not an evaluable result line; silently skip it.
        return
    except :
        raise
    # summary tuples are initialized in the __init__ as are the element indexes.
    # The idea is to scan the tuple list for a match on the key; when one is
    # found, the referenced callback proc is executed with the retrieved line
    # dictionary and the flags are updated as to what this summary item contains.
    for ( my_key, my_proc, my_mask ) in self.sum_tups:
        my_result = my_loc.get( my_key , None )
        if my_result is None:
            # nothing was returned; continue so the next element is checked
            # if there is a next element
            continue
        my_proc( my_result )
        self.detail_flags |= int(my_mask)
        break
    return True
def load_gen_info( self, arg_dict ):
    """Record the generator command line, its log file names, and the
    requested instruction-count bounds.  Missing keys raise KeyError."""
    field_map = ( ( "force_cmd" , "command"   ),
                  ( "force_log" , "log"       ),
                  ( "force_elog", "elog"      ),
                  ( "max_instr" , "max-instr" ),
                  ( "min_instr" , "min-instr" ) )
    for attr, key in field_map:
        setattr( self, attr, arg_dict[ key ] )
# load the force generate results
def load_gen_result( self, arg_dict ):
    """Populate the generator result fields from a result dictionary.

    A missing or unparsable return code is recorded as -1 and logged; the
    remaining fields fall back to defaults via dict.get.  When the run was
    interrupted by a signal, the message/seed are replaced by the signal info.
    """
    try:
        my_retcode = arg_dict["retcode"]
        Msg.user( "Return Code: %s" % ( my_retcode ))
        # BUG FIX: the original tested `str(retcode).strip() is None`, which
        # can never be true (str() never returns None).  Check the raw value
        # before conversion instead.
        if my_retcode is None:
            self.force_retcode = -1
            raise Exception( "Generate Return Code Not Found" )
        self.force_retcode = int( str( my_retcode ).strip() )
    except :
        # Best-effort: any failure (missing key, None, bad format) maps to -1.
        self.force_retcode = -1
        Msg.err( "Generate Return Code in unrecognizable format" )
    self.force_stdout = arg_dict["stdout" ]
    self.force_stderr = arg_dict["stderr" ]
    if SysUtils.failed( self.force_retcode ):
        self.force_level = SummaryLevel.Fail
    self.force_start = float( arg_dict.get("start", 0.00 ))
    self.force_end   = float( arg_dict.get("end"  , 0.00 ))
    self.secondary   = int( arg_dict.get("secondary", 0 ))
    self.default     = int( arg_dict.get("default"  , 0 ))
    self.total       = int( arg_dict.get("total"    , 0 ))
    if self.signal_id is None:
        self.force_msg = arg_dict.get("message", None )
        self.seed = str( arg_dict.get("seed", "Seed Not Found" ))
    else:
        # The run was killed by a signal; report that instead of the message.
        self.force_msg = "Incomplete, Signal Id: %s, %s " % (str(self.signal_id), str(self.signal_message))
        self.seed = None
# load the iss execution information
def load_iss_info( self, arg_dict ):
    """Record the ISS (instruction-set simulator) command line; a missing
    'command' key raises KeyError."""
    self.iss_cmd = arg_dict["command"]
# load the iss execution results
def load_iss_result( self, arg_dict ):
    """Populate the ISS result fields: log file, instruction count, return
    code and completion message (signal-aware)."""
    self.iss_log = arg_dict["log"]
    self.instr_count = int( arg_dict.get("count", 0 ))
    self.iss_retcode = int( arg_dict["retcode"] )
    if self.signal_id is not None:
        # The run was interrupted by a signal; report that instead.
        self.iss_message = "Incomplete, Signal Id: %s, %s " % (str(self.signal_id), str(self.signal_message))
    else:
        self.iss_message = str( arg_dict.get("message", None ))
# load the rtl execution information
def load_rtl_info( self, arg_dict ):
    """Record the RTL simulation command line; a missing 'rtl-command' key
    raises KeyError."""
    self.rtl_cmd = arg_dict["rtl-command"]
# load the rtl execution results
def load_rtl_result( self, arg_dict ):
    """Populate the RTL result fields: log file, cycle count, return code and
    completion message (signal-aware)."""
    self.rtl_log = arg_dict["log"]
    self.cycle_count = int( arg_dict.get("count", 0 ))
    self.rtl_retcode = int( arg_dict["retcode"] )
    if self.signal_id is not None:
        # The run was interrupted by a signal; report that instead.
        self.rtl_message = "Incomplete, Signal Id: %s, %s " % (str(self.signal_id), str(self.signal_message))
    else:
        self.rtl_message = str( arg_dict.get("message", None ))
# load the cmp execution information
def load_trace_cmp_info( self, arg_dict ):
    """Record the trace-compare command line; a missing 'trace-cmp-cmd' key
    raises KeyError."""
    self.trace_cmp_cmd = arg_dict["trace-cmp-cmd"]
# load the cmp execution results
def load_trace_cmp_result( self, arg_dict ):
    """Populate the trace-compare result fields; any parse failure maps the
    return code to -1 and logs an error."""
    self.trace_cmp_log = arg_dict["trace-cmp-log"]
    try:
        self.trace_cmp_retcode = int( arg_dict["trace-cmp-retcode"] )
        if self.signal_id is not None:
            # The run was interrupted by a signal; report that instead.
            self.trace_cmp_msg = "Incomplete, Signal Id: %s, %s " % (str(self.signal_id), str(self.signal_message))
        else:
            self.trace_cmp_msg = str( arg_dict["trace-cmp-msg" ] )
    except :
        self.trace_cmp_retcode = -1
        Msg.err( "CMP Return Code in unrecognizable format" )
def load_signaled( self, arg_dict ):
    """Record the id/message of the signal that interrupted the run; corrupt
    input is flagged with signal_id = -1 and logged."""
    Msg.user( str( arg_dict ), "SIGNALED" )
    try:
        # Tuple assignment: the right-hand side is fully evaluated first, so
        # a missing key leaves neither attribute half-updated.
        self.signal_id, self.signal_message = arg_dict["retcode"], arg_dict["message"]
    except :
        self.signal_id = -1
        Msg.err( "Signal Info Corrupt" )
# commit this summary item: tally phase results and rename the task marker
def commit( self ):
    """Tally pass/fail for each executed phase (generate, simulate, rtl,
    trace-compare), rename the task's STARTED marker file to PASS, FAIL or
    INCOMPLETE, and trigger the summary's on-fail hook when needed.

    Returns an 8-tuple: (gen_count, gen_result, sim_count, sim_result,
    rtl_count, rtl_result, trace_cmp_count, trace_cmp_result), where each
    count is 0/1 depending on whether the phase ran.
    """
    my_gen_cnt = 0
    my_gen_ret = 0
    my_sim_cnt = 0
    my_sim_ret = 0
    my_rtl_cnt = 0
    my_rtl_ret = 0
    my_trace_cmp_ret = 0
    my_trace_cmp_cnt = 0
    my_tgt_name = ""
    self.passed = True
    if self.has_generate():
        my_gen_cnt = 1
        my_gen_ret = self.commit_generate()
        self.passed = self.passed and bool( my_gen_ret )
    if self.has_simulate():
        my_sim_cnt = 1
        my_sim_ret = self.commit_simulate()
        self.passed = self.passed and bool( my_sim_ret )
    if self.has_rtl():
        my_rtl_cnt = 1
        my_rtl_ret = self.commit_rtl()
        self.passed = self.passed and bool( my_rtl_ret )
    if self.has_trace_cmp():
        my_trace_cmp_cnt = 1
        my_trace_cmp_ret = self.commit_trace_cmp()
        self.passed = self.passed and bool( my_trace_cmp_ret )
    # Rename the STARTED marker: PASS when all phases passed, otherwise FAIL,
    # or INCOMPLETE when the run was cut short by a signal.
    my_src_name = "%s%s" % ( self.task_path, "STARTED" )
    my_tgt_name = "%s%s" % ( self.task_path, SysUtils.ifthen( self.passed, "PASS", SysUtils.ifthen( self.signal_id is None, "FAIL", "INCOMPLETE" )))
    PathUtils.move( my_src_name, my_tgt_name )
    if not self.passed:
        self.summary.do_on_fail( self )
    return ( my_gen_cnt, my_gen_ret, my_sim_cnt, my_sim_ret, my_rtl_cnt, my_rtl_ret, my_trace_cmp_cnt, my_trace_cmp_ret )
class SummaryGroups( object ):
    """Partitions summary items into named groups and owns the work queue."""

    def __init__( self ):
        self.groups = {}
        self.queue = SummaryQueue()

    def update_groups( self, arg_group ):
        """Ensure a (possibly empty) bucket exists for arg_group."""
        self.groups.setdefault( arg_group, [] )

    # adds an item to its group's bucket, creating the bucket on first use
    def add_item( self, arg_item ):
        self.groups.setdefault( arg_item.item_group, [] ).append( arg_item )

    def group_items( self, arg_group ):
        """Return the item list collected for arg_group (KeyError if unknown)."""
        return self.groups[ arg_group ]

    def task_groups( self ):
        """Return the mapping of group name -> item list."""
        return self.groups
# class SummaryThread( HiThread ):
class SummaryThread( HiOldThread ):
def __init__( self, sq, summary):
    """Stash the queue and summary references, then hand off to the thread
    base class.  super().__init__ comes last on purpose: the thread may
    begin running as soon as the base class launches it, so every attribute
    must already be in place."""
    self.summary = summary
    self.summary_queue = sq
    super().__init__(True)
def commit_item( self, arg_item ):
    """Register a task id (first time it is seen) and its group with the
    summary's bookkeeping structures.  Always returns 0."""
    tasks = self.summary.tasks
    if arg_item.task_id not in tasks:
        tasks[ arg_item.task_id ] = []
        self.summary.task_lookup.append( arg_item.task_id )
    self.summary.groups.update_groups( arg_item.item_group )
    return 0
def run( self ):
# Block on process queue while | |
<reponame>ZJONSSON/fasteignamat-functions<gh_stars>1-10
# -*- coding: utf-8 -*-
#Fasteignamat.py - functions for scraping info from Fasteignamat Íslands (www.skra.is)
#For fastanr.:
# Returns general info
#  Returns extended info
# Returns land info and landnr
#For landnr.:
# Return geo info from skra.is geoserver + ISN93 xx,yy converted to WGS84 lat/lng
#Author: <EMAIL> / @pallih
#See demo.py for demo usage
import requests
from random import choice
import string
import lxml.html
import json
import math
# Characters used for the cache-busting URL suffix.
# FIX: string.ascii_letters / range replace the Python-2-only
# string.letters / xrange, so the module also runs under Python 3
# (string.ascii_letters is available and equivalent under Python 2's
# default locale as well).
chars = string.ascii_letters + string.digits
def random():
    """Return a 4-character random alphanumeric string appended to URLs to avoid cache."""
    return ''.join(choice(chars) for _ in range(4))
def latin1_to_ascii(unicrap):
    """This takes a UNICODE string and replaces Latin-1 characters with
    something equivalent in 7-bit ASCII. It returns a plain ASCII string. This
    function makes a best effort to convert Latin-1 characters into ASCII
    equivalents. It does not just strip out the Latin-1 characters. All
    characters in the standard 7-bit ASCII range are preserved. In the 8th bit
    range all the Latin-1 accented letters are converted to unaccented
    equivalents. Most symbol characters are converted to something meaningful.
    Anything not converted is deleted.
    """
    # Translation table keyed by named Unicode code points.  The punctuation
    # entries at the end are slug-oriented (parentheses/question mark/period
    # stripped, comma and space mapped to underscores) because the result is
    # used as a dictionary key by the scraping functions below.
    xlate = {
        u'\N{ACUTE ACCENT}': "'",
        u'\N{BROKEN BAR}': '|',
        u'\N{CEDILLA}': '{cedilla}',
        u'\N{CENT SIGN}': '{cent}',
        u'\N{COPYRIGHT SIGN}': '{C}',
        u'\N{CURRENCY SIGN}': '{currency}',
        u'\N{DEGREE SIGN}': '{degrees}',
        u'\N{DIAERESIS}': '{umlaut}',
        u'\N{DIVISION SIGN}': '/',
        u'\N{FEMININE ORDINAL INDICATOR}': '{^a}',
        u'\N{INVERTED EXCLAMATION MARK}': '!',
        u'\N{INVERTED QUESTION MARK}': '?',
        u'\N{LATIN CAPITAL LETTER A WITH ACUTE}': 'A',
        u'\N{LATIN CAPITAL LETTER A WITH CIRCUMFLEX}': 'A',
        u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}': 'A',
        u'\N{LATIN CAPITAL LETTER A WITH GRAVE}': 'A',
        u'\N{LATIN CAPITAL LETTER A WITH RING ABOVE}': 'A',
        u'\N{LATIN CAPITAL LETTER A WITH TILDE}': 'A',
        u'\N{LATIN CAPITAL LETTER AE}': 'Ae',
        u'\N{LATIN CAPITAL LETTER C WITH CEDILLA}': 'C',
        u'\N{LATIN CAPITAL LETTER E WITH ACUTE}': 'E',
        u'\N{LATIN CAPITAL LETTER E WITH CIRCUMFLEX}': 'E',
        u'\N{LATIN CAPITAL LETTER E WITH DIAERESIS}': 'E',
        u'\N{LATIN CAPITAL LETTER E WITH GRAVE}': 'E',
        u'\N{LATIN CAPITAL LETTER ETH}': 'D',
        u'\N{LATIN CAPITAL LETTER I WITH ACUTE}': 'I',
        u'\N{LATIN CAPITAL LETTER I WITH CIRCUMFLEX}': 'I',
        u'\N{LATIN CAPITAL LETTER I WITH DIAERESIS}': 'I',
        u'\N{LATIN CAPITAL LETTER I WITH GRAVE}': 'I',
        u'\N{LATIN CAPITAL LETTER N WITH TILDE}': 'N',
        u'\N{LATIN CAPITAL LETTER O WITH ACUTE}': 'O',
        u'\N{LATIN CAPITAL LETTER O WITH CIRCUMFLEX}': 'O',
        u'\N{LATIN CAPITAL LETTER O WITH DIAERESIS}': 'O',
        u'\N{LATIN CAPITAL LETTER O WITH GRAVE}': 'O',
        u'\N{LATIN CAPITAL LETTER O WITH STROKE}': 'O',
        u'\N{LATIN CAPITAL LETTER O WITH TILDE}': 'O',
        u'\N{LATIN CAPITAL LETTER THORN}': 'th',
        u'\N{LATIN CAPITAL LETTER U WITH ACUTE}': 'U',
        u'\N{LATIN CAPITAL LETTER U WITH CIRCUMFLEX}': 'U',
        u'\N{LATIN CAPITAL LETTER U WITH DIAERESIS}': 'U',
        u'\N{LATIN CAPITAL LETTER U WITH GRAVE}': 'U',
        u'\N{LATIN CAPITAL LETTER Y WITH ACUTE}': 'Y',
        u'\N{LATIN SMALL LETTER A WITH ACUTE}': 'a',
        u'\N{LATIN SMALL LETTER A WITH CIRCUMFLEX}': 'a',
        u'\N{LATIN SMALL LETTER A WITH DIAERESIS}': 'a',
        u'\N{LATIN SMALL LETTER A WITH GRAVE}': 'a',
        u'\N{LATIN SMALL LETTER A WITH RING ABOVE}': 'a',
        u'\N{LATIN SMALL LETTER A WITH TILDE}': 'a',
        u'\N{LATIN SMALL LETTER AE}': 'ae',
        u'\N{LATIN SMALL LETTER C WITH CEDILLA}': 'c',
        u'\N{LATIN SMALL LETTER E WITH ACUTE}': 'e',
        u'\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}': 'e',
        u'\N{LATIN SMALL LETTER E WITH DIAERESIS}': 'e',
        u'\N{LATIN SMALL LETTER E WITH GRAVE}': 'e',
        u'\N{LATIN SMALL LETTER ETH}': 'd',
        u'\N{LATIN SMALL LETTER I WITH ACUTE}': 'i',
        u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}': 'i',
        u'\N{LATIN SMALL LETTER I WITH DIAERESIS}': 'i',
        u'\N{LATIN SMALL LETTER I WITH GRAVE}': 'i',
        u'\N{LATIN SMALL LETTER N WITH TILDE}': 'n',
        u'\N{LATIN SMALL LETTER O WITH ACUTE}': 'o',
        u'\N{LATIN SMALL LETTER O WITH CIRCUMFLEX}': 'o',
        u'\N{LATIN SMALL LETTER O WITH DIAERESIS}': 'o',
        u'\N{LATIN SMALL LETTER O WITH GRAVE}': 'o',
        u'\N{LATIN SMALL LETTER O WITH STROKE}': 'o',
        u'\N{LATIN SMALL LETTER O WITH TILDE}': 'o',
        u'\N{LATIN SMALL LETTER SHARP S}': 'ss',
        u'\N{LATIN SMALL LETTER THORN}': 'th',
        u'\N{LATIN SMALL LETTER U WITH ACUTE}': 'u',
        u'\N{LATIN SMALL LETTER U WITH CIRCUMFLEX}': 'u',
        u'\N{LATIN SMALL LETTER U WITH DIAERESIS}': 'u',
        u'\N{LATIN SMALL LETTER U WITH GRAVE}': 'u',
        u'\N{LATIN SMALL LETTER Y WITH ACUTE}': 'y',
        u'\N{LATIN SMALL LETTER Y WITH DIAERESIS}': 'y',
        u'\N{LEFT-POINTING DOUBLE ANGLE QUOTATION MARK}': '<<',
        u'\N{MACRON}': '_',
        u'\N{MASCULINE ORDINAL INDICATOR}': '{^o}',
        u'\N{MICRO SIGN}': '{micro}',
        u'\N{MIDDLE DOT}': '*',
        u'\N{MULTIPLICATION SIGN}': '*',
        u'\N{NOT SIGN}': '{not}',
        u'\N{PILCROW SIGN}': '{paragraph}',
        u'\N{PLUS-MINUS SIGN}': '{+/-}',
        u'\N{POUND SIGN}': '{pound}',
        u'\N{REGISTERED SIGN}': '{R}',
        u'\N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}': '>>',
        u'\N{SECTION SIGN}': '{section}',
        u'\N{SOFT HYPHEN}': '-',
        u'\N{SUPERSCRIPT ONE}': '{^1}',
        u'\N{SUPERSCRIPT THREE}': '{^3}',
        u'\N{SUPERSCRIPT TWO}': '{^2}',
        u'\N{VULGAR FRACTION ONE HALF}': '{1/2}',
        u'\N{VULGAR FRACTION ONE QUARTER}': '{1/4}',
        u'\N{VULGAR FRACTION THREE QUARTERS}': '{3/4}',
        u'\N{YEN SIGN}': '{yen}',
        u'\N{LEFT PARENTHESIS}': '',  # Paranthesis to nothing
        u'\N{RIGHT PARENTHESIS}': '',
        u'\N{QUESTION MARK}': '',  # Question mark strip
        u'\N{FULL STOP}': '',  # period strip
        u'\N{COMMA}': '_',  # comma to underscore
        u'\N{SPACE}': '_'  # convert space to underscore
    }
    r = ''
    for i in unicrap:
        if i in xlate:
            r += xlate[i]
        elif ord(i) >= 0x80:
            # Unmapped non-ASCII characters are dropped entirely.
            pass
        else:
            r += str(i)
    return r
def fastanr_extended_info(fastanr):
    """Scrape the extended property-assessment table from skra.is for a
    fastanr and return it as a dict (keys are ASCII-slugged row labels)."""
    # Random query-string suffix works around bad status lines / caching.
    url = 'http://www.skra.is/default.aspx?pageid=957' + "&x=" + random()
    payload = {'pageid': '957', 'fnum': fastanr.replace('-', '')}
    response = requests.post(url, data=payload)
    doc = lxml.html.fromstring(response.content)
    rows = doc.xpath('//table[@class="matsforsendur"]/tr/.')
    record = {'fastanumer_org': fastanr}
    # Centered two-column header rows carry these attributes; skip them and
    # keep only label/value rows.
    section_header_attrib = {'colspan': '2', 'style': 'text-align: center;'}
    for row in rows:
        if row[0].tag == 'th' and row[0].attrib != section_header_attrib:
            record[latin1_to_ascii(row[0].text).lower()] = row[1].text
    return record
def fastanr_general_info(fastanr):
    """Scrape the general property-information table from skra.is for a
    fastanr.  Returns a dict with the address plus one entry per table
    column, keyed by the cell's 'header' attribute."""
    url = 'http://www.skra.is/default.aspx?pageid=1000' + "&x=" + random()  # append a random string to url to avoid bad status line / cache ...
    params = {'pageid': '1000', 'streetname': fastanr.replace('-', '')}
    r = requests.post(url, data=params)
    html = r.content
    root = lxml.html.fromstring(html)
    xpath = '//table[@class="resulttable large"]//tbody/tr/.'  # results table
    xpath_address = '//div[@id="response"]/h2/text()'
    address = root.xpath(xpath_address)
    table = root.xpath(xpath)
    record = {}
    record['fastanumer_org'] = fastanr
    # Collapse runs of whitespace in the address heading.
    record['address'] = ' '.join(address[0].split())
    for tr in table:
        # Two page layouts exist: one nests the fastanr three levels deep in
        # the first cell, the other puts it directly in the cell text.  Try
        # the nested layout first and fall back on IndexError.
        try:
            if tr[0][0][2].text is not None:
                if tr[0][0][2].text == fastanr:
                    for td in tr:
                        # 'fastmat'/'fasteignamat' cells need text_content()
                        # because their value is wrapped in child elements.
                        if td.attrib['header'] == 'fastmat' or td.attrib['header'] == 'fasteignamat':
                            record[td.attrib['header']] = td.text_content().strip()
                        elif td.attrib['header'] == 'fastanr':
                            record[td.attrib['header']] = td[0][2].text_content().strip()
                        else:
                            record[td.attrib['header']] = td.text.strip()
        except IndexError:
            # Flat layout: the fastanr is the first cell's direct text.
            if tr[0].text == fastanr:
                for td in tr:
                    if td.attrib['header'] == 'fastmat' or td.attrib['header'] == 'fasteignamat':
                        record[td.attrib['header']] = td.text_content().strip()
                    else:
                        record[td.attrib['header']] = td.text.strip()
    return record
def fastanr_land_info(fastanr):
    """Scrape land information (including landnr) for a fastanr from skra.is.

    Returns a dict keyed by the table cells' 'header' attributes; when the
    results table is absent, 'landnr_lookup_error' is set to 1 instead.
    """
    url = 'http://www.skra.is/default.aspx?pageid=1000'
    payload = {'pageid': '1000', 'streetname': fastanr.replace('-', '')}
    response = requests.post(url, data=payload)
    doc = lxml.html.fromstring(response.content)
    rows = doc.xpath('//table[@class="resulttable small"]//tbody/tr/.')  # results table
    record = {'fastanumer_org': fastanr}
    if not rows:
        # No small results table on the page -> landnr lookup failed.
        record['landnr_lookup_error'] = 1
    else:
        for row in rows:
            for cell in row:
                record[cell.attrib['header']] = cell.text.strip()
    return record
def isnet93_to_wgs84(xx, yy):
    """Convert ISN93 (Icelandic national grid, Lambert conformal conic)
    coordinates to WGS84 latitude/longitude.

    :param xx: ISN93 easting in metres.
    :param yy: ISN93 northing in metres.
    :returns: dict with 'lat' and 'lng' in decimal degrees, rounded to 7 places.
    """
    x = xx
    y = yy
    # GRS80 ellipsoid and ISN93 projection constants.
    a = 6378137.0            # semi-major axis (m)
    f = 1/298.257222101      # flattening
    lat1 = 64.25             # first standard parallel (deg)
    lat2 = 65.75             # second standard parallel (deg)
    latc = 65.00             # latitude of projection origin (deg)
    lonc = 19.00             # central meridian, degrees west
    eps = 0.00000000001      # convergence tolerance for the latitude iteration

    def fx(p):
        # Radius-of-curvature term at latitude p (degrees).
        return a * math.cos(p/rho)/math.sqrt(1 - math.pow(e*math.sin(p/rho), 2))

    def f1(p):
        return math.log((1 - p)/(1 + p))

    def f2(p):
        return f1(p) - e * f1(e * p)

    def f3(p):
        # Projection radius (polar distance) for latitude p (degrees).
        return pol1*math.exp((f2(math.sin(p/rho)) - f2sin1)*sint / 2)

    rho = 45/math.atan2(1.0, 1.0)   # degrees per radian
    e = math.sqrt(f * (2 - f))      # first eccentricity
    dum = f2(math.sin(lat1/rho)) - f2(math.sin(lat2/rho))
    sint = 2 * (math.log(fx(lat1)) - math.log(fx(lat2))) / dum
    f2sin1 = f2(math.sin(lat1/rho))
    pol1 = fx(lat1)/sint
    polc = f3(latc) + 500000.0      # projection radius of origin incl. false northing
    peq = a * math.cos(latc/rho)/(sint*math.exp(sint*math.log((45-latc/2)/rho)))
    pol = math.sqrt(math.pow(x-500000, 2) + math.pow(polc-y, 2))
    # Initial latitude estimate, refined iteratively below.
    lat = 90 - 2 * rho * math.atan(math.exp(math.log(pol / peq) / sint))
    # Correction factor is held fixed at the initial estimate during the
    # iteration. (The original assigned this line twice; the duplicate and the
    # dead `lon = 0` initialisation have been removed.)
    fact = rho * math.cos(lat / rho) / sint / pol
    delta = 1.0
    while math.fabs(delta) > eps:
        delta = (f3(lat) - pol) * fact
        lat += delta
    lon = -(lonc + rho * math.atan((500000 - x) / (polc - y)) / sint)
    return {
        'lat': round(lat, 7),
        'lng': round(lon, 7),
    }
def geocode_landnr(landnr):
url = 'http://geo.skra.is/geoserver/wfs'
params = {
'service': 'wfs',
'version': '1.1.0',
'request': 'GetFeature',
'typename': 'fasteignaskra:VSTADF',
'outputformat': 'json',
#'maxfeatures':'5',
'filter': '<Filter><PropertyIsLike wildCard="*" singleChar="#" escapeChar="!"><PropertyName>fasteignaskra:LANDNR</PropertyName><Literal>%s</Literal></PropertyIsLike></Filter>' % (landnr)
}
r = requests.post(url, data=params)
jsonstring = r.content
process = json.loads(jsonstring)
record = {}
try:
for p in process['features']:
for key, value in dict.items(p['properties']):
record[key] = value
for key, value in dict.items(p['geometry']):
record[key] = value
record['id'] = p['id']
| |
60) & (datset_churn['MonthlyCharges'] <= 80 ), 'MonthlyChargesCat'] = 3
# Highest MonthlyCharges buckets: (80, 100] -> category 4, (100, inf) -> 5.
datset_churn.loc[(datset_churn['MonthlyCharges'] > 80) & (datset_churn['MonthlyCharges'] <= 100 ), 'MonthlyChargesCat'] = 4
datset_churn.loc[ datset_churn['MonthlyCharges'] > 100, 'MonthlyChargesCat'] = 5
#Checking the categories
datset_churn[['MonthlyCharges','MonthlyChargesRange','MonthlyChargesCat']].head(10)
# #### Creating new derived columns for Categorical variables
# In[ ]:
# Creating a new derived column for family: if a customer has a Partner or a
# Dependent, it is considered a family.
# Vectorized with numpy instead of per-row Python loops: produces identical
# values, runs at C speed, and does not assume a 0..n-1 RangeIndex the way
# the original `datset_churn['Partner'][rows]` indexing did.
datset_churn['Family'] = np.where(
    (datset_churn['Partner'] == 'No') & (datset_churn['Dependents'] == 'No'),
    'No', 'Yes')
#print(datset_churn[['Partner', 'Dependents', 'Family' ]].head(10))
# Derived column for Online Services (Online Security & Online Backup): 'Yes'
# if the customer has either service, else 'No'.
datset_churn['OnlineServices'] = np.where(
    (datset_churn['OnlineSecurity'] == 'No') & (datset_churn['OnlineBackup'] == 'No'),
    'No', 'Yes')
#print(datset_churn[['OnlineSecurity', 'OnlineBackup', 'OnlineServices' ]].head(10))
# Derived column for Streaming Services (StreamingTV & StreamingMovies): 'Yes'
# if the customer has either service, else 'No'.
datset_churn['StreamingServices'] = np.where(
    (datset_churn['StreamingTV'] == 'No') & (datset_churn['StreamingMovies'] == 'No'),
    'No', 'Yes')
#print(datset_churn[['StreamingTV', 'StreamingMovies', 'StreamingServices' ]].head(10))
# Churn counts for each derived categorical column.
# seaborn's catplot takes `height` -- the old `size` alias was deprecated when
# catplot was introduced (seaborn 0.9) and later removed, so use the
# supported keyword.
plot_cat_data = sns.catplot(x='Family', col='Churn_Num', data = datset_churn, kind='count', height=4, aspect=0.8)
plot_cat_data = sns.catplot(x='OnlineServices', col='Churn_Num', data = datset_churn, kind='count', height=4, aspect=0.8)
plot_cat_data = sns.catplot(x='StreamingServices', col='Churn_Num', data = datset_churn, kind='count', height=4, aspect=0.8)
# #### Observation
# - Customers with family are less likely to Churn
# - Customers not opted for online services (online backup or security) have slightly higher chances of churn
# - Customer opted for Streaming Services seems to have slightly higher chances of churn
# ## Preparing Columns for Classification
# ### Converting the Object/Categorical Variable to Numerical Variable
# In[ ]:
# Quick schema check before encoding the categorical columns.
datset_churn.info()
# In[ ]:
#Converting Gender column to numeric value
#datset_churn['Gender'].unique() # Print unique values in the column
datset_churn['Gender_Num'] = datset_churn['Gender'].map( {'Female': 1, 'Male': 0} ).astype(int) #Map Categorical to Numerical Values
datset_churn[['Gender','Gender_Num']].head(2) # Test the mapping
# In[ ]:
# For Partner & Dependant , we created Family Column . Converting Family column to numeric value
#datset_churn['Family'].unique() # Print unique values in the column
datset_churn['Family_Num'] = datset_churn['Family'].map( {'Yes': 1, 'No': 0} ).astype(int) #Map Categorical to Numerical Values
datset_churn[['Family','Family_Num']].head(2) # Test the mapping
# In[ ]:
# Encode the remaining categorical service/contract/billing columns.
# NOTE: .map() yields NaN for any category not listed, and .astype(int) would
# then raise -- each mapping below must cover every value present in the data.
datset_churn['PhoneService_Num'] = datset_churn['PhoneService'].map( {'Yes': 1, 'No': 0} ).astype(int)
datset_churn['MultipleLines_Num'] = datset_churn['MultipleLines'].map( {'No': 0, 'Yes': 1, 'No phone service':2} ).astype(int)
datset_churn['InternetService_Num'] = datset_churn['InternetService'].map( {'DSL': 0, 'Fiber optic': 1, 'No':2} ).astype(int)
datset_churn['OnlineServices_Num'] = datset_churn['OnlineServices'].map( {'Yes': 1, 'No': 0} ).astype(int)
datset_churn['DeviceProtection_Num'] = datset_churn['DeviceProtection'].map( {'No': 0, 'Yes': 1, 'No internet service':2} ).astype(int)
datset_churn['StreamingServices_Num'] = datset_churn['StreamingServices'].map( {'Yes': 1, 'No': 0} ).astype(int)
datset_churn['TechSupport_Num'] = datset_churn['TechSupport'].map( {'No': 0, 'Yes': 1, 'No internet service':2} ).astype(int)
datset_churn['Contract_Num'] = datset_churn['Contract'].map( {'Month-to-month': 0, 'One year': 1, 'Two year': 2} ).astype(int)
datset_churn['PaperlessBilling_Num'] = datset_churn['PaperlessBilling'].map( {'Yes': 1, 'No': 0} ).astype(int)
datset_churn['PaymentMethod_Num'] = datset_churn['PaymentMethod'].map( {'Electronic check': 0, 'Mailed check': 1, 'Bank transfer (automatic)': 2 , 'Credit card (automatic)' : 3} ).astype(int)
# In[ ]:
# Verify all encoded columns landed as ints.
datset_churn.info()
# ### Now we will delete the non-required columns and prepare the dataset for classification
# In[ ]:
# Keep an untouched copy of the full dataset for later reference.
datset_churn_copy = datset_churn.copy()
# In[ ]:
#Dropping the Categorical columns and keeping their equivalent numeric column
columns_to_drop = ['Gender', 'Partner', 'Dependents', 'Tenure', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod', 'TotalCharges', 'Churn', 'Family', 'OnlineServices', 'StreamingServices']
datset_churn = datset_churn.drop(columns_to_drop, axis=1)
#Re-arranging the columns as per the original dataset order
datset_churn = datset_churn[['CustomerID', 'Gender_Num', 'SeniorCitizen', 'Family_Num', 'TenureCat', 'PhoneService_Num', 'MultipleLines_Num', 'InternetService_Num', 'OnlineServices_Num', 'DeviceProtection_Num', 'TechSupport_Num', 'StreamingServices_Num', 'Contract_Num', 'PaperlessBilling_Num', 'PaymentMethod_Num', 'MonthlyChargesCat', 'Churn_Num']]
datset_churn = datset_churn.rename(columns={'Gender_Num' : 'Gender', 'Family_Num' : 'Family', 'PhoneService_Num' : 'PhoneService', 'MultipleLines_Num': 'MultipleLines', 'InternetService_Num' : 'InternetService', 'OnlineServices_Num' : 'OnlineServices', 'DeviceProtection_Num' : 'DeviceProtection', 'TechSupport_Num' : 'TechSupport', 'StreamingServices_Num' : 'StreamingServices', 'Contract_Num' : 'Contract', 'PaperlessBilling_Num' : 'PaperlessBilling', 'PaymentMethod_Num' : 'PaymentMethod', 'MonthlyCharges' : 'MonthlyCharges', 'Churn_Num' : 'Churn' })
datset_churn.info()
# In[ ]:
datset_churn.head(10) # Taking a quick look into the new data
# In[ ]:
# Features are every column between CustomerID (excluded) and the Churn
# target; using -1 instead of hard-coded positions keeps this robust if a
# column is added or removed above.
X = datset_churn.iloc[:, 1:-1].values  # Feature variables
y = datset_churn.iloc[:, -1].values    # Target variable (Churn)
#Dividing data into test & train: 70% of the data for training and 30% for test.
# random_state pins the shuffle so results are reproducible between runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
print('There are {} samples in the training set and {} samples in the test set'.format(X_train.shape[0], X_test.shape[0]))
# ## Classification
# ### We will run all classifiers to have an initial look at the performance
# #### Defining function for Confusion Matrix , Precision, Recall and F1 Score
# In[ ]:
#Creating function for Confusion Matrix, Precision, Recall and F1 Score
def plot_confusion_matrix(classifier, y_test, y_pred_test):
    """Draw the confusion matrix for `classifier` and print recall, precision and F1."""
    conf_mat = confusion_matrix(y_test, y_pred_test)
    print("\n",classifier,"\n")
    plt.clf()
    plt.imshow(conf_mat, interpolation='nearest', cmap='RdBu')
    class_names = ['Churn-No','Churn-Yes']
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    ticks = np.arange(len(class_names))
    plt.xticks(ticks, class_names, rotation=45)
    plt.yticks(ticks, class_names)
    # Annotate each quadrant with its TN/FP/FN/TP label and count.
    quadrant_names = [['TN','FP'], ['FN', 'TP']]
    for row in range(2):
        for col in range(2):
            annotation = str(quadrant_names[row][col]) + " = " + str(conf_mat[row][col])
            plt.text(col, row, annotation, horizontalalignment='center', color='White')
    print()
    # Derive recall/precision/F1 from the flattened matrix.
    tn, fp, fn, tp = conf_mat.ravel()
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    F1 = 2*recall*precision/(recall+precision)
    print('Recall={0:0.3f}'.format(recall),'\nPrecision={0:0.3f}'.format(precision))
    print('F1={0:0.3f}'.format(F1))
# #### Defining function for Precision Recall Curve
# In[ ]:
from sklearn.metrics import average_precision_score, precision_recall_curve
def plot_prec_rec_curve(classifier, y_test, y_pred_score):
    """Plot the precision-recall curve for `classifier` and print its average precision."""
    prec_vals, rec_vals, _ = precision_recall_curve(y_test, y_pred_score)
    avg_prec = average_precision_score(y_test, y_pred_score)
    print('Average precision-recall score: {0:0.3f}'.format( avg_prec))
    plt.plot(rec_vals, prec_vals, label='area = %0.3f' % avg_prec, color="green")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision Recall Curve')
    plt.legend(loc="best")
    print()
# ### Master Classification Engine
# In[ ]:
# Making a list of all classifiers to benchmark on identical data.
classifier_model = [LogisticRegression(), KNeighborsClassifier(), GaussianNB(), SVC(),
                    DecisionTreeClassifier(), RandomForestClassifier(), SGDClassifier(),
                    AdaBoostClassifier()]
# Empty lists accumulating one entry per model.
classifier_model_list = []
classifier_accuracy_test = []
classifier_accuracy_train = []
f1score = []
precisionscore = []
recallscore = []
avg_pre_rec_score = []
cv_score = []
for classifier in classifier_model:
    # Fit the classification model on the training set.
    classifier.fit(X_train, y_train)
    # Accuracy on the held-out test set.
    y_pred_test = classifier.predict(X_test)
    score_test = accuracy_score(y_test, y_pred_test)
    # Accuracy on the training set (to spot overfitting).
    y_pred_train = classifier.predict(X_train)
    score_train = accuracy_score(y_train, y_pred_train)
    # 10-fold cross-validation score on the training set.
    scores = cross_val_score(classifier, X_train, y_train, cv=10)
    cv_score.append(scores.mean())
    # Record the model name and accuracy scores.
    classifier_model_list.append(classifier.__class__.__name__)
    classifier_accuracy_test.append(round(score_test, 4))
    classifier_accuracy_train.append(round(score_train, 4))
    # Precision, recall and F1 on the test set.
    f1score.append(f1_score(y_test, y_pred_test))
    precisionscore.append(precision_score(y_test, y_pred_test))
    recallscore.append(recall_score(y_test, y_pred_test))
    # Score used for the precision-recall curve: prefer decision_function and
    # fall back to the class-1 probability for models without one (e.g.
    # GaussianNB, tree ensembles). Catching AttributeError specifically -- the
    # original bare `except:` would also have hidden real failures.
    try:
        y_pred_score = classifier.decision_function(X_test)
    except AttributeError:
        y_pred_score = classifier.predict_proba(X_test)[:, 1]
    # average_precision_score is already imported at module level.
    average_precision = average_precision_score(y_test, y_pred_score)
    avg_pre_rec_score.append(average_precision)
    # Confusion matrix and precision-recall curve for this model.
    plot_confusion_matrix(classifier.__class__.__name__, y_test, y_pred_test)
    plot_prec_rec_curve(classifier.__class__.__name__, y_test, y_pred_score)
# ### CLASSIFICATION MODEL PERFORMANCE EVALUATION
# In[ ]:
#Creating pandas dataframe with Model and corresponding performance metrics.
# BUG FIX: the 'Test Accuracy' column was previously filled from
# classifier_accuracy_train (the training-set scores); it now uses the
# test-set scores as its label claims.
accuracy_df = pd.DataFrame({'Model':classifier_model_list , 'Cross Val Score':cv_score, 'Test Accuracy' :classifier_accuracy_test , 'Precision':precisionscore, 'Recall':recallscore ,'Avg Precision Recall':avg_pre_rec_score ,'F1 Score':f1score})
# Average of the cross-validation and test accuracies.
accuracy_df['Average_Accuracy'] = (accuracy_df['Cross Val Score'] + accuracy_df['Test Accuracy'] )/ 2
#Arranging the Columns
print("\n*------------------------------ CLASSIFICATION MODEL PERFORMANCE EVALUATION ---------------------*\n")
accuracy_df = accuracy_df[['Model','Cross Val Score', 'Test Accuracy', 'Average_Accuracy','Precision', 'Recall','Avg Precision Recall','F1 Score']] # This will arrange the columns in the order we want
#Sorting the rows so the highest Average_Accuracy comes first
accuracy_df.sort_values('Average_Accuracy', axis=0, ascending=False, inplace=True)
accuracy_df
# #### Observations
#
# 1. Since our dataset classes are imbalanced (one churn class is almost 3 times the size of the other), accuracy is not the right measure; we have to consider Precision, Recall and F1 Score for further evaluation and improvement of the model
#
# 1.1 Precision: A measure of a classifier's exactness. A low precision can also indicate a large number of False Positives.
#
# 1.2 Recall: A measure of a classifier's completeness. A low recall indicates many False Negatives.
#
# 1.3 F1 Score (or F-score): A weighted average or Harmonic Mean of precision and recall.
#
# 2. Logistic Regression (AUC = 0.65) and Adaboost model (AUC = 0.65) looks promising. Let's try to improve the model
# ## Improving our Model: Model Tuning
# ### Grid Search for Logistic Regression Classifier and running with optimized hyperparameters
# In[ ]:
# sklearn.grid_search was deprecated in 0.18 and removed in 0.20;
# GridSearchCV now lives in sklearn.model_selection.
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import fbeta_score, accuracy_score
from sklearn.linear_model import LogisticRegression # Logistic Regression Classifier
# Logistic Regression classifier. liblinear supports both the 'l1' and 'l2'
# penalties searched below (the modern default solver, lbfgs, rejects 'l1').
clf = LogisticRegression(solver='liblinear')
# Hyperparameter grid: regularization strength C and penalty type.
parameters = {'C': np.logspace(0, 4, 10), 'penalty': ['l1', 'l2']}
# Make an fbeta_score scoring object (beta=0.5 weights precision over recall).
scorer = make_scorer(fbeta_score, beta=0.5)
# Perform grid search on the classifier using 'scorer' as the scoring method
# (passed by keyword: scoring is no longer a reliable positional argument).
grid_obj = GridSearchCV(clf, parameters, scoring=scorer)
# Fit the grid search object to the training data and find the optimal parameters
grid_fit = grid_obj.fit(X_train, y_train)
# Get the best estimator found by the search
best_clf = grid_fit.best_estimator_
# View best hyperparameters
#print(grid_fit.best_params_)
# Make predictions using the unoptimized and optimized models
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
# Report the | |
# Source repository: ruby-compiler-survey/pypy
import py, os, sys
from .support import setup_make
currpath = py.path.local(__file__).dirpath()
test_dct = str(currpath.join("datatypesDict.so"))
def setup_module(mod):
    """Build the datatypesDict.so reflection dictionary once before this module's tests run."""
    setup_make("datatypesDict.so")
class AppTestDATATYPES:
spaceconfig = dict(usemodules=['_cppyy', '_rawffi', 'itertools'])
    def setup_class(cls):
        """Publish the dictionary path, the loaded C++ library and N to app-level tests."""
        cls.w_test_dct = cls.space.newtext(test_dct)
        cls.w_datatypes = cls.space.appexec([], """():
            import ctypes, _cppyy
            _cppyy._post_import_startup()
            return ctypes.CDLL(%r, ctypes.RTLD_GLOBAL)""" % (test_dct, ))
        cls.w_N = cls.space.newint(5)  # should be imported from the dictionary
def test01_instance_data_read_access(self):
"""Read access to instance public data and verify values"""
import _cppyy as cppyy
CppyyTestData = cppyy.gbl.CppyyTestData
c = CppyyTestData()
assert isinstance(c, CppyyTestData)
# reading boolean type
assert c.m_bool == False
assert not c.get_bool(); assert not c.get_bool_cr(); assert not c.get_bool_r()
# reading char types
assert c.m_char == 'a'
assert c.m_schar == 'b'
assert c.m_uchar == 'c'
# reading integer types
assert c.m_short == -11; assert c.get_short_cr() == -11; assert c.get_short_r() == -11
assert c.m_ushort == 11; assert c.get_ushort_cr() == 11; assert c.get_ushort_r() == 11
assert c.m_int == -22; assert c.get_int_cr() == -22; assert c.get_int_r() == -22
assert c.m_uint == 22; assert c.get_uint_cr() == 22; assert c.get_uint_r() == 22
assert c.m_long == -33; assert c.get_long_cr() == -33; assert c.get_long_r() == -33
assert c.m_ulong == 33; assert c.get_ulong_cr() == 33; assert c.get_ulong_r() == 33
assert c.m_llong == -44; assert c.get_llong_cr() == -44; assert c.get_llong_r() == -44
assert c.m_ullong == 44; assert c.get_ullong_cr() == 44; assert c.get_ullong_r() == 44
assert c.m_long64 == -55; assert c.get_long64_cr() == -55; assert c.get_long64_r() == -55
assert c.m_ulong64 == 55; assert c.get_ulong64_cr() == 55; assert c.get_ulong64_r() == 55
# reading floating point types
assert round(c.m_float + 66., 5) == 0
assert round(c.get_float_cr() + 66., 5) == 0
assert round(c.get_float_r() + 66., 5) == 0
assert round(c.m_double + 77., 11) == 0
assert round(c.get_double_cr() + 77., 11) == 0
assert round(c.get_double_r() + 77., 11) == 0
assert round(c.m_ldouble + 88., 24) == 0
assert round(c.get_ldouble_cr() + 88., 24) == 0
assert round(c.get_ldouble_r() + 88., 24) == 0
assert round(c.get_ldouble_def() -1., 24) == 0
assert round(c.get_ldouble_def(2) -2., 24) == 0
"""# complex<double> type
assert type(c.get_complex()) == complex
assert round(c.get_complex().real - 99., 11) == 0
assert round(c.get_complex().imag - 101., 11) == 0
assert repr(c.get_complex()) == '(99+101j)'
assert round(c.get_complex_cr().real - 99., 11) == 0
assert round(c.get_complex_cr().imag - 101., 11) == 0
assert round(c.get_complex_r().real - 99., 11) == 0
assert round(c.get_complex_r().imag - 101., 11) == 0
assert complex(cppyy.gbl.std.complex['double'](1, 2)) == complex(1, 2)
# complex<int> retains C++ type in all cases (but includes pythonization to
# resemble Python's complex more closely
assert type(c.get_icomplex()) == cppyy.gbl.std.complex[int]
assert round(c.get_icomplex().real - 121., 11) == 0
assert round(c.get_icomplex().imag - 141., 11) == 0
assert repr(c.get_icomplex()) == '(121+141j)'
assert round(c.get_icomplex_cr().real - 121., 11) == 0
assert round(c.get_icomplex_cr().imag - 141., 11) == 0
assert type(c.get_icomplex_r()) == cppyy.gbl.std.complex[int]
assert round(c.get_icomplex_r().real - 121., 11) == 0
assert round(c.get_icomplex_r().imag - 141., 11) == 0
assert complex(cppyy.gbl.std.complex['int'](1, 2)) == complex(1, 2)"""
# reading of enum types
assert c.m_enum == CppyyTestData.kNothing
assert c.m_enum == c.kNothing
# reading of boolean array
for i in range(self.N):
assert c.m_bool_array[i] == bool(i%2)
assert c.get_bool_array()[i] == bool(i%2)
assert c.m_bool_array2[i] == bool((i+1)%2)
assert c.get_bool_array2()[i] == bool((i+1)%2)
# reading of integer array types
names = ['uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong']
alpha = [ (1, 2), (-1, -2), (3, 4), (-5, -6), (7, 8), (-9, -10), (11, 12)]
for j in range(self.N):
assert getattr(c, 'm_%s_array' % names[i])[i] == alpha[i][0]*i
assert getattr(c, 'get_%s_array' % names[i])()[i] == alpha[i][0]*i
assert getattr(c, 'm_%s_array2' % names[i])[i] == alpha[i][1]*i
assert getattr(c, 'get_%s_array2' % names[i])()[i] == alpha[i][1]*i
# reading of floating point array types
for k in range(self.N):
assert round(c.m_float_array[k] + 13.*k, 5) == 0
assert round(c.m_float_array2[k] + 14.*k, 5) == 0
assert round(c.m_double_array[k] + 15.*k, 8) == 0
assert round(c.m_double_array2[k] + 16.*k, 8) == 0
# out-of-bounds checks
raises(IndexError, c.m_uchar_array.__getitem__, self.N)
raises(IndexError, c.m_short_array.__getitem__, self.N)
raises(IndexError, c.m_ushort_array.__getitem__, self.N)
raises(IndexError, c.m_int_array.__getitem__, self.N)
raises(IndexError, c.m_uint_array.__getitem__, self.N)
raises(IndexError, c.m_long_array.__getitem__, self.N)
raises(IndexError, c.m_ulong_array.__getitem__, self.N)
raises(IndexError, c.m_float_array.__getitem__, self.N)
raises(IndexError, c.m_double_array.__getitem__, self.N)
# can not access an instance member on the class
raises(AttributeError, getattr, CppyyTestData, 'm_bool')
raises(AttributeError, getattr, CppyyTestData, 'm_int')
assert not hasattr(CppyyTestData, 'm_bool')
assert not hasattr(CppyyTestData, 'm_int')
c.__destruct__()
    def test02_instance_data_write_access(self):
        """Test write access to instance public data and verify values"""
        import _cppyy as cppyy
        CppyyTestData = cppyy.gbl.CppyyTestData

        c = CppyyTestData()
        assert isinstance(c, CppyyTestData)

        # boolean types through functions
        c.set_bool(True); assert c.get_bool() == True
        c.set_bool(0); assert c.get_bool() == False

        # boolean types through data members
        c.m_bool = True; assert c.get_bool() == True
        c.set_bool(True); assert c.m_bool == True
        c.m_bool = 0; assert c.get_bool() == False
        c.set_bool(0); assert c.m_bool == False

        # string form: the expression is evaluated lazily by raises()
        raises(ValueError, 'c.set_bool(10)')

        # char types through functions
        c.set_char('c'); assert c.get_char() == 'c'
        c.set_uchar('e'); assert c.get_uchar() == 'e'

        # char types through data members
        c.m_char = 'b'; assert c.get_char() == 'b'
        c.m_char = 40; assert c.get_char() == chr(40)
        c.set_char('c'); assert c.m_char == 'c'
        c.set_char(41); assert c.m_char == chr(41)
        c.m_uchar = 'd'; assert c.get_uchar() == 'd'
        c.m_uchar = 42; assert c.get_uchar() == chr(42)
        c.set_uchar('e'); assert c.m_uchar == 'e'
        c.set_uchar(43); assert c.m_uchar == chr(43)
        # out-of-range / wrong-typed values must be rejected
        raises(ValueError, 'c.set_char("string")')
        raises(ValueError, 'c.set_char(500)')
        raises(ValueError, 'c.set_uchar("string")')
        raises(ValueError, 'c.set_uchar(-1)')

        # integer types: drive each m_<name>/set_<name>/get_<name> trio via
        # getattr/eval so a single loop covers every integer width
        names = ['short', 'ushort', 'int', 'uint', 'long', 'ulong', 'llong', 'ullong']
        for i in range(len(names)):
            setattr(c, 'm_'+names[i], i)
            assert eval('c.get_%s()' % names[i]) == i

        for i in range(len(names)):
            getattr(c, 'set_'+names[i])(2*i)
            assert eval('c.m_%s' % names[i]) == 2*i

        for i in range(len(names)):
            getattr(c, 'set_'+names[i]+'_cr')(3*i)
            assert eval('c.m_%s' % names[i]) == 3*i

        # float types through functions
        c.set_float(0.123); assert round(c.get_float() - 0.123, 5) == 0
        c.set_double(0.456); assert round(c.get_double() - 0.456, 8) == 0
        c.set_ldouble(0.789); assert round(c.get_ldouble() - 0.789, 8) == 0

        # float types through data members
        c.m_float = 0.123; assert round(c.get_float() - 0.123, 5) == 0
        c.set_float(0.234); assert round(c.m_float - 0.234, 5) == 0
        c.set_float_cr(0.456); assert round(c.m_float - 0.456, 5) == 0
        c.m_double = 0.678; assert round(c.get_double() - 0.678, 8) == 0
        c.set_double(0.890); assert round(c.m_double - 0.890, 8) == 0
        c.set_double_cr(0.012); assert round(c.m_double - 0.012, 8) == 0
        c.m_ldouble = 0.876; assert round(c.get_ldouble() - 0.876, 8) == 0
        c.set_ldouble(0.098); assert round(c.m_ldouble - 0.098, 8) == 0
        c.set_ldouble_cr(0.210); assert round(c.m_ldouble - 0.210, 8) == 0

        # arrays; there will be pointer copies, so destroy the current ones
        c.destroy_arrays()

        # integer arrays
        names = ['uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong']
        import array
        a = range(self.N)
        atypes = ['B', 'h', 'H', 'i', 'I', 'l', 'L']  # array typecodes matching names
        for j in range(len(names)):
            b = array.array(atypes[j], a)
            setattr(c, 'm_'+names[j]+'_array', b) # buffer copies
            for i in range(self.N):
                assert eval('c.m_%s_array[i]' % names[j]) == b[i]

            setattr(c, 'm_'+names[j]+'_array2', b) # pointer copies
            assert 3 < self.N
            b[3] = 28
            for i in range(self.N):
                assert eval('c.m_%s_array2[i]' % names[j]) == b[i]

        # can not write to constant data
        assert c.m_const_int == 17
        raises(TypeError, setattr, c, 'm_const_int', 71)

        c.__destruct__()
    def test03_array_passing(self):
        """Test passing of array arguments"""
        import _cppyy as cppyy, array, sys

        CppyyTestData = cppyy.gbl.CppyyTestData

        c = CppyyTestData()
        assert isinstance(c, CppyyTestData)

        a = range(self.N)
        # test arrays in mixed order, to give overload resolution a workout
        for t in ['d', 'i', 'f', 'H', 'I', 'h', 'L', 'l']:
            b = array.array(t, a)

            # typed passing
            ca = c.pass_array(b)
            assert type(ca[0]) == type(b[0])
            assert len(b) == self.N
            for i in range(self.N):
                assert ca[i] == b[i]

            # void* passing: dispatch to the per-typecode overload by name
            ca = eval('c.pass_void_array_%s(b)' % t)
            assert type(ca[0]) == type(b[0])
            assert len(b) == self.N
            for i in range(self.N):
                assert ca[i] == b[i]

        # NULL/nullptr passing (will use short*)
        assert not c.pass_array(0)
        raises(Exception, c.pass_array(0).__getitem__, 0) # raises SegfaultException
        assert raises(TypeError, c.pass_array, None)
        assert not c.pass_array(cppyy.nullptr)
        raises(Exception, c.pass_array(cppyy.nullptr).__getitem__, 0) # id. id.

        c.__destruct__()
def test04_class_read_access(self):
"""Test read access to class public data and verify values"""
import _cppyy as cppyy, sys
CppyyTestData = cppyy.gbl.CppyyTestData
c = CppyyTestData()
assert isinstance(c, CppyyTestData)
# char types
assert CppyyTestData.s_char == 'c'
assert c.s_char == 'c'
assert c.s_uchar == 'u'
assert CppyyTestData.s_uchar == 'u'
# integer types
assert CppyyTestData.s_short == -101
assert c.s_short == -101
assert c.s_ushort == 255
assert CppyyTestData.s_ushort == 255
assert CppyyTestData.s_int == -202
assert c.s_int == -202
assert c.s_uint == 202
assert CppyyTestData.s_uint == 202
assert CppyyTestData.s_long == -303
assert c.s_long == -303
assert c.s_ulong == 303
assert CppyyTestData.s_ulong | |
"""
Tests the inference module.
"""
# pylint: disable=protected-access
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# pylint: disable=too-many-public-methods
import unittest
from numpy.testing import assert_array_almost_equal
import numpy as np
from pyugm.factor import DiscreteFactor, DiscreteBelief
from pyugm.infer import Inference
from pyugm.infer_message import LoopyBeliefUpdateInference
from pyugm.infer_message import FloodingProtocol
from pyugm.infer_message import DistributeCollectProtocol
from pyugm.infer_message import LoopyDistributeCollectProtocol
from pyugm.infer_message import multiply
from pyugm.infer_message import ExhaustiveEnumeration
from pyugm.model import Model
from pyugm.tests.test_utils import GraphTestCase
class TestFactorMultiplication(unittest.TestCase):
    """Tests for in-place factor multiplication/division via `multiply`.

    Code is Python 2 (bare `print` statements) and kept unchanged; only
    documentation has been added.
    """

    def test_multiply_small_inplace(self):
        # Multiply a 2x2 belief by a single-variable factor in place.
        data = np.array([[1, 2],
                         [5, 6]])
        af = DiscreteFactor([(0, 2), (1, 2)], data=data)
        a = DiscreteBelief(af)
        data = np.array([2, 3])
        bf = DiscreteFactor([(1, 2)], data=data)
        b = DiscreteBelief(bf)
        # Expected result: b is broadcast along variable 1's axis.
        data = np.array([[2, 6],
                         [10, 18]])
        c = DiscreteFactor([(0, 2), (1, 2)], data=data)

        multiply(a, b)

        print a.data
        print c.data
        print a.data.shape
        print c.data.shape
        print af.data
        self.assertEqual(a.variables, c.variables)
        self.assertEqual(a.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(a.data, c.data)

    def test_multiply_small_a(self):
        # Multiply along the first axis (variable 0) rather than the second.
        data = np.array([[1, 2],
                         [5, 6]], dtype='float64')
        af = DiscreteFactor([(0, 2), (1, 2)], data=data)
        a = DiscreteBelief(af)
        data = np.array([2, 3])
        e = DiscreteFactor([(0, 2)], data=data)
        # Expected result: each row of `a` is scaled by e's entry for that row.
        data = np.array([[1 * 2, 2 * 2],
                         [5 * 3, 6 * 3]])
        f = DiscreteFactor([(0, 2), (1, 2)], data=data)

        multiply(a, e)

        print 'a', a.data
        print 'e', e.data
        print
        print f.data
        print a.data.shape
        print f.data.shape
        self.assertEqual(a.variables, f.variables)
        self.assertEqual(a.axis_to_variable, f.axis_to_variable)
        assert_array_almost_equal(a.data, f.data)

    def test_multiply_larger(self):
        # Three-variable factor multiplied by a factor over a variable subset.
        data = np.array([[[2, 1, 2],
                          [3, 7, 4]],
                         [[1, 1, 3],
                          [4, 9, 10]]])
        af = DiscreteFactor([(0, 2), (3, 2), (12, 3)], data=data)
        a = DiscreteBelief(af)
        data = np.array([[2, 3, 1],
                         [5, 1, 7]])
        b = DiscreteFactor([(0, 2), (12, 3)], data=data)
        # Expected result: b broadcast over the middle axis (variable 3).
        data = np.array([[[2 * 2, 1 * 3, 2 * 1],
                          [3 * 2, 7 * 3, 4 * 1]],
                         [[1 * 5, 1 * 1, 3 * 7],
                          [4 * 5, 9 * 1, 10 * 7]]])
        c = DiscreteFactor([(0, 2), (3, 2), (12, 3)], data=data)

        multiply(a, b)

        print a.data
        print c.data
        print a.data.shape
        print c.data.shape
        self.assertEqual(a.variables, c.variables)
        self.assertEqual(a.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(a.data, c.data)

    def test_multiply_larger_correct_order(self):
        # Same as test_multiply_larger, but b lists its variables in a
        # different order; multiply must align axes by variable, not position.
        data = np.array([[[2, 1, 2],
                          [3, 7, 4]],
                         [[1, 1, 3],
                          [4, 9, 10]]])
        a = DiscreteFactor([(0, 2), (3, 2), (12, 3)], data=data)
        data = np.array([[2, 5],
                         [3, 1],
                         [1, 7]])
        b = DiscreteFactor([(12, 3), (0, 2)], data=data)
        data = np.array([[[2 * 2, 1 * 3, 2 * 1],
                          [3 * 2, 7 * 3, 4 * 1]],
                         [[1 * 5, 1 * 1, 3 * 7],
                          [4 * 5, 9 * 1, 10 * 7]]])
        c = DiscreteFactor([(0, 2), (3, 2), (12, 3)], data=data)

        multiply(a, b)

        print a.data
        print c.data
        print a.data.shape
        print c.data.shape
        self.assertEqual(a.variables, c.variables)
        self.assertEqual(a.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(a.data, c.data)

    def test_divide_small(self):
        # Division is multiply(..., divide=True): element-wise with broadcast.
        a = DiscreteFactor([(0, 2), (1, 2)], data=np.array([[1.0, 2], [5, 6]]))
        b = DiscreteFactor([(1, 2)], data=np.array([2.0, 3]))
        data = np.array([[1.0 / 2.0, 2.0 / 3.0],
                         [5.0 / 2.0, 6.0 / 3.0]])
        c = DiscreteFactor([(0, 2), (1, 2)], data=data)

        multiply(a, b, divide=True)

        print a.data
        print c.data
        print a.data.shape
        print c.data.shape
        self.assertEqual(a.variables, c.variables)
        self.assertEqual(a.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(a.data, c.data)
class TestBeliefUpdateInference(GraphTestCase):
    def test_set_up_separators(self):
        # Separator potentials on both directions of an edge must start out as
        # uniform (all-ones) factors over the shared variable (2, 2).
        a = DiscreteFactor([(0, 2), (1, 2), (2, 2)])
        # NOTE(review): variable (3, 2) is listed twice here -- possibly a typo
        # for (4, 2); confirm against the intended graph before changing.
        b = DiscreteFactor([(2, 2), (3, 2), (3, 2)])
        model = Model([a, b])
        inference = LoopyBeliefUpdateInference(model)

        s = DiscreteFactor([(2, 2)])
        print inference._separator_potential
        forward_edge = list(model.edges)[0]
        forward_and_backward_edge = [forward_edge, (forward_edge[1], forward_edge[0])]
        for edge in forward_and_backward_edge:
            separator_factor = inference._separator_potential[edge]
            self.assertSetEqual(separator_factor.variable_set, s.variable_set)
            self.assertDictEqual(separator_factor.cardinalities, s.cardinalities)
            assert_array_almost_equal(separator_factor.data, s.data)
def test_update_beliefs_small(self):
a = DiscreteFactor([0, 1])
b = DiscreteFactor([1, 2])
model = Model([a, b])
update_order1 = FloodingProtocol(model=model, max_iterations=2)
inference = LoopyBeliefUpdateInference(model, update_order1)
# 0
# 0 1
# Phi* = Sum_{0} 1 0 [ 1 1 ] = 1 0 [ 2 ]
# 1 [ 1 1 ] 1 [ 2 ]
#
# 1 1
# Psi* = Phi* x Psi = 1 0 [2] x 2 0 [ 1 1 ] = 2 0 [ 2 2 ]
# Phi 1 [2] 1 [ 1 1 ] 1 [ 2 2 ]
#
# 1 1
# Phi** = Sum_{2} 2 0 [ 2 2 ] = [ 4 4 ]
# 1 [ 2 2 ]
#
# 1 0 0
# Psi** = Phi** x Psi = [ 2 2 ] x 1 0 [ 1 1 ] = 1 0 [ 2 2 ]
# Phi* 1 [ 1 1 ] 1 [ 2 2 ]
#
# 1
# Phi*** = [ 4 4 ]
# 1
# Psi*** = Phi*** x Psi* = 2 0 [ 2 2 ]
# Phi** 1 [ 2 2 ]
#
inference.calibrate()
#update_order2 = FloodingProtocol(model=model, max_iterations=3)
#change1, iterations1 = inference.calibrate(update_order2)
#print 'changes:', change0, change1, 'iterations:', iterations0, iterations1
final_a_data = np.array([[2, 2],
[2, 2]], dtype='f64') / 8.0
final_b_data = np.array([[2, 2],
[2, 2]], dtype='f64') / 8.0
belief_a = inference.beliefs[a]
assert_array_almost_equal(final_a_data, belief_a.normalized_data)
belief_b = inference.beliefs[b]
assert_array_almost_equal(final_b_data, belief_b.normalized_data)
def test_update_beliefs_disconnected(self):
a = DiscreteFactor([(1, 2), (2, 2)], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
b = DiscreteFactor([(2, 2), (3, 2)], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
c = DiscreteFactor([(4, 2), (5, 2)], data=np.array([[5, 6], [8, 9]], dtype=np.float64))
d = DiscreteFactor([(5, 2), (6, 2)], data=np.array([[1, 6], [2, 3]], dtype=np.float64))
e = DiscreteFactor([(7, 2), (8, 2)], data=np.array([[2, 1], [2, 3]], dtype=np.float64))
model = Model([a, b, c, d, e])
for factor in model.factors:
print 'before', factor, np.sum(factor.data)
update_order = DistributeCollectProtocol(model)
inference = LoopyBeliefUpdateInference(model, update_order=update_order)
exact_inference = ExhaustiveEnumeration(model)
exhaustive_answer = exact_inference.calibrate().belief
print 'Exhaust', np.sum(exhaustive_answer.data)
change = inference.calibrate()
print change
for factor in model.factors:
print factor, np.sum(factor.data)
for variable in model.variables:
marginal_beliefs = inference.get_marginals(variable)
true_marginal = exhaustive_answer.marginalize([variable])
for marginal in marginal_beliefs:
assert_array_almost_equal(true_marginal.normalized_data, marginal.normalized_data)
expected_ln_Z = np.log(exhaustive_answer.data.sum())
self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation())
def test_belief_update_larger_tree(self):
a = DiscreteFactor([0, 1], data=np.array([[1, 2], [2, 2]], dtype=np.float64))
b = DiscreteFactor([1, 2], data=np.array([[3, 2], [1, 2]], dtype=np.float64))
c = DiscreteFactor([2, 3], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
d = DiscreteFactor([3], data=np.array([2, 1], dtype=np.float64))
e = DiscreteFactor([0], data=np.array([4, 1], dtype=np.float64))
f = DiscreteFactor([2], data=np.array([1, 2], dtype=np.float64))
#
# a{0 1} - b{1 2} - c{2 3} - d{3}
# | |
# e{0} f{2}
#
model = Model([a, b, c, d, e, f])
print 'edges', model.edges
update_order = DistributeCollectProtocol(model)
inference = LoopyBeliefUpdateInference(model, update_order=update_order)
exact_inference = ExhaustiveEnumeration(model)
exhaustive_answer = exact_inference.calibrate().belief
print 'bp'
change = inference.calibrate()
print change
for factor in model.factors:
print factor
for variable in model.variables:
marginal_beliefs = inference.get_marginals(variable)
true_marginal = exhaustive_answer.marginalize([variable])
for marginal in marginal_beliefs:
assert_array_almost_equal(true_marginal.normalized_data, marginal.normalized_data)
expected_ln_Z = np.log(exhaustive_answer.data.sum())
self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation())
def test_belief_update_long_tree(self):
label_template = np.array([['same', 'different'],
['different', 'same']])
observation_template = np.array([['obs_low'] * 32,
['obs_high'] * 32])
observation_template[0, 13:17] = 'obs_high'
observation_template[1, 13:17] = 'obs_low'
N = 2
pairs = [DiscreteFactor([(i, 2), (i + 1, 2)], parameters=label_template) for i in xrange(N - 1)]
obs = [DiscreteFactor([(i, 2), (i + N, 32)], parameters=observation_template) for i in xrange(N)]
repe = [16., 16., 14., 13., 15., 16., 14., 13., 15., 16., 15.,
13., 14., 16., 16., 15., 13., 13., 14., 14., 13., 14.,
14., 14., 14., 14., 14., 14., 14., 14., 14., 14., 14.,
14., 14., 14., 14., 14., 14., 14., 14., 9., 4., 4.,
4., 4., 5., 3., 2., 3., 2., 3., 3., 3., 3.,
3., 3., 3., 3., 4., 4., 5., 5., 5.]
evidence = dict((i + N, 0 if repe[i % len(repe)] >= 13 and repe[i % len(repe)] < 17 else 1) for i in xrange(N))
model = Model(pairs + obs)
parameters = {'same': 2.0, 'different': -1.0, 'obs_high': 0.0, 'obs_low': -0.0}
update_order = FloodingProtocol(model, max_iterations=4)
inference = LoopyBeliefUpdateInference(model, update_order=update_order)
inference.calibrate(evidence, parameters)
exact_inference = ExhaustiveEnumeration(model)
exhaustive_answer = exact_inference.calibrate(evidence, parameters).belief
for i in xrange(N):
expected_marginal = exhaustive_answer.marginalize([i])
for actual_marginal in inference.get_marginals(i):
print i
print expected_marginal.normalized_data
print actual_marginal.normalized_data
assert_array_almost_equal(expected_marginal.normalized_data, actual_marginal.normalized_data)
expected_ln_Z = np.log(exhaustive_answer.data.sum())
self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation())
class TestLoopyBeliefUpdateInference(GraphTestCase):
def test_loopy_distribute_collect(self):
a = DiscreteFactor([0, 1], data=np.array([[1, 2], [2, 2]], dtype=np.float64))
b = DiscreteFactor([1, 2], data=np.array([[3, 2], [1, 2]], dtype=np.float64))
c = DiscreteFactor([2, 0], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
#
# a{0 1} - b{1 2}
# \ /
# c{2 0}
#
# a{0 1} - {0} - c{2 0}
#
#
#
#
model = Model([a, b, c])
update_order = LoopyDistributeCollectProtocol(model, max_iterations=40)
inference = LoopyBeliefUpdateInference(model, update_order=update_order)
inference.calibrate()
exact_inference = ExhaustiveEnumeration(model)
exhaustive_answer = exact_inference.calibrate().belief
for factor in model.factors:
print factor, np.sum(factor.data)
for var in model.variables_to_factors.keys():
print var, exhaustive_answer.marginalize([var]).data
print
for var in model.variables_to_factors.keys():
| |
None ) :
o0OO0O0OO0oO0 = ( "sudo iptables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
o0OO0O0OO0oO0 += ( "sudo iptables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill; " )
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
o0OO0O0OO0oO0 += ( "sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
o0OO0O0OO0oO0 += ( "sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill" )
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
os . system ( o0OO0O0OO0oO0 )
Oo = lisp . bold ( "virtio" , False )
lisp . lprint ( "{} bug workaround, configure '{}'" . format ( Oo , o0OO0O0OO0oO0 ) )
if 66 - 66: i11iIiiIii * iIii1I11I1II1 % OoooooooOO
return
if 5 - 5: OoOoOO00 % OoooooooOO
if 60 - 60: OoOoOO00 . i1IIi % OoO0O00 % ooOoO0o % OOooOOo
if 33 - 33: iIii1I11I1II1 - Ii1I * I1ii11iIi11i % iIii1I11I1II1 + OoO0O00 . OOooOOo
if 56 - 56: i11iIiiIii * iII111i . oO0o
if 78 - 78: OoOoOO00
if 1 - 1: OOooOOo . IiII
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
def o00oOo0oOoo(sources, dyn_eids, l2_overlay, pitr):
    """Build the pcap filter expression used to capture EID-sourced packets.

    For an L2 overlay a trivial match-all MAC filter is returned. Otherwise
    the filter excludes ARP, restricts capture to the configured source
    EID-prefixes, excludes packets destined to the non-dynamic EID-prefixes
    and to this host's own addresses, and then re-admits LISP Info-Reply
    (UDP source port 4342) and multicast map-request packets.
    """
    if l2_overlay:
        mac_filter = "ether[6:4] >= 0 and ether[10:2] >= 0"
        lisp.lprint("Using pcap filter: '{}'".format(mac_filter))
        return (mac_filter)

    no_arp = "(not ether proto 0x806)"
    info_replies = " or (udp src port 4342 and ip[28] == 0x28)"
    mcast_requests = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"

    # Build an "or"-joined list of every source EID-prefix, plus a second
    # list restricted to the non-dynamic prefixes (used for the destination
    # exclusion below).
    source_nets = ""
    static_nets = ""
    for prefix in sources:
        source_nets += "{}".format(prefix)
        if prefix not in dyn_eids:
            static_nets += "{}".format(prefix)
        if sources[-1] == prefix:
            break
        source_nets += " or "
        if prefix not in dyn_eids:
            static_nets += " or "

    # If the last prefix was dynamic, a dangling " or " is left behind.
    if static_nets[-4::] == " or ":
        static_nets = static_nets[0:-4]

    # When lisp-nat is configured, keep the loopback address out of the
    # local-address exclusion so those packets are still captured.
    # NOTE(review): the grep-output test (first char is a space) mirrors the
    # original; confirm it matches the lisp.config formatting.
    nat_output = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
    nat_enabled = (nat_output != "" and nat_output[0] == " ")
    loopback = lisp.lisp_get_loopback_address() if nat_enabled else None

    local_addrs = ""
    all_addresses = lisp.lisp_get_all_addresses()
    for address in all_addresses:
        if address == loopback:
            continue
        local_addrs += "{}".format(address)
        if all_addresses[-1] == address:
            break
        local_addrs += " or "

    if source_nets != "":
        source_nets = " and (src net {})".format(source_nets)
    if static_nets != "":
        static_nets = " and not (dst net {})".format(static_nets)
    if local_addrs != "":
        local_addrs = " and not (dst host {})".format(local_addrs)

    # A PITR wants everything regardless of destination: drop the dst-net
    # exclusion and match the local addresses as either src or dst.
    if pitr:
        static_nets = ""
        local_addrs = local_addrs.replace("dst ", "")

    pfilter = no_arp + source_nets + static_nets + local_addrs
    pfilter += info_replies
    pfilter += mcast_requests

    lisp.lprint("Using pcap filter: '{}'".format(pfilter))
    return (pfilter)
if 37 - 37: IiII
if 37 - 37: Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
def iIi1(device, pfilter, pcap_lock):
    """Capture packets on `device` and dispatch each one to i1iI1().

    The lock serializes the pcappy.open_live() calls made by concurrent
    capture threads; the loop count of -1 requests an unbounded capture loop.
    """
    lisp.lisp_set_exception()

    # NOTE: deliberately plain acquire/release (no try/finally) to preserve
    # the original behavior if open_live() raises.
    pcap_lock.acquire()
    capture_handle = pcappy.open_live(device, 9000, 0, 100)
    pcap_lock.release()

    capture_handle.filter = pfilter
    capture_handle.loop(-1, i1iI1, device)
    return
if 55 - 55: OOooOOo % OOooOOo / O0 % iII111i - o0oOOo0O0Ooo . Oo0Ooo
if 49 - 49: iIii1I11I1II1 * i1IIi . OoooooooOO
if 90 - 90: o0oOOo0O0Ooo % I1ii11iIi11i - iIii1I11I1II1 % OoOoOO00
if 8 - 8: OoOoOO00 * Oo0Ooo / IiII % Ii1I - I1IiiI
if 71 - 71: iII111i
if 23 - 23: i1IIi . iIii1I11I1II1 . OOooOOo . O0 % Ii1I % i11iIiiIii
if 11 - 11: O0 - II111iiii . OOooOOo . Ii1I % I1Ii111
if 21 - 21: Oo0Ooo / iII111i . I1Ii111 * OoooooooOO + I11i - i1IIi
if 58 - 58: I1ii11iIi11i
def ii1I ( ) :
global I11
global II1Ii1iI1i
global II1iII1i
if 98 - 98: i1IIi
lisp . lisp_set_exception ( )
if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
if 12 - 12: iIii1I11I1II1 . Ii1I . I1ii11iIi11i % I1IiiI . II111iiii . oO0o
if 32 - 32: I1ii11iIi11i + IiII / O0 / OoOoOO00 * OoooooooOO % ooOoO0o
if 50 - 50: OoO0O00
if 66 - 66: iIii1I11I1II1
I11II1i11 = [ II1Ii1iI1i , II1Ii1iI1i ,
oO0oIIII ]
lisp . lisp_build_info_requests ( I11II1i11 , None , lisp . LISP_CTRL_PORT )
if 28 - 28: II111iiii - oO0o % OoOoOO00 + OoO0O00 - OoOoOO00
if 28 - 28: II111iiii . oO0o + O0 . O0 . OOooOOo
if 98 - 98: OoooooooOO % O0 - O0
if 76 - 76: i1IIi % OoOoOO00 - I1IiiI / o0oOOo0O0Ooo * ooOoO0o
I11 . cancel ( )
I11 = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
ii1I , [ | |
find the same functionality in tehir python version)
# check the new versions of these libraries to see if they added more capabilities
# if coords1==coords2: print('same....')
t0 = datetime.datetime.now()
if job_num==-1:
job_num = 0
parallel = False
else:
parallel = True
tqdm_kwargs['position'] = job_num
# if job_num!=0:
# verbose=False
# silent=True
if job_num != 0:
silent = True
skip_busypal = -1 if show_progress else 1
if silent:
verbose=0
skip_busypal = 2
disable_tqdm = True
else:
disable_tqdm = False
# print('session.viewedonscreen()',session.viewedonscreen())
idx1, idx2 = search_around_sky(coords=coords, coords1=coords1, coords2=coords2, seplimit=Angle(f'{linking_length}s'), storekdtree=storekdtree, verbose=verbose, show_progress=show_progress, silent=silent, tqdm_kwargs=tqdm_kwargs) #coords1.search_around_sky(coords2, Angle(f'{linking_length}s'))
if verbose:
pass
# print(f'kdtree done in about {str(datetime.timedelta(seconds=round((datetime.datetime.now()-t0).seconds)))} hms.')
# multoprocessing has barrier as well:
# 'waiting for all processes to catch up'
# idx1 += coords1_idxshift
# idx2 += coords2_idxshift
graph_edges = set((a,b) if a<b else (b,a) for a,b in zip(idx1, idx2) if a!=b) # delete duplicates and 1:1 singles (It is important that a<b is enforced for networkit nnodes finder to work)
# gds = GraphDataStructure(graph_lib) # num_threads=4 # for networkit only
# graph = gds.build_graph_from_edges(graph_edges, verbose=verbose)
num_objects_chunk = len(coords) if coords is not None else len(coords1)+len(coord2)
with BusyPal('Building the representative graph/network', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}', skip=skip_busypal):
gds = GraphDataStructure(graph_lib, num_threads=num_threads) # threads are for networkit only
#with BusyPal(f'Building the graph using the {graph_lib} library'):
final_graph = gds.build_graph_from_edges(graph_edges, verbose=False, nnodes=num_objects_chunk)
clusters = find_clusters(final_graph, graph_lib=graph_lib, verbose=False)
nclusters = len(clusters) #max(chain.from_iterable(seq))
starting_id = (job_num+2)*num_objects+coords_idxshift # make them very far from each other (absolutely no chance of a conflict)
group_ids = np.arange(starting_id, starting_id+num_objects_chunk)
linked_mask = np.zeros(num_objects_chunk, dtype=bool)
del tqdm_kwargs['desc']
if overidx is not None:
overidx = set(overidx) # makes the lookups very fast!
for idx, cluster in enumerate(tqdm(clusters, total=nclusters, desc='Finding connected components of the '+'graphs and shared components' if parallel else 'graph', disable=disable_tqdm, **tqdm_kwargs)):
if any(gidx in overidx for gidx in cluster): # if any of the connected components has a foot on the overlap region that whole group should be involved in stitching later
# print('yes!')
linked_mask[cluster] = True
group_ids[cluster] = idx+coords_idxshift
# for galaxy_idx in cluster:
# group_ids[galaxy_idx] = idx+coords_idxshift
del clusters
if verbose:
print('\r\r'+cl.stylize('✔', cl.fg('green')+cl.attr('bold'))+' Assigned group ids for each chunk by using connected components of the '+'graphs' if parallel else 'by using connected components of the graph')
return group_ids, linked_mask
else: # it might be parallel but it is not using linked_mask
for idx, cluster in enumerate(tqdm(clusters, total=nclusters, desc='Finding connected components of the '+'graphs' if parallel else 'graph', disable=disable_tqdm, **tqdm_kwargs)):
group_ids[cluster] = idx+coords_idxshift
# for galaxy_idx in cluster:
# group_ids[galaxy_idx] = idx+coords_idxshift
del clusters
if verbose:
print('\r\r'+cl.stylize('✔', cl.fg('green')+cl.attr('bold'))+' Assigned group ids for each chunk' if parallel else ' Assigned group ids')
return group_ids
# idx_isolated = (group_ids==-1)
# with BusyPal('np.arange'):
# group_ids[idx_isolated] = np.arange(nclusters, nclusters+idx_isolated.sum())
# print('***',group_ids)
# return group_ids, linked_mask
# @busy('Clustering')
def find_clusters(graphs, graph_lib='igraph', verbose=True):
    """Return the connected components of `graphs`.

    `graphs` may be a single graph or a list/tuple of graphs; in the latter
    case GraphDataStructure.cluster() merges them internally before
    extracting components. Isolated points are intentionally kept.
    """
    started = datetime.datetime.now()
    structure = GraphDataStructure(graph_lib)  # num_threads would only matter for networkit
    clusters = structure.cluster(graphs, verbose=verbose)
    if verbose:
        print(f'clustering done in {str(datetime.timedelta(seconds=round((datetime.datetime.now()-started).seconds)))} hms.')
    return clusters
# # modified from https://stackoverflow.com/questions/56120273/quicker-way-to-implement-numpy-isin-followed-by-sum
# def fast_isin_int(A,a): # at least 2X faster than np.isin for integers (numpy might make it's np.isin ~10X faster in the future, watch its github)
# # suitable for arrays containing small integers like less than 1e7
# grid = np.zeros(max(np.max(A),np.max(a))+1, bool)
# grid[a] = True
# return grid[A]
@busy('Mosaicking data', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}')  # TODO: make it parallel
def get_mosaic_sets(coords=None, coords1=None, coords2=None, linking_length=None, wcs=None, mode='all', nside_mosaics=None, njobs=None, overlap=1.0, use_linked_mask=False):
    """Partition `coords` into overlapping rectangular mosaics and group the
    filled mosaics into `njobs` chunks for parallel matching.

    :param coords: sky coordinates to partition (internal-matching mode).
    :param coords1: reserved for cross-matching mode (not implemented).
    :param coords2: reserved for cross-matching mode (not implemented).
    :param linking_length: linking length; also sets the mosaic overlap width.
    :param wcs: WCS used by radec2xy to project ra/dec onto the image plane.
    :param mode: projection mode forwarded to radec2xy.
    :param nside_mosaics: histogram bins per axis; derived from njobs if None.
    :param njobs: number of chunks the filled mosaics are split into.
    :param overlap: overlap width in units of linking_length (1.0 suffices to
        recover pairs that cross mosaic boundaries).
    :param use_linked_mask: additionally record, per chunk, which entries lie
        in the overlap margins (used later for stitching group ids).
    :return: (coords_chunks, refidx_chunks, overidx_chunks); refidx entries
        index the input catalog, overidx entries index within each chunk
        (or None per chunk when use_linked_mask is False).
    :raises NotImplementedError: if called in cross-matching mode.
    """
    # x and y are projections of ra/dec onto the image plane, used only for
    # the spatial split.
    x, y = radec2xy(coords, wcs=wcs, mode=mode)
    if nside_mosaics is None:
        nside_mosaics = int(2*np.sqrt(njobs))  # so each job handles a few mosaics
    H, xedges, yedges = np.histogram2d(x, y, bins=nside_mosaics)
    idx_filled_mosaics = np.where(H>0)
    num_filled_mosaics = len(idx_filled_mosaics[0])
    xbounds, ybounds, refidx_inside, refidx_overlap = [], [], [], []
    for idx_x, idx_y in zip(*idx_filled_mosaics):
        xbounds.append([xedges[idx_x], xedges[idx_x+1]])
        ybounds.append([yedges[idx_y], yedges[idx_y+1]])
    for xbound, ybound in zip(xbounds, ybounds):
        # Grow each mosaic by overlap/2 linking lengths per side so every
        # boundary-crossing pair is fully contained in at least one mosaic.
        x0 = xbound[0]-linking_length*float(overlap)/2.0
        x1 = xbound[1]+linking_length*float(overlap)/2.0
        y0 = ybound[0]-linking_length*float(overlap)/2.0
        y1 = ybound[1]+linking_length*float(overlap)/2.0
        cx0 = x>=x0
        cx1 = x<=x1 if x1==xedges[-1] else x<x1  # inclusive only at the outermost edge
        cy0 = y>=y0
        cy1 = y<=y1 if y1==yedges[-1] else y<y1
        refidx_inside.append(np.where(cx0 & cx1 & cy0 & cy1)[0])
        if use_linked_mask:
            # Also record the margin this mosaic shares with its neighbors
            # (needed when stitching per-chunk group ids back together).
            x0_p = xbound[0]+linking_length*float(overlap)/2.0
            x1_p = xbound[1]-linking_length*float(overlap)/2.0
            y0_p = ybound[0]+linking_length*float(overlap)/2.0
            y1_p = ybound[1]-linking_length*float(overlap)/2.0
            cx0_p = x<=x0_p
            cx1_p = x1_p<=x
            cy0_p = y<=y0_p
            cy1_p = y1_p<=y
            refidx_overlap.append(np.where( (cx0 & cx1 & cy0 & cy1) & (cx0_p | cx1_p | cy0_p | cy1_p) )[0])
    # A chunk consists of one or more mosaics assigned to a single job.
    idx_mosaics_for_chunks = np.array_split(range(num_filled_mosaics), njobs)
    coords_chunks, refidx_chunks = [], []
    overidx_chunks = [] if use_linked_mask else [None]*njobs
    for idx_mosaics in idx_mosaics_for_chunks:
        refidx_inside_unified = np.array([], dtype=np.int64)
        if use_linked_mask:
            refidx_overlap_unified = np.array([], dtype=np.int64)
        for m in idx_mosaics:
            refidx_inside_unified = np.append(refidx_inside_unified, refidx_inside[m])
            if use_linked_mask:
                refidx_overlap_unified = np.append(refidx_overlap_unified, refidx_overlap[m])
        refidx_inside_unified = np.unique(refidx_inside_unified)  # mosaics overlap, so drop duplicates
        coords_chunks.append(coords[refidx_inside_unified])
        refidx_chunks.append(refidx_inside_unified)
        if use_linked_mask:
            refidx_overlap_unified = np.unique(refidx_overlap_unified)
            # Translate catalog indices into positions within this chunk's arrays.
            idx_overlap_unified = np.where(np.isin(refidx_inside_unified, refidx_overlap_unified))[0]
            overidx_chunks.append(idx_overlap_unified)
    if coords is not None:
        return coords_chunks, refidx_chunks, overidx_chunks  # refidx index into the main catalog
    # BUG FIX: this branch previously returned the undefined names
    # `coords1_chunks`/`coords2_chunks` (a guaranteed NameError, marked
    # FIXME in the original); fail explicitly until cross-matching chunking
    # is implemented.
    raise NotImplementedError(
        'get_mosaic_sets() currently supports only internal matching via '
        '`coords`; chunking for cross-matching (coords1/coords2) is not '
        'implemented yet')
def fastmatch(coords=None, coords1=None, coords2=None,linking_length=None, periodic_box_size=None,
reassign_group_indices=True, njobs=1, overlap=1.0, graph_lib='igraph',
num_threads=None, storekdtree=True, use_linked_mask=False,
verbose=1, show_progress=True, silent=False, **tqdm_kwargs):
'''
use_linked_mask: bool
An experimental feature that generates a mask to be applied to the arrays before stitching.
This reduces the time to create a graph in strich_group_ids() but might have some amount of overhead
(it can be negligible or a bit significant depending on the data) while making the mask through
get_mosaics() and get_group_ids(). Experiment it with your data.
overlap: 1 should be enough to compensate for lost pairs that cross the boundaries (or maybe 1.01 just in case).
'''
# - define aliass for graph libraries names
if graph_lib=='nk':
graph_lib='networkit'
elif graph_lib=='nx':
graph_lib='networkx'
elif graph_lib=='ig':
graph_lib='igraph'
if use_linked_mask and graph_lib=='networkx':
raise ValueError('TODO: The `networkx` graph library does not give the right results with use_linked_mask=True. Use `networkit` and `igraph` libraries instead if you would like to set use_linked_mask=True.')
# if num_threads is None:
# num_threads = njobs
if coords is None and None in (coords1, coords2):
raise ValueError('either pass `coords` for internal matching or a pair of coordinate lists/arrays (`coords1` and `coords2`) for cross-matching')
elif (coords1 is not None and coords2 is not | |
"""
The :class:`Structure` object is a collection of atoms in a periodic box.
The mandatory inputs are the cell vectors of the box and the chemical species
and *Cartesian coordinates* of the atoms.
The atoms are automatically folded back into the primary cell, so the
input coordinates don't need to lie inside the box.
"""
from typing import List, Union, Any
from json import dumps, loads
from abc import abstractmethod
import pickle as pickle
import numpy as np
from flare.utils.element_coder import element_to_Z, Z_to_element, NumpyEncoder
from flare.utils.learner import get_max_cutoff
try:
# Used for to_pmg_structure method
import pymatgen.core.structure as pmgstruc
import pymatgen.io.vasp.inputs as pmgvaspio
_pmg_present = True
except ImportError:
_pmg_present = False
class Structure:
"""
Contains information about a periodic structure of atoms, including the
periodic cell boundaries, atomic species, and coordinates.
*Note that input positions are assumed to be Cartesian.*
:param cell: 3x3 array whose rows are the Bravais lattice vectors of the
cell.
:type cell: np.ndarray
:param species: List of atomic species, which are represented either as
integers or chemical symbols.
:type species: List
:param positions: Nx3 array of atomic coordinates.
:type positions: np.ndarray
:param mass_dict: Dictionary of atomic masses used in MD simulations.
:type mass_dict: dict
:param prev_positions: Nx3 array of previous atomic coordinates used in
MD simulations.
:type prev_positions: np.ndarray
:param species_labels: List of chemical symbols. Used in the output file
of on-the-fly runs.
:type species_labels: List[str]
:param stds: Uncertainty associated with forces
:type stds: np.ndarray
"""
    def __init__(
        self,
        cell: "ndarray",
        species: Union[List[str], List[int]],
        positions: "ndarray",
        mass_dict: dict = None,
        prev_positions: "ndarray" = None,
        species_labels: List[str] = None,
        forces=None,
        stds=None,
        energy: float = None,
    ):
        """Create a periodic structure from a cell, species, and Cartesian
        positions; see the class docstring for parameter details.

        NOTE: when ``mass_dict`` is given it is mutated in place (integer
        keys are added alongside the string keys).
        """
        # Define cell (each row is a Bravais lattice vector).
        # Assigning self.cell triggers the property setter, which also caches
        # the transpose, Gram matrix, and their inverses.
        self.cell = np.array(cell)
        # Compute the max cutoff compatible with a 3x3x3 supercell of the
        # structure.
        self.max_cutoff = get_max_cutoff(self.cell)
        # Set positions.
        # The property setter also computes and caches the wrapped positions.
        self.positions = np.array(positions)
        # If species are strings, convert species to integers by atomic number
        if species_labels is None:
            # NOTE: stores the caller's species list object itself (no copy).
            self.species_labels = species
        else:
            self.species_labels = species_labels
        self.coded_species = np.array([element_to_Z(spec) for spec in species])
        # Number of atoms in the structure.
        self.nat = len(species)
        # Default: atoms have no velocity
        if prev_positions is None:
            self.prev_positions = np.copy(self.positions)
        else:
            assert len(positions) == len(
                prev_positions
            ), "Previous positions and positions are not same length"
            self.prev_positions = prev_positions
        # Set forces, energies, and stresses and their uncertainties.
        # Missing forces/stds default to zero arrays of shape (nat, 3).
        if forces is not None:
            self.forces = np.array(forces)
        else:
            self.forces = np.zeros((len(positions), 3))
        if stds is not None:
            self.stds = np.array(stds)
        else:
            self.stds = np.zeros((len(positions), 3))
        self.energy = energy
        # Per-atom and stress quantities are unset until computed elsewhere.
        self.local_energies = None
        self.local_energy_stds = None
        self.partial_stresses = None
        self.partial_stress_stds = None
        self.stress = None
        self.stress_stds = None
        # Potential energy attribute needed to mirror ASE atoms object.
        self.potential_energy = None
        self.mass_dict = mass_dict
        # Convert from elements to atomic numbers in mass dict
        # (mutates the dictionary passed by the caller).
        if mass_dict is not None:
            keys = list(mass_dict.keys())
            for elt in keys:
                if isinstance(elt, str):
                    mass_dict[element_to_Z(elt)] = mass_dict[elt]
                    if elt.isnumeric():
                        mass_dict[int(elt)] = mass_dict[elt]
    @property
    def positions(self):
        # Cartesian coordinates exactly as assigned (not folded into the cell).
        return self._positions
    @property
    def wrapped_positions(self):
        # Cached copy of the positions folded back into the primary cell;
        # recomputed whenever `positions` is assigned.
        return self._wrapped_positions
    @positions.setter
    def positions(self, position_array):
        # Keep the cached wrapped coordinates in sync with every assignment.
        self._positions = position_array
        self._wrapped_positions = self.wrap_positions()
    @property
    def cell(self):
        # 3x3 array; each row is a Bravais lattice vector.
        return self._cell
    @property
    def vec1(self):
        # First lattice vector (row 0 of the cell).
        return self._vec1
    @property
    def vec2(self):
        # Second lattice vector (row 1 of the cell).
        return self._vec2
    @property
    def vec3(self):
        # Third lattice vector (row 2 of the cell).
        return self._vec3
    @property
    def cell_transpose(self):
        # Cached transpose of the cell; used in coordinate conversions.
        return self._cell_transpose
    @property
    def cell_transpose_inverse(self):
        # Cached inverse of the cell transpose.
        return self._cell_transpose_inverse
    @property
    def cell_dot(self):
        # Cached Gram matrix (pairwise dot products of lattice vectors).
        return self._cell_dot
    @property
    def cell_dot_inverse(self):
        # Cached inverse of the Gram matrix.
        return self._cell_dot_inverse
    @cell.setter
    def cell(self, cell_array):
        """Set the cell and recompute all cached derived matrices."""
        self._cell = cell_array
        self._vec1 = cell_array[0, :]
        self._vec2 = cell_array[1, :]
        self._vec3 = cell_array[2, :]
        self._cell_transpose = cell_array.transpose()
        self._cell_transpose_inverse = np.linalg.inv(self._cell_transpose)
        self._cell_dot = self.get_cell_dot(cell_array)
        self._cell_dot_inverse = np.linalg.inv(self._cell_dot)
@staticmethod
def get_cell_dot(cell_array):
"""
Compute 3x3 array of dot products of cell vectors used to
fold atoms back to the unit cell.
:return: 3x3 array of cell vector dot products.
:rtype: np.ndarray
"""
cell_dot = np.zeros((3, 3))
for m in range(3):
for n in range(3):
cell_dot[m, n] = np.dot(cell_array[m], cell_array[n])
return cell_dot
@staticmethod
def raw_to_relative(
positions: "ndarray", cell_transpose: "ndarray", cell_dot_inverse: "ndarray"
) -> "ndarray":
"""Convert Cartesian coordinates to relative (fractional) coordinates,
expressed in terms of the cell vectors set in self.cell.
:param positions: Cartesian coordinates.
:type positions: np.ndarray
:param cell_transpose: Transpose of the cell array.
:type cell_transpose: np.ndarray
:param cell_dot_inverse: Inverse of the array of dot products of
cell vectors.
:type cell_dot_inverse: np.ndarray
:return: Relative positions.
:rtype: np.ndarray
"""
relative_positions = np.matmul(
np.matmul(positions, cell_transpose), cell_dot_inverse
)
return relative_positions
@staticmethod
def relative_to_raw(
relative_positions: "ndarray",
cell_transpose_inverse: "ndarray",
cell_dot: "ndarray",
) -> "ndarray":
"""Convert fractional coordinates to raw (Cartesian) coordinates.
:param relative_positions: fractional coordinates.
:type relative_positions: np.ndarray
:param cell_transpose_inverse: Transpose of the cell array.
:type cell_transpose_inverse: np.ndarray
:param cell_dot: Dot products of cell vectors
:type cell_dot: np.ndarray
:return: Cartesian positions.
:rtype: np.ndarray
"""
return np.matmul(
np.matmul(relative_positions, cell_dot), cell_transpose_inverse
)
def wrap_positions(self) -> "ndarray":
"""
Convenience function which folds atoms outside of the unit cell back
into the unit cell. in_place flag controls if the wrapped positions
are set in the class.
:return: Cartesian coordinates of positions all in unit cell
:rtype: np.ndarray
"""
rel_pos = self.raw_to_relative(
self.positions, self.cell_transpose, self.cell_dot_inverse
)
rel_wrap = rel_pos - np.floor(rel_pos)
pos_wrap = self.relative_to_raw(
rel_wrap, self.cell_transpose_inverse, self.cell_dot
)
return pos_wrap
def indices_of_specie(self, specie: Union[int, str]) -> List[int]:
"""
Return the indices of a given species within atoms of the structure.
:param specie: Element to target, can be string or integer
:return: The indices in the structure at which this element occurs
:rtype: List[str]
"""
return [i for i, spec in enumerate(self.coded_species) if spec == specie]
# TODO make more descriptive
def __str__(self) -> str:
"""
Simple descriptive string of structure.
:return: One-line descriptor of number of atoms and species present.
:rtype: str
"""
return "Structure with {} atoms of types {}".format(
self.nat, set(self.species_labels)
)
def __len__(self) -> int:
"""
Returns number of atoms in structure.
:return: number of atoms in structure.
:rtype: int
"""
return self.nat
def as_dict(self) -> dict:
"""
Returns structure as a dictionary; useful for serialization purposes.
:return: Dictionary version of current structure
:rtype: dict
"""
return dict(vars(self))
def as_str(self) -> str:
"""
Returns string dictionary serialization cast as string.
:return: output of as_dict method cast as string
:rtype: str
"""
return dumps(self.as_dict(), cls=NumpyEncoder)
@staticmethod
def from_dict(dictionary: dict) -> "flare.struc.Structure":
"""
Assembles a Structure object from a dictionary parameterizing one.
:param dictionary: dict describing structure parameters.
:return: FLARE structure assembled from dictionary
"""
struc = Structure(
cell=np.array(dictionary.get("_cell", dictionary.get("cell"))),
species=dictionary["coded_species"],
positions=np.array(
dictionary.get("_positions", dictionary.get("positions"))
),
mass_dict=dictionary.get("mass_dict"),
prev_positions=dictionary.get("prev_positions", None),
species_labels=dictionary.get("species_labels"),
forces=np.array(dictionary.get("forces")),
stds=np.array(dictionary.get("stds")),
energy=dictionary.get("energy", None),
)
struc.stress = dictionary.get("stress", None)
return struc
@staticmethod
def from_ase_atoms(atoms: "ase.Atoms", cell=None) -> "flare.struc.Structure":
"""
From an ASE Atoms object, return a FLARE structure
:param atoms: ASE Atoms object
:type atoms: ASE Atoms object
:return: A FLARE structure from an ASE atoms object
"""
if cell is None:
cell = np.array(atoms.cell)
try:
forces = atoms.get_forces()
except:
forces = None
try:
stds = atoms.get_uncertainties()
except:
stds = None
try:
energy = atoms.get_potential_energy()
except:
energy = None
try:
stress = atoms.get_stress() # TODO: what stress order should we use?
except:
stress = None
struc = Structure(
cell=cell,
positions=atoms.positions,
species=atoms.get_chemical_symbols(),
forces=forces,
stds=stds,
energy=energy,
)
struc.stress = stress
return struc
def to_ase_atoms(self) -> "ase.Atoms":
    """Convert to an ase.Atoms object carrying forces/energy/stress results."""
    from ase import Atoms
    from ase.calculators.singlepoint import SinglePointCalculator

    atoms = Atoms(
        self.species_labels, positions=self.positions, cell=self.cell, pbc=True
    )
    # attach the computed results via a single-point calculator
    # NOTE(review): assumes self.forces/energy/stress attributes exist
    results = {name: getattr(self, name) for name in ["forces", "energy", "stress"]}
    atoms.calc = SinglePointCalculator(atoms, **results)
    return atoms
def to_pmg_structure(self):
    """
    Returns FLARE structure as a pymatgen structure.

    :return: Pymatgen structure corresponding to current FLARE structure
    :raises ModuleNotFoundError: if pymatgen is not installed
    """
    if not _pmg_present:
        raise ModuleNotFoundError(
            "Pymatgen is not present. Please install Pymatgen and try again"
        )
    # NOTE(review): the site-property key "force:" carries a trailing colon
    # (unlike "std"); downstream readers may depend on it - confirm before
    # renaming.
    if self.forces is None:
        # no forces computed: store zeros so the property is always present
        forces_temp = np.zeros((len(self.positions), 3))
        site_properties = {"force:": forces_temp, "std": self.stds}
    else:
        site_properties = {"force:": self.forces, "std": self.stds}
    return pmgstruc.Structure(
        lattice=self.cell,
        species=self.species_labels,
        coords=self.positions,
        coords_are_cartesian=True,
        site_properties=site_properties,
    )
@staticmethod
def from_pmg_structure(structure: "pymatgen Structure") -> "flare Structure":
"""
Returns Pymatgen structure as FLARE structure.
:param structure: Pymatgen Structure
:type structure: Pymatgen Structure
:return: FLARE Structure
"""
cell = structure.lattice.matrix.copy()
species = [str(spec) for spec in structure.species]
positions = structure.cart_coords.copy()
new_struc = Structure(cell=cell, species=species, positions=positions)
site_props = structure.site_properties
if | |
# gh_stars: 1-10
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2003, 2004 <NAME>
# Copyright (C) 2003, 2004 <NAME>
# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
# Copyright (C) 2005 <NAME>
# Copyright (C) 2005 ROAD GmbH
# Copyright (C) 2006 - 2007 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys, os, getopt, glob, copy, os.path, re, time
import bb
from bb import utils, data, parse, event, cache, providers, taskdata, runqueue
from bb import command
import itertools, sre_constants
class MultipleMatches(Exception):
    """
    Exception raised when multiple file matches are found
    (e.g. a buildfile glob that should resolve to exactly one recipe).
    """
class ParsingErrorsFound(Exception):
    """
    Exception raised when parsing errors are found
    while reading recipe/configuration files.
    """
class NothingToBuild(Exception):
    """
    Exception raised when there is nothing to build
    (no targets resolved from the command line or configuration).
    """
# Different states the cooker's parse/cache machinery can be in
cookerClean = 1    # nothing parsed yet this session
cookerParsing = 2  # recipe parsing in progress
cookerParsed = 3   # parsing complete, cache populated
# Different action states the cooker can be in
cookerRun = 1       # Cooker is running normally
cookerShutdown = 2  # Active tasks should be brought to a controlled stop
cookerStop = 3      # Stop, now!
#============================================================================#
# BBCooker
#============================================================================#
class BBCooker:
"""
Manages one bitbake build run
"""
def __init__(self, configuration, server):
    """Set up messaging, configuration data and the command interface."""
    # cache/status handles are populated later when recipes are parsed
    self.status = None
    self.cache = None
    self.bb_cache = None
    self.server = server.BitBakeServer(self)
    self.configuration = configuration
    # configure message verbosity/debugging before anything else logs
    if self.configuration.verbose:
        bb.msg.set_verbose(True)
    if self.configuration.debug:
        bb.msg.set_debug_level(self.configuration.debug)
    else:
        bb.msg.set_debug_level(0)
    if self.configuration.debug_domains:
        bb.msg.set_debug_domains(self.configuration.debug_domains)
    # build the base datastore and read the configuration files
    self.configuration.data = bb.data.init()
    bb.data.inheritFromOS(self.configuration.data)
    self.parseConfigurationFiles(self.configuration.file)
    # fall back to the configured default task, then to "build"
    if not self.configuration.cmd:
        self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data, True) or "build"
    # BBPKGS supplies default targets when none were given on the cmdline
    bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, True)
    if bbpkgs and len(self.configuration.pkgs_to_build) == 0:
        self.configuration.pkgs_to_build.extend(bbpkgs.split())
    #
    # Special updated configuration we use for firing events
    #
    self.configuration.event_data = bb.data.createCopy(self.configuration.data)
    bb.data.update_data(self.configuration.event_data)
    # TOSTOP must not be set or our children will hang when they output
    fd = sys.stdout.fileno()
    if os.isatty(fd):
        import termios
        tcattr = termios.tcgetattr(fd)
        if tcattr[3] & termios.TOSTOP:
            bb.msg.note(1, bb.msg.domain.Build, "The terminal had the TOSTOP bit set, clearing...")
            tcattr[3] = tcattr[3] & ~termios.TOSTOP
            termios.tcsetattr(fd, termios.TCSANOW, tcattr)
    self.command = bb.command.Command(self)
    self.cookerState = cookerClean
    self.cookerAction = cookerRun
def parseConfiguration(self):
    """Apply configuration-driven process settings (currently BB_NICE_LEVEL)."""
    nice = bb.data.getVar("BB_NICE_LEVEL", self.configuration.data, True)
    if not nice:
        return
    # os.nice() takes a delta, so subtract the current niceness first
    delta = int(nice) - os.nice(0)
    bb.msg.note(2, bb.msg.domain.Build, "Renice to %s " % os.nice(delta))
def parseCommandLine(self):
    """Translate the parsed command-line options into one async command.

    Sets self.commandlineAction to a [command, args...] list, or to None
    (after reporting an error) when the options are inconsistent.
    """
    # Parse any commandline into actions
    if self.configuration.show_environment:
        self.commandlineAction = None
        # --environment accepts at most one target, and no --buildfile mix
        if 'world' in self.configuration.pkgs_to_build:
            bb.error("'world' is not a valid target for --environment.")
        elif len(self.configuration.pkgs_to_build) > 1:
            bb.error("Only one target can be used with the --environment option.")
        elif self.configuration.buildfile and len(self.configuration.pkgs_to_build) > 0:
            bb.error("No target should be used with the --environment and --buildfile options.")
        elif len(self.configuration.pkgs_to_build) > 0:
            self.commandlineAction = ["showEnvironmentTarget", self.configuration.pkgs_to_build]
        else:
            self.commandlineAction = ["showEnvironment", self.configuration.buildfile]
    elif self.configuration.buildfile is not None:
        self.commandlineAction = ["buildFile", self.configuration.buildfile, self.configuration.cmd]
    elif self.configuration.revisions_changed:
        self.commandlineAction = ["compareRevisions"]
    elif self.configuration.show_versions:
        self.commandlineAction = ["showVersions"]
    elif self.configuration.parse_only:
        self.commandlineAction = ["parseFiles"]
    # FIXME - implement
    #elif self.configuration.interactive:
    #    self.interactiveMode()
    elif self.configuration.dot_graph:
        if self.configuration.pkgs_to_build:
            self.commandlineAction = ["generateDotGraph", self.configuration.pkgs_to_build, self.configuration.cmd]
        else:
            self.commandlineAction = None
            bb.error("Please specify a package name for dependency graph generation.")
    else:
        # default action: build the requested targets
        if self.configuration.pkgs_to_build:
            self.commandlineAction = ["buildTargets", self.configuration.pkgs_to_build, self.configuration.cmd]
        else:
            self.commandlineAction = None
            bb.error("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
def runCommands(self, server, data, abort):
    """
    Run any queued asynchronous command
    This is done by the idle handler so it runs in true context rather than
    tied to any UI.
    """
    # server/data/abort belong to the idle-handler signature; the actual
    # work is delegated to the Command object
    return self.command.runAsyncCommand()
def tryBuildPackage(self, fn, item, task, the_data):
    """
    Build one task of a package, optionally build following task depends

    Returns True on success; re-raises build failures after logging.
    """
    try:
        # dry runs skip execution but still report success
        if not self.configuration.dry_run:
            bb.build.exec_task('do_%s' % task, the_data)
        return True
    except bb.build.FuncFailed:
        bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
        raise
    except bb.build.EventException, e:
        # the event attached to the exception names the failing step
        event = e.args[1]
        bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event))
        raise
def tryBuild(self, fn, task):
    """
    Build a provider and its dependencies.
    build_depends is a list of previous build dependencies (not runtime)
    If build_depends is empty, we're dealing with a runtime depends
    """
    # load the full (uncached) datastore for this recipe file
    the_data = self.bb_cache.loadDataFull(fn, self.configuration.data)
    item = self.status.pkg_fn[fn]
    # stamp checking is disabled here; the runqueue handles stamps instead
    #if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
    #    return True
    return self.tryBuildPackage(fn, item, task, the_data)
def showVersions(self):
    """Print a table of latest vs. preferred versions for every recipe."""
    # Need files parsed
    self.updateCache()
    pkg_pn = self.status.pkg_pn
    preferred_versions = {}
    latest_versions = {}
    # Sort by priority
    for pn in pkg_pn:
        (last_ver, last_file, pref_ver, pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status)
        preferred_versions[pn] = (pref_ver, pref_file)
        latest_versions[pn] = (last_ver, last_file)
    bb.msg.plain("%-35s %25s %25s" % ("Package Name", "Latest Version", "Preferred Version"))
    bb.msg.plain("%-35s %25s %25s\n" % ("============", "==============", "================="))
    for p in sorted(pkg_pn):
        pref = preferred_versions[p]
        latest = latest_versions[p]
        # version tuples are (epoch, version, revision)
        prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
        lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
        # blank out the preferred column when it matches the latest
        if pref == latest:
            prefstr = ""
        bb.msg.plain("%-35s %25s %25s" % (p, lateststr, prefstr))
def compareRevisions(self):
    """Compare fetcher source revisions and report the result as an exit code."""
    # NOTE(review): 'fetcher_compare_revisons' spelling matches the bb.fetch
    # API of this era - verify against bb/fetch before "fixing" it.
    ret = bb.fetch.fetcher_compare_revisons(self.configuration.data)
    bb.event.fire(bb.command.CookerCommandSetExitCode(ret), self.configuration.event_data)
def showEnvironment(self, buildfile = None, pkgs_to_build = []):
    """
    Show the outer or per-package environment

    With a buildfile, the environment of that recipe is shown; with a
    single target, its best provider is resolved first; otherwise the
    global configuration environment is printed.
    """
    fn = None
    envdata = None
    if buildfile:
        self.cb = None
        self.bb_cache = bb.cache.init(self)
        fn = self.matchFile(buildfile)
    elif len(pkgs_to_build) == 1:
        # resolve the target to a concrete recipe file via taskdata
        self.updateCache()
        localdata = data.createCopy(self.configuration.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)
        taskdata = bb.taskdata.TaskData(self.configuration.abort)
        taskdata.add_provider(localdata, self.status, pkgs_to_build[0])
        taskdata.add_unresolved(localdata, self.status)
        targetid = taskdata.getbuild_id(pkgs_to_build[0])
        fnid = taskdata.build_targets[targetid][0]
        fn = taskdata.fn_index[fnid]
    else:
        envdata = self.configuration.data
    if fn:
        try:
            envdata = self.bb_cache.loadDataFull(fn, self.configuration.data)
        except IOError, e:
            bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e))
            raise
        except Exception, e:
            bb.msg.error(bb.msg.domain.Parsing, "%s" % e)
            raise
    # collect emitted output in memory instead of writing to a stream
    class dummywrite:
        def __init__(self):
            self.writebuf = ""
        def write(self, output):
            self.writebuf = self.writebuf + output
    # emit variables and shell functions
    try:
        data.update_data(envdata)
        wb = dummywrite()
        data.emit_env(wb, envdata, True)
        bb.msg.plain(wb.writebuf)
    except Exception, e:
        bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
    # emit the metadata which isnt valid shell
    data.expandKeys(envdata)
    for e in envdata.keys():
        if data.getVarFlag( e, 'python', envdata ):
            bb.msg.plain("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1)))
def generateDepTreeData(self, pkgs_to_build, task):
    """
    Create a dependency tree of pkgs_to_build, returning the data.

    The result maps: "pn" (recipe info), "depends"/"rdepends-pn"
    (recipe-level build/runtime deps), "tdepends" (task-level edges),
    and "packages"/"rdepends-pkg"/"rrecs-pkg" (package-level data).
    """
    # Need files parsed
    self.updateCache()
    # If we are told to do the None task then query the default task
    if (task == None):
        task = self.configuration.cmd
    pkgs_to_build = self.checkPackages(pkgs_to_build)
    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)
    taskdata = bb.taskdata.TaskData(self.configuration.abort)
    runlist = []
    for k in pkgs_to_build:
        taskdata.add_provider(localdata, self.status, k)
        runlist.append([k, "do_%s" % task])
    taskdata.add_unresolved(localdata, self.status)
    # prepare (but do not execute) a runqueue to obtain the task graph
    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
    rq.prepare_runqueue()
    seen_fnids = []
    depend_tree = {}
    depend_tree["depends"] = {}
    depend_tree["tdepends"] = {}
    depend_tree["pn"] = {}
    depend_tree["rdepends-pn"] = {}
    depend_tree["packages"] = {}
    depend_tree["rdepends-pkg"] = {}
    depend_tree["rrecs-pkg"] = {}
    # NOTE: the loop variable shadows the 'task' parameter, which is not
    # used again below
    for task in range(len(rq.runq_fnid)):
        taskname = rq.runq_task[task]
        fnid = rq.runq_fnid[task]
        fn = taskdata.fn_index[fnid]
        pn = self.status.pkg_fn[fn]
        version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
        if pn not in depend_tree["pn"]:
            depend_tree["pn"][pn] = {}
            depend_tree["pn"][pn]["filename"] = fn
            depend_tree["pn"][pn]["version"] = version
        # task-level dependency edges: "pn.task" -> "deppn.deptask"
        for dep in rq.runq_depends[task]:
            depfn = taskdata.fn_index[rq.runq_fnid[dep]]
            deppn = self.status.pkg_fn[depfn]
            dotname = "%s.%s" % (pn, rq.runq_task[task])
            if not dotname in depend_tree["tdepends"]:
                depend_tree["tdepends"][dotname] = []
            depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, rq.runq_task[dep]))
        # per-recipe data is collected only once per recipe file
        if fnid not in seen_fnids:
            seen_fnids.append(fnid)
            packages = []
            depend_tree["depends"][pn] = []
            for dep in taskdata.depids[fnid]:
                depend_tree["depends"][pn].append(taskdata.build_names_index[dep])
            depend_tree["rdepends-pn"][pn] = []
            for rdep in taskdata.rdepids[fnid]:
                depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])
            # per-package runtime depends and recommends
            rdepends = self.status.rundeps[fn]
            for package in rdepends:
                depend_tree["rdepends-pkg"][package] = []
                for rdepend in bb.utils.explode_deps(rdepends[package]):
                    depend_tree["rdepends-pkg"][package].append(rdepend)
                packages.append(package)
            rrecs = self.status.runrecs[fn]
            for package in rrecs:
                depend_tree["rrecs-pkg"][package] = []
                for rdepend in bb.utils.explode_deps(rrecs[package]):
                    depend_tree["rrecs-pkg"][package].append(rdepend)
                if not package in packages:
                    packages.append(package)
            for package in packages:
                if package not in depend_tree["packages"]:
                    depend_tree["packages"][package] = {}
                    depend_tree["packages"][package]["pn"] = pn
                    depend_tree["packages"][package]["filename"] = fn
                    depend_tree["packages"][package]["version"] = version
    return depend_tree
def generateDepTreeEvent(self, pkgs_to_build, task):
    """
    Create a task dependency graph of pkgs_to_build.
    Generate an event with the result
    """
    tree = self.generateDepTreeData(pkgs_to_build, task)
    bb.event.fire(bb.event.DepTreeGenerated(tree), self.configuration.data)
def generateDotGraphFiles(self, pkgs_to_build, task):
"""
Create a task dependency graph of pkgs_to_build.
Save the result to a set of .dot files.
"""
depgraph = self.generateDepTreeData(pkgs_to_build, task)
# Prints a flattened form of package-depends below | |
if r.has_surface():
cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]
r.surface_piece.color = ( cr, cg, cb, 1.0 )
r.surface_piece.displayStyle = r.surface_piece.Mesh
r.surface_piece.lineThickness = 1.0
def SelectAllRegions(self):
    """Select the surfaces of all regions in the current segmentation.

    Fix: the original list comprehension used a vacuous `if 1` filter and so
    also collected regions whose surface_piece is None; those are now
    skipped, matching the filtering done by Invert().  The unused
    sel_regs computation was dropped.
    """
    smod = self.CurrentSegmentation()
    if smod == None: return
    surfs = [r.surface_piece for r in smod.regions if r.surface_piece]
    chimera.selection.clearCurrent()
    chimera.selection.addCurrent(surfs)
def Invert(self):
    """Invert the selection: select every unselected region with a surface."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    selected = set(seg.selected_regions())
    to_select = [r.surface_piece for r in seg.regions
                 if r not in selected and r.surface_piece]
    chimera.selection.clearCurrent()
    chimera.selection.addCurrent(to_select)
def Group(self):
    """Group regions: join the selection, or run one automatic grouping step."""
    if not NothingSelected():
        self.JoinSelRegs()
    elif self.groupMode.get() == 'smooth':
        self.SmoothAndGroupOneStep()
    else:
        self.GroupByConsOneStep()
def JoinSelRegs(self):
    """Merge all selected (top-level) regions into a single new region."""
    smod = self.CurrentSegmentation()
    if smod is None: return
    regs = smod.selected_regions()
    if len(regs)==0:
        umsg ( "No regions selected" )
        return
    # only merge top-level parents; children follow their parents
    regs = regions.TopParentRegions(regs)
    jreg = smod.join_regions ( regs )
    jreg.make_surface(None, None, smod.regions_scale)
    if smod.adj_graph:
        # keep the adjacency-graph display in sync with the new grouping
        graph.create_graph ( smod, smod.graph_links )
    # leave the new joined region selected
    chimera.selection.setCurrent([jreg.surface_piece])
    self.ReportRegionCount(smod)
    umsg ( "Grouped %d regions" % len(regs) )
def DelSelRegs(self):
    """Delete the currently selected regions from the current segmentation."""
    seg = self.CurrentSegmentation()
    if seg is None:
        umsg ( "No segmentation selected..." )
        return
    selected = seg.selected_regions()
    if not selected:
        umsg ( "Select one or more regions to delete" )
        return
    seg.remove_regions(selected, update_surfaces = True, remove_children = True)
    self.ReportRegionCount(seg)
    umsg ( "Deleted %d regions" % len(selected) )
def DelExcSelRegs(self):
    """Delete every region except the currently selected ones."""
    seg = self.CurrentSegmentation()
    if seg is None:
        umsg ( "No segmentation selected..." )
        return
    keep = seg.selected_regions()
    if not keep:
        umsg ( "No regions selected..." )
        return
    doomed = [r for r in seg.regions if r not in keep]
    seg.remove_regions(doomed, update_surfaces = True, remove_children = True)
    self.ReportRegionCount(seg)
    umsg ( "Deleted %d regions" % len(doomed) )
def Ungroup(self):
    """Undo one grouping step: last smoothing step if nothing is selected."""
    action = self.UngroupLastSmoothing if NothingSelected() else self.UngroupSelRegs
    action()
def SafeCreateSurfsForRegs(self, smod, rlist, rregs):
    """Create surfaces for the regions in rlist while keeping the total
    number of displayed surfaces under the user-configured maximum.

    rregs are regions whose surfaces are about to be removed, so they are
    subtracted from the current surface count first.
    """
    maxnr = self.MaximumRegionsToDisplay()
    nsurfs = 0
    for r in smod.regions:
        if r.has_surface():
            nsurfs += 1
    print " - %d surfs have pieces before" % nsurfs
    # surfs that will go away...
    for r in rregs:
        if r.has_surface():
            nsurfs -= 1
    print " - %d surfs will have pieces after removing selected" % nsurfs
    if nsurfs >= maxnr:
        # already at/over the limit: create no surfaces at all
        umsg('Ungrouped to %d regions, but did not show their surfaces, see Options' % len(rlist) )
    else:
        canshow = maxnr - nsurfs
        if canshow < len(rlist):
            umsg('Ungrouped to %d regions, but did not show all surfaces, see Options' % len(rlist) )
        else:
            umsg('Ungrouped to %d regions' % len(rlist) )
        # surface creation can be slow; run it as a cancellable modal task
        from chimera import tasks, CancelOperation
        task = tasks.Task('Adding surfaces', modal = True)
        try:
            for ri, reg in enumerate ( rlist ):
                if ri >= canshow:
                    break
                reg.make_surface(None, None, smod.regions_scale)
        except CancelOperation:
            pass
        finally:
            task.finished()
def ShowNumSubRegs(self):
    """Report the total number of direct child regions of the selection."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    selected = seg.selected_regions()
    if not selected:
        umsg ( "No regions selected" )
        return
    selected = regions.TopParentRegions(selected)
    total = sum(len(r.cregs) for r in selected)
    umsg ( "selected regions have %d total sub regions" % total )
def UngroupSelRegs(self):
    """Split the selected (top-level) regions back into their child regions."""
    smod = self.CurrentSegmentation()
    if smod == None: return
    sregs = smod.selected_regions()
    if len(sregs) == 0:
        umsg ( "No regions selected" )
        return
    sregs = regions.TopParentRegions(sregs)
    chimera.selection.clearCurrent ()
    [rlist, removedRegs] = smod.ungroup_regions ( sregs )
    # show the new child surfaces (bounded by the display limit) before
    # removing the parents' surfaces
    self.SafeCreateSurfsForRegs ( smod, rlist, removedRegs )
    for r in removedRegs: r.remove_surface()
    print " - now %d regions" % len(smod.regions)
    if smod.adj_graph:
        graph.create_graph ( smod, smod.graph_links )
    # select the surfaces of the newly exposed child regions
    chimera.selection.setCurrent ( [r.surface_piece for r in rlist if (hasattr(r,'surface_piece') and r.surface_piece != None)] )
    self.ReportRegionCount(smod)
def UngroupAllRegs(self):
    """Ungroup every region in the current segmentation by one level."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    all_regs = list(seg.regions)
    rlist2, removed = seg.ungroup_regions(all_regs)
    self.SafeCreateSurfsForRegs(seg, rlist2, removed)
    for reg in removed:
        reg.remove_surface()
    self.ReportRegionCount(seg)
def UngroupLastSmoothing(self):
    """Undo the most recent smooth-and-group step by ungrouping every
    region at the current (highest) smoothing level."""
    smod = self.CurrentSegmentation()
    if smod == None: return
    levels = [r.smoothing_level for r in smod.regions]
    if len(levels) == 0:
        return
    slev = max(levels)
    rlev = [r for r in smod.regions if r.smoothing_level == slev]
    rlist2 = []
    removedRegs = []
    # ungrouping can be slow; run it as a cancellable modal task
    from chimera import tasks, CancelOperation
    task = tasks.Task('Ungrouping', modal = True)
    try:
        [rlist2, removedRegs] = smod.ungroup_regions(rlev, task)
    except CancelOperation:
        pass
    finally:
        task.finished()
    self.SafeCreateSurfsForRegs ( smod, rlist2, removedRegs )
    for r in removedRegs: r.remove_surface()
    # recompute the segmentation's smoothing level from what remains
    # NOTE(review): max() would fail if no regions remain - confirm that
    # ungrouping always leaves at least one region
    levels = [r.smoothing_level for r in smod.regions]
    smod.smoothing_level = max(levels)
    if smod.adj_graph:
        graph.create_graph ( smod, smod.graph_links )
    #umsg ( "Ungrouped to %.3g voxel smoothing, %d regions" % (smod.smoothing_level, len(smod.regions)) )
    self.ReportRegionCount(smod)
def CloseRegions(self):
    """Remove the current segmentation model, clearing counts and its graph."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    chimera.openModels.remove(seg)
    self.SetCurrentSegmentation(None)
    self.ReportRegionCount(None)
    if seg.adj_graph:
        seg.adj_graph.close()
def CloseSeg(self):
    """Close the current segmentation model."""
    seg = self.CurrentSegmentation()
    if seg is not None:
        seg.close()
        self.SetCurrentSegmentation(None)
def RegionsVolume(self):
    """Report the enclosed volume and grid-point count of the selected
    regions (or of all regions when nothing is selected)."""
    smod = self.CurrentSegmentation()
    if smod == None: return
    sregs = smod.selected_regions()
    print "%d selected regions" % len(sregs)
    # fall back to all regions when the selection is empty
    if len(sregs) == 0:
        sregs = smod.regions
    if len(sregs) == 0:
        umsg ( "No regions found in %s" % smod.name )
        return
    tvol = sum([reg.enclosed_volume() for reg in sregs])
    pcount = sum([reg.point_count() for reg in sregs])
    rw = "region"
    if len(sregs) > 1: rw = "regions"
    umsg ( "Volume of %d %s: %.3g Angstroms^3, %d points" % ( len(sregs), rw, tvol, pcount ) )
def RegionMeanAndSD(self):
    """Report mean and standard deviation of map values in each selected region."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    selected = seg.selected_regions()
    if not selected:
        umsg ( "No regions selected in %s" % seg.name )
        return
    # prefer the explicitly chosen map, else the segmentation's own data
    volume = self.SegmentationMap()
    if volume is None:
        volume = seg.volume_data()
    if volume is None:
        umsg ( 'No map specified' )
        return
    means, sdevs = regions.mean_and_sd(selected, volume)
    for reg, mean, sd in zip(selected, means, sdevs):
        umsg ( 'Region %d mean %.5g, SD %.5g' % (reg.rid, mean, sd) )
def Graph(self):
    """Build a region-adjacency graph with uniform link weights."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    graph.create_graph(seg, "uniform")
def GraphAvgD(self):
    """Build a region-adjacency graph weighted by average map density."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    graph.create_graph(seg, "avgd")
def GraphMaxD(self):
    """Build a region-adjacency graph weighted by maximum map density."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    graph.create_graph(seg, "maxd")
def GraphN(self):
    """Build a region-adjacency graph using the "N" weighting mode."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    graph.create_graph(seg, "N")
def LoadGraph(self):
    """Load a previously saved skeleton/graph for the current segmentation.

    Fix: the original guard was inverted (`if smod is None:`), so
    graph.open_skeleton was only ever called with None and never with an
    actual segmentation.
    """
    smod = self.CurrentSegmentation()
    if smod is not None:
        graph.open_skeleton(smod)
def SaveGraph(self):
    """Save the current segmentation's graph."""
    # NOTE(review): this calls graph.read_graph(), which reads like a load
    # rather than a save - verify against the graph module's API before
    # changing it.
    smod = self.CurrentSegmentation()
    if smod:
        graph.read_graph(smod)
def CloseGraph(self):
    """Close the graph display and re-show the segmentation."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    graph.close(seg)
    seg.display = True
def GroupBySkeleton(self):
    """Group regions according to the current skeleton, then refresh display."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    skeleton.group_by_skeleton(seg)
    seg.display = True
    seg.display_regions()
    self.ReportRegionCount(seg)
def RemoveGraphLinks(self):
    """Delete the inter-region link geometry created by the graph display."""
    graph.remove_graph_links()
def ShowRegionsAxes(self, regs):
    """Compute and display principal-axes glyphs for each given region."""
    smod = self.CurrentSegmentation()
    if smod is None: return
    for r in regs:
        sp = r.surface_piece
        try:
            # remove any previously created axes model for this region
            sp.axes.display = True
            chimera.openModels.close ( sp.axes )
        except:
            pass
        tpoints = r.map_points()
        # principal axes (COM, rotation, singular values) of the region's points
        sp.COM, sp.U, sp.S, sp.V = prAxes ( tpoints )
        com = numpy.sum(tpoints, axis=0) / len(tpoints)
        comv = numpy.ones_like ( tpoints ) * com
        points = tpoints - comv
        # point coordinates expressed in the principal-axes frame
        ppoints = points * sp.U
        sp.Extents = numpy.asarray ( numpy.max ( numpy.abs ( ppoints ), 0 ) )[0]
        # pad the axes box a little in each direction
        sp.Extents[0] += 5.0
        sp.Extents[1] += 5.0
        sp.Extents[2] += 5.0
        import axes
        reload (axes)
        if 0:
            # for ribosome direction
            sp.Extents[1] = sp.Extents[1] * float(self.axesFactor.get())
            sp.axes = axes.AxesMod ( sp.COM, sp.U, sp.Extents, 6, 1.0, alignTo = sp.model )
        else:
            sp.axes = axes.AxesMod ( sp.COM, sp.U, sp.Extents, 1.0, 1.1, alignTo = sp.model )
        sp.axes.name = "region_%d_axes" % r.rid
def ShowRegionAxesSelected(self):
    """Show principal axes for the currently selected regions."""
    smod = self.CurrentSegmentation()
    if smod == None: return
    sregs = smod.selected_regions()
    if len(sregs)==0: print "no selected regions found"; return
    self.ShowRegionsAxes ( sregs )
def HideRegionAxes ( self ) :
print "hiding axes"
for m in OML() :
t = m.name.split ("_")
if t[0] | |
# repository: marcus-h/python-keyring-keyutils
"""A high-level interface for reading and writing BER/DER data.
Currently, only a few ASN.1 types are supported. This implementation
is based on the rules that are specified in X.690 (08/2015).
Note: there are probably better BER readers/writers out there - I just wrote
this in order to get familiar with ASN.1 and BER.
"""
import re
import struct
import sys
from io import BytesIO
class BERIOError(Exception):
    """Raised on premature EOF reads or zero-length writes on the stream."""
    pass
class ValidationError(Exception):
    """Raised by Validator when a tag/length value is out of range."""
    pass
class DecodingError(Exception):
    """Raised when BER/DER input cannot be decoded."""
    pass
def read_exact(readable, count):
    """Read exactly *count* bytes from *readable*.

    Raises BERIOError if the stream ends before *count* bytes arrive.
    """
    chunks = bytearray()
    remaining = count
    while remaining:
        chunk = readable.read(remaining)
        if not chunk:
            raise BERIOError('premature EOF')
        chunks += chunk
        remaining -= len(chunk)
    return bytes(chunks)
def write_exact(writable, buf):
    """Write all of *buf* to *writable*, retrying partial writes.

    Raises BERIOError if a write makes no progress.
    """
    remaining = buf
    while remaining:
        written = writable.write(remaining)
        if not written:
            raise BERIOError('0-length write')
        remaining = remaining[written:]
class Validator:
    """Range checks for BER tag and length values.

    Each ``validate_*`` method raises :class:`ValidationError` when the
    value is invalid and returns None otherwise.

    Fixes: ``_validate`` was missing its ``self`` parameter, so the
    instance was bound to ``condition`` (always truthy) and no check ever
    fired; ``validate_tag_number`` additionally had its condition inverted
    (``tag_number < 0``).
    """

    def _validate(self, condition, msg, *args, **kwargs):
        # raise when the validated condition does NOT hold
        if not condition:
            raise ValidationError(msg.format(*args, **kwargs))

    def validate_tag_number(self, tag_number):
        self._validate(tag_number >= 0, "illegal tag: {}", tag_number)

    def validate_tag_class(self, tag_class):
        # BER tag classes: universal(0), application(1), context(2), private(3)
        self._validate(0 <= tag_class <= 3, 'illegal class: {}', tag_class)

    def validate_length(self, length):
        self._validate(length >= 0, "length must be non-negative: {}", length)
def validate(*items):
    """Decorator factory: validate the first len(items) positional args.

    Each name in *items* is resolved to a ``validate_<name>`` method on
    ``self._validator``; the validation method is expected to raise in
    case of an error.  The wrapped method runs only if all checks pass.

    Fix: corrected the error-message typo ("to few" -> "too few").
    """
    def _decorator(meth):
        def _validate_and_process(self, *args):
            if len(args) < len(items):
                raise ValueError('too few arguments')
            for item, arg in zip(items, args):
                validation = getattr(self._validator,
                                     "validate_{}".format(item))
                # the validation is supposed to raise an exception in case of
                # an error
                validation(arg)
            return meth(self, *args)
        return _validate_and_process
    return _decorator
class Tag:
    # Universal ASN.1 tags as (tag_class, tag_number, constructed) triples,
    # suitable for unpacking directly into Encoder.write_tag(*Tag.X).
    END_OF_CONTENTS = (0, 0, False)
    BOOLEAN = (0, 1, False)
    INTEGER = (0, 2, False)
    ENUMERATED = (0, 10, False)
    BITSTRING_PRIMITIVE = (0, 3, False)
    BITSTRING_CONSTRUCTED = (0, 3, True)
    OCTETSTRING_PRIMITIVE = (0, 4, False)
    OCTETSTRING_CONSTRUCTED = (0, 4, True)
    UTF8STRING_PRIMITIVE = (0, 12, False)
    UTF8STRING_CONSTRUCTED = (0, 12, True)
    PRINTABLESTRING_PRIMITIVE = (0, 19, False)
    PRINTABLESTRING_CONSTRUCTED = (0, 19, True)
    UTCTIME_PRIMITIVE = (0, 23, False)
    UTCTIME_CONSTRUCTED = (0, 23, True)
    NULL = (0, 5, False)
    SEQUENCE = (0, 16, True)
    SET = (0, 17, True)
    OID = (0, 6, False)
class Base:
    """Shared state and format checks for the BER encoder/decoder classes.

    Fix: ``_is_utctime`` now consistently returns a bool; it previously
    returned None (instead of False) when the overall format regex did
    not match.
    """

    def __init__(self, validator=None):
        if validator is None:
            validator = Validator()
        self._validator = validator

    # see Table 10 in X.680
    # note: it is important to use \Z instead of $ because the latter
    # matches '\n' (which is not a printable string)
    _printablestring_re = re.compile('^[A-Za-z0-9 \'()+,-./:=?]*\\Z')

    def _is_printablestring(self, data):
        """Return True if *data* contains only PrintableString characters."""
        return self._printablestring_re.search(data) is not None

    # YYMMDDhhmm(ss) Z or time differential
    _utctime_re = re.compile(
        rb'^(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)?(Z|([+-])(\d\d)(\d\d))\Z')

    def _is_utctime(self, value):
        """Return True if *value* is a well-formed UTCTime string.

        NOTE: the seconds field is not range-checked.
        """
        def _is_valid_hour(hour):
            return 0 <= hour and hour <= 23

        def _is_valid_minute(minute):
            return 0 <= minute and minute <= 59

        mo = self._utctime_re.search(value.encode('ascii'))
        if mo is None:
            return False
        # convert numeric groups to int; keep None and the 'Z'/sign markers
        nums = (d if d is None or d in (b'Z', b'+', b'-') else int(d)
                for d in mo.groups())
        yy, mm, dd, hh, minute, ss, utc, diff_op, diff_hh, diff_mm = nums
        if mm <= 0 or mm > 12:
            return False
        elif dd <= 0 or dd > 31:
            return False
        elif not _is_valid_hour(hh):
            return False
        elif not _is_valid_minute(minute):
            return False
        elif utc == b'Z':
            # no time differential specified
            return True
        # ok, we got a time differential
        return _is_valid_hour(diff_hh) and _is_valid_minute(diff_mm)
class AbstractContainerEncodingMapper:
    """Classifies items during container encoding and maps them to writers."""

    def is_sequence(self, item):
        # tuples and lists are encoded as SEQUENCE
        return isinstance(item, (tuple, list))

    def is_set(self, item):
        # sets and frozensets are encoded as SET
        return isinstance(item, (set, frozenset))

    def map(self, item):
        """Return the encoder method used to write *item* (abstract)."""
        raise NotImplementedError()
class ContainerEncodingMapper(AbstractContainerEncodingMapper):
    """Default mapper from Python value types to Encoder write methods."""

    def __init__(self, encoder):
        # dispatch table: exact python type -> bound encoder method
        self._map = {
            None.__class__: encoder.write_null,
            bool: encoder.write_boolean,
            int: encoder.write_integer,
            str: encoder.write_utf8string,
            bytes: encoder.write_octetstring,
        }

    def map(self, item):
        """Return the write method for *item*'s exact type (KeyError otherwise)."""
        return self._map[item.__class__]
class Encoder(Base):
def __init__(self, writable, *args, **kwargs):
super().__init__(*args, **kwargs)
self._writables = []
self._push_writable(writable)
def _write(self, data):
write_exact(self._writables[-1], data)
def _pack(self, pformat, *args):
return self._write(struct.pack(pformat, *args))
def _push_writable(self, writable):
self._writables.append(writable)
def _pop_writable(self):
return self._writables.pop()
@validate('tag_class', 'tag_number')
def write_tag(self, tag_class, tag_number, constructed=False):
cval = 0
if constructed:
cval = 32
if tag_number <= 30:
self._pack('B', tag_class << 6 | cval | tag_number)
return
self._pack('B', tag_class << 6 | cval | 31)
octets = []
while tag_number:
octets.append(128 | (tag_number & 127))
tag_number >>= 6
octets[0] &= 127
octets.reverse()
self._pack("{}B".format(len(octets)), *octets)
@validate('length')
def write_length(self, length):
if length <= 127:
self._pack('B', length)
return
octets = []
while length:
octets.append(length & 255)
length >>= 8
if len(octets) >= 127:
# 127 is ok, but SHALL not be used (see X.690 8.1.3.5 (c))
raise ValueError('too many length octets')
octets.reverse()
self._pack('B', 128 | len(octets))
self._pack("{}B".format(len(octets)), *octets)
def write_indefinite_length(self):
self._pack('B', 128)
def write_boolean(self, value):
self.write_tag(*Tag.BOOLEAN)
self.write_length(1)
self._pack('B', 255 if value else 0)
def _write_integer(self, tag, num):
self.write_tag(*tag)
if not num:
self.write_length(1)
self._pack('B', 0)
return
# probably the dumbest way to calculate a two's complement...
signed = num < 0
if signed:
num *= -1
octets = []
c = 1
while num:
val = num & 255
num >>= 8
if signed:
val = (~val & 255) + c
c = 0
if val >= 256:
c = 1
val &= 255
octets.append(val)
if c and signed:
# c == 1 implies num == 0
raise RuntimeError('c must not be 1')
if not signed and (octets[-1] & 128):
octets.append(0)
elif signed and not (octets[-1] & 128):
octets.append(255)
octets.reverse()
self.write_length(len(octets))
self._pack("{}B".format(len(octets)), *octets)
def write_integer(self, num):
self._write_integer(Tag.INTEGER, num)
def write_enumerated(self, num):
self._write_integer(Tag.ENUMERATED, num)
def write_bitstring(self, raw, unused_bits):
if unused_bits < 0 or unused_bits > 7:
raise ValueError('unused_bits must be between 0 and 7')
# for now, we just support the length restricted, primitive encoding
self.write_tag(*Tag.BITSTRING_PRIMITIVE)
length = 1 + len(raw)
self.write_length(length)
self._pack("{}B".format(length), unused_bits, *raw)
def write_octetstring(self, raw):
# for now, we just support the length restricted, primitive encoding
self.write_tag(*Tag.OCTETSTRING_PRIMITIVE)
length = len(raw)
self.write_length(length)
self._pack("{}B".format(length), *raw)
def _write_string(self, tag, value, encoding):
self.write_tag(*tag)
raw = value.encode(encoding)
length = len(raw)
self.write_length(length)
self._pack("{}B".format(length), *raw)
def write_utf8string(self, value):
self._write_string(Tag.UTF8STRING_PRIMITIVE, value, 'utf-8')
def write_printablestring(self, value):
if not self._is_printablestring(value):
raise ValueError("{} is not a printable string".format(value))
self._write_string(Tag.PRINTABLESTRING_PRIMITIVE, value, 'ascii')
def write_utctime(self, value):
if not self._is_utctime(value):
raise ValueError("invalid utctime: {}".format(value))
elif value[-1] != 'Z':
raise ValueError('a time differential is not (yet) supported')
elif len(value) != 13:
raise ValueError('full format (including seconds) required')
self._write_string(Tag.UTCTIME_PRIMITIVE, value, 'ascii')
def write_null(self, value=None):
if value is not None:
raise ValueError('value must be None')
self.write_tag(*Tag.NULL)
self.write_length(0)
def _write_container(self, container, mapper=None):
# this implementation conforms to DER (that's why we use a definite
# length for containers) => works only for moderately small containers
# and subcontainers
def _tag_for_container(container):
if mapper.is_sequence(container):
return Tag.SEQUENCE
elif mapper.is_set(container):
return Tag.SET
else:
raise ValueError('Either a set or sequence container expected')
if mapper is None:
mapper = ContainerEncodingMapper(self)
iterators = [(_tag_for_container(container), iter(container))]
self._push_writable(BytesIO())
seen = {}
while iterators:
tag, iterator = iterators.pop()
exhausted = True
for item in iterator:
if mapper.is_sequence(item) or mapper.is_set(item):
if seen.get(id(item), False):
raise ValueError('cannot serialize cyclic sequence')
seen[id(item)] = True
iterators.append((tag, iterator))
new_tag = _tag_for_container(item)
iterators.append((new_tag, iter(item)))
self._push_writable(BytesIO())
exhausted = False
break
meth = mapper.map(item)
meth(item)
if exhausted:
bio = self._pop_writable()
self.write_tag(*tag)
self.write_length(len(bio.getvalue()))
self._write(bio.getvalue())
    def write_sequence(self, sequence, mapper=None):
        """Write *sequence* as a DER SEQUENCE (see _write_container)."""
        self._write_container(sequence, mapper)
    def write_sequence_of(self, sequence, mapper=None):
        """Write a SEQUENCE OF; element homogeneity is NOT verified."""
        # no type checking etc.
        self.write_sequence(sequence, mapper)
    def write_set(self, set_value, mapper=None):
        """Write *set_value* as a DER SET (see _write_container)."""
        self._write_container(set_value, mapper)
    def write_set_of(self, set_value, mapper=None):
        """Write a SET OF; element homogeneity is NOT verified."""
        # no type checking etc. (see write_sequence_of)
        self.write_set(set_value, mapper)
def write_oid(self, oid):
if len(oid) < 2:
raise ValueError('oid must have at least two arcs')
if min(oid) < 0:
raise ValueError('all arcs must be non-negative')
root, second, oid = oid[0], oid[1], oid[2:]
if root not in (0, 1, 2):
raise ValueError("illegal root: {}".format(root))
if root in (0, 1) and second > 39:
raise ValueError("illegal arcs: {} {}".format(root, second))
octets = []
oid.insert(0, root * 40 + second)
for arc in oid:
if not arc:
octets.append(arc)
continue
arc_octets = []
while arc:
arc_octets.append(128 | (arc & 127))
arc >>= 7
arc_octets[0] &= 127
arc_octets.reverse()
octets.extend(arc_octets)
self.write_tag(*Tag.OID)
length = len(octets)
self.write_length(length)
self._pack("{}B".format(length), *octets)
class AbstractContainerDecodingBuilder:
    """Interface for builders that assemble decoded containers.

    Subclasses receive decoding events (container open/close, primitive
    values) and construct a Python representation retrievable via build().
    """
    def is_container_tag(self, tag):
        """Return whether *tag* opens a container this builder handles."""
        raise NotImplementedError()
    def begin_container(self, tag):
        """Start a new (possibly nested) container for *tag*."""
        raise NotImplementedError()
    def end_container(self):
        """Close the most recently opened container."""
        raise NotImplementedError()
    def handle(self, tag):
        """Decode and store the primitive value identified by *tag*."""
        raise NotImplementedError()
    def build(self):
        """Return the assembled result."""
        raise NotImplementedError()
class ContainerDecodingBuilder(AbstractContainerDecodingBuilder):
    def __init__(self, decoder, immutable_containers=False):
        """Create a builder driven by *decoder*.

        :param decoder: object exposing read_<type> methods for every
            primitive tag listed in the dispatch table below.
        :param immutable_containers: when true, the builder tracks nesting
            via _immutable_container_count -- presumably so finished
            containers can be converted to immutable counterparts
            (tuples/frozensets); confirm against the rest of the class.
        """
        # Dispatch table: ASN.1 tag -> decoder method producing the value.
        self._tag_map = {
            Tag.BOOLEAN: decoder.read_boolean,
            Tag.INTEGER: decoder.read_integer,
            Tag.BITSTRING_PRIMITIVE: decoder.read_bitstring,
            Tag.BITSTRING_CONSTRUCTED: decoder.read_bitstring,
            Tag.OCTETSTRING_PRIMITIVE: decoder.read_octetstring,
            Tag.OCTETSTRING_CONSTRUCTED: decoder.read_octetstring,
            Tag.UTF8STRING_PRIMITIVE: decoder.read_utf8string,
            Tag.UTF8STRING_CONSTRUCTED: decoder.read_utf8string,
            Tag.PRINTABLESTRING_PRIMITIVE: decoder.read_printablestring,
            Tag.PRINTABLESTRING_CONSTRUCTED: decoder.read_printablestring,
            Tag.UTCTIME_PRIMITIVE: decoder.read_utctime,
            Tag.UTCTIME_CONSTRUCTED: decoder.read_utctime,
            Tag.NULL: decoder.read_null,
            Tag.OID: decoder.read_oid
        }
        # _data[0] is a sentinel root list; _data[-1] is the container
        # currently being filled.
        self._data = [[]]
        # Tags of currently open containers (a stack).
        self._tags = []
        # 0 disables immutable handling entirely.
        self._immutable_container_count = 0 if not immutable_containers else 1
def _new_container(self, container=None):
if container is None:
container = []
self._data[-1].append(container)
self._data.append(container)
def _container_append(self, data):
if self._immutable_container_count:
if isinstance(data, bytearray):
| |
<filename>pyfolio/utils.py<gh_stars>0
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import warnings
from itertools import cycle
from matplotlib.pyplot import cm
import numpy as np
import pandas as pd
from IPython.display import display, HTML
import empyrical.utils
from . import pos
from . import txn
# Day-count approximations used for annualization.
# NOTE(review): despite the "BDAYS" (business-days) naming, the values here
# are calendar days (30/365), not the conventional 21/252 trading days --
# confirm this is intentional in this fork.
APPROX_BDAYS_PER_MONTH = 30
APPROX_BDAYS_PER_YEAR = 365
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
# Scale divisor for displaying values in millions ("MM").
MM_DISPLAY_UNIT = 1000000.
# Canonical sampling-frequency labels.
DAILY = 'daily'
WEEKLY = 'weekly'
MONTHLY = 'monthly'
YEARLY = 'yearly'
# Periods per year, keyed by frequency label.
# NOTE(review): YEARLY has no entry, so a yearly lookup raises KeyError.
ANNUALIZATION_FACTORS = {
    DAILY: APPROX_BDAYS_PER_YEAR,
    WEEKLY: WEEKS_PER_YEAR,
    MONTHLY: MONTHS_PER_YEAR
}
# Default matplotlib colormap name and a fixed qualitative palette.
COLORMAP = 'Paired'
COLORS = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231',
          '#911eb4', '#46f0f0', '#f032e6', '#d2f53c', '#fabebe',
          '#008080', '#e6beff', '#aa6e28', '#800000', '#aaffc3',
          '#808000', '#ffd8b1', '#000080', '#808080']
def one_dec_places(x, pos):
    """
    Tick formatter: render *x* with one decimal place.
    (*pos* is required by the matplotlib FuncFormatter signature.)
    """
    return '{:.1f}'.format(x)
def two_dec_places(x, pos):
    """
    Tick formatter: render *x* with two decimal places.
    """
    return '{:.2f}'.format(x)
def percentage(x, pos):
    """
    Tick formatter: render *x* as a whole-number percentage.
    """
    return '{:.0f}%'.format(x)
def format_asset(asset):
    """
    If zipline asset objects are used, we want to print them out prettily
    within the tear sheet. This function should only be applied directly
    before displaying.
    """
    try:
        # zipline is an optional dependency; without it, pass through.
        from zipline.assets import Asset
    except ImportError:
        return asset
    return asset.symbol if isinstance(asset, Asset) else asset
def vectorize(func):
    """
    Decorator so that functions can be written to work on Series but
    may still be called with DataFrames.

    A 1-d input (Series) is passed straight through to *func*; a 2-d
    input (DataFrame) is applied column-wise via DataFrame.apply.

    Raises
    ------
    ValueError
        If the input is neither 1-d nor 2-d (the previous behavior was
        to silently return None).
    """
    import functools

    @functools.wraps(func)  # preserve name/docstring for introspection
    def wrapper(df, *args, **kwargs):
        if df.ndim == 1:
            return func(df, *args, **kwargs)
        elif df.ndim == 2:
            return df.apply(func, *args, **kwargs)
        raise ValueError(
            'vectorize expects a 1-d or 2-d input, got ndim={}'.format(df.ndim))
    return wrapper
def extract_rets_pos_txn_from_zipline(backtest):
    """
    Extract returns, positions, transactions and leverage from the
    backtest data structure returned by zipline.TradingAlgorithm.run().
    The returned data structures are in a format compatible with the
    rest of pyfolio and can be directly passed to
    e.g. tears.create_full_tear_sheet().

    Parameters
    ----------
    backtest : pd.DataFrame
        DataFrame returned by zipline.TradingAlgorithm.run()

    Returns
    -------
    returns : pd.Series
        Daily returns of strategy.
        - See full explanation in tears.create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in tears.create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet.

    Raises
    ------
    ValueError
        If the backtest holds no positions at all.

    Example (on the Quantopian research platform)
    ---------------------------------------------
    >>> backtest = my_algo.run()
    >>> returns, positions, transactions =
    >>>     pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
    >>> pyfolio.tears.create_full_tear_sheet(returns,
    >>>     positions, transactions)
    """
    backtest.index = backtest.index.normalize()
    if backtest.index.tzinfo is None:
        backtest.index = backtest.index.tz_localize('UTC')
    returns = backtest.returns
    raw_positions = []
    # Series.iteritems() was removed in pandas 2.0; .items() is the
    # drop-in replacement with identical (index, value) iteration.
    for dt, pos_row in backtest.positions.items():
        df = pd.DataFrame(pos_row)
        df.index = [dt] * len(df)
        raw_positions.append(df)
    if not raw_positions:
        raise ValueError("The backtest does not have any positions.")
    positions = pd.concat(raw_positions)
    positions = pos.extract_pos(positions, backtest.ending_cash)
    transactions = txn.make_transaction_frame(backtest.transactions)
    if transactions.index.tzinfo is None:
        transactions.index = transactions.index.tz_localize('utc')
    return returns, positions, transactions
def print_table(table,
                name=None,
                float_format=None,
                formatters=None,
                header_rows=None):
    """
    Pretty print a pandas DataFrame.

    Uses HTML output if running inside Jupyter Notebook, otherwise
    formatted text output.

    Parameters
    ----------
    table : pandas.Series or pandas.DataFrame
        Table to pretty-print.
    name : str, optional
        Table name to display in upper left corner.
    float_format : function, optional
        Formatter to use for displaying table elements, passed as the
        `float_format` arg to pd.Dataframe.to_html.
        E.g. `'{0:.2%}'.format` for displaying 100 as '100.00%'.
    formatters : list or dict, optional
        Formatters to use by column, passed as the `formatters` arg to
        pd.Dataframe.to_html.
    header_rows : dict, optional
        Extra rows to display at the top of the table.
    """
    if isinstance(table, pd.Series):
        table = pd.DataFrame(table)
    if name is not None:
        table.columns.name = name
    html = table.to_html(float_format=float_format, formatters=formatters)
    if header_rows is not None:
        # Count the number of columns for the text to span
        n_cols = html.split('<thead>')[1].split('</thead>')[0].count('<th>')
        # Generate the HTML for the extra rows.  The loop variable is
        # deliberately NOT called `name` so it cannot shadow the `name`
        # parameter above (latent bug in the previous version).
        rows = ''
        for row_name, value in header_rows.items():
            rows += ('\n    <tr style="text-align: right;"><th>%s</th>' +
                     '<td colspan=%d>%s</td></tr>') % (row_name, n_cols, value)
        # Inject the new HTML
        html = html.replace('<thead>', '<thead>' + rows)
    display(HTML(html))
def standardize_data(x):
    """
    Standardize an array with mean and standard deviation.

    Parameters
    ----------
    x : np.array
        Array to standardize.

    Returns
    -------
    np.array
        Standardized array (zero mean, unit population std).
    """
    center = np.mean(x)
    spread = np.std(x)
    return (x - center) / spread
def detect_intraday(positions, transactions, threshold=0.25):
    """
    Attempt to detect an intraday strategy. Get the number of
    positions held at the end of the day, and divide that by the
    number of unique stocks transacted every day. If the average quotient
    is below a threshold, then an intraday strategy is detected.

    Parameters
    ----------
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.

    Returns
    -------
    boolean
        True if an intraday strategy is detected.
    """
    txns_by_day = transactions.copy()
    txns_by_day.index = txns_by_day.index.date
    total_unique_symbols = txns_by_day.groupby(level=0).symbol.nunique().sum()
    # Zero out flat positions so count() sees only actual holdings.
    eod_holdings = positions.drop('cash', axis=1).replace(0, np.nan)
    quotient = eod_holdings.count(axis=1).sum() / total_unique_symbols
    return quotient < threshold
def check_intraday(estimate, returns, positions, transactions):
    """
    Logic for checking if a strategy is intraday and processing it.

    Parameters
    ----------
    estimate: boolean or str, optional
        Approximate returns for intraday strategies.
        See description in tears.create_full_tear_sheet.
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.

    Returns
    -------
    pd.DataFrame
        Daily net position values, adjusted for intraday movement.
    """
    have_data = positions is not None and transactions is not None
    if estimate == 'infer':
        if have_data and detect_intraday(positions, transactions):
            warnings.warn('Detected intraday strategy; inferring positi' +
                          'ons from transactions. Set estimate_intraday' +
                          '=False to disable.')
            return estimate_intraday(returns, positions, transactions)
        return positions
    if estimate:
        if not have_data:
            raise ValueError('Positions and txns needed to estimate intraday')
        return estimate_intraday(returns, positions, transactions)
    return positions
def estimate_intraday(returns, positions, transactions, EOD_hour=23):
"""
Intraday strategies will often not hold positions at the day end.
This attempts to find the point in the day that best represents
the activity of the strategy on that day, and effectively resamples
the end-of-day positions with the positions at this point of day.
The point of day is found by detecting when our exposure in the
market is at its maximum point. Note that this is an estimate.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, resampled for intraday behavior.
"""
# Construct DataFrame of transaction amounts
txn_val = transactions.copy()
txn_val.index.names = ['date']
txn_val['value'] = txn_val.amount * txn_val.price
txn_val = txn_val.reset_index().pivot_table(
index='date', values='value',
columns='symbol').replace(np.nan, 0)
# Cumulate transaction amounts each day
txn_val['date'] = txn_val.index.date
txn_val = txn_val.groupby('date').cumsum()
# Calculate exposure, then take peak of exposure every day
txn_val['exposure'] = txn_val.abs().sum(axis=1)
condition = (txn_val['exposure'] == txn_val.groupby(
pd.TimeGrouper('24H'))['exposure'].transform(max))
txn_val = txn_val[condition].drop('exposure', axis=1)
# Compute cash delta
txn_val['cash'] = -txn_val.sum(axis=1)
# Shift EOD positions to positions at start of next trading day
positions_shifted = positions.copy().shift(1).fillna(0)
starting_capital = positions.iloc[0].sum() / (1 + returns[0])
positions_shifted.cash[0] = starting_capital
# Format and add start positions to intraday position changes
txn_val.index = txn_val.index.normalize()
corrected_positions = positions_shifted.add(txn_val, fill_value=0)
corrected_positions.index.name = 'period_close'
corrected_positions.columns.name = 'sid'
return corrected_positions
def clip_returns_to_benchmark(rets, benchmark_rets):
"""
Drop entries from rets so that the start and end dates of rets match those
of benchmark_rets.
Parameters
----------
rets : pd.Series
Daily returns of the strategy, noncumulative.
- See pf.tears.create_full_tear_sheet for more details
benchmark_rets : pd.Series
Daily returns of the benchmark, noncumulative.
Returns
-------
clipped_rets : pd.Series
Daily noncumulative returns with index clipped to match that of
benchmark returns.
"""
if (rets.index[0] < benchmark_rets.index[0]) \
or (rets.index[-1] > benchmark_rets.index[-1]):
clipped_rets = rets[benchmark_rets.index]
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, C0D1G0 B1NAR10 and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import cint, flt
import shutil
import os
import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import threading
from xml.dom import minidom
class CFDI(Document):
    """Frappe document model for a CFDI (Mexican digital tax receipt).

    All behaviour lives in the module-level whitelisted functions below;
    the class itself adds nothing beyond frappe's Document.
    """
    pass
@frappe.whitelist()
def ticket(source_name, target_doc=None):
    """Map a Sales Invoice (and its items) onto a new CFDI document.

    *source_name* is the Sales Invoice name; *target_doc* optionally
    receives the mapped fields (frappe get_mapped_doc semantics).
    """
    field_mapping = {
        "Sales Invoice": {
            "doctype": "CFDI",
            "field_map": {
                "name": "ticket",
            }
        },
        "Sales Invoice Item": {
            "doctype": "CFDI Item",
            "field_map": {
                "rate": "precio_de_venta",
                "net_rate": "precio_unitario_neto",
                "amount": "monto",
                "parent": "fuente",
                "net_amount": "precio_neto",
                # "impuesto": "tax",
            }
        }
    }
    return get_mapped_doc("Sales Invoice", source_name, field_mapping,
                          target_doc)
# RG-COMIENZA MODULO CFDI
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils.file_manager import save_url
import shutil
import os
import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import threading
from xml.dom import minidom
from datetime import timedelta,datetime
import base64
# from M2Crypto import RSA
from lxml import etree as ET
import sha
from suds import WebFault
from suds.client import Client
import logging
class Cliente:
    """SOAP client wrapper for a CFDI stamping web service
    ("Facturacion Moderna" style API: timbrado / cancelacion of Mexican
    digital invoices via suds).

    Successful calls copy the service response fields onto the instance
    (self.xml, self.pdf, self.png, self.txt, self.uuid, ...); failed
    calls set self.codigo_error and self.error instead and return False.

    NOTE(review): this class is Python 2 only -- it uses the
    `except ExcType, e:` syntax and dict.iteritems().
    """
    def __init__(self, url, opciones = {}, debug = False):
        # NOTE(review): mutable default `opciones={}` is shared across
        # calls; harmless here because it is only read, but fragile.
        self.debug = debug
        self.url = url
        self.opciones = {}
        if self.debug: self._activa_debug()
        # Keep only the credential-related options.
        for key, value in opciones.iteritems():
            if key in ['emisorRFC', 'UserID', 'UserPass']:
                self.opciones.update({ key: value })
    def timbrar(self, src, opciones = { 'generarCBB': True, 'generarTXT': False, 'generarPDF': False}):
        """Stamp ("timbrar") the CFDI layout *src*.

        Returns True on success (response artifacts and TFD fields are
        copied onto self), False on failure (self.error is set).
        """
        try:
            # if src is a path to a file rather than a string, open and load it
            if os.path.isfile(src): src = open(src, 'r').read()
            opciones['text2CFDI'] = base64.b64encode(src)
            self.opciones.update(opciones)
            cliente = Client(self.url)
            respuesta = cliente.service.requestTimbrarCFDI(self.opciones)
            # Copy whichever artifacts the service returned (base64).
            for propiedad in ['xml', 'pdf', 'png', 'txt']:
                if propiedad in respuesta:
                    self.__dict__[propiedad] = base64.b64decode(respuesta[propiedad])
            if 'xml' in respuesta:
                # Extract the TimbreFiscalDigital (TFD) attributes from
                # the stamped XML.
                xml_cfdi = ET.fromstring(self.xml)
                tfd = xml_cfdi.xpath('//tfd:TimbreFiscalDigital', namespaces={"tfd": "http://www.sat.gob.mx/TimbreFiscalDigital"})
                self.__dict__['uuid'] = tfd[0].get('UUID')
                self.__dict__['SelloCFD'] = tfd[0].get('SelloCFD')
                self.__dict__['NoCertificadoSAT'] = tfd[0].get('NoCertificadoSAT')
                self.__dict__['SelloSAT'] = tfd[0].get('SelloSAT')
                self.__dict__['FechaTimbrado'] = tfd[0].get('FechaTimbrado')
            if self.debug:
                self.logger.info("\nSOAP request:\n %s" % cliente.last_sent())
                self.logger.info("\nSOAP response:\n %s" % cliente.last_received())
            return True
        except WebFault, e:
            self.__dict__['codigo_error'] = e.fault.faultcode
            self.__dict__['error'] = e.fault.faultstring
            if self.debug:
                self.logger.error("\nSOAP request:\n %s\nSOAP response: [%s] - %s" % (cliente.last_sent(), e.fault.faultcode, e.fault.faultstring))
            return False
        except Exception, e:
            self.__dict__['codigo_error'] = 'Error desconocido'
            self.__dict__['error'] = e.message
            return False
    def cancelar(self, uuid):
        """Cancel the previously stamped CFDI identified by *uuid*.

        Returns True on success, False on failure (self.error is set).
        """
        try:
            cliente = Client(self.url)
            opciones = {'uuid': uuid}
            opciones.update(self.opciones)
            respuesta = cliente.service.requestCancelarCFDI(opciones)
            if self.debug:
                self.logger.info("\nSOAP request:\n %s" % cliente.last_sent())
                self.logger.info("\nSOAP response:\n %s" % cliente.last_received())
            return True
        except WebFault, e:
            self.__dict__['codigo_error'] = e.fault.faultcode
            self.__dict__['error'] = e.fault.faultstring
            if self.debug:
                self.logger.error("\nSOAP request:\n %s\nSOAP response: [%s] - %s" % (cliente.last_sent(), e.fault.faultcode, e.fault.faultstring))
            return False
        except Exception, e:
            self.__dict__['codigo_error'] = 'Error desconocido'
            self.__dict__['error'] = e.message
            return False
    def activarCancelacion(self, archCer, archKey, passKey):
        """Register the CSD certificate/key pair so the service can
        perform cancellations on the issuer's behalf.

        Returns True on success, False on failure (self.error is set).
        """
        try:
            # if archCer and/or archKey are paths to files rather than
            # strings, open and load them
            if os.path.isfile(archCer): archCer = open(archCer, 'r').read()
            if os.path.isfile(archKey): archKey = open(archKey, 'r').read()
            opciones = {}
            opciones['archivoKey'] = base64.b64encode(archKey)
            opciones['archivoCer'] = base64.b64encode(archCer)
            opciones['clave'] = passKey
            self.opciones.update(opciones)
            cliente = Client(self.url)
            respuesta = cliente.service.activarCancelacion(self.opciones)
            if self.debug:
                self.logger.info("\nSOAP request:\n %s" % cliente.last_sent())
                self.logger.info("\nSOAP response:\n %s" % cliente.last_received())
            return True
        except WebFault, e:
            self.__dict__['codigo_error'] = e.fault.faultcode
            self.__dict__['error'] = e.fault.faultstring
            if self.debug:
                self.logger.error("\nSOAP request:\n %s\nSOAP response: [%s] - %s" % (cliente.last_sent(), e.fault.faultcode, e.fault.faultstring))
            return False
        except Exception, e:
            self.__dict__['codigo_error'] = 'Error desconocido'
            self.__dict__['error'] = e.message
            return False
    def _activa_debug(self):
        """Set up a file logger under log/facturacion_moderna.log."""
        if not os.path.exists('log'): os.makedirs('log')
        self.logger = logging.getLogger('facturacion_moderna')
        hdlr = logging.FileHandler('log/facturacion_moderna.log')
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)
@frappe.whitelist()
def prueba_cancelacion(docname, debug = True):
    """Cancel the stamped CFDI linked to Sales Invoice *docname*.

    NOTE(review): '<PASSWORD>' below is a redaction placeholder left by a
    secret scrubber -- the function cannot run as written; the credential
    (presumably c.user_password) must be restored.
    """
    c = frappe.get_doc("Sales Invoice", docname)
    rfc_emisor = c.rfc_emisor
    url_timbrado = c.url_timbrado
    user_id = c.user_id
    user_password = c.user_password
    params = {'emisorRFC': rfc_emisor, 'UserID': user_id, 'UserPass': <PASSWORD>}
    cliente = Cliente(url_timbrado, params, debug)
    # UUID of the receipt (comprobante) to cancel
    uuid = c.uuid
    if cliente.cancelar(uuid):
        frappe.msgprint("Cancelacion Exitosa")
        frappe.db.set_value("Sales Invoice",c.name, 'cfdi_status', 'Cancelado') # RG: this is how the stamping status is changed
    else:
        frappe.msgprint("[%s] - %s" % (cliente.codigo_error, cliente.error))
if __name__ == '__main__':
    # NOTE(review): calling without the required *docname* argument raises
    # TypeError -- this guard looks like leftover scaffolding.
    prueba_cancelacion()
@frappe.whitelist()
def cancelacion(docname,debug = True):
    """Cancel the stamped CFDI *docname* and reset the status of every
    source Sales Invoice it aggregated.

    NOTE(review): '<PASSWORD>' below is a redaction placeholder left by a
    secret scrubber -- the function cannot run as written; the credential
    (presumably c.user_password) must be restored.
    """
    c = frappe.get_doc("CFDI", docname)
    # si = c.ticket
    rfc_emisor = c.rfc_emisor
    url_timbrado = c.url_timbrado
    user_id = c.user_id
    user_password = <PASSWORD>
    params = {'emisorRFC': rfc_emisor, 'UserID': user_id, 'UserPass': <PASSWORD>}
    cliente = Cliente(url_timbrado, params, debug)
    # UUID of the receipt (comprobante) to cancel
    uuid = c.uuid
    if cliente.cancelar(uuid):
        frappe.db.set_value("CFDI",c.name, 'cfdi_status', 'Cancelado') # RG: this is how the stamping status is changed
        # Every invoice referenced by the CFDI items goes back to unstamped.
        for d in c.items:
            frappe.db.set_value("Sales Invoice",d.fuente , 'cfdi_status', 'Sin Timbrar')
        frappe.msgprint("Cancelacion Exitosa")
    else:
        frappe.msgprint("[%s] - %s" % (cliente.codigo_error, cliente.error))
if __name__ == '__main__':
    # NOTE(review): calling without the required *docname* argument raises
    # TypeError -- this guard looks like leftover scaffolding.
    cancelacion()
@frappe.whitelist()
def cancelar_egreso(docname, debug = True):
    """Cancel a stamped credit note ("CFDI Nota de Credito") *docname*.

    NOTE(review): '<PASSWORD>' below is a redaction placeholder left by a
    secret scrubber -- the function cannot run as written; the credential
    (presumably c.user_password) must be restored.
    """
    c = frappe.get_doc("CFDI Nota de Credito", docname)
    rfc_emisor = c.rfc_emisor
    url_timbrado = c.url_timbrado
    user_id = c.user_id
    user_password = <PASSWORD>
    params = {'emisorRFC': rfc_emisor, 'UserID': user_id, 'UserPass': <PASSWORD>}
    cliente = Cliente(url_timbrado, params, debug)
    # UUID of the receipt (comprobante) to cancel
    uuid = c.uuid
    if cliente.cancelar(uuid):
        frappe.msgprint("Cancelacion Exitosa")
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'cfdi_status', 'Cancelado') # RG: this is how the stamping status is changed
    else:
        frappe.msgprint("[%s] - %s" % (cliente.codigo_error, cliente.error))
if __name__ == '__main__':
    # NOTE(review): calling without the required *docname* argument raises
    # TypeError -- this guard looks like leftover scaffolding.
    cancelar_egreso()
@frappe.whitelist()
def prueba_timbrado(docname, debug = True):
    """Generate the layout for Sales Invoice *docname*, stamp it, save
    the returned artifacts (xml/pdf/png/txt), attach the XML to the
    invoice and store the TFD fields on the document.

    NOTE(review): '<PASSWORD>' below is a redaction placeholder left by a
    secret scrubber -- the function cannot run as written; the credential
    (presumably c.user_password) must be restored.
    """
    c = frappe.get_doc("Sales Invoice", docname)
    rfc_emisor = c.rfc_emisor
    url_timbrado = c.url_timbrado
    user_id = c.user_id
    user_password = <PASSWORD>
    cfdif = genera_layout(docname,rfc_emisor)
    params = {'emisorRFC': rfc_emisor, 'UserID': user_id, 'UserPass': <PASSWORD>}
    options = {'generarCBB': True, 'generarPDF': True, 'generarTXT': True}
    cliente = Cliente(url_timbrado, params, debug)
    # Hard-coded deployment paths -- NOTE(review): these break on any
    # other bench layout; consider frappe.get_site_path().
    source = '/home/frappe/frappe-bench/sites/comprobantes/'
    webfolder =c.folder
    dest ='/home/frappe/frappe-bench/sites/' + webfolder + '/public/files/'
    # dest = '/home/frappe/frappe-bench/sites/facturas.posix.mx/public/files/'
    if cliente.timbrar(cfdif, options):
        folder = 'comprobantes'
        if not os.path.exists(folder): os.makedirs(folder)
        comprobante = os.path.join(folder, cliente.uuid)
        # Persist every artifact the stamping service returned.
        for extension in ['xml', 'pdf', 'png', 'txt']:
            if hasattr(cliente, extension):
                with open(("%s.%s" % (comprobante, extension)), 'wb' if extension in ['pdf','png'] else 'w') as f: f.write(getattr(cliente, extension))
        # Strip the 'comprobantes/' prefix (13 characters) to recover the UUID.
        uuid=comprobante[13:]
        shutil.move(source + uuid + ".xml", dest) # RG: move the file from where the stamping service leaves it to where we want it
        shutil.move(source + uuid + ".png", dest) # RG: move the file from where the stamping service leaves it to where we want it
        save_url("/files/" + uuid + ".xml", uuid + ".xml", "Sales Invoice",c.name, "Home/Attachments", 0) # RG: add the file as an attachment # save_url (file_url, filename, dt, dn, folder, is_private)
        frappe.db.set_value("Sales Invoice",c.name, 'cfdi_status', 'Timbrado') # RG: this is how the stamping status is changed
        frappe.db.set_value("Sales Invoice",c.name, 'SelloCFD', cliente.SelloCFD) # RG: this is how the XML values are stored
        frappe.db.set_value("Sales Invoice",c.name, 'FechaTimbrado', cliente.FechaTimbrado)
        frappe.db.set_value("Sales Invoice",c.name, 'uuid', cliente.uuid)
        frappe.db.set_value("Sales Invoice",c.name, 'NoCertificadoSAT', cliente.NoCertificadoSAT)
        frappe.db.set_value("Sales Invoice",c.name, 'SelloSAT', cliente.SelloSAT)
        frappe.db.set_value("Sales Invoice",c.name, 'qr', 'http://' + webfolder + '/files/' + uuid + ".png") # RG: this is how the QR is stored
        frappe.msgprint(str(c.name) + " Timbrada exitosamente " + " " + "<a href='javascript:void(0)' onclick='window.location.reload()'><button class='btn btn-primary btn-sm primary-action' > Agregar XML a Factura</button></a>")
    else:
        frappe.msgprint("ERROR EN TIMBRADO: " + "[%s] - %s" % (cliente.codigo_error, cliente.error))
@frappe.whitelist()
def genera_layout(docname,rfc_emisor):
    """Build the CFDI 3.3 stamping layout (INI-style text) for Sales
    Invoice *docname*, issued by *rfc_emisor*.

    NOTE(review): money amounts are formatted with '%.1f' (one decimal);
    CFDI rules normally require two decimals for MXN -- confirm the
    stamping provider tolerates this.
    """
    # Timestamp shifted -360 minutes: naive UTC-6 (Mexico) adjustment.
    # NOTE(review): ignores DST; consider a proper timezone conversion.
    fecha_actual = (datetime.now()- timedelta(minutes=360)).isoformat()[0:19]
    c = frappe.get_doc("Sales Invoice", docname)
    serie = c.naming_series
    folio = c.name
    # Strip non-ASCII characters the layout format cannot carry.
    nombre_receptor = c.customer_name.encode('ascii', 'ignore').decode('ascii')
    SubTotal='%.1f' % c.net_total
    # SubTotal='%.1f' % c.total
    redondeo = c.rounding_adjustment * -1
    Descuento='%.1f' % (c.discount_amount)
    # Descuento='%.1f' % (c.base_total - c.net_total)
    PorcDescuento=flt((100 - c.additional_discount_percentage) * .01,2)
    factorDesc = flt((c.additional_discount_percentage) * .01,2)
    # Total='%.1f' % (c.base_grand_total + flt(redondeo))  # RG: removed because the rounding caused trouble
    Total='%.1f' % c.grand_total
    # Total='%.1f' % c.base_grand_total
    FormaPago=c.forma_de_pago
    TipoDeComprobante=c.tipo_de_comprobante
    MetodoPago=c.metodo_pago
    LugarExpedicion=c.lugar_expedicion
    NoCertificado=c.no_certificado
    Rfc=c.tax_id
    TotalImpuestosTrasladados='%.1f' % c.total_taxes_and_charges
    TotalIva = '%.1f' % c.taxes[0].tax_amount
    UsoCFDI = c.uso_cfdi
    Nombre = c.nombre_emisor
    RegimenFiscal = c.regimen_fiscal
    # Header section, filled from the local variables above.
    cfdif = """[ComprobanteFiscalDigital]
Version=3.3
Serie={serie}
Folio={folio}
Fecha={fecha_actual}
FormaPago={FormaPago}
NoCertificado={NoCertificado}
Moneda=MXN
TipoDeComprobante={TipoDeComprobante}
MetodoPago={MetodoPago}
LugarExpedicion={LugarExpedicion}
SubTotal={SubTotal}
Descuento={Descuento}
Total={Total}
[Emisor]
Rfc={rfc_emisor}
Nombre={Nombre}
RegimenFiscal={RegimenFiscal}
[Receptor]
Rfc={Rfc}
Nombre={nombre_receptor}
UsoCFDI={UsoCFDI}
""".format(**locals())
    # One [Concepto#N] section per invoice item, with its tax breakdown.
    # NOTE(review): the tax bracket is picked by substring-matching
    # d.item_tax_rate (a JSON string) -- fragile; confirm.
    for d in c.items:
        NoIdentificacion=d.item_code
        ClaveProdServ=d.clave_de_producto
        ClaveUnidad=d.clave_unidad
        Cantidad=d.qty
        Unidad=d.uom
        ValorUnitario='%.1f' % d.net_rate
        # ValorUnitario='%.1f' % d.rate
        Importe='%.1f' % (d.net_amount)
        # Importe='%.1f' % (d.rate * d.qty)
        idx=d.idx
        Descripcion=d.description.encode('ascii', 'ignore').decode('ascii')
        if "16.0" in d.item_tax_rate:
            # 16% IVA (impuesto 002)
            DescuentoItem= '%.1f' % (flt(Importe) * factorDesc)
            PreBase= flt(Importe) - flt(DescuentoItem)
            ImpuestosTrasladosBase= '%.1f' % PreBase
            ImpuestosTrasladosImpuesto="002"
            ImpuestosTrasladosTasaOCuota="0.160000"
            # ImpuestosTrasladosImporte='%.1f' % (PreBase * 0.16)  # RG 23/Jan/2018: changed the decimal to .2 -- it was .1
            ImpuestosTrasladosImporte='%.1f' % ( d.net_amount * 0.16)
        elif "8.0" in d.item_tax_rate:
            # 8% IEPS (impuesto 003)
            DescuentoItem= '%.1f' % ((flt(Importe) * 1.08) * factorDesc)
            PreImporte = flt(Importe) * 0.08
            PreBase= flt(Importe) - flt(DescuentoItem)
            ImpuestosTrasladosBase= '%.1f' % PreBase
            ImpuestosTrasladosImpuesto="003"
            ImpuestosTrasladosTasaOCuota="0.080000"
            ImpuestosTrasladosImporte='%.1f' % ( d.net_amount * 0.08)
            # ImpuestosTrasladosBase= '%.1f' % PreBase
        else:
            # Zero-rated: IVA at 0%
            DescuentoItem= '%.1f' % ((flt(Importe) * factorDesc))
            ImpuestosTrasladosImporte = "0.00"
            ImpuestosTrasladosImpuesto = "002"
            ImpuestosTrasladosTasaOCuota ="0.000000"
            PreBase = flt(Importe) - flt(DescuentoItem)
            ImpuestosTrasladosBase= '%.1f' % PreBase
        cfdif += """[Concepto#{idx}]
ClaveProdServ={ClaveProdServ}
NoIdentificacion={NoIdentificacion}
Cantidad={Cantidad}
ClaveUnidad={ClaveUnidad}
Unidad={Unidad}
ValorUnitario={ValorUnitario}
Descuento={DescuentoItem}
Importe={Importe}
Impuestos.Traslados.Base=[{ImpuestosTrasladosBase}]
Impuestos.Traslados.Impuesto=[{ImpuestosTrasladosImpuesto}]
Impuestos.Traslados.TipoFactor=[Tasa]
Impuestos.Traslados.TasaOCuota=[{ImpuestosTrasladosTasaOCuota}]
Impuestos.Traslados.Importe=[{ImpuestosTrasladosImporte}]
Descripcion={Descripcion}
""".format(**locals())
    # Totals section.  NOTE(review): hard-codes 16% IVA (002) regardless
    # of the per-item brackets chosen above -- confirm.
    cfdif += """[Traslados]
TotalImpuestosTrasladados={TotalImpuestosTrasladados}
Impuesto=[002]
TipoFactor=[Tasa]
TasaOCuota=[0.160000]
Importe=[{TotalIva}]
""".format(**locals())
    # for t in c.taxes:
    # 	if t.rate == 16:
    # 		Impuesto="002"
    # 		TasaOCuota="0.160000"
    # 		Importe='%.1f' % t.tax_amount
    # #		Importe='%.2f' % (t.tax_amount_after_discount_amount)
    # #	if t.description == "IEPS 8":
    # 	elif t.rate==8:
    # 		Impuesto="003"
    # 		TasaOCuota="0.080000"
    # 		Importe='%.2f' % t.tax_amount
    # 	else:
    # 		Impuesto="002"
    # 		TasaOCuota="0.000000"
    # 		Importe='%.2f' % t.tax_amount
    # cfdif += """
    # Impuesto=[{Impuesto}]
    # TipoFactor=[Tasa]
    # TasaOCuota=[{TasaOCuota}]
    # Importe=[{Importe}]
    frappe.errprint(cfdif) # RG: debug only -- shows exactly what is being sent
    return cfdif;
if __name__ == '__main__':
| |
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.2.5"
__status__ = "Prototype"
# NOTE(review): assigning to __name__ at module level overrides Python's
# built-in module name.  startup_logger() below derives the log file name
# from __name__ ("log_QmlReader.log"), which may be the intent, but this
# also breaks any `if __name__ == '__main__'` check for this module --
# confirm this is deliberate.
__name__ = "QmlReader"
# last edited: 2020-04-16
import lxml
from lxml import objectify
import logging
from qmlReader import questionnaire
import re
class QmlReader:
"""
Class for Reading and extracting elements from QML-Files.
"""
    def __init__(self, file):
        """Read the QML file at *file* and populate self.questionnaire.

        The whole extraction pipeline runs here: declared variables,
        pages, transitions, page-body/trigger variables and headers.
        """
        self.file = file
        self.tmp = []
        self.logger = logging.getLogger('debug')
        self.startup_logger(log_level=logging.DEBUG)
        self.logger.info('starting up QmlReader')
        # self.DiGraph = nx.DiGraph()
        # Read as bytes; lxml.objectify handles the XML declaration/encoding.
        with open(file, 'rb') as f:
            self.logger.info('reading file: ' + str(file))
            self.data = f.read()
        self.root = objectify.fromstring(self.data)
        self.title = None
        self.set_title()
        self.questionnaire = questionnaire.Questionnaire(file=self.file, title=self.title)
        # Order matters: variables must be declared before the page passes
        # below can attach them to pages.
        self.extract_declared_variables()
        self.tmp_dict_of_pages = {}
        # self.pgv_graph = None
        # self.extract_pages_into_tmp_dict()
        self.extract_pages_to_self()
        self.extract_transitions_to_self()
        self.extract_variables_from_pages_body()
        self.extract_variables_from_pages_triggers()
        self.extract_headers_and_questions_from_pages()
        self.logger.info("QmlReader object is done.")
    def list_of_variables_from_pages(self):
        # TODO: stub -- not implemented yet; currently returns None.
        pass
def list_of_pages(self):
return list(self.questionnaire.pages.pages.keys())
    def startup_logger(self, log_level=logging.DEBUG):
        """
        Attach a file handler writing to 'log_<__name__>.log'.

        Level reference -- CRITICAL: 50, ERROR: 40, WARNING: 30, INFO: 20,
        DEBUG: 10, NOTSET: 0
        """
        logging.basicConfig(level=log_level)
        # __name__ is overridden to "QmlReader" at module level, so the
        # log file is "log_QmlReader.log".
        fh = logging.FileHandler("{0}.log".format('log_' + __name__))
        fh.setLevel(log_level)
        fh_format = logging.Formatter('%(name)s\t%(module)s\t%(funcName)s\t%(asctime)s\t%(lineno)d\t'
                                      '%(levelname)-8s\t%(message)s')
        fh.setFormatter(fh_format)
        self.logger.addHandler(fh)
    def set_title(self):
        """Store the questionnaire title extracted from the QML root."""
        self.logger.info("set_title")
        self.title = self.extract_title()
    def extract_title(self):
        """Return the text of the <name> element under the QML root."""
        self.logger.info("extract_title")
        return self.root.name.text
    def extract_variables_from_pages_body(self):
        """Attach to each page the variables referenced in its <body>.

        A variable already present on a page is recorded in the page's
        duplicate_variables instead.  Variables shown via '{var.value}'
        text references are added as 'shown' string variables.
        """
        self.logger.info("extract_variables_from_pages_body")
        for pagenr in range(0, len(self.root.page)):
            tmp_pagename = self.root.page[pagenr].attrib['uid']
            if hasattr(self.root.page[pagenr], 'body'):
                for element in self.root.page[pagenr].body.iterdescendants():
                    if 'variable' in element.attrib:  # ToDo: if condition added just for debugging - remove later!
                        tmp_varname = element.attrib['variable']
                        # Only variables declared in the QML header are tracked.
                        if tmp_varname in self.questionnaire.variables.variables.keys():
                            tmp_var_object = self.questionnaire.variables.variables[tmp_varname].set_varplace(
                                varplace='body', varname=tmp_varname)
                            if tmp_varname not in self.questionnaire.pages.pages[
                                tmp_pagename].variables.list_all_vars() and tmp_varname not in \
                                    self.questionnaire.pages.pages[tmp_pagename].duplicate_variables.list_all_vars():
                                self.questionnaire.pages.pages[tmp_pagename].variables.add_variable(tmp_var_object)
                            else:
                                self.logger.info(
                                    'Variable "' + str(tmp_varname) + '" already in self.variables of page "' + str(
                                        tmp_pagename) + '". Possible duplicate.')
                                self.questionnaire.pages.pages[tmp_pagename].duplicate_variables.add_variable(
                                    tmp_var_object, replace=True)
            # Variables displayed as text ('{var.value}') become 'shown'
            # string variables on the page, deduplicated against existing ones.
            shown_var_list = self.return_list_of_shown_variables_in_objectified_element_descendants(
                self.root.page[pagenr])
            for shown_variable in shown_var_list:
                if shown_variable not in self.questionnaire.pages.pages[tmp_pagename].variables.list_all_shown_vars():
                    self.questionnaire.pages.pages[tmp_pagename].variables.add_variable(
                        questionnaire.Variable(varname=shown_variable, vartype='string', varplace='shown'))
            # print(f'## shown: {shown_variable}')
@staticmethod
def return_list_of_shown_variables_in_objectified_element_descendants(
objectified_element: lxml.objectify.ObjectifiedElement) -> list:
tmp_list_text = [element for element in objectified_element.iterdescendants() if
hasattr(element, 'text')]
tmp_list_with_actual_text = [element for element in tmp_list_text if
element.text is not None]
tmp_list_with_shown_variables = [element for element in tmp_list_with_actual_text if '.value}' in element.text]
results_list = []
for entry in tmp_list_with_shown_variables:
if isinstance(entry.text, str):
[results_list.append(found_string) for found_string in
re.findall('\{([a-zA-Z0-9_-]+)\.value\}', entry.text) if found_string not in results_list]
return results_list
    def extract_variables_from_pages_triggers(self):
        """Attach variables referenced by trigger elements to their pages.

        For every page, every descendant of its <triggers> element that carries a
        'variable' attribute is looked up in the questionnaire-wide variable
        registry and added to the page (or to its duplicate_variables collection
        when already present).
        """
        self.logger.info("extract_variables_from_pages_triggers")
        for pagenr in range(0, len(self.root.page)):
            tmp_pagename = self.root.page[pagenr].attrib['uid']
            if hasattr(self.root.page[pagenr], 'triggers'):
                for i in self.root.page[pagenr].triggers.iterdescendants():
                    try:
                        tmp_varname = i.attrib['variable']
                        # assumes set_varplace() returns the variable object
                        # (same pattern is used for varplace='body' above)
                        tmp_var_object = self.questionnaire.variables.variables[tmp_varname].set_varplace(
                            varplace='triggers', varname=tmp_varname)
                        if tmp_varname not in self.questionnaire.pages.pages[
                            tmp_pagename].variables.list_all_vars() and tmp_varname not in \
                                self.questionnaire.pages.pages[tmp_pagename].duplicate_variables.list_all_vars():
                            self.questionnaire.pages.pages[tmp_pagename].variables.add_variable(tmp_var_object)
                        else:
                            self.logger.info(
                                'Variable "' + str(tmp_varname) + '" already in self.variables of page "' + str(
                                    tmp_pagename) + '". Possible duplicate.')
                            self.questionnaire.pages.pages[tmp_pagename].duplicate_variables.add_variable(
                                tmp_var_object, replace=True)
                    except KeyError:
                        # swallows both: descendants without a 'variable'
                        # attribute AND variables missing from the registry
                        pass
def extract_declared_variables(self):
self.logger.info("extract_declared_variables")
for i in range(0, len(self.root.variables.variable)):
# print(self.questionnaire.filename)
# print(self.root.variables.variable[i].attrib['name'])
self.questionnaire.variables.add_variable(
questionnaire.Variable(self.root.variables.variable[i].attrib["name"],
self.root.variables.variable[i].attrib["type"]))
# def extract_pages_into_tmp_dict(self):
# self.logger.info("extract_pages_into_tmp_dict")
# for i in range(0, len(self.root.page)):
# self.tmp_dict_of_pages[self.root.page[i].attrib['uid']] = self.root.page[i]
def extract_pages_to_self(self):
self.logger.info("extract_pages_to_self")
for i in range(0, len(self.root.page)):
tmp_qml_page_source = self.root.page[i]
tmp_page_uid = tmp_qml_page_source.attrib['uid']
self.questionnaire.pages.add_page(questionnaire.QmlPage(tmp_page_uid, declared=True))
# self.extract_transitions_from_qml_page_source(tmp_qml_page_source, tmp_page_uid)
def extract_transitions_to_self(self):
self.logger.info("extract_transitions_to_self")
for i in range(0, len(self.root.page)):
tmp_qml_page_source = self.root.page[i]
tmp_page_uid = tmp_qml_page_source.attrib['uid']
# self.questionnaire.pages.add_page(questionnaire.QmlPage(tmp_page_uid, declared=True))
self.extract_transitions_from_qml_page_source(tmp_qml_page_source, tmp_page_uid)
def extract_transitions_from_qml_page_source(self, qml_source_page, uid):
self.logger.info("extract_transitions_from_qml_page_source from page: " + str(uid))
assert isinstance(qml_source_page, lxml.objectify.ObjectifiedElement)
assert isinstance(uid, str)
if hasattr(qml_source_page, 'transitions'):
if hasattr(qml_source_page.transitions, 'transition'):
i = -1
for transition in qml_source_page.transitions.transition:
i += 1
tmp_index = i
tmp_transition_dict = transition.attrib
tmp_target = tmp_transition_dict['target']
source_page_index = self.list_of_pages().index(uid)
if tmp_target not in self.list_of_pages():
self.questionnaire.pages.add_page(qmlpage=questionnaire.QmlPage(uid=tmp_target, declared=False))
target_page_index = self.list_of_pages().index(tmp_target)
tmp_distance = target_page_index - source_page_index
if 'condition' in tmp_transition_dict:
tmp_condition = tmp_transition_dict['condition']
else:
tmp_condition = None
tmp_transition_object = questionnaire.Transition(index=tmp_index,
target=tmp_target,
condition=tmp_condition,
source=uid,
distance=tmp_distance)
self.questionnaire.pages.pages[uid].transitions.add_transitions(tmp_transition_object)
# add transition to sources for each page
self.questionnaire.pages.pages[tmp_target].sources.add_source(tmp_transition_object)
    def extract_questions_from_pages(self):
        """Extract question objects from all pages (not yet implemented)."""
        self.logger.info("extract_questions_from_pages")
        pass
def extract_headers_and_questions_from_pages(self):
self.logger.info("extract_headers_from_pages")
for i in range(0, len(self.root.page)):
tmp_qml_page_source = self.root.page[i]
tmp_page_uid = tmp_qml_page_source.attrib['uid']
self.extract_page_headers_from_qml_page_source(tmp_qml_page_source, tmp_page_uid)
self.extract_question_objects_from_qml_page_source(tmp_qml_page_source, tmp_page_uid)
    def extract_page_headers_from_qml_page_source(self, qml_source_page, page_uid):
        """Extract all header objects of one page into PageHeaderObject instances.

        :param qml_source_page: objectified <page> element.
        :param page_uid: uid of that page.
        """
        # NOTE(review): the log label below does not match the method name
        self.logger.info("extract_page_headers_from_page_sources; uid: " + str(page_uid))
        assert isinstance(qml_source_page, lxml.objectify.ObjectifiedElement)
        assert isinstance(page_uid, str)
        if hasattr(qml_source_page, 'header'):
            self.logger.info("  found page header")
            i = -1
            # the comprehension only counts the header's children
            if len([i for i in qml_source_page.header.iterchildren()]) > 0:
                self.logger.info("  page header has length > 0")
                for header in qml_source_page.header.iterchildren():
                    tmp_object = None
                    i += 1
                    tmp_index = i
                    self.logger.info("  page header object - index: " + str(i))
                    if 'uid' not in header.attrib:
                        if hasattr(header, 'tag'):
                            # NOTE(review): lxml comment nodes have a callable
                            # .tag, and element tags are usually namespaced, so
                            # the == 'comment' comparison may never match — confirm.
                            if header.tag == 'comment':
                                self.logger.info("  found page header object: xml comment, ignored")
                            else:
                                self.logger.error(
                                    '  found object in page header of ' + str(page_uid) + ' that could not be read.')
                        # headers without a uid are skipped entirely
                        continue
                    tmp_uid = header.attrib['uid']
                    self.logger.info("  page header object - uid: " + str(tmp_uid))
                    if header.text is not None:
                        tmp_text = header.text
                    else:
                        tmp_text = ''
                    self.logger.info("  page header object - text: '" + str(tmp_text) + "'")
                    if 'visible' in header.attrib:
                        tmp_visible_conditions = header.attrib['visible']
                        self.logger.info("  found visible condition: " + str(tmp_visible_conditions))
                    else:
                        tmp_visible_conditions = None
                        self.logger.info("  found visible condition: None")
                    # strip the XML namespace prefix '{...}' from the tag
                    tmp_tag = header.tag[header.tag.rfind('}') + 1:]
                    self.logger.info("  found tag: '" + str(tmp_tag) + "'")
                    tmp_object = questionnaire.PageHeaderObject(uid=tmp_uid, tag=tmp_tag, text=tmp_text,
                                                                index=tmp_index,
                                                                visible_conditions=tmp_visible_conditions)
                    self.logger.info(
                        "  adding PageHeaderObject: '" + str(tmp_object.tag) + "' to page: " + str(page_uid))
                    self.questionnaire.pages.pages[page_uid].header.add_header_object(tmp_object)
            else:
                self.logger.info("  page header has length == 0 and will be ignored")
        else:
            self.logger.info("  no page header found")
def extract_question_objects_from_qml_page_source(self, qml_source_page, page_uid):
self.logger.info("extract_question_objects_from_qml_page_source; uid: " + str(page_uid))
assert isinstance(qml_source_page, lxml.objectify.ObjectifiedElement)
assert isinstance(page_uid, str)
if hasattr(qml_source_page, 'body'):
i = 0
self.logger.info(' body found on page "' + str(page_uid) + '".')
if 'uid' in qml_source_page.body.attrib:
tmp_body_uid = qml_source_page.body.attrib['uid']
else:
# ToDo: check if this can be set to None instead of str
tmp_body_uid = 'None'
for element in qml_source_page.body.iterchildren():
tmp_tag = element.tag[element.tag.rfind('}') + 1:]
if tmp_tag in ['calendar', 'comparison', 'display', 'matrixDouble', 'matrixQuestionMixed',
'matrixQuestionOpen', 'matrixQuestionSingleChoice', 'multipleChoice', 'questionOpen',
'questionPretest', 'questionSingleChoice']:
tmp_index = i
i += 1
if tmp_tag == 'calendar':
tmp_question_header_object = self.extract_question_header_from_qml_element_source(element, page_uid)
elif tmp_tag == 'comparison':
pass
elif tmp_tag == 'display':
pass
elif tmp_tag == 'matrixDouble':
pass
elif tmp_tag == 'matrixMultipleChoice':
pass
elif tmp_tag == 'matrixQuestionMixed':
pass
elif tmp_tag == 'matrixQuestionOpen':
pass
elif tmp_tag == 'matrixQuestionSingleChoice':
list_of_items_aos = []
list_of_elements = []
for entry in element.iterdescendants():
if entry.tag[entry.tag.rfind('}') + 1:] == 'item':
list_of_elements.append(entry)
for item in list_of_elements:
list_of_answeroptions = []
for item_element in item.iterdescendants():
print('444')
if item_element.tag[item_element.tag.rfind('}') + 1:] == 'answerOption':
tmp_value = None
if 'label' in item_element.attrib:
tmp_value = item_element.attrib['label']
list_of_answeroptions.append(tmp_value)
list_of_items_aos.append(tuple(list_of_answeroptions))
if list_of_items_aos:
if len(set(list_of_items_aos)) != 1:
print(page_uid)
print(list_of_items_aos)
elif tmp_tag == 'multipleChoice':
pass
elif tmp_tag == 'questionOpen':
pass
elif tmp_tag == 'questionPretest':
pass
elif tmp_tag == 'questionSingleChoice':
pass
# a = self.find_tag_in_descendants(element, 'responseDomain')
# b = self.find_attribute_in_descendants(element, 'responseDomain', 'type', 'dropDown')
# # ToDo
# ## self.questionnaire.pages.pages[page_uid].questions.add_question_object()
#
# (self.extract_question_header_from_qml_element_source(element, page_uid))
if tmp_tag == 'section':
pass
pass
@staticmethod
def find_tag_in_descendants(objectified_xml_element: lxml.objectify.ObjectifiedElement, tag_str: str) -> bool:
found_element_bool = False
y = []
for entry in objectified_xml_element.iterdescendants():
if entry.tag[entry.tag.rfind('}') + 1:] == tag_str:
found_element_bool = True
return found_element_bool
@staticmethod
def find_attribute_in_descendants(objectified_xml_element: lxml.objectify.ObjectifiedElement, tag_str: str,
attribute_str: str, value_str: str) -> bool:
found_element_bool = False
y = []
for entry in objectified_xml_element.iterdescendants():
if entry.tag[entry.tag.rfind('}') + 1:] == tag_str:
y.append(entry)
for entry in y:
if hasattr(y[0], tag_str) is True:
if attribute_str in entry.attrib:
if entry.attrib[attribute_str] == value_str:
found_element_bool = True
return found_element_bool
    @staticmethod
    def find_question_type_class_to_tag_string(string):
        """Map a QML tag string to a question type class.

        Stub: always returns an empty tuple; the commented mapping below shows
        the intended dispatch table.
        """
        # tmp_dict = {'calendar': Questionnaire.BodyCalendar, 'comparison': Questionnaire.BodyComparison, 'display':, 'matrixDouble':, 'matrixQuestionMixed':, 'matrixQuestionOpen':, 'matrixQuestionSingleChoice':, 'multipleChoice':, 'questionOpen':, 'questionPretest':, 'questionSingleChoice':}
        return ()
    def extract_response_domains_from_question(self):
        """Extract response domains from questions (not yet implemented)."""
        self.logger.info("extract_response_domains_from_question")
        pass
    def extract_items_from_response_domain(self):
        """Extract items from a response domain (not yet implemented)."""
        self.logger.info("extract_items_from_response_domain")
        pass
    def extract_answeroptions_from_response_domain(self):
        """Extract answer options from a response domain (not yet implemented)."""
        self.logger.info("extract_answeroptions_from_response_domain")
        pass
# ToDo: move this method to questionnaire, fix the ToDos below
def extract_sources_from_questionnaire(self):
self.logger.info("extract_sources_from_questionnaire")
tmp_dict_of_additional_pages = {}
for page in self.questionnaire.pages.pages.values():
for transition in page.transitions.transitions.values():
# ToDo: (see below) the following is just a workaround until option "combine" is implemented issue#9
if transition.target in self.questionnaire.pages.pages.keys():
self.questionnaire.pages.pages[transition.target].sources.add_source(page.uid)
else:
tmp_dict_of_additional_pages[transition.target] = page.uid
# ToDo: (see above) the following is just a workaround until option "combine" is implemented issue#9
for newpagename in tmp_dict_of_additional_pages.keys():
self.questionnaire.pages.add_page(questionnaire.QmlPage(newpagename, declared=False))
self.questionnaire.pages.pages[newpagename].sources.add_source(tmp_dict_of_additional_pages[newpagename])
    def extract_triggers_from_pages(self):
        """Extract triggers from all pages (not yet implemented)."""
        self.logger.info("extract_triggers_from_pages")
        pass
    def extract_question_from_qml_page(self, qml_page):
        """Extract a question from one QML page (stub; only validates input).

        :param qml_page: objectified <page> element.
        """
        self.logger.info("extract_question_from_qml_page")
        assert isinstance(qml_page, lxml.objectify.ObjectifiedElement)
    def extract_triggers_from_qml_page(self, qml_page):
        """Extract triggers from one QML page (stub; only validates input).

        :param qml_page: objectified <page> element.
        """
        self.logger.info("extract_triggers_from_qml_page")
        assert isinstance(qml_page, lxml.objectify.ObjectifiedElement)
# def draw_pgv_graph(self, output_file='output_file.png'):
# self.pgv_graph.draw(output_file)
def extract_question_header_from_qml_element_source(self, qml_source_element, page_uid):
flag_question = False
flag_instruction = False
flag_introduction = False
tmp_header = questionnaire.QuestionHeader()
if hasattr(qml_source_element, 'header'):
for header_question_object in qml_source_element.header.iterchildren():
j = 0
if hasattr(header_question_object, 'tag'):
if header_question_object.tag[header_question_object.tag.rfind('}') + 1:] == 'question':
self.logger.info(' tag "question" found')
elif header_question_object.tag[header_question_object.tag.rfind('}') + 1:] == 'instruction':
self.logger.info(' tag "instruction" found')
elif header_question_object.tag[header_question_object.tag.rfind('}') + 1:] == 'introduction':
self.logger.info(' tag "introduction" found')
elif header_question_object.tag[header_question_object.tag.rfind('}') + 1:] == 'comment':
self.logger.info(' comment found, ignored')
continue
elif | |
"""
aav.readers
~~~~~~~~~~~
:copyright: (c) 2018 <NAME>
:copyright: (c) 2018 Leiden University Medical Center
:license: MIT
"""
from functools import reduce
from math import log10
from typing import Optional, List, Type, Tuple, Set
import logging
from .variation import (Variant, InfoFieldNumber, InfoField, Genotype,
InfoHeaderLine, GT_FORMAT, VCF_v_4_2,
program_header, date_header, chrom_header,
InfoFieldType)
from .lookup import RSLookup
from .utils import comma_float, empty_string
GRCH37_LOOKUP = RSLookup("GRCh37")
GRCH38_LOOKUP = RSLookup("GRCh38")
logger = logging.getLogger('ArrayReader')
class Reader(object):
    """
    Generic reader object.

    Readers are iterators that produce variants; subclasses implement
    __next__ for their specific file format.
    """
    def __init__(self, path: str, n_header_lines: int = 0,
                 encoding: Optional[str] = None):
        """
        :param path: path of the file to read.
        :param n_header_lines: number of header lines to consume up front.
        :param encoding: optional text encoding of the file.
        """
        self.path = path
        self.handle = open(path, mode="r", encoding=encoding)
        # consume and remember the file's own header lines
        self.header_lines = [next(self.handle) for _ in range(n_header_lines)]
        # VCF header lines that vcf_header() will emit
        self.header_fields = [
            VCF_v_4_2, date_header(), program_header(), GT_FORMAT
        ]
    def __next__(self) -> Variant:
        raise NotImplementedError
    def __iter__(self):
        return self
    def vcf_header(self, sample_name: str) -> str:
        """Build the complete VCF header block for *sample_name*."""
        lines = "".join(str(field) + "\n" for field in self.header_fields)
        return lines + chrom_header(sample_name) + '\n'
class OpenArrayReader(Reader):
    """Reader for OpenArray export files (18 header lines, tab-separated).

    Column positions are resolved by name from the last header line, so the
    exact column order of the export does not matter.
    """
    def __init__(self, path: str, lookup_table: RSLookup, sample: str,
                 qual: int = 100, prefix_chr: Optional[str] = None,
                 encoding: Optional[str] = None,
                 exclude_assays: Optional[Set[str]] = None):
        """
        :param path: path of the OpenArray export file.
        :param lookup_table: rs id -> reference/alternate allele lookup.
        :param sample: only rows with this sample id are emitted.
        :param qual: fixed QUAL value for emitted variants.
        :param prefix_chr: optional prefix (e.g. "chr") for chromosome names.
        :param encoding: optional text encoding of the file.
        :param exclude_assays: assay ids to skip entirely.
        """
        super().__init__(path, n_header_lines=18, encoding=encoding)
        self.qual = qual
        self.sample = sample
        self.lookup_table = lookup_table
        self.prefix_chr = prefix_chr
        self.linecount = 18  # n_header_lines
        # calls that indicate no genotype could be determined
        self.unknown_call = {'INV', 'NOAMP', 'UND', '-/-'}
        if exclude_assays is not None:
            self.exclude_assays = exclude_assays
        else:
            self.exclude_assays = set()
        self.header_fields += [
            InfoHeaderLine("Assay_Name", InfoFieldNumber.one,
                           InfoFieldType.STRING),
            InfoHeaderLine("Assay_ID", InfoFieldNumber.one,
                           InfoFieldType.STRING),
            InfoHeaderLine("Gene_Symbol", InfoFieldNumber.unknown,
                           InfoFieldType.STRING)
        ]
        # the last header line holds the column names
        self._header_splitted = self.header_lines[-1].strip().split("\t")
    @property
    def chromsome_col_idx(self) -> int:
        """Index of the chromosome column. (Name misspelled; kept for callers.)"""
        return self._header_splitted.index("Chromosome #")
    @property
    def position_col_idx(self) -> int:
        """Index of the position column."""
        return self._header_splitted.index("Position")
    @property
    def sample_col_idx(self) -> int:
        """Index of the sample id column."""
        return self._header_splitted.index("Sample ID")
    @property
    def rsid_col_idx(self) -> int:
        """Index of the rs id column."""
        return self._header_splitted.index("NCBI SNP Reference")
    @property
    def assay_name_col_idx(self) -> int:
        """Index of the assay name column."""
        return self._header_splitted.index("Assay Name")
    @property
    def assay_id_col_idx(self) -> int:
        """Index of the assay id column."""
        return self._header_splitted.index("Assay ID")
    @property
    def gene_symbol_col_idx(self) -> int:
        """Index of the gene symbol column."""
        return self._header_splitted.index("Gene Symbol")
    @property
    def call_col_idx(self) -> int:
        """Index of the call column."""
        return self._header_splitted.index("Call")
    def __next__(self):
        """Return the next variant for the configured sample.

        Rows with missing mandatory fields, foreign samples, excluded assays
        or unknown rs ids are skipped with a debug message.
        """
        for raw_line in self.handle:
            self.linecount += 1
            if empty_string(raw_line):
                raise StopIteration  # end of initial list
            line = raw_line.strip('\n').split("\t")
            if len(line) < 8:  # may occur if assay design is dumped in file
                # fixed typo: "to few" -> "too few"
                logger.debug(f"Skipping line {self.linecount}, too few columns")
                continue
            assay_id = line[self.assay_id_col_idx]
            if assay_id in self.exclude_assays:
                # bug fix: this message was missing the f-prefix and logged
                # the literal text "{assay_id}"
                logger.debug(f"Skipping excluded assay {assay_id}")
                continue
            line_sample = line[self.sample_col_idx]
            if line_sample != self.sample:
                logger.debug(f"Skipping line {self.linecount}, wrong sample "
                             f"({line_sample} is not {self.sample})")
                continue
            rs_id = line[self.rsid_col_idx].strip()  # may have spaces :cry:
            try:
                raw_chrom = line[self.chromsome_col_idx]
            except IndexError:  # sometimes the entire row is truncated
                logger.debug((f"Skipping line {self.linecount}, entire row "
                              "truncated"))
                continue
            pos = line[self.position_col_idx]
            # Skip if fields we need are missing
            if empty_string(raw_chrom):
                logger.debug((f"Skipping line {self.linecount}, missing "
                              "chromosome"))
                continue
            if empty_string(pos):
                logger.debug((f"Skipping line {self.linecount}, missing "
                              "position"))
                continue
            if empty_string(rs_id):
                logger.debug((f"Skipping line {self.linecount}, missing "
                              "rs_id"))
                continue
            # Also skip if the rs_id is not in the lookup_table
            try:
                q_res = self.lookup_table[rs_id]
            except KeyError:
                logger.debug(f"Skipping {rs_id}, transcript not found")
                continue
            else:
                call = line[self.call_col_idx]
                ref = q_res.ref
                genotype, alt = self.get_genotype_and_alt(call, ref, q_res.alt)
                assay_name = line[self.assay_name_col_idx]
                raw_gene_symbol = line[self.gene_symbol_col_idx]
                infos = [
                    InfoField("Assay_Name", assay_name, InfoFieldNumber.one),
                    InfoField("Assay_ID", assay_id, InfoFieldNumber.one)
                ]
                if not empty_string(raw_gene_symbol):
                    infos.append(InfoField("Gene_Symbol",
                                           raw_gene_symbol.split(";"),
                                           InfoFieldNumber.unknown))
                chrom = self.get_chrom(raw_chrom)
                return Variant(chrom=chrom, pos=int(pos), id=rs_id, ref=ref,
                               alt=alt, info_fields=infos, qual=self.qual,
                               genotype=genotype)
        else:
            raise StopIteration
    def get_chrom(self, chrom: str) -> str:
        """Return *chrom* with the configured prefix applied, if any."""
        if self.prefix_chr is None:
            return chrom
        return "{0}{1}".format(self.prefix_chr, chrom)
    def get_genotype_and_alt(self, call: str, ref: str,
                             fallback_alt: str) -> Tuple[Genotype, str]:
        """Derive the genotype and ALT allele from an OpenArray call string.

        :param call: raw call, e.g. "A/G", "INV" or "NOAMP".
        :param ref: reference allele from the lookup table.
        :param fallback_alt: ALT reported for hom-ref calls (no observed alt).
        :return: (genotype, alt allele); alt is "." when unknown.
        """
        # These are calls that indicate that no genotype could be determined
        if call in self.unknown_call:
            msg = f"Recognised {call}, which has no genotype information"
            logger.debug(msg)
            return Genotype.unknown, "."
        alleles = set(call.split("/"))
        # These are alleles that are not handled by this tool, so we print an
        # error when we encounter them
        for allele in alleles:
            if not set(allele).issubset({"A", "T", "C", "G", "N"}):
                logger.error(f"Skipping Unknown call {call}")
                return Genotype.unknown, "."
        if len(alleles) > 1:
            # heterozygous: report the non-reference allele as ALT
            return Genotype.het, (alleles - {ref}).pop()
        homozygous_allele = alleles.pop()
        if homozygous_allele == ref:
            return Genotype.hom_ref, fallback_alt
        else:
            return Genotype.hom_alt, homozygous_allele
class AffyReader(Reader):
    """
    Affymetrix files are expected to conform to the following spec:
    ID AffymetrixSNPsID rsID Chromosome Position log2ratio_AB N_AB Call_test LOH_likeihood # noqa
    Call_test follows the following scheme:
    0: unknown
    1: hom_ref
    2: het
    3: hom_alt
    """
    def __init__(self, path: str,
                 lookup_table: RSLookup,
                 qual: int = 100,
                 prefix_chr: Optional[str] = None,
                 encoding: Optional[str] = None):
        """
        :param path: path of the Affymetrix file (one header line).
        :param lookup_table: rs id -> reference/alternate allele lookup.
        :param qual: fixed QUAL value for emitted variants.
        :param prefix_chr: optional prefix (e.g. "chr") for chromosome names.
        :param encoding: optional text encoding of the file.
        """
        super().__init__(path, n_header_lines=1, encoding=encoding)
        self.qual = qual
        self.prefix_chr = prefix_chr
        self.lookup_table = lookup_table
        self.header_fields += [
            InfoHeaderLine("ID", InfoFieldNumber.one, InfoFieldType.STRING),
            InfoHeaderLine("AffymetrixSNPsID", InfoFieldNumber.one,
                           InfoFieldType.STRING),
            InfoHeaderLine("log2ratio_AB", InfoFieldNumber.one,
                           InfoFieldType.FLOAT),
            InfoHeaderLine("N_AB", InfoFieldNumber.one, InfoFieldType.INT),
            InfoHeaderLine("LOH_likelihood", InfoFieldNumber.one,
                           InfoFieldType.FLOAT)
        ]
    def __next__(self) -> Variant:
        """Return the next variant; rows with unknown or incomplete rs ids are
        skipped with an info message."""
        for raw_line in self.handle:
            line = raw_line.strip('\n').split("\t")
            chrom = self.get_chrom(line[3])
            pos = int(line[4])
            rs_id = line[2]
            try:
                q_res = self.lookup_table[rs_id]
            except KeyError:
                logger.info(f"Skipping {rs_id}, transcript not found")
                continue
            else:
                if q_res is None or q_res.ref_is_minor is None:
                    logger.info(f"Skipping {rs_id}, incomplete data: {q_res}")
                    continue
                else:
                    ref = q_res.ref
                    alt = q_res.alt
                    ref_is_minor = q_res.ref_is_minor
                    gt = self.get_gt(int(line[7]), ref_is_minor)
                    infos = [
                        InfoField("ID", line[0], InfoFieldNumber.one),
                        InfoField("AffymetrixSNPsID", line[1], InfoFieldNumber.one),
                        InfoField("log2ratio_AB", line[5], InfoFieldNumber.one),
                        InfoField("N_AB", line[6], InfoFieldNumber.one),
                        InfoField("LOH_likelihood", line[8], InfoFieldNumber.one)
                    ]
                    return Variant(chrom=chrom, pos=pos, ref=ref, alt=alt,
                                   qual=self.qual, id=rs_id, info_fields=infos,
                                   genotype=gt)
        else:
            # for/else: reached only when the file is exhausted
            raise StopIteration
    def get_gt(self, val: int, ref_is_minor: bool) -> Genotype:
        """Map a Call_test value to a Genotype.

        NOTE(review): the class docstring says 1 = hom_ref and 3 = hom_alt, but
        both map to Genotype.unknown here, and *ref_is_minor* is never used.
        Possibly deliberate (homozygous array calls distrusted) — confirm intent.
        """
        if val == 0:
            return Genotype.unknown
        elif val == 1:
            return Genotype.unknown
        elif val == 2:
            return Genotype.het
        elif val == 3:
            return Genotype.unknown
        return Genotype.unknown
    def get_chrom(self, val):
        """Normalize a chromosome value: 23 = X; apply the optional prefix."""
        if val == "23":
            val = "X"
        if self.prefix_chr is not None:
            return "{0}{1}".format(self.prefix_chr, val)
        return val
class CytoScanReader(Reader):
    """
    Cytoscan files are expected to conform to the following spec
    They have 12 header lines, with the following columns:
    Probe Set ID	Call Codes	Confidence	Signal A	Signal B	Forward Strand Base Calls	dbSNP RS ID	Chromosome	Chromosomal Position  # noqa
    """
    def __init__(self, path,
                 lookup_table: RSLookup,
                 prefix_chr: Optional[str] = None,
                 encoding: Optional[str] = None):
        """
        :param path: path of the CytoScan file (12 header lines).
        :param lookup_table: rs id -> reference/alternate allele lookup.
        :param prefix_chr: optional prefix (e.g. "chr") for chromosome names.
        :param encoding: optional text encoding of the file.
        """
        super().__init__(path, 12, encoding=encoding)
        self.prefix_chr = prefix_chr
        self.lookup_table = lookup_table
        self.header_fields += [
            InfoHeaderLine("Probe_Set_ID", InfoFieldNumber.one,
                           InfoFieldType.STRING),
            InfoHeaderLine("Signal_A", InfoFieldNumber.one,
                           InfoFieldType.FLOAT),
            InfoHeaderLine("Signal_B", InfoFieldNumber.one,
                           InfoFieldType.FLOAT)
        ]
    def __next__(self) -> Variant:
        """Return the next variant; rows with unknown or incomplete rs ids are
        skipped with a debug message."""
        for raw_line in self.handle:
            line = raw_line.strip('\n').split("\t")
            chrom = self.get_chrom(line[7])
            pos = int(line[8])
            rs_id = line[6]
            try:
                q_res = self.lookup_table[rs_id]
            except KeyError:
                logger.debug(f"Skipping {rs_id}, transcript not found")
                continue
            else:
                if q_res is None or q_res.ref_is_minor is None:
                    logger.debug(f"Skipping {rs_id}, incomplete data: {q_res}")
                    continue
                else:
                    ref = q_res.ref
                    alt = q_res.alt
                    gt = self.get_genotype(ref, alt, line[5])
                    qual = self.get_qual(float(line[2]))
                    infos = [
                        InfoField("Probe_Set_ID", line[0], InfoFieldNumber.one),
                        InfoField("Signal_A", line[3], InfoFieldNumber.one),
                        InfoField("Signal_B", line[4], InfoFieldNumber.one)
                    ]
                    return Variant(chrom=chrom, pos=pos, ref=ref, alt=alt, id=rs_id,
                                   qual=qual, info_fields=infos, genotype=gt)
        else:
            # for/else: reached only when the file is exhausted
            raise StopIteration
    def get_genotype(self, ref: str, alt: List[str], calls: str) -> Genotype:
        """Derive a Genotype from the forward-strand base calls string.

        NOTE(review): *alt* is iterated element-wise; if the lookup ever yields
        a plain string instead of a list it would be compared per character —
        confirm the type produced by RSLookup.
        """
        if calls is None or calls == "":
            return Genotype.unknown
        # each character of the calls string is treated as one allele
        alleles = list(calls)
        if len(set(alleles)) > 1:
            return Genotype.het
        elif alleles[0] == ref:
            return Genotype.hom_ref
        elif any([alleles[0] == x for x in alt]):
            return Genotype.hom_alt
        else:
            return Genotype.unknown
    def get_chrom(self, chrom: str) -> str:
        """Return *chrom* with the configured prefix applied, if any."""
        if self.prefix_chr is None:
            return chrom
        return "{0}{1}".format(self.prefix_chr, chrom)
    def get_qual(self, confidence: float) -> float:
        """Convert the confidence column to a Phred-style quality.

        NOTE(review): confidence == 0 yields qual 0 (guards against log10(0));
        whether 0 should instead mean maximal quality is unconfirmed.
        """
        if confidence == 0:
            return 0
        return -10 * log10(confidence)
class LumiReader(Reader):
"""
Lumi readers have one header line.
They required rsID lookups.
The first two columns (rs id and chr) may be switched around
"""
    def __init__(self, path: str,
                 lookup_table: RSLookup,
                 prefix_chr: Optional[str] = None,
                 qual: int = 100,
                 encoding: Optional[str] = None):
        """
        :param path: path of the Lumi file (one header line).
        :param lookup_table: rs id -> reference/alternate allele lookup.
        :param prefix_chr: optional prefix (e.g. "chr") for chromosome names.
        :param qual: fixed QUAL value for emitted variants.
        :param encoding: optional text encoding of the file.
        """
        super().__init__(path, n_header_lines=1, encoding=encoding)
        self.lookup_table = lookup_table
        # stored as chr_prefix (other readers call this prefix_chr)
        self.chr_prefix = prefix_chr
        self.qual = qual
        self.header_fields += [
            InfoHeaderLine("Log_R_Ratio", InfoFieldNumber.one,
                           InfoFieldType.FLOAT),
            InfoHeaderLine("CNV_Value", InfoFieldNumber.one,
                           InfoFieldType.INT),
            InfoHeaderLine("Allele_Freq", InfoFieldNumber.one,
                           InfoFieldType.FLOAT)
        ]
    def __next__(self) -> Variant:
        """Return the next variant.

        rs id and chromosome columns are resolved via get_rs_id()/get_raw_chrom()
        because their order differs between Lumi variants; rows with unknown or
        incomplete rs ids are skipped with an info message.
        """
        for raw_line in self.handle:
            line = raw_line.strip('\n').split("\t")
            rs_id = self.get_rs_id(line)
            raw_chrom = self.get_raw_chrom(line)
            chrom = self.get_chrom(raw_chrom)
            pos = int(line[2])
            g_type = line[3]
            try:
                q_res = self.lookup_table[rs_id]
            except KeyError:
                logger.info(f"Skipping {rs_id}, transcript not found")
                continue
            else:
                if q_res is None or q_res.ref_is_minor is None:
                    logger.info(f"Skipping {rs_id}, incomplete data: {q_res}")
                    continue
                else:
                    ref = q_res.ref
                    alt = q_res.alt
                    ref_is_minor = q_res.ref_is_minor
                    gt = self.get_genotype(g_type, ref_is_minor)
                    # numeric columns use comma decimal separators
                    infos = [
                        InfoField(
                            "Log_R_Ratio", comma_float(line[4]), InfoFieldNumber.one
                        ),
                        InfoField(
                            "CNV_Value", int(line[5]), InfoFieldNumber.one
                        ),
                        InfoField(
                            "Allele_Freq", comma_float(line[6]), InfoFieldNumber.one
                        )
                    ]
                    return Variant(chrom=chrom, pos=pos, ref=ref, alt=alt,
                                   qual=self.qual, id=rs_id, info_fields=infos,
                                   genotype=gt)
        else:
            # for/else: reached only when the file is exhausted
            raise StopIteration
def get_chrom(self, chrom: str) -> str:
if self.chr_prefix is None:
return chrom
return "{0}{1}".format(self.chr_prefix, chrom)
    def get_rs_id(self, line: List[str]) -> str:
        """Return the rs id column of *line*; implemented by subclasses because
        the first two columns may be switched around."""
        raise NotImplementedError
def get_raw_chrom(self, line: List[str]) -> str:
raise | |
= 0x838F
FancyLedB144 = 0x8390
FancyLedB145 = 0x8391
FancyLedB146 = 0x8392
FancyLedB147 = 0x8393
FancyLedB148 = 0x8394
FancyLedB149 = 0x8395
FancyLedB150 = 0x8396
FancyLedB151 = 0x8397
FancyLedB152 = 0x8398
FancyLedB153 = 0x8399
FancyLedB154 = 0x839A
FancyLedB155 = 0x839B
FancyLedB156 = 0x839C
FancyLedB157 = 0x839D
FancyLedB158 = 0x839E
FancyLedB159 = 0x839F
FancyLedB160 = 0x83A0
FancyLedB161 = 0x83A1
FancyLedB162 = 0x83A2
FancyLedB163 = 0x83A3
FancyLedB164 = 0x83A4
FancyLedB165 = 0x83A5
FancyLedB166 = 0x83A6
FancyLedB167 = 0x83A7
FancyLedB168 = 0x83A8
FancyLedB169 = 0x83A9
FancyLedB170 = 0x83AA
FancyLedB171 = 0x83AB
FancyLedB172 = 0x83AC
FancyLedB173 = 0x83AD
FancyLedB174 = 0x83AE
FancyLedB175 = 0x83AF
FancyLedB176 = 0x83B0
FancyLedB177 = 0x83B1
FancyLedB178 = 0x83B2
FancyLedB179 = 0x83B3
FancyLedB180 = 0x83B4
FancyLedB181 = 0x83B5
FancyLedB182 = 0x83B6
FancyLedB183 = 0x83B7
FancyLedB184 = 0x83B8
FancyLedB185 = 0x83B9
FancyLedB186 = 0x83BA
FancyLedB187 = 0x83BB
FancyLedB188 = 0x83BC
FancyLedB189 = 0x83BD
FancyLedB190 = 0x83BE
FancyLedB191 = 0x83BF
FancyLedB192 = 0x83C0
FancyLedB193 = 0x83C1
FancyLedB194 = 0x83C2
FancyLedB195 = 0x83C3
FancyLedB196 = 0x83C4
FancyLedB197 = 0x83C5
FancyLedB198 = 0x83C6
FancyLedB199 = 0x83C7
FancyLedB200 = 0x83C8
FancyLedB201 = 0x83C9
FancyLedB202 = 0x83CA
FancyLedB203 = 0x83CB
FancyLedB204 = 0x83CC
FancyLedB205 = 0x83CD
FancyLedB206 = 0x83CE
FancyLedB207 = 0x83CF
FancyLedB208 = 0x83D0
FancyLedB209 = 0x83D1
FancyLedB210 = 0x83D2
FancyLedB211 = 0x83D3
FancyLedB212 = 0x83D4
FancyLedB213 = 0x83D5
FancyLedB214 = 0x83D6
FancyLedB215 = 0x83D7
FancyLedB216 = 0x83D8
FancyLedB217 = 0x83D9
FancyLedB218 = 0x83DA
FancyLedB219 = 0x83DB
FancyLedB220 = 0x83DC
FancyLedB221 = 0x83DD
FancyLedB222 = 0x83DE
FancyLedB223 = 0x83DF
FancyLedB224 = 0x83E0
FancyLedB225 = 0x83E1
FancyLedB226 = 0x83E2
FancyLedB227 = 0x83E3
FancyLedB228 = 0x83E4
FancyLedB229 = 0x83E5
FancyLedB230 = 0x83E6
FancyLedB231 = 0x83E7
FancyLedB232 = 0x83E8
FancyLedB233 = 0x83E9
FancyLedB234 = 0x83EA
FancyLedB235 = 0x83EB
FancyLedB236 = 0x83EC
FancyLedB237 = 0x83ED
FancyLedB238 = 0x83EE
FancyLedB239 = 0x83EF
FancyLedB240 = 0x83F0
FancyLedB241 = 0x83F1
FancyLedB242 = 0x83F2
FancyLedB243 = 0x83F3
FancyLedB244 = 0x83F4
FancyLedB245 = 0x83F5
FancyLedB246 = 0x83F6
FancyLedB247 = 0x83F7
FancyLedB248 = 0x83F8
FancyLedB249 = 0x83F9
FancyLedB250 = 0x83FA
FancyLedB251 = 0x83FB
FancyLedB252 = 0x83FC
FancyLedB253 = 0x83FD
FancyLedB254 = 0x83FE
FancyLedB255 = 0x83FF
# FancyButtonA List
FancyButtonA0 = 0xA000
FancyButtonA1 = 0xA001
FancyButtonA2 = 0xA002
FancyButtonA3 = 0xA003
FancyButtonA4 = 0xA004
FancyButtonA5 = 0xA005
FancyButtonA6 = 0xA006
FancyButtonA7 = 0xA007
FancyButtonA8 = 0xA008
FancyButtonA9 = 0xA009
FancyButtonA10 = 0xA00A
FancyButtonA11 = 0xA00B
FancyButtonA12 = 0xA00C
FancyButtonA13 = 0xA00D
FancyButtonA14 = 0xA00E
FancyButtonA15 = 0xA00F
FancyButtonA16 = 0xA010
FancyButtonA17 = 0xA011
FancyButtonA18 = 0xA012
FancyButtonA19 = 0xA013
FancyButtonA20 = 0xA014
FancyButtonA21 = 0xA015
FancyButtonA22 = 0xA016
FancyButtonA23 = 0xA017
FancyButtonA24 = 0xA018
FancyButtonA25 = 0xA019
FancyButtonA26 = 0xA01A
FancyButtonA27 = 0xA01B
FancyButtonA28 = 0xA01C
FancyButtonA29 = 0xA01D
FancyButtonA30 = 0xA01E
FancyButtonA31 = 0xA01F
FancyButtonA32 = 0xA020
FancyButtonA33 = 0xA021
FancyButtonA34 = 0xA022
FancyButtonA35 = 0xA023
FancyButtonA36 = 0xA024
FancyButtonA37 = 0xA025
FancyButtonA38 = 0xA026
FancyButtonA39 = 0xA027
FancyButtonA40 = 0xA028
FancyButtonA41 = 0xA029
FancyButtonA42 = 0xA02A
FancyButtonA43 = 0xA02B
FancyButtonA44 = 0xA02C
FancyButtonA45 = 0xA02D
FancyButtonA46 = 0xA02E
FancyButtonA47 = 0xA02F
FancyButtonA48 = 0xA030
FancyButtonA49 = 0xA031
FancyButtonA50 = 0xA032
FancyButtonA51 = 0xA033
FancyButtonA52 = 0xA034
FancyButtonA53 = 0xA035
FancyButtonA54 = 0xA036
FancyButtonA55 = 0xA037
FancyButtonA56 = 0xA038
FancyButtonA57 = 0xA039
FancyButtonA58 = 0xA03A
FancyButtonA59 = 0xA03B
FancyButtonA60 = 0xA03C
FancyButtonA61 = 0xA03D
FancyButtonA62 = 0xA03E
FancyButtonA63 = 0xA03F
FancyButtonA64 = 0xA040
FancyButtonA65 = 0xA041
FancyButtonA66 = 0xA042
FancyButtonA67 = 0xA043
FancyButtonA68 = 0xA044
FancyButtonA69 = 0xA045
FancyButtonA70 = 0xA046
FancyButtonA71 = 0xA047
FancyButtonA72 = 0xA048
FancyButtonA73 = 0xA049
FancyButtonA74 = 0xA04A
FancyButtonA75 = 0xA04B
FancyButtonA76 = 0xA04C
FancyButtonA77 = 0xA04D
FancyButtonA78 = 0xA04E
FancyButtonA79 = 0xA04F
FancyButtonA80 = 0xA050
FancyButtonA81 = 0xA051
FancyButtonA82 = 0xA052
FancyButtonA83 = 0xA053
FancyButtonA84 = 0xA054
FancyButtonA85 = 0xA055
FancyButtonA86 = 0xA056
FancyButtonA87 = 0xA057
FancyButtonA88 = 0xA058
FancyButtonA89 = 0xA059
FancyButtonA90 = 0xA05A
FancyButtonA91 = 0xA05B
FancyButtonA92 = 0xA05C
FancyButtonA93 = 0xA05D
FancyButtonA94 = 0xA05E
FancyButtonA95 = 0xA05F
FancyButtonA96 = 0xA060
FancyButtonA97 = 0xA061
FancyButtonA98 = 0xA062
FancyButtonA99 = 0xA063
FancyButtonA100 = 0xA064
FancyButtonA101 = 0xA065
FancyButtonA102 = 0xA066
FancyButtonA103 = 0xA067
FancyButtonA104 = 0xA068
FancyButtonA105 = 0xA069
FancyButtonA106 = 0xA06A
FancyButtonA107 = 0xA06B
FancyButtonA108 = 0xA06C
FancyButtonA109 = 0xA06D
FancyButtonA110 = 0xA06E
FancyButtonA111 = 0xA06F
FancyButtonA112 = 0xA070
FancyButtonA113 = 0xA071
FancyButtonA114 = 0xA072
FancyButtonA115 = 0xA073
FancyButtonA116 = 0xA074
FancyButtonA117 = 0xA075
FancyButtonA118 = 0xA076
FancyButtonA119 = 0xA077
FancyButtonA120 = 0xA078
FancyButtonA121 = 0xA079
FancyButtonA122 = 0xA07A
FancyButtonA123 = 0xA07B
FancyButtonA124 = 0xA07C
FancyButtonA125 = 0xA07D
FancyButtonA126 = 0xA07E
FancyButtonA127 = 0xA07F
FancyButtonA128 = 0xA080
FancyButtonA129 = 0xA081
FancyButtonA130 = 0xA082
FancyButtonA131 = 0xA083
FancyButtonA132 = 0xA084
FancyButtonA133 = 0xA085
FancyButtonA134 = 0xA086
FancyButtonA135 = 0xA087
FancyButtonA136 = 0xA088
FancyButtonA137 = 0xA089
FancyButtonA138 = 0xA08A
FancyButtonA139 = 0xA08B
FancyButtonA140 = 0xA08C
FancyButtonA141 = 0xA08D
FancyButtonA142 = 0xA08E
FancyButtonA143 = 0xA08F
FancyButtonA144 = 0xA090
FancyButtonA145 = 0xA091
FancyButtonA146 = 0xA092
FancyButtonA147 = 0xA093
FancyButtonA148 = 0xA094
FancyButtonA149 = 0xA095
FancyButtonA150 = 0xA096
FancyButtonA151 = 0xA097
FancyButtonA152 = 0xA098
FancyButtonA153 = 0xA099
FancyButtonA154 = 0xA09A
FancyButtonA155 = 0xA09B
FancyButtonA156 = 0xA09C
FancyButtonA157 = 0xA09D
FancyButtonA158 = 0xA09E
FancyButtonA159 = 0xA09F
FancyButtonA160 = 0xA0A0
FancyButtonA161 = 0xA0A1
FancyButtonA162 = 0xA0A2
FancyButtonA163 = 0xA0A3
FancyButtonA164 = 0xA0A4
FancyButtonA165 = 0xA0A5
FancyButtonA166 = 0xA0A6
FancyButtonA167 = 0xA0A7
FancyButtonA168 = 0xA0A8
FancyButtonA169 = 0xA0A9
FancyButtonA170 = 0xA0AA
FancyButtonA171 = 0xA0AB
FancyButtonA172 = 0xA0AC
FancyButtonA173 = 0xA0AD
FancyButtonA174 = 0xA0AE
FancyButtonA175 = 0xA0AF
FancyButtonA176 = 0xA0B0
FancyButtonA177 = 0xA0B1
FancyButtonA178 = 0xA0B2
FancyButtonA179 = 0xA0B3
FancyButtonA180 = 0xA0B4
FancyButtonA181 = 0xA0B5
FancyButtonA182 = 0xA0B6
FancyButtonA183 = 0xA0B7
FancyButtonA184 = 0xA0B8
FancyButtonA185 = 0xA0B9
FancyButtonA186 = 0xA0BA
FancyButtonA187 = 0xA0BB
FancyButtonA188 = 0xA0BC
FancyButtonA189 = 0xA0BD
FancyButtonA190 = 0xA0BE
FancyButtonA191 = 0xA0BF
FancyButtonA192 = 0xA0C0
FancyButtonA193 = 0xA0C1
FancyButtonA194 = 0xA0C2
FancyButtonA195 = 0xA0C3
FancyButtonA196 = 0xA0C4
FancyButtonA197 = 0xA0C5
FancyButtonA198 = 0xA0C6
FancyButtonA199 = 0xA0C7
FancyButtonA200 = 0xA0C8
FancyButtonA201 = 0xA0C9
FancyButtonA202 = 0xA0CA
FancyButtonA203 = 0xA0CB
FancyButtonA204 = 0xA0CC
FancyButtonA205 = 0xA0CD
FancyButtonA206 = 0xA0CE
FancyButtonA207 = 0xA0CF
FancyButtonA208 = 0xA0D0
FancyButtonA209 = 0xA0D1
FancyButtonA210 = 0xA0D2
FancyButtonA211 = 0xA0D3
FancyButtonA212 = 0xA0D4
FancyButtonA213 = 0xA0D5
FancyButtonA214 = 0xA0D6
FancyButtonA215 = 0xA0D7
FancyButtonA216 = 0xA0D8
FancyButtonA217 = 0xA0D9
FancyButtonA218 = 0xA0DA
FancyButtonA219 = 0xA0DB
FancyButtonA220 = 0xA0DC
FancyButtonA221 = 0xA0DD
FancyButtonA222 = 0xA0DE
FancyButtonA223 = 0xA0DF
FancyButtonA224 = 0xA0E0
FancyButtonA225 = 0xA0E1
FancyButtonA226 = 0xA0E2
FancyButtonA227 = 0xA0E3
FancyButtonA228 = 0xA0E4
FancyButtonA229 = 0xA0E5
FancyButtonA230 = 0xA0E6
FancyButtonA231 = 0xA0E7
FancyButtonA232 = 0xA0E8
FancyButtonA233 = 0xA0E9
FancyButtonA234 = 0xA0EA
FancyButtonA235 = 0xA0EB
FancyButtonA236 = 0xA0EC
FancyButtonA237 = 0xA0ED
FancyButtonA238 = 0xA0EE
FancyButtonA239 = 0xA0EF
FancyButtonA240 = 0xA0F0
FancyButtonA241 = 0xA0F1
FancyButtonA242 = 0xA0F2
FancyButtonA243 = 0xA0F3
FancyButtonA244 = 0xA0F4
FancyButtonA245 = 0xA0F5
FancyButtonA246 = 0xA0F6
FancyButtonA247 = 0xA0F7
FancyButtonA248 = 0xA0F8
FancyButtonA249 = 0xA0F9
FancyButtonA250 = 0xA0FA
FancyButtonA251 = 0xA0FB
FancyButtonA252 = 0xA0FC
FancyButtonA253 = 0xA0FD
FancyButtonA254 = 0xA0FE
FancyButtonA255 = 0xA0FF
# FancyButtonB List
FancyButtonB0 = 0xA100
FancyButtonB1 = 0xA101
FancyButtonB2 = 0xA102
FancyButtonB3 = 0xA103
FancyButtonB4 = 0xA104
FancyButtonB5 = 0xA105
FancyButtonB6 = 0xA106
FancyButtonB7 = 0xA107
FancyButtonB8 = 0xA108
FancyButtonB9 = 0xA109
FancyButtonB10 = 0xA10A
FancyButtonB11 = 0xA10B
FancyButtonB12 = 0xA10C
FancyButtonB13 = 0xA10D
FancyButtonB14 = 0xA10E
FancyButtonB15 = 0xA10F
FancyButtonB16 = 0xA110
FancyButtonB17 = 0xA111
FancyButtonB18 = 0xA112
FancyButtonB19 = 0xA113
FancyButtonB20 = 0xA114
FancyButtonB21 = 0xA115
FancyButtonB22 = 0xA116
FancyButtonB23 = 0xA117
FancyButtonB24 = 0xA118
FancyButtonB25 = 0xA119
FancyButtonB26 = 0xA11A
FancyButtonB27 = 0xA11B
FancyButtonB28 = 0xA11C
FancyButtonB29 = 0xA11D
FancyButtonB30 = 0xA11E
FancyButtonB31 = 0xA11F
FancyButtonB32 = 0xA120
FancyButtonB33 = 0xA121
FancyButtonB34 = 0xA122
FancyButtonB35 = 0xA123
FancyButtonB36 = 0xA124
FancyButtonB37 = 0xA125
FancyButtonB38 = 0xA126
FancyButtonB39 = 0xA127
FancyButtonB40 = 0xA128
FancyButtonB41 = 0xA129
FancyButtonB42 = 0xA12A
FancyButtonB43 = 0xA12B
FancyButtonB44 = 0xA12C
FancyButtonB45 = 0xA12D
FancyButtonB46 = 0xA12E
FancyButtonB47 = 0xA12F
FancyButtonB48 = 0xA130
FancyButtonB49 = 0xA131
FancyButtonB50 = 0xA132
FancyButtonB51 = 0xA133
FancyButtonB52 = 0xA134
FancyButtonB53 = 0xA135
FancyButtonB54 = 0xA136
FancyButtonB55 = 0xA137
| |
We do a couple sanity checks in here to be sure we can format a valid
AdbMessage for our underlying AdbConnection, and then send it. This method
can be used to send any message type, and doesn't do any state tracking or
acknowledgement checking.
Args:
command: The command to send, should be one of 'OKAY', 'WRTE', or 'CLSE'
timeout: timeouts.PolledTimeout to use for this operation
data: If provided, data to send with the AdbMessage.
"""
if len(data) > self.adb_connection.maxdata:
raise usb_exceptions.AdbProtocolError('Message data too long (%s>%s): %s',
len(data),
self.adb_connection.maxdata, data)
if not self.remote_id:
# If we get here, we probably missed the OKAY response to our OPEN. We
# should have failed earlier, but in case someone does something tricky
# with multiple threads, we sanity check this here.
raise usb_exceptions.AdbProtocolError('%s send before OKAY: %s',
self, data)
self.adb_connection.transport.write_message(
adb_message.AdbMessage(command, self.local_id, self.remote_id, data),
timeout)
def _handle_message(self, message, handle_wrte=True):
"""Handle a message that was read for this stream.
For each message type, this means:
OKAY: Check id's and make sure we are expecting an OKAY. Clear the
self._expecting_okay flag so any pending write()'s know.
CLSE: Set our internal state to closed.
WRTE: Add the data read to our internal read buffer. Note we don't
return the actual data because it may not be this thread that needs it.
Args:
message: Message that was read.
handle_wrte: If True, we can handle WRTE messages, otherwise raise.
Raises:
AdbProtocolError: If we get a WRTE message but handle_wrte is False.
"""
if message.command == 'OKAY':
self._set_or_check_remote_id(message.arg0)
if not self._expecting_okay:
raise usb_exceptions.AdbProtocolError(
'%s received unexpected OKAY: %s', self, message)
self._expecting_okay = False
elif message.command == 'CLSE':
self.closed_state = self.ClosedState.CLOSED
elif not handle_wrte:
raise usb_exceptions.AdbProtocolError(
'%s received WRTE before OKAY/CLSE: %s', self, message)
else:
with self._read_buffer_lock:
self._read_buffer.append(message.data)
self._buffer_size += len(message.data)
def _read_messages_until_true(self, predicate, timeout):
  """Read and handle messages from this stream until predicate() is True.

  This method tries to read a message from this stream, blocking until a
  message is read.  Once read, it will handle it accordingly by calling
  self._handle_message().

  This is repeated as long as predicate() returns False.  There is some
  locking used internally here so that we don't end up with multiple threads
  blocked on a call to read_for_stream when another thread has read the
  message that caused predicate() to become True.

  Args:
    predicate: Callable, keep reading messages until it returns true.  Note
      that predicate() should not block, as doing so may cause this method
      to hang beyond its timeout.
    timeout: Timeout to use for this call.

  Raises:
    AdbTimeoutError: If timeout expires while waiting for another thread's
      read to make predicate() become True.
    AdbStreamClosedError: If this stream is already closed.
  """
  while not predicate():
    # Hold the message_received Lock while we try to acquire the reader_lock
    # and waiting on the message_received condition, to prevent another
    # reader thread from notifying the condition between us failing to
    # acquire the reader_lock and waiting on the condition.
    self._message_received.acquire()
    if self._reader_lock.acquire(False):
      # We won the non-blocking acquire: this thread becomes the reader.
      try:
        # Release the message_received Lock while we do the read so other
        # threads can wait() on the condition without having to block on
        # acquiring the message_received Lock (we may have a longer timeout
        # than them, so that would be bad).
        self._message_received.release()
        # We are now the thread responsible for reading a message. Check
        # predicate() to make sure nobody else read a message between our
        # last check and acquiring the reader Lock.
        if predicate():
          return
        # Read and handle a message, using our timeout.
        self._handle_message(
            self.adb_connection.read_for_stream(self, timeout))
        # Notify anyone interested that we handled a message, causing them
        # to check their predicate again.
        with self._message_received:
          self._message_received.notify_all()
      finally:
        self._reader_lock.release()
    else:
      # There is some other thread reading a message. Since we are already
      # holding the message_received Lock, we can immediately do the wait.
      try:
        self._message_received.wait(timeout.remaining)
        if timeout.has_expired():
          raise usb_exceptions.AdbTimeoutError(
              '%s timed out reading messages.', self)
      finally:
        # Make sure we release this even if an exception occurred.
        self._message_received.release()
def ensure_opened(self, timeout):
  """Ensure this stream transport was successfully opened.

  Blocks until our initial OKAY (or CLSE) message is received.  Must be
  called after creating this AdbStreamTransport and before calling read()
  or write().

  Args:
    timeout: timeouts.PolledTimeout to use for this operation.

  Returns:
    True if this stream was successfully opened, False if the service was
    not recognized by the remote endpoint.  If False is returned, this
    AdbStreamTransport will already be closed.

  Raises:
    AdbProtocolError: If we receive a WRTE message instead of OKAY/CLSE.
  """
  response = self.adb_connection.read_for_stream(self, timeout)
  self._handle_message(response, handle_wrte=False)
  return self.is_open()
def is_open(self):
  """Return True iff the transport layer is open."""
  state = self.closed_state
  return state == self.ClosedState.OPEN
def is_closed(self):
  """Return True iff the transport layer is closed."""
  # Fixed docstring typo ("true ifff" -> "True iff") to match is_open().
  return self.closed_state == self.ClosedState.CLOSED
def enqueue_message(self, message, timeout):
  """Add the given message to this transport's queue.

  WRTE messages are ACK'd immediately with an OKAY; an OKAY message records
  (or verifies) the remote stream id, handling the ack of our OPEN if it
  gets enqueued here.

  Args:
    message: The AdbMessage to enqueue.
    timeout: The timeout to use for the operation.  Specifically, WRTE
      messages cause an OKAY to be sent; timeout is used for that send.
  """
  command = message.command
  if command == 'WRTE':
    # ACK the write right away rather than waiting for a reader.
    self._send_command('OKAY', timeout=timeout)
  elif command == 'OKAY':
    # May be the ack of our OPEN; remember the remote id it carries.
    self._set_or_check_remote_id(message.arg0)
  self.message_queue.put(message)
def write(self, data, timeout):
  """Write data to this stream, using the given timeouts.PolledTimeout.

  Args:
    data: Payload to send in a single WRTE message.
    timeout: timeouts.PolledTimeout covering both the send and the wait for
      the remote end's OKAY acknowledgement.

  Raises:
    AdbStreamClosedError: If this transport is half-opened or closed.
    AdbProtocolError: If a previous WRTE was never acknowledged.
  """
  # Guard clauses: refuse to write on a half-opened, closed, or
  # unacknowledged transport.
  if not self.remote_id:
    raise usb_exceptions.AdbStreamClosedError(
        'Cannot write() to half-opened %s', self)
  if self.closed_state != self.ClosedState.OPEN:
    raise usb_exceptions.AdbStreamClosedError(
        'Cannot write() to closed %s', self)
  if self._expecting_okay:
    raise usb_exceptions.AdbProtocolError(
        'Previous WRTE failed, %s in unknown state', self)
  # ADB doesn't identify which WRTE an OKAY acknowledges, so only one WRTE
  # may ever be in flight at a time.
  with self._write_lock:
    self._expecting_okay = True
    self._send_command('WRTE', timeout, data)
    self._read_messages_until_true(lambda: not self._expecting_okay, timeout)
def read(self, length, timeout):
  """Read 'length' bytes from this stream transport.

  Args:
    length: If not 0, read this many bytes from the stream, otherwise read
      all available data (at least one byte).
    timeout: timeouts.PolledTimeout to use for this read operation.

  Returns:
    The bytes read from this stream.
  """
  # Block until enough data is buffered (or, for length 0, any data at all).
  self._read_messages_until_true(
      lambda: self._buffer_size and self._buffer_size >= length, timeout)
  with self._read_buffer_lock:
    buffered = ''.join(self._read_buffer)
    if length:
      data = buffered[:length]
      leftover = buffered[length:]
    else:
      data = buffered
      leftover = ''
    self._read_buffer.clear()
    self._buffer_size = len(leftover)
    if leftover:
      # Anything beyond 'length' goes back to the front of the buffer.
      self._read_buffer.appendleft(leftover)
  return data
def close(self, timeout_ms):
  """Close this stream; future reads/writes will fail.

  Idempotent: a second call on an already-closed transport is a no-op.
  Timeouts during the close handshake are logged and swallowed.

  Args:
    timeout_ms: Milliseconds to allow for closing the underlying transport.
  """
  if self.closed_state == self.ClosedState.CLOSED:
    return
  self.closed_state = self.ClosedState.CLOSED
  try:
    deadline = timeouts.PolledTimeout.from_millis(timeout_ms)
    was_known = self.adb_connection.close_stream_transport(self, deadline)
    if not was_known:
      _LOG.warning('Attempt to close mystery %s', self)
  except usb_exceptions.AdbTimeoutError:
    _LOG.warning('%s close() timed out, ignoring', self)
class AdbConnection(object):
"""This class represents a connection to an ADB device.
In the context of the ADB documentation (/system/core/adb/protocol.txt in the
Android source), this class corresponds to a session initiated with a CONNECT
message, likely followed by an AUTH message.
This class does NOT represent an individual stream, identified by remote and
local id's in the ADB documentation. Instead, this class has an OpenStream
method that corresponds to the OPEN message in the ADB documentation. That
method returns an AdbStream that can be used to write to a particular service
on the device.
Clients should never instantiate this class directly, but instead should use
the connect() classmethod to open a new connection to a device.
Attributes:
transport: Underlying transport for this AdbConnection, usually USB.
maxdata: Max data payload size supported by this AdbConnection.
systemtype: System type, according to ADB's protocol.txt, one of 'device',
'recovery', etc.
serial: The 'serial number', as reported by ADB in the remote banner.
banner: The 'human readable' component of the remote banner.
"""
# pylint: disable=too-many-instance-attributes
# AUTH constants for arg0.
AUTH_TOKEN = 1
AUTH_SIGNATURE = 2
AUTH_RSAPUBLICKEY = 3
def __init__(self, transport, maxdata, remote_banner):
"""Create an ADB connection to a device.
Args:
transport: AdbTransportAdapter to use for reading/writing AdbMessages
maxdata: Max data size the remote endpoint will accept.
remote_banner: Banner received from the remote endpoint.
"""
try:
self.systemtype, self.serial, self.banner = remote_banner.split(':', 2)
except ValueError:
raise usb_exceptions.AdbProtocolError('Received malformed banner %s',
remote_banner)
self.transport = transport
self.maxdata = maxdata
self._last_id_used = 0
self._reader_lock = threading.Lock()
self._open_lock = threading.Lock()
# | |
facility treatment type dictionary
treat_dict = {'raw discharge': 'wastewater_no_treatment',
'primary (45mg/l< bod)': 'wastewater_primary_treatment',
'advanced primary': 'wastewater_advanced_treatment',
'secondary wastewater treatment': 'wastewater_secondary_treatment',
'secondary': 'wastewater_secondary_treatment',
'advanced treatment': 'wastewater_advanced_treatment'}
# fix naming for one county in wastewater facility location data
df_ww_loc["PRIMARY_COUNTY"] = np.where(df_ww_loc["PRIMARY_COUNTY"] == "Bedford City", "Bedford",
df_ww_loc["PRIMARY_COUNTY"])
# reformat county identifier columns in wastewater facility location data
df_ww_loc['PRIMARY_COUNTY'] = df_ww_loc['PRIMARY_COUNTY'].str.lower() # change to lowercase
df_ww_loc["PRIMARY_COUNTY"] = df_ww_loc["PRIMARY_COUNTY"].str.replace(' ', '') # remove spaces between words
# create a state+county identifier column in wastewater facility location data
df_ww_loc['CWNS_NUMBER'] = df_ww_loc['CWNS_NUMBER'].apply(lambda x: '{0:0>11}'.format(x)) # add leading zero
df_ww_loc["county_identifier"] = df_ww_loc["STATE"] + df_ww_loc["PRIMARY_COUNTY"] # add identifier
# combine wastewater facility location data and county to FIPS crosswalk data to get a FIPS code for each plant
df_ww_loc = pd.merge(df_ww_loc, df_county, how="left", on="county_identifier") # merge dataframes
df_ww_loc = df_ww_loc.drop_duplicates(subset=["CWNS_NUMBER"], keep='first') # drop duplicate CWNS entries
df_ww_loc = df_ww_loc[["CWNS_NUMBER", "FIPS", "STATE"]] # reducing to required variables
# prepare wastewater treatment flow data
df_ww_flow = df_ww_flow[["CWNS_NUMBER", "EXIST_INFILTRATION", "EXIST_TOTAL"]] # reducing to required variables
df_ww_flow = df_ww_flow.dropna(subset=["EXIST_TOTAL"]) # drop treatment plants with zero flows
df_ww_flow["EXIST_INFILTRATION"] = df_ww_flow["EXIST_INFILTRATION"].fillna(0) # fill blank infiltration with zero
# calculate municipal water flows for each facility in wastewater treatment flow data
df_ww_flow['EXIST_MUNI'] = df_ww_flow["EXIST_TOTAL"] - df_ww_flow["EXIST_INFILTRATION"] # subtract infiltration
# reformat and rename wastewater treatment facility flow data
df_ww_flow['CWNS_NUMBER'] = df_ww_flow['CWNS_NUMBER'].apply(lambda x: '{0:0>11}'.format(x)) # add leading zero
df_ww_flow.rename(columns=flow_dict, inplace=True) # rename columns to add descriptive language
# combine wastewater treatment facility flow data and wastewater treatment facility location data
df_ww_flow = pd.merge(df_ww_flow, df_ww_loc, how="left", on='CWNS_NUMBER') # merge dataframes
# remove wastewater treatment facility flow data rows for geographic areas not included in other datasets
df_ww_flow = df_ww_flow[df_ww_flow.STATE != "AS"] # remove flow values for American Samoa
df_ww_flow = df_ww_flow[df_ww_flow.STATE != "GU"] # remove flow values for Guam
df_ww_flow = df_ww_flow[df_ww_flow.STATE != "PR"] # remove flow values for Puerto Rico
df_ww_flow = df_ww_flow[df_ww_flow.STATE != "VI"] # remove flow values for US Virgin Islands
# prep wastewater treatment facility discharge type data to remove naming and capitalization inconsistencies
df_ww_dis['DISCHARGE_METHOD'] = df_ww_dis['DISCHARGE_METHOD'].str.replace(',', '') # remove commas
df_ww_dis['DISCHARGE_METHOD'] = df_ww_dis['DISCHARGE_METHOD'].str.lower() # change to lowercase
df_ww_dis['DISCHARGE_METHOD'] = np.where(df_ww_dis['DISCHARGE_METHOD'] == "reuse: ground water recharge", # rename
"reuse: groundwater recharge",
df_ww_dis['DISCHARGE_METHOD'])
df_ww_dis['DISCHARGE_METHOD'] = np.where(df_ww_dis['DISCHARGE_METHOD'] == "cso discharge", # rename
"combined sewer overflow (cso) discharge",
df_ww_dis['DISCHARGE_METHOD'])
# rename wastewater treatment discharge types
df_ww_dis['DISCHARGE_METHOD_BIN'] = df_ww_dis['DISCHARGE_METHOD'].map(dis_dict) # map to discharge dictionary
# reduce and reformat variables in wastewater treatment facility discharge data
df_ww_dis = df_ww_dis[["CWNS_NUMBER", 'DISCHARGE_METHOD_BIN', 'PRES_FLOW_PERCENTAGE']] # keep required columns
df_ww_dis['CWNS_NUMBER'] = df_ww_dis['CWNS_NUMBER'].apply(lambda x: '{0:0>11}'.format(x)) # add leading zero
df_ww_dis['PRES_FLOW_PERCENTAGE'] = df_ww_dis['PRES_FLOW_PERCENTAGE'] / 100 # convert to fraction
# pivot wastewater treatment facility discharge dataframe to get discharge type as columns
df_ww_dis = pd.pivot_table(df_ww_dis, values='PRES_FLOW_PERCENTAGE', index=['CWNS_NUMBER'],
columns=['DISCHARGE_METHOD_BIN'],
aggfunc=np.sum) # pivot to get discharge types as columns
df_ww_dis = df_ww_dis.reset_index() # reset index to remove multi-index from pivot table
df_ww_dis = df_ww_dis.rename_axis(None, axis=1) # drop index name
# fill blank discharge percentage values in wastewater facility discharge data with 0 percent
for col in df_ww_dis.columns[1:]:
df_ww_dis[col] = df_ww_dis[col].fillna(0) # fill nan rows with 0
# calculate the sum of all discharge type percentages by plant in wastewater treatment facility discharge data
df_ww_dis['sum_pct'] = df_ww_dis.iloc[:, 1:].sum(axis=1) # calculate sum of all flow percentages
# for treatment plants with no discharge data, assume 70% of discharge is to surface discharge
df_ww_dis['wastewater_surface_discharge'] = np.where(df_ww_dis['sum_pct'] == 0, # fill blanks values
.68,
df_ww_dis['wastewater_surface_discharge'])
# for treatment plants with no discharge data, assume 18% of discharge is to groundwater
df_ww_dis['wastewater_groundwater_discharge'] = np.where(df_ww_dis['sum_pct'] == 0, # fill blanks values
.19,
df_ww_dis['wastewater_groundwater_discharge'])
# for treatment plants with no discharge data, assume 8% of discharge is to irrigation
df_ww_dis['wastewater_irrigation_discharge'] = np.where(df_ww_dis['sum_pct'] == 0, # fill blanks values
.08,
df_ww_dis['wastewater_irrigation_discharge'])
# for treatment plants with no discharge data, assume 5% of discharge is to consumption
df_ww_dis['wastewater_consumption'] = np.where(df_ww_dis['sum_pct'] == 0, # fill blanks values
.05,
df_ww_dis['wastewater_consumption'])
# for treatment plants with discharge to another facility, assume 70% of discharge is to surface discharge
df_ww_dis['wastewater_surface_discharge'] = np.where(df_ww_dis['wastewater_wastewater_discharge'] > 0,
df_ww_dis['wastewater_surface_discharge']
+ (.68 * df_ww_dis['wastewater_wastewater_discharge']),
df_ww_dis['wastewater_surface_discharge'])
# for treatment plants with discharge to another facility, assume 18% of discharge is to groundwater
df_ww_dis['wastewater_groundwater_discharge'] = np.where(df_ww_dis['wastewater_wastewater_discharge'] > 0,
# fill blanks values
df_ww_dis['wastewater_groundwater_discharge']
+ (.19 * df_ww_dis['wastewater_wastewater_discharge']),
df_ww_dis['wastewater_groundwater_discharge'])
# for treatment plants with discharge to another facility, assume 8% of discharge is to irrigation
df_ww_dis['wastewater_irrigation_discharge'] = np.where(df_ww_dis['wastewater_wastewater_discharge'] > 0,
# fill blanks values
df_ww_dis['wastewater_irrigation_discharge']
+ (.08 * df_ww_dis['wastewater_wastewater_discharge']),
df_ww_dis['wastewater_irrigation_discharge'])
# for treatment plants with discharge to another facility, assume 5% of discharge is to consumption
df_ww_dis['wastewater_consumption'] = np.where(df_ww_dis['wastewater_wastewater_discharge'] > 0,
# fill blanks values
df_ww_dis['wastewater_consumption']
+ (.05 * df_ww_dis['wastewater_wastewater_discharge']),
df_ww_dis['wastewater_consumption'])
# drop discharges to wastewater from the dataset
df_ww_dis = df_ww_dis.drop(['wastewater_wastewater_discharge'], axis=1)
# recalculate discharge percent sum
df_ww_dis['sum_pct'] = df_ww_dis.iloc[:, 1:-1].sum(axis=1) # recalculate sum
# combine wastewater treatment facility flow data and wastewater treatment facility discharge data
df_ww_flow = pd.merge(df_ww_flow, df_ww_dis, how='left', on='CWNS_NUMBER')
# prep wastewater treatment facility treatment type data
df_ww_type = df_ww_type[['CWNS_NUMBER', 'PRES_EFFLUENT_TREATMENT_LEVEL']] # reducing to required variables
df_ww_type['pct'] = 1 # add a percent column
# reduce and reformat variables in wastewater treatment facility treatment type data
df_ww_type['PRES_EFFLUENT_TREATMENT_LEVEL'] = df_ww_type['PRES_EFFLUENT_TREATMENT_LEVEL'].str.lower() # lowercase
df_ww_type['PRES_EFFLUENT_TREATMENT_LEVEL'] = np.where(df_ww_type['PRES_EFFLUENT_TREATMENT_LEVEL'] == # rename
"primary (45mg/l is less than bod)",
"primary (45mg/l< bod)",
df_ww_type['PRES_EFFLUENT_TREATMENT_LEVEL'])
# bin wastewater treatment facility treatment types
df_ww_type['TREATMENT_LEVEL_BIN'] = df_ww_type['PRES_EFFLUENT_TREATMENT_LEVEL'].map(treat_dict)
# pivot wastewater treatment facility treatment type dataframe to get treatment type as columns
df_ww_type = pd.pivot_table(df_ww_type, values='pct', index=['CWNS_NUMBER'],
columns=['TREATMENT_LEVEL_BIN'],
aggfunc=np.sum) # pivot to get treatment types as columns
df_ww_type = df_ww_type.reset_index() # reset index to remove multi-index from pivot table
df_ww_type = df_ww_type.rename_axis(None, axis=1) # drop index name
# fill blank treatment type values with 0 percent
for col in df_ww_type.columns[1:]: # fill nan rows with 0
df_ww_type[col] = df_ww_type[col].fillna(0)
# calculate the sum of the treatment type percentages
df_ww_type['sum_type'] = df_ww_type.iloc[:, 1:].sum(axis=1) # calculate sum
df_ww_type['CWNS_NUMBER'] = df_ww_type['CWNS_NUMBER'].apply(lambda x: '{0:0>11}'.format(x)) # add leading zero
# combine wastewater treatment facility flow data and wastewater treatment facility type data
df_ww_flow = pd.merge(df_ww_flow, df_ww_type, how='left', on='CWNS_NUMBER')
# fill blanks with 0
for col in df_ww_type.columns: # fill nan rows with 0
df_ww_flow[col] = df_ww_flow[col].fillna(0)
# for treatment plants with flow data but no treatment type data, assume 60% of treatment type is secondary
df_ww_flow['wastewater_secondary_treatment'] = np.where(df_ww_flow['sum_type'] < 1,
.6,
df_ww_flow['wastewater_secondary_treatment'])
# for treatment plants with flow data but no treatment type data, assume 40% of treatment type is advanced
df_ww_flow['wastewater_advanced_treatment'] = np.where(df_ww_flow['sum_type'] < 1,
.4,
df_ww_flow['wastewater_advanced_treatment'])
# recalculate sum of percents
df_ww_flow['sum_type'] = df_ww_flow.iloc[:, 15:-1].sum(axis=1) # recalculate sum
# creating new dataframe and reducing list of variables
df_ww_fractions = df_ww_flow.drop(['sum_type', 'sum_pct', 'infiltration_wastewater_mgd', 'total_wastewater_mgd',
'municipal_wastewater_mgd'], axis=1)
df_ww_flow = df_ww_flow[['FIPS', 'CWNS_NUMBER', 'infiltration_wastewater_mgd', 'total_wastewater_mgd',
'municipal_wastewater_mgd']]
# group by FIPS code to get average wastewater discharge and treatment types by county
df_ww_fractions = df_ww_fractions.groupby("FIPS", as_index=False).mean()
# combine with full county list to get values for each county
df_ww_fractions = pd.merge(df_county_list, df_ww_fractions, how='left', on='FIPS')
# group by FIPS code to get total wastewater by county
df_ww_flow = df_ww_flow.groupby("FIPS", as_index=False).sum()
# combine with full county list to get values for each county and fill counties with no plants with 0
df_ww_flow = pd.merge(df_county_list, df_ww_flow, how='left', on='FIPS')
df_ww_flow.fillna(0, inplace=True)
# recombine flow and fraction dataframes
df_ww = pd.merge(df_ww_flow, df_ww_fractions, how='left', on=['FIPS', 'State', 'County'])
# fill missing discharge and treatment fractions with zero for rows with no treatment flows
df_ww.fillna(0, inplace=True)
# fill south carolina discharge estimates with established percentages to prepare for flows calculated later
df_ww['wastewater_consumption'] = np.where(df_ww.State == 'SC', .05, df_ww['wastewater_consumption'])
df_ww['wastewater_groundwater_discharge'] = np.where(df_ww.State == 'SC', .19,
df_ww['wastewater_groundwater_discharge'])
df_ww['wastewater_irrigation_discharge'] = np.where(df_ww.State == 'SC', .08,
df_ww['wastewater_irrigation_discharge'])
df_ww['wastewater_surface_discharge'] = np.where(df_ww.State == 'SC', .68, df_ww['wastewater_surface_discharge'])
# create output df
df_out = df_ww.copy()
# add column indicating percentage of energy from electricity, assumed 100%
df_out['WWD_treatment_advanced_total_total_bbtu_from_EGD_total_total_total_total_bbtu_fraction'] = 1
df_out['WWD_treatment_primary_total_total_bbtu_from_EGD_total_total_total_total_bbtu_fraction'] = 1
df_out['WWD_treatment_secondary_total_total_bbtu_from_EGD_total_total_total_total_bbtu_fraction'] = 1
df_ww['advanced_infiltration_flows_mgd'] = df_ww['wastewater_advanced_treatment'] \
* df_ww['infiltration_wastewater_mgd']
df_ww['primary_infiltration_flows_mgd'] = df_ww['wastewater_primary_treatment'] \
* df_ww['infiltration_wastewater_mgd']
df_ww['secondary_infiltration_flows_mgd'] = | |
"Huawei CLT-L09",
"Huawei CLT-L29",
"Huawei CLT-TL00",
"Huawei CLT-TL01",
"Huawei ANE-LX1",
"Huawei ANE-LX2",
"Huawei ANE-LX3",
"Huawei CLT-L09",
"Huawei CLT-L29",
"Huawei HW-02L",
"Huawei VOG-AL00",
"Huawei VOG-AL10",
"Huawei VOG-L04",
"Huawei VOG-L09",
"Huawei VOG-L29",
"Huawei VOG-TL00",
"Huawei MAR-LX2J",
"Huawei HUAWEI P6-C00",
"Huawei HUAWEI P6-T00",
"Huawei HUAWEI P6-T00V",
"Huawei HUAWEI Ascend P6",
"Huawei HUAWEI P6-U06",
"Huawei HUAWEI P6-U06-orange",
"Huawei P6 S-L04",
"Huawei 302HW",
"Huawei HUAWEI P6 S-U06",
"Huawei HUAWEI P7-L00",
"Huawei HUAWEI P7-L05",
"Huawei HUAWEI P7-L07",
"Huawei HUAWEI P7-L10",
"Huawei HUAWEI P7-L11",
"Huawei HUAWEI P7-L12",
"Huawei HUAWEI P7 mini",
"Huawei HUAWEI P7-L09",
"Huawei HUAWEI GRA-CL00",
"Huawei HUAWEI GRA-CL10",
"Huawei HUAWEI GRA-L09",
"Huawei HUAWEI GRA-TL00",
"Huawei HUAWEI GRA-UL00",
"Huawei HUAWEI GRA-UL10",
"Huawei 503HW",
"Huawei ALE-L02",
"Huawei ALE-L21",
"Huawei ALE-L23",
"Huawei Autana LTE",
"Huawei HUAWEI ALE-CL00",
"Huawei HUAWEI ALE-L04",
"Huawei PRA-LA1",
"Huawei PRA-LX1",
"Huawei ALE-TL00",
"Huawei ALE-UL00",
"Huawei HUAWEI P8max",
"Huawei EVA-AL00",
"Huawei EVA-AL10",
"Huawei EVA-CL00",
"Huawei EVA-DL00",
"Huawei EVA-L09",
"Huawei EVA-L19",
"Huawei EVA-L29",
"Huawei EVA-TL00",
"Huawei HUAWEI VNS-L52",
"Huawei VIE-AL10",
"Huawei VIE-L09",
"Huawei VIE-L29",
"Huawei HUAWEI VNS-L21",
"Huawei HUAWEI VNS-L22",
"Huawei HUAWEI VNS-L23",
"Huawei HUAWEI VNS-L31",
"Huawei HUAWEI VNS-L53",
"Huawei HUAWEI VNS-L62",
"Huawei DIG-L03",
"Huawei DIG-L23",
"Huawei PE-CL00",
"Huawei PE-TL00M",
"Huawei PE-TL10",
"Huawei PE-TL20",
"Huawei PE-UL00",
"Huawei NEO-AL00",
"Huawei NEO-L29",
"Huawei Prism II",
"Huawei Q22",
"Huawei HUAWEI RIO-CL00",
"Huawei MediaPad 10 FHD",
"Huawei MediaPad 10 LINK",
"Huawei MediaPad 7 Vogue",
"Huawei MediaPad 7 Vogue",
"Huawei MediaPad 7 Youth",
"Huawei Orinoquia Roraima S7-932u",
"Huawei MediaPad 7 Lite+",
"Huawei Telpad Dual S",
"Huawei HUAWEI SC-CL00",
"Huawei HUAWEI SC-UL10",
"Huawei H710VL",
"Huawei H715BL",
"Huawei HUAWEI ATH-UL01",
"Huawei HUAWEI ATH-UL06",
"Huawei Huawei_8100-9",
"Huawei Tactile internet",
"Huawei U8100",
"Huawei Videocon_V7400",
"Huawei T1-821L",
"Huawei T1-821W",
"Huawei T1-821w",
"Huawei T1-823L",
"Huawei T1-823L",
"Huawei HUAWEI MediaPad T1 10 4G",
"Huawei T1-A21L",
"Huawei T1-A21W",
"Huawei T1-A21w",
"Huawei T1-A22L",
"Huawei T1-A23L",
"Huawei T-101",
"Huawei T101 PAD",
"Huawei QH-10",
"Huawei T102 PAD",
"Huawei T801 PAD",
"Huawei MT-803G",
"Huawei T802 PAD",
"Huawei HUAWEI T8808D",
"Huawei HUAWEI TAG-AL00",
"Huawei HUAWEI TAG-CL00",
"Huawei HUAWEI TAG-TL00",
"Huawei Vodafone 845",
"Huawei Pulse",
"Huawei U8220",
"Huawei U8220PLUS",
"Huawei U8230",
"Huawei Huawei-U8652",
"Huawei U8652",
"Huawei Huawei-U8687",
"Huawei U8812D",
"Huawei U8832D",
"Huawei U8836D",
"Huawei HUAWEI-U8850",
"Huawei HUAWEI-U9000",
"Huawei Y538",
"Huawei Huawei 858",
"Huawei MTC 950",
"Huawei MTC Mini",
"Huawei Vodafone 858",
"Huawei MediaPad 7 Classic",
"Huawei MediaPad 7 Lite II",
"Huawei MediaPad 7 Vogue" "Huawei LEO-BX9",
"Huawei LEO-DLXX",
"Huawei GEM-701L",
"Huawei GEM-702L",
"Huawei GEM-703L",
"Huawei GEM-703LT",
"Huawei Orinoquia Auyantepui Y210",
"Huawei Y220-U00",
"Huawei Y220-U05",
"Huawei Y220-U17",
"Huawei HUAWEI Y220-T10",
"Huawei Y220-U10",
"Huawei HUAWEI Y 220T",
"Huawei HUAWEI Y221-U03",
"Huawei ORINOQUIA Auyantepui+Y221-U03",
"Huawei HUAWEI Y221-U12",
"Huawei HUAWEI Y221-U22",
"Huawei HUAWEI Y221-U33",
"Huawei HUAWEI Y221-U43",
"Huawei HUAWEI Y221-U53",
"Huawei HUAWEI Ascend Y300",
"Huawei HUAWEI Y300-0100",
"Huawei HUAWEI Y300-0151",
"Huawei Pelephone-Y300-",
"Huawei HUAWEI Y300-0000",
"Huawei Huawei Y301A1",
"Huawei Huawei Y301A2",
"Huawei HUAWEI Y310-5000",
"Huawei HUAWEI Y310-T10",
"Huawei HUAWEI Y320-C00",
"Huawei HUAWEI Y320-T00",
"Huawei HUAWEI Y320-U01",
"Huawei Y320-U01",
"Huawei HUAWEI Y320-U10",
"Huawei HUAWEI Y320-U151",
"Huawei HUAWEI Y320-U30",
"Huawei HUAWEI Y320-U351",
"Huawei HUAWEI Y321-U051",
"Huawei HUAWEI Y321-C00",
"Huawei HUAWEI Y325-T00",
"Huawei Bucare Y330-U05",
"Huawei HUAWEI Y330-U05",
"Huawei HUAWEI Y330-U21",
"Huawei HUAWEI Y330-C00",
"Huawei HUAWEI Y330-U01",
"Huawei Luno",
"Huawei HUAWEI Y330-U07",
"Huawei HUAWEI Y330-U08",
"Huawei HUAWEI Y330-U11",
"Huawei V8510",
"Huawei HUAWEI Y330-U15",
"Huawei HUAWEI Y330-U17",
"Huawei HUAWEI Y336-A1",
"Huawei HUAWEI Y336-U02",
"Huawei HUAWEI Y336-U12",
"Huawei Y340-U081",
"Huawei HUAWEI Y360-U03",
"Huawei HUAWEI Y360-U103",
"Huawei HUAWEI Y360-U12",
"Huawei HUAWEI Y360-U23",
"Huawei HUAWEI Y360-U31",
"Huawei HUAWEI Y360-U42",
"Huawei HUAWEI Y360-U61",
"Huawei HUAWEI Y360-U72",
"Huawei HUAWEI Y360-U82",
"Huawei Delta Y360-U93",
"Huawei HUAWEI Y360-U93",
"Huawei HUAWEI LUA-L01",
"Huawei HUAWEI LUA-L02",
"Huawei HUAWEI LUA-L21",
"Huawei HUAWEI LUA-U02",
"Huawei HUAWEI LUA-U22",
"Huawei CRO-U00",
"Huawei CRO-U23",
"Huawei HUAWEI CRO-U00",
"Huawei HUAWEI CRO-U23",
"Huawei HUAWEI Y560-L02",
"Huawei HUAWEI Y560-L23",
"Huawei HUAWEI Y560-U03",
"Huawei MYA-AL00",
"Huawei MYA-L02",
"Huawei MYA-L03",
"Huawei MYA-L13",
"Huawei MYA-L22",
"Huawei MYA-L23",
"Huawei MYA-TL00",
"Huawei MYA-U29",
"Huawei HUAWEI Y500-T00",
"Huawei HUAWEI Y511-T00",
"Huawei Y511-T00",
"Huawei Y511-U00",
"Huawei HUAWEI Y511-U10",
"Huawei HUAWEI Y511-U251",
"Huawei HUAWEI Y511-U30",
"Huawei VIETTEL V8506",
"Huawei HUAWEI Y516-T00",
"Huawei HUAWEI Y518-T00",
"Huawei HUAWEI Y520-U03",
"Huawei HUAWEI Y520-U12",
"Huawei HUAWEI Y520-U22",
"Huawei HUAWEI Y520-U33",
"Huawei HUAWEI Y523-L076",
"Huawei HUAWEI Y523-L176",
"Huawei HUAWEI Y530-U00",
"Huawei HUAWEI Y530",
"Huawei HUAWEI Y530-U051",
"Huawei HUAWEI Y535-C00",
"Huawei HUAWEI Y535D-C00",
"Huawei HUAWEI Y536A1",
"Huawei HUAWEI Y540-U01",
"Huawei HUAWEI Y541-U02",
"Huawei Y541-U02",
"Huawei Y545-U05",
"Huawei HUAWEI Y550-L01",
"Huawei HUAWEI Y550-L02",
"Huawei Y550-L02",
"Huawei HUAWEI Y550",
"Huawei HUAWEI Y550-L03",
"Huawei Personal Huawei Y550",
"Huawei Y550-L03",
"Huawei HUAWEI Y560-CL00",
"Huawei HUAWEI Y560-L01",
"Huawei HUAWEI Y560-L03",
"Huawei HUAWEI Y560-U02",
"Huawei HUAWEI Y560-U12",
"Huawei HUAWEI Y560-U23",
"Huawei CUN-L22",
"Huawei HUAWEI CUN-L01",
"Huawei HUAWEI CUN-L02",
"Huawei HUAWEI CUN-L03",
"Huawei HUAWEI CUN-L21",
"Huawei HUAWEI CUN-L22",
"Huawei HUAWEI CUN-L23",
"Huawei HUAWEI CUN-L33",
"Huawei HUAWEI CUN-U29",
"Huawei HUAWEI SCC-U21",
"Huawei SCC-U21",
"Huawei HUAWEI SCL-L01",
"Huawei HUAWEI SCL-L02",
"Huawei HUAWEI SCL-L03",
"Huawei HUAWEI SCL-L04",
"Huawei HUAWEI SCL-L21",
"Huawei HW-SCL-L32",
"Huawei SCL-L01",
"Huawei HUAWEI SCL-U23",
"Huawei HUAWEI SCL-U31",
"Huawei SCL-U23",
"Huawei MYA-L41",
"Huawei HUAWEI LYO-L02",
"Huawei HUAWEI TIT-AL00",
"Huawei HUAWEI TIT-AL00",
"Huawei HUAWEI TIT-CL10",
"Huawei HUAWEI TIT-L01",
"Huawei HUAWEI TIT-TL00",
"Huawei TIT-AL00",
"Huawei TIT-L01",
"Huawei HUAWEI TIT-CL00",
"Huawei HUAWEI TIT-U02",
"Huawei HUAWEI LYO-L01",
"Huawei HUAWEI Y600-U00",
"Huawei HUAWEI Y600-U151",
"Huawei HUAWEI Y600-U20",
"Huawei HUAWEI Y600-U351",
"Huawei HUAWEI Y600-U40",
"Huawei HUAWEI Y600D-C00",
"Huawei HUAWEI Y610-U00",
"Huawei HUAWEI Y618-T00",
"Huawei Kavak Y625-U03",
"Huawei HUAWEI Y625-U13",
"Huawei HUAWEI Y625-U21",
"Huawei HUAWEI Y625-U32",
"Huawei HUAWEI Y625-U43",
"Huawei HUAWEI Y625-U51",
"Huawei HUAWEI Y635-CL00",
"Huawei Y635-L01",
"Huawei HUAWEI Y635-L02",
"Huawei Y635-L02",
"Huawei HUAWEI Y635-L03",
"Huawei Y635-L03",
"Huawei Y635-L21",
"Huawei HUAWEI Y635-TL00",
"Huawei Y635-TL00",
"Huawei CAM-L03",
"Huawei CAM-L21",
"Huawei CAM-L23",
"Huawei CAM-U22",
"Huawei HUAWEI CAM-L03",
"Huawei HUAWEI CAM-L21",
"Huawei HUAWEI CAM-L23",
"Huawei HUAWEI CAM-U22",
"Huawei CAM-L32",
"Huawei HUAWEI LYO-L03",
"Huawei TRT-L21A",
"Huawei TRT-L53",
"Huawei TRT-LX1",
"Huawei TRT-LX2",
"Huawei TRT-LX3",
"Huawei DUB-LX1",
"Huawei DUB-AL20",
"Huawei STK-L21",
"Huawei STK-L22",
"Huawei STK-LX3",
"Huawei Orinoquia Gran Roraima S7-702u",
"Huawei H1623",
"Huawei d-01G",
"Huawei d-01H",
"Huawei d-02H",
"Huawei eH811",
"Huawei HRY-LX1",
"Huawei HRY-LX1MEB",
"Huawei HRY-LX2",
"Huawei HRY-AL00",
"Huawei KIW-L22",
"Huawei KIW-L23",
"Huawei KIW-L24",
"Huawei DLI-L42",
"Huawei DIG-L21HN",
"Huawei JMM-L22",
"Huawei BLN-L21",
"Huawei BLN-L22",
"Huawei BKK-AL10",
"Huawei BKK-L21",
"Huawei BKK-L22",
"Huawei BKK-LX2",
"Huawei HWV31",
"Huawei 204HW",
"Huawei HUAWEI M881",
"Huawei HUAWEI CAN-AL10",
"Huawei HUAWEI CAN-L01",
"Huawei HUAWEI CAN-L02",
"Huawei HUAWEI CAN-L03",
"Huawei HUAWEI CAN-L11",
"Huawei HUAWEI CAN-L12",
"Huawei HUAWEI CAN-L13",
"Huawei HUAWEI CAZ-AL10",
"Huawei HUAWEI CAZ-AL00",
"Huawei HUAWEI CAZ-AL10",
"Huawei HUAWEI CAZ-TL10",
"Huawei HUAWEI CAZ-TL20",
"Huawei PIC-AL00",
"Huawei PIC-TL00",
"Huawei BAC-AL00",
"Huawei BAC-L01",
"Huawei BAC-L03",
"Huawei BAC-L23",
"Huawei BAC-TL00",
"Huawei RNE-L02",
"Huawei RNE-L22",
"Huawei HWI-AL00",
"Huawei HWI-TL00",
"Huawei PAR-AL00",
"Huawei PAR-L21",
"Huawei PAR-L29",
"Huawei PAR-LX1",
"Huawei PAR-LX1M",
"Huawei PAR-LX9",
"Huawei PAR-TL00",
"Huawei PAR-TL20",
"Huawei ANE-AL00",
"Huawei ANE-TL00",
"Huawei INE-AL00",
"Huawei INE-LX1",
"Huawei INE-LX1r",
"Huawei INE-LX2",
"Huawei INE-TL00",
"Huawei VCE-AL00",
"Huawei VCE-L22",
"Huawei VCE-TL00",
"Huawei MAR-AL00",
"Huawei MAR-TL00",
"Huawei PRA-LX2",
"Huawei PRA-LX3",
"Huawei HUAWEI MLA-L01",
"Huawei HUAWEI MLA-L02",
"Huawei HUAWEI MLA-L03",
"Huawei HUAWEI MLA-L11",
"Huawei HUAWEI MLA-L12",
"Huawei HUAWEI MLA-L13",
"Huawei MLA-L01",
"Huawei MLA-L02",
"Huawei MLA-L03",
"Huawei MLA-L11",
"Huawei MLA-L12",
"Huawei MLA-L13",
"Huawei WAS-AL00",
"Huawei WAS-TL10",
"Huawei MediaPad T1 8.0",
"Huawei S8-701u",
"Huawei S8-701w",
"Huawei HUAWEI MediaPad T1 8.0 4G",
"Huawei Honor T1 8.0",
"Huawei MediaPad T1 8.0 Pro",
"Huawei S8-821w",
"Huawei T1-821w",
"Huawei T1-823L",
"Huawei HUAWEI VNS-DL00",
"Huawei HUAWEI VNS-TL00",
"Huawei BZT-AL00",
"Huawei BZT-AL10",
"Huawei BZT-W09",
"Huawei BZW-AL00",
"Huawei BZW-AL10",
"Huawei MON-AL19",
"Huawei MON-AL19B",
"Huawei MON-W19",
"Huawei BAH2-AL00",
"Huawei BAH2-AL10",
"Huawei BAH2-W09",
"Huawei JDN2-AL00",
"Huawei JDN2-W09",
"Huawei BZK-L00",
"Huawei BZK-W00",
"Huawei PLE-701L",
"Huawei PLE-703L",
"Huawei DRA-AL00",
"Huawei DRA-TL00",
"Huawei POT-AL00a",
"Huawei POT-TL00a",
"Huawei MRD-AL00",
"Huawei MRD-TL00",
"Huawei ARS-AL00",
"Huawei ARS-TL00",
"Huawei STK-AL00",
"Huawei STK-TL00",
"Huawei NCE-AL00",
"Huawei NCE-AL10",
"Huawei NCE-TL10",
"Huawei DIG-AL00",
"Huawei DIG-TL10",
"Huawei SLA-AL00",
"Huawei SLA-TL10",
"Huawei FIG-AL00",
"Huawei FIG-AL10",
"Huawei FIG-TL00",
"Huawei FIG-TL10",
"Huawei LDN-AL00",
"Huawei LDN-AL10",
"Huawei LDN-AL20",
"Huawei LDN-TL00",
"Huawei LDN-TL10",
"Huawei LDN-TL20",
"Huawei FLA-AL00",
"Huawei FLA-AL10",
"Huawei FLA-AL20",
"Huawei FLA-TL00",
"Huawei FLA-TL10",
"Huawei ATU-AL10",
"Huawei ATU-TL10",
"Huawei DUB-AL00",
"Huawei DUB-AL00a",
"Huawei DUB-AL20",
"Huawei DUB-TL00",
"Huawei DUB-TL00a",
"Huawei JKM-AL00",
"Huawei JKM-TL00",
"Huawei JKM-AL00a",
"Huawei JKM-AL00b",
"Huawei AGS2-AL00",
"Huawei JSN-AL00",
"Huawei JSN-TL00",
"Huawei JSN-AL00a",
"Huawei JMM-AL00",
"Huawei JMM-AL10",
"Huawei JMM-TL00",
"Huawei | |
# Source repository: ethers/pyethereum
import rlp
from opcodes import opcodes
import utils
import time
import blocks
import transactions
import trie
import sys
import logging
import json
import time
logger = logging.getLogger(__name__)
class PBLogger(object):
    """Structured logger for EVM execution tracing with pluggable listeners."""
    # Flags selecting which event categories get emitted.
    log_pre_state = True  # dump storage at account before execution
    log_post_state = True  # dump storage at account after execution
    log_block = False  # dump block after TX was applied
    log_memory = False  # dump memory before each op
    log_op = True  # log op, gas, stack before each op
    log_json = False  # generate machine readable output

    def __init__(self):
        # Callbacks registered here receive every logged event.
        self.listeners = []

    def log(self, name, **kargs):
        # Notify registered callbacks first, regardless of output format.
        for listener in self.listeners:
            listener(name, kargs)
        if self.log_json:
            logger.debug(json.dumps({name: kargs}))
            return
        # Human-readable output: pin a few well-known keys to fixed positions
        # so traces line up visually; unknown keys sort to the middle (0).
        priority = dict(pc=-2, op=-1, stackargs=1, data=2, code=3)
        ordered = sorted(kargs.items(), key=lambda item: priority.get(item[0], 0))
        body = ", ".join("%s=%s" % pair for pair in ordered)
        logger.debug("%s: %s", name.ljust(15), body)
pblogger = PBLogger()  # shared module-level trace logger used throughout this module
GDEFAULT = 1  # default per-operation gas cost (not referenced in this chunk — presumably used by opcode tables)
GMEMORY = 1  # gas cost per 32-byte word of memory expansion (see calcfee)
GTXDATA = 5  # gas cost per byte of transaction data
GTXCOST = 500  # base gas cost charged for every transaction
OUT_OF_GAS = -1  # sentinel output value signalling an out-of-gas failure
CREATE_CONTRACT_ADDRESS = '0000000000000000000000000000000000000000'  # 'to' address denoting contract creation
def verify(block, parent):
    """Re-execute `block` on top of `parent` and check internal consistency.

    Rebuilds the block from the parent header, replays every transaction,
    and asserts that the claimed difficulty, gas limit, per-transaction
    state roots, cumulative gas, and final state root all match.

    Raises AssertionError on any mismatch; returns True otherwise.
    """
    # Timestamps must move forward, and not lie more than 900s in the
    # future relative to this node's clock.
    assert block.timestamp >= parent.timestamp
    assert block.timestamp <= time.time() + 900
    # Rebuild a fresh block from the parent with the same header inputs...
    block2 = blocks.Block.init_from_parent(parent,
                                           block.coinbase,
                                           extra_data=block.extra_data,
                                           timestamp=block.timestamp,
                                           uncles=block.uncles)
    # ...its derived difficulty and gas limit must match the claimed ones.
    assert block2.difficulty == block.difficulty
    assert block2.gas_limit == block.gas_limit
    # Replay each transaction; the trie stores (tx, state_root, gas_used)
    # triples keyed by RLP-encoded transaction index.
    for i in range(block.transaction_count):
        tx, s, g = rlp.decode(
            block.transactions.get(rlp.encode(utils.encode_int(i))))
        tx = transactions.Transaction.create(tx)
        assert tx.startgas + block2.gas_used <= block.gas_limit
        apply_transaction(block2, tx)
        # Intermediate state root and cumulative gas must match the record.
        assert s == block2.state.root_hash
        assert g == utils.encode_int(block2.gas_used)
    block2.finalize()
    assert block2.state.root_hash == block.state.root_hash
    assert block2.gas_used == block.gas_used
    return True
class Message(object):
    """A value/gas/data transfer from `sender` to `to` inside the VM."""

    def __init__(self, sender, to, value, gas, data):
        # Store all message fields verbatim, in declaration order.
        fields = ('sender', 'to', 'value', 'gas', 'data')
        for field, val in zip(fields, (sender, to, value, gas, data)):
            setattr(self, field, val)

    def __repr__(self):
        # Show only a prefix of the destination address for brevity.
        return '<Message(to:%s...)>' % self.to[:8]
class InvalidTransaction(Exception):
    """Base class for all transaction validation failures."""
    pass
class UnsignedTransaction(InvalidTransaction):
    """The transaction carries no recoverable sender signature."""
    pass
class InvalidNonce(InvalidTransaction):
    """The transaction nonce does not equal the sender account's nonce."""
    pass
class InsufficientBalance(InvalidTransaction):
    """The sender cannot cover value + gasprice * startgas up front."""
    pass
class InsufficientStartGas(InvalidTransaction):
    """The start gas is below the transaction's intrinsic gas cost."""
    pass
class BlockGasLimitReached(InvalidTransaction):
    """Including this transaction would exceed the block gas limit."""
    pass
class GasPriceTooLow(InvalidTransaction):
    """The offered gas price is below the block's minimum gas price."""
    pass
def apply_transaction(block, tx):
    """Validate `tx` against `block`, execute it, and record the result.

    Returns (success, output) where `output` is the call's return data
    (message send), the new contract address (creation), or '' when the
    transaction ran out of gas. Raises a subclass of InvalidTransaction
    when any up-front validation check fails.
    """

    def rp(actual, target):
        # Shared error-message formatter for validation failures.
        return '%r, actual:%r target:%r' % (tx, actual, target)

    # (1) The transaction signature is valid;
    if not tx.sender:
        raise UnsignedTransaction(tx)
    # (2) the transaction nonce is valid (equivalent to the
    # sender account's current nonce);
    acctnonce = block.get_nonce(tx.sender)
    if acctnonce != tx.nonce:
        raise InvalidNonce(rp(tx.nonce, acctnonce))
    # (3) the gas limit is no smaller than the intrinsic gas,
    # g0, used by the transaction;
    intrinsic_gas_used = GTXDATA * len(tx.data) + GTXCOST
    if tx.startgas < intrinsic_gas_used:
        raise InsufficientStartGas(rp(tx.startgas, intrinsic_gas_used))
    # (4) the sender account balance contains at least the
    # cost, v0, required in up-front payment.
    total_cost = tx.value + tx.gasprice * tx.startgas
    if block.get_balance(tx.sender) < total_cost:
        raise InsufficientBalance(
            rp(block.get_balance(tx.sender), total_cost))
    # check offered gas price is enough
    if tx.gasprice < block.min_gas_price:
        raise GasPriceTooLow(rp(tx.gasprice, block.min_gas_price))
    # check block gas limit
    # BUG FIX: the exception was previously constructed but never raised,
    # silently admitting transactions that exceed the block gas limit.
    if block.gas_used + tx.startgas > block.gas_limit:
        raise BlockGasLimitReached(
            rp(block.gas_used + tx.startgas, block.gas_limit))
    pblogger.log('TX NEW', tx=tx.hex_hash(), tx_dict=tx.to_dict())
    # start transacting #################
    block.increment_nonce(tx.sender)
    # buy startgas: the full allowance is paid to the miner up front;
    # unused gas is refunded after successful execution.
    success = block.transfer_value(tx.sender, block.coinbase,
                                   tx.gasprice * tx.startgas)
    assert success
    if pblogger.log_pre_state:
        pblogger.log('TX PRE STATE', account=tx.sender, state=block.account_to_dict(tx.sender))
    message_gas = tx.startgas - intrinsic_gas_used
    message = Message(tx.sender, tx.to, tx.value, message_gas, tx.data)
    # Process the initial message plus any messages posted during execution.
    block.postqueue = [ message ]
    while len(block.postqueue):
        message = block.postqueue.pop(0)
        # MESSAGE
        if tx.to and tx.to != CREATE_CONTRACT_ADDRESS:
            result, gas_remained, data = apply_msg_send(block, tx, message)
        else:  # CREATE
            result, gas_remained, data = create_contract(block, tx, message)
            if result > 0:
                result = utils.coerce_addr_to_hex(result)
    assert gas_remained >= 0
    pblogger.log("TX APPLIED", result=result, gas_remained=gas_remained,
                 data=''.join(map(chr, data)).encode('hex'))
    if pblogger.log_block:
        pblogger.log('BLOCK', block=block.to_dict(with_state=True, full_transactions=True))
    if not result:  # 0 = OOG failure in both cases
        pblogger.log('TX FAILED', reason='out of gas', startgas=tx.startgas, gas_remained=gas_remained)
        # On failure the whole allowance is consumed; nothing is refunded.
        block.gas_used += tx.startgas
        output = OUT_OF_GAS
    else:
        pblogger.log('TX SUCCESS')
        gas_used = tx.startgas - gas_remained
        # sell remaining gas: refund the unused allowance from the miner.
        block.transfer_value(
            block.coinbase, tx.sender, tx.gasprice * gas_remained)
        block.gas_used += gas_used
        if tx.to:
            output = ''.join(map(chr, data))
        else:
            output = result
    block.commit_state()
    if pblogger.log_post_state:
        pblogger.log('TX POST STATE', account=tx.sender, state=block.account_to_dict(tx.sender))
    # Delete any accounts marked for suicide during execution.
    suicides = block.suicides
    block.suicides = []
    for s in suicides:
        block.del_account(s)
    block.add_transaction_to_list(tx)
    success = output is not OUT_OF_GAS
    return success, output if success else ''
class Compustate():
    """Mutable VM computation state: memory, stack, program counter, gas."""

    def __init__(self, **kwargs):
        # Defaults for a fresh execution frame.
        self.memory = []
        self.stack = []
        self.pc = 0
        self.gas = 0
        # Any keyword argument overrides the matching attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
def decode_datalist(arr):
    """Split a byte string (or list of byte values) into 32-byte big-endian ints."""
    if isinstance(arr, list):
        # Normalize a list of byte values into a byte string first.
        arr = ''.join(map(chr, arr))
    return [utils.big_endian_to_int(arr[pos:pos + 32])
            for pos in range(0, len(arr), 32)]
def apply_msg(block, tx, msg, code):
    """Execute `code` in the context of `msg` against `block`.

    Returns (result, gas_remained, output_bytes): result is 0 on an
    out-of-gas failure (all state changes reverted) and 1 otherwise.
    """
    pblogger.log("MSG APPLY", tx=tx.hex_hash(), to=msg.to, gas=msg.gas)
    # Transfer value, instaquit if not enough
    o = block.transfer_value(msg.sender, msg.to, msg.value)
    if not o:
        return 1, msg.gas, []
    # Snapshot so an out-of-gas failure can revert every state change.
    snapshot = block.snapshot()
    compustate = Compustate(gas=msg.gas)
    t, ops = time.time(), 0
    # Main loop: apply_op returns None while execution should continue.
    while 1:
        o = apply_op(block, tx, msg, code, compustate)
        ops += 1
        if o is not None:
            # BUG FIX: event name was misspelled 'PERFORMAMCE'.
            pblogger.log('PERFORMANCE', ops=ops, time_per_op=(time.time() - t) / ops)
            pblogger.log('MSG APPLIED', result=o)
            if o == OUT_OF_GAS:
                block.revert(snapshot)
                return 0, compustate.gas, []
            else:
                return 1, compustate.gas, o
def apply_msg_send(block, tx, msg):
    """Apply `msg` using the code currently stored at the destination address."""
    return apply_msg(block, tx, msg, block.get_code(msg.to))
def create_contract(block, tx, msg):
    """Create a new contract whose init code is `msg.data`.

    Returns (address_as_int, gas_remained, code_bytes) on success, or the
    failing (0, gas, output) triple when initialisation ran out of gas.
    """
    # Normalize a 40-char hex sender into raw bytes for address derivation.
    sender = msg.sender.decode('hex') if len(msg.sender) == 40 else msg.sender
    # Contract-initiated creations bump the creator's nonce here; external
    # transactions already had the sender nonce bumped in apply_transaction.
    if tx.sender != msg.sender:
        block.increment_nonce(msg.sender)
    nonce = utils.encode_int(block.get_nonce(msg.sender) - 1)
    # New address = last 20 bytes of sha3(rlp([sender, nonce])), hex-encoded.
    msg.to = utils.sha3(rlp.encode([sender, nonce]))[12:].encode('hex')
    assert not block.get_code(msg.to)
    # Run the initialisation code; its output becomes the contract body.
    res, gas, dat = apply_msg(block, tx, msg, msg.data)
    if res:
        block.set_code(msg.to, ''.join(map(chr, dat)))
        return utils.coerce_to_int(msg.to), gas, dat
    else:
        # Failed init: undo the nonce bump and discard the embryonic account.
        if tx.sender != msg.sender:
            block.decrement_nonce(msg.sender)
        block.del_account(msg.to)
        return res, gas, dat
def get_opcode(code, index):
    """Return the byte value of `code` at `index`, or 0 (STOP) past the end."""
    try:
        return ord(code[index])
    except IndexError:
        return 0
def get_op_data(code, index):
    """Opcode metadata tuple at `index`; an INVALID placeholder when unknown."""
    placeholder = ['INVALID', 0, 0, []]
    return opcodes.get(get_opcode(code, index), placeholder)
def ceil32(x):
    """Round `x` up to the nearest multiple of 32 (ceiling division)."""
    return -(-x // 32) * 32
def calcfee(block, tx, msg, compustate, op_data):
    """Gas cost of executing `op_data` against the current computation state.

    Covers the base op cost, memory-expansion cost, and op-specific
    surcharges (CALL/POST charge the forwarded gas; SSTORE charges extra
    only when a previously empty slot becomes occupied).
    """
    stk, mem = compustate.stack, compustate.memory
    op, ins, outs, memuse, base_gas = op_data
    # Bytes of new memory this op will touch; negative entries in `memuse`
    # index the stack for dynamic start/size operands.
    m_extend = 0
    for start, sz in memuse:
        start = start if start >= 0 else stk[start]
        sz = sz if sz >= 0 else stk[sz]
        m_extend = max(m_extend, ceil32(start + sz) - len(mem))
    # NOTE: this module targets Python 2, where `/` on ints truncates;
    # under Python 3 this expression would produce a float.
    COST = m_extend / 32 * GMEMORY + base_gas
    if op == 'CALL' or op == 'POST':
        # The gas forwarded to the callee (top of stack) is charged up front.
        return COST + stk[-1]
    elif op == 'SSTORE':
        # Only the transition empty -> occupied costs extra; occupied ->
        # empty gets the same amount back.
        pre_occupied = COST if block.get_storage_data(msg.to, stk[-1]) else 0
        post_occupied = COST if stk[-2] else 0
        return COST + post_occupied - pre_occupied
    else:
        return COST
# Does not include paying opfee
def apply_op(block, tx, msg, code, compustate):
op, in_args, out_args, mem_grabs, base_gas = opdata = get_op_data(code, compustate.pc)
# empty stack error
if in_args > len(compustate.stack):
return []
# out of gas error
fee = calcfee(block, tx, msg, compustate, opdata)
if fee > compustate.gas:
pblogger.log('OUT OF GAS', needed=fee, available=compustate.gas,
op=op, stack=list(reversed(compustate.stack)))
return OUT_OF_GAS
stackargs = []
for i in range(in_args):
stackargs.append(compustate.stack.pop())
if pblogger.log_op:
log_args = dict(pc=compustate.pc, op=op, stackargs=stackargs, gas=compustate.gas)
if op[:4] == 'PUSH':
ind = compustate.pc + 1
log_args['value'] = utils.big_endian_to_int(code[ind: ind + int(op[4:])])
elif op == 'CALLDATACOPY':
log_args['data'] = msg.data.encode('hex')
elif op == 'SSTORE':
log_args['key'] = stackargs[0]
log_args['value'] = stackargs[1]
pblogger.log('OP', **log_args)
if pblogger.log_memory:
for i in range(0, len(compustate.memory), 16):
memblk = compustate.memory[i:i+16]
memline = ' '.join([chr(x).encode('hex') for x in memblk])
pblogger.log('MEM', mem=memline)
# Apply operation
oldpc = compustate.pc
compustate.gas -= fee
compustate.pc += 1
stk = compustate.stack
mem = compustate.memory
if op == 'STOP' or op == 'INVALID':
return []
elif op == 'ADD':
stk.append((stackargs[0] + stackargs[1]) % 2 ** 256)
elif op == 'SUB':
stk.append((stackargs[0] - stackargs[1]) % 2 ** 256)
elif op == 'MUL':
stk.append((stackargs[0] * stackargs[1]) % 2 ** 256)
elif op == 'DIV':
stk.append(0 if stackargs[1] == 0 else stackargs[0] / stackargs[1])
elif op == 'MOD':
stk.append(0 if stackargs[1] == 0 else stackargs[0] % stackargs[1])
elif op == 'SDIV':
if stackargs[0] >= 2 ** 255:
stackargs[0] -= 2 ** 256
if stackargs[1] >= 2 ** 255:
stackargs[1] -= 2 ** 256
stk.append(0 if stackargs[1] == 0 else
(stackargs[0] / stackargs[1]) % 2 ** 256)
elif op == 'SMOD':
if stackargs[0] >= 2 ** 255:
stackargs[0] -= 2 ** 256
if stackargs[1] >= 2 ** 255:
stackargs[1] -= 2 ** 256
stk.append(0 if stackargs[1] == 0 else
(stackargs[0] % stackargs[1]) % 2 ** 256)
elif op == 'EXP':
stk.append(pow(stackargs[0], stackargs[1], 2 ** 256))
elif op == 'NEG':
stk.append(-stackargs[0] % 2**256)
elif op == 'LT':
stk.append(1 if stackargs[0] < stackargs[1] else 0)
elif op == 'GT':
stk.append(1 if stackargs[0] > stackargs[1] else 0)
elif op == 'SLT':
if stackargs[0] >= 2 ** 255:
stackargs[0] -= 2 ** 256
if stackargs[1] >= 2 ** 255:
stackargs[1] -= 2 ** 256
stk.append(1 if stackargs[0] < stackargs[1] else 0)
elif op == 'SGT':
if stackargs[0] >= | |
# File: dit/abstractdist.py
"""
Abstract implementation of dense random vectors.
"""
import itertools
import numpy as np
__all__ = (
'AbstractDenseDistribution',
'distribution_constraint',
'brute_marginal_array',
'get_abstract_dist',
)
class AbstractDenseDistribution(object):
    """
    An abstract, dense joint distribution over a random vector.

    Every random variable shares the same sample space of size K; with L
    variables the pmf is a flat vector of K**L parameters,

        d[i] = Pr( i'th lexicographically ordered word ).

    For L = 2, K = 2 over alphabet {0,1}:

        d[0] = Pr(00), d[1] = Pr(01), d[2] = Pr(10), d[3] = Pr(11)

    The class expresses any marginal in terms of these parameters, e.g.

        Pr(X_0 = 0) = d[0] + d[1]        Pr(X_1 = 0) = d[0] + d[2]
        Pr(X_0 = 1) = d[2] + d[3]        Pr(X_1 = 1) = d[1] + d[3]

    so each variable corresponds to an integer matrix of parameter indexes:
    X_0 -> [[0,1],[2,3]], X_1 -> [[0,2],[1,3]]. This supports fast marginal
    extraction and lets equality of distributions be written as a linear
    system np.dot(A, d) = b (see `distribution_constraint`).
    """

    def __init__(self, n_variables, n_symbols):
        """
        Initialize the abstract distribution.

        Parameters
        ----------
        n_variables : int
            The number of random variables.
        n_symbols : int
            The size of the (shared) sample space of each random variable.
        """
        self.n_variables = n_variables
        self.n_symbols = n_symbols
        self.n_elements = n_symbols ** n_variables
        self._initialize_singletons(n_variables, n_symbols)

    def _initialize_singletons(self, n_variables, n_symbols):
        """
        Populate `self.rvs`, the singleton marginal lookup table.

        `self.rvs[t, v]` is the set of parameter indexes of all words whose
        t'th symbol equals v — i.e. the rows of the matrix for P(X_t).
        """
        # Lay the K**L parameter indexes out on an L-dimensional grid, one
        # axis per random variable; axis t then indexes the value of X_t,
        # because the flat ordering is lexicographic.
        grid = np.arange(n_symbols ** n_variables)
        grid = grid.reshape((n_symbols,) * n_variables)
        self.rvs = np.empty((n_variables, n_symbols), dtype=object)
        for t in range(n_variables):
            # Bring axis t to the front; row v then collects every parameter
            # index whose word has X_t == v (remaining axes in C order match
            # the block/column layout of the lexicographic ordering).
            rows = np.moveaxis(grid, t, 0).reshape(n_symbols, -1)
            for symbol in range(n_symbols):
                self.rvs[t, symbol] = set(rows[symbol].tolist())

    def parameter_array(self, indexes, cache=None):
        """
        Return a 2D array expressing the marginal on `indexes`.

        For example, indexes=[0,1] yields an array representing P(X_0, X_1)
        in terms of the parameters of the joint distribution.

        Parameters
        ----------
        indexes : list or set
            The integer indexes of the variables kept in the marginal.
        cache : dict or None
            Pass the same dict across many calls to reuse intermediate
            results; when None a private cache is used for this call only.

        Returns
        -------
        p : NumPy array, shape (m, n)
            Each row lists the joint parameters summing to one probability
            of the marginal, with
            m = self.n_symbols ** len(indexes) and
            n = self.n_symbols ** (self.n_variables - len(indexes)).
        """
        if cache is None:
            # Private cache, discarded when this call returns.
            cache = {}
        requested = set(indexes)
        if min(requested) < 0 or max(requested) >= self.n_variables:
            msg = 'Invalid indexes: ' + str(requested)
            raise Exception(msg)
        key = tuple(sorted(requested))

        # The cache stores 1D arrays of sets for efficiency; the public
        # return value is built from them at the end.
        def build(key):
            """Construct (and cache) the 1D array-of-sets for `key`."""
            if len(key) == 1:
                # Singleton marginals were precomputed at init time.
                rows = self.rvs[key[0]]
                cache[key] = rows
            elif (key[0] == 0) and np.all(np.diff(key) == 1):
                # A consecutive-from-zero prefix is just a reshape of the
                # lexicographic parameter ordering.
                n_rows = self.n_symbols ** len(key)
                n_cols = self.n_symbols ** (self.n_variables - len(key))
                grid = np.arange(self.n_elements).reshape(n_rows, n_cols)
                rows = np.array([set(row) for row in grid])
                cache[key] = rows
            else:
                # General case: intersect recursively from the left so that
                # shared prefixes are not re-intersected per word.
                left = build(key[:-1])
                right = build(key[-1:])
                rows = np.empty(len(left) * len(right), dtype=object)
                pairs = itertools.product(left, right)
                for pos, (lhs, rhs) in enumerate(pairs):
                    rows[pos] = lhs.intersection(rhs)
                cache[key] = rows
            return rows

        rows = cache[key] if key in cache else build(key)
        # Each set becomes one sorted row of parameter indexes.
        p = np.array([sorted(members) for members in rows])
        # Unequal set sizes would collapse this into a 1D object array,
        # which should never happen.
        assert len(p.shape) == 2, "Parameter array is not 2D!"
        return p

    def marginal(self, indexes):
        """
        Return an abstract representation of a marginal distribution.

        Parameters
        ----------
        indexes : list or set
            The integer indexes of the variables to keep. Only the count of
            retained variables matters for the abstraction, but the values
            are validated against this distribution.

        Returns
        -------
        d : AbstractDenseDistribution
            The abstract marginal distribution.
        """
        requested = set(indexes)
        if min(requested) < 0 or max(requested) >= self.n_variables:
            msg = 'Invalid indexes.'
            raise Exception(msg)
        return AbstractDenseDistribution(len(requested), self.n_symbols)
def distribution_constraint(indexes1, indexes2, distribution):
    """
    Build the linear system demanding Pr(indexes1) == Pr(indexes2).

    Both index tuples refer to variables of the same abstract joint
    distribution, whose pmf is the lexicographically ordered parameter
    vector ``d``. Equality of the two marginals yields
    ``n_symbols ** len(indexes1)`` equations, ``np.dot(A, d) = 0``; the
    rows of ``A`` are not guaranteed to be linearly independent.

    Parameters
    ----------
    indexes1 : tuple
        Indexes selecting the first marginal. Must select the same number
        of distinct variables as `indexes2`.
    indexes2 : tuple
        Indexes selecting the second marginal.
    distribution : AbstractDenseDistribution
        The abstract joint distribution the indexes refer to.

    Returns
    -------
    A : NumPy array, shape (m, n)
        Constraint coefficients, m = n_symbols ** len(indexes1) rows and
        one column per joint-distribution parameter.
    b : NumPy array, shape (n,)
        An array of zeros.
    """
    if len(set(indexes1)) != len(set(indexes2)):
        raise Exception("Incompatible distributions.")
    # Share one cache so common sub-marginals are computed only once.
    shared_cache = {}
    words1 = distribution.parameter_array(indexes1, cache=shared_cache)
    words2 = distribution.parameter_array(indexes2, cache=shared_cache)
    A = np.zeros((len(words1), distribution.n_elements), dtype=int)
    # NOTE(review): b is sized n_elements (matching the documented API and
    # the original code) even though np.dot(A, d) has length len(words1);
    # confirm callers expect this before changing it.
    b = np.zeros(distribution.n_elements, dtype=int)
    for row, (w1, w2) in enumerate(zip(words1, words2)):
        # Parameters common to both words cancel out of the equation; only
        # the symmetric difference contributes: +1 from the left marginal,
        # -1 from the right.
        diff = set.symmetric_difference(set(w1), set(w2))
        signs = [1 if param in w1 else -1 for param in diff]
        A[(row,), tuple(diff)] = signs
    return A, b
def brute_marginal_array(d, rvs, rv_mode=None):
| |
the module
To print customized extra information, you should reimplement
this method in your own modules. Both single-line and multi-line
strings are acceptable.
"""
return ''
def __repr__(self):
# We treat the extra repr like the sub-module, one item per line
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split('\n')
child_lines = []
for key, module in self._modules.items():
mod_str = repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append('(' + key + '): ' + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + '('
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(lines) + '\n'
main_str += ')'
return main_str
def __dir__(self):
module_attrs = dir(self.__class__)
attrs = list(self.__dict__.keys())
parameters = list(self._parameters.keys())
modules = list(self._modules.keys())
buffers = list(self._buffers.keys())
keys = module_attrs + attrs + parameters + modules + buffers
# Eliminate attrs that are not legal Python variable names
keys = [key for key in keys if not key[0].isdigit()]
return sorted(keys)
def _replicate_for_data_parallel(self):
replica = self.__new__(type(self))
replica.__dict__ = self.__dict__.copy()
# replicas do not have parameters themselves, the replicas reference the original
# module.
replica._parameters = OrderedDict()
replica._buffers = replica._buffers.copy()
replica._modules = replica._modules.copy()
replica._is_replica = True
return replica
class Sequential(Layer):
    r"""A sequential container.
    Modules will be added to it in the order they are passed in the constructor.
    Alternatively, an ordered dict of modules can also be passed in.
    To make it easier to understand, here is a small example::
        # Example of using Sequential
        model = nn.Sequential(
            nn.Conv2d(1,20,5),
            nn.ReLU(),
            nn.Conv2d(20,64,5),
            nn.ReLU()
        )
        # Example of using Sequential with OrderedDict
        model = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(1,20,5)),
            ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2d(20,64,5)),
            ('relu2', nn.ReLU())
        ]))
    """
    def __init__(self, *args, name=None):
        """Register `args` (modules, a mapping of modules, or a sequence) as children."""
        super(Sequential, self).__init__(name=name)
        self._built = False
        args = unpack_singleton(args)
        if isinstance(args, (dict, OrderedDict, ModuleDict)):
            # Mapping input: keys become the child-module names.
            for key, module in args.items():
                module.name = key
                self.add_module(key, module)
        elif isinstance(args, (list, tuple, ModuleList)):
            # Sequence input: children are named by their position.
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)
        else:
            # Variadic input: prefer each module's own name when present.
            for idx, module in enumerate(args):
                if module._name is not None and len(module._name) > 0:
                    self.add_module(module._name, module)
                else:
                    self.add_module(str(idx), module)
        # self.to(self.device)
    # @property
    # def output_shape(self):
    #     if len(self)>0:
    #         return self[-1]._output_shape
    #     else:
    #         return None
    #
    # @output_shape.setter
    # def output_shape(self, value):
    #     if len(self) > 0:
    #         if isinstance(value, tf.TensorShape):
    #             value = to_tensor(value.as_list()).to('int')
    #         elif isinstance(value, (list, tuple)) and len(value) > 0:
    #             value = tuple(
    #                 [to_tensor(tensor_shape.as_list()).to('int') if isinstance(tensor_shape, tf.TensorShape) else to_tensor(tensor_shape).to('int') for tensor_shape in value])
    #
    #         else:
    #             value = to_tensor(value).to('int')
    #         self[-1]._output_shape = value
    #         self._signature=None
    def build(self, input_shape: TensorShape):
        """Assign `input_shape` to the first child and mark the container built."""
        if self._built == False and len(self._modules) > 0:
            self.__getitem__(0).input_shape = input_shape
            self._built = True
    def add_module(self, name, module):
        r"""Adds a child module to the current module.
        The module can be accessed as an attribute using the given name.
        Args:
            name (string): name of the child module. The child module can be
                accessed from this module using the given name
            module (Module): child module to be added to the module.
        """
        # Shape-inference path: when the previous child is already built, run
        # a dummy tensor shaped like its output through the new module to
        # derive this container's output shape and signature.
        if len(self._modules) > 0 and self._input_shape is not None and self[-1].built and self[ -1]._output_shape is not None:
            last_output = self[-1]._output_shape
            dummay_input = to_tensor(last_output.get_dummy_tensor()).to(self.device)
            out = module(dummay_input)
            self._modules[name] = module
            if isinstance(out, OrderedDict):
                # Multi-output module: record one shape per named output.
                self._output_shape = tuple(
                    [tensor_to_shape(o, need_exclude_batch_axis=True, is_singleton=False) for o in out.value_list])
                self.get_root().signature.outputs = OrderedDict()
                for k, v in out.item_list:
                    self.get_root().signature.outputs[k] = tensor_to_shape(v, need_exclude_batch_axis=True, is_singleton=False)
            else:
                # Single output: update both the shape and the signature entry.
                out = enforce_singleton(out)
                self._output_shape = tensor_to_shape(out, need_exclude_batch_axis=True, is_singleton=False)
                self._signature.outputs[self._signature.outputs.key_list[0]].shape = self._output_shape
            # if len(self.get_root().signature.outputs) > 0:
            #     self.get_root().signature=get_signature(self)
            # else:
            #     self.get_root().signature.outputs['output'] = self._output_shape.copy()
        else:
            # Cold path: no shapes known yet — register the child and adopt
            # (or extend) its signature for the container.
            if not hasattr(module, '_signature') or module._signature is None:
                module._signature = get_signature(module)
            sig = copy.deepcopy(module._signature)
            super(Sequential, self).add_module(name, module)
            if len(self) == 1 or self._signature is None:
                self._signature = sig
            elif len(self) > 1:
                # The container's outputs track the newest (last) child.
                self._signature.outputs = copy.deepcopy(sig.outputs)
    def remove_at(self, idx):
        """Remove the child at `idx` and re-derive the output shape/signature."""
        self.__delitem__(idx)
        if len(self._modules) > 0:
            self._output_shape = self[-1]._output_shape
            if isinstance(self._signature, Signature):
                self._signature.outputs[self._signature.outputs.key_list[0]].shape = self[-1]._output_shape
    def _get_item_by_idx(self, iterator, idx):
        """Get the idx-th item of the iterator"""
        size = len(self)
        idx = idx.__index__()
        if not -size <= idx < size:
            raise IndexError('index {} is out of range'.format(idx))
        idx %= size
        return next(islice(iterator, idx, None))
    def __getitem__(self, idx):
        """Index (int) or slice the children; slices return a tuple of modules."""
        if isinstance(idx, slice):
            returnDict = OrderedDict()
            for k, v in list(self._modules.items())[idx]:
                returnDict[k] = v
            return tuple(returnDict.value_list)
        else:
            return self._get_item_by_idx(self._modules.values(), idx)
    def __setitem__(self, idx, module):
        """Replace the child at `idx`, keeping its registered name."""
        key = self._get_item_by_idx(self._modules.keys(), idx)
        return setattr(self, key, module)
    def __delitem__(self, idx):
        """Delete the child (or slice of children) at `idx`."""
        if isinstance(idx, slice):
            for key in list(self._modules.keys())[idx]:
                delattr(self, key)
        else:
            key = self._get_item_by_idx(self._modules.keys(), idx)
            delattr(self, key)
    def __len__(self):
        return len(self._modules)
    def __dir__(self):
        # Hide the purely numeric child names from dir().
        keys = super(Sequential, self).__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys
    def forward(self, *x, **kwargs):
        """Thread the input through each child in registration order."""
        x = unpack_singleton(x)
        for module in self._modules.values():
            if isinstance(x, tuple):
                # Spread the tuple only when the child expects that many inputs.
                if len(module.signature.inputs) == len(x):  # self,x
                    x = module(*x, **kwargs)
                else:
                    x = enforce_singleton(x)
                    x = module(x, **kwargs)
            else:
                x = module(x, **kwargs)
        return x
class ModuleList(Layer):
    r"""Holds submodules in a list.
    :class:`~trident.backend.tensorflow_backend.ModuleList` can be indexed like a regular Python list, but
    modules it contains are properly registered, and will be visible by all
    :class:`~trident.backend.tensorflow_backend..Layer` methods.
    Arguments:
        modules (iterable, optional): an iterable of modules to add
    """
    def __init__(self, modules: Optional[Iterable[Layer]] = None, name=None, keep_output=False, **kwargs) -> None:
        super(ModuleList, self).__init__(name=None, keep_output=False, **kwargs)
        # NOTE(review): the `name`/`keep_output` arguments are not forwarded
        # to the parent initializer (hard-coded None/False) — confirm intended.
        name = self._name
        if modules is not None:
            # BUG FIX: materialize the iterable once. The previous code
            # rebuilt list(modules) on every loop step, which exhausted
            # one-shot iterators (IndexError on the second list() call)
            # and was O(n^2) for sequences.
            modules = list(modules)
            for module in modules:
                # Children of a list are never roots of the module tree.
                module.is_root = False
                for mod in module.modules():
                    if isinstance(mod, Layer) and mod.uuid != module.uuid:
                        mod.is_root = False
                reset_name(module, self._uid_prefixs)
                module.relative_name = name if not hasattr(module, 'relative_name') or module.relative_name == '' else name + '.' + module.relative_name
            self += modules
    def _get_abs_string_index(self, idx):
        """Get the absolute index for the list of modules"""
        idx = operator.index(idx)
        if not (-len(self) <= idx < len(self)):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        return str(idx)
    # @_copy_to_script_wrapper
    def __getitem__(self, idx: int) -> Layer:
        """Index (int) or slice the list; slices return a new ModuleList."""
        if isinstance(idx, slice):
            return self.__class__(list(self._modules.values())[idx])
        else:
            return self._modules[self._get_abs_string_index(idx)]
    def __setitem__(self, idx: int, module: Layer) -> None:
        idx = self._get_abs_string_index(idx)
        return setattr(self, str(idx), module)
    def __delitem__(self, idx: Union[int, slice]) -> None:
        if isinstance(idx, slice):
            for k in range(len(self._modules))[idx]:
                delattr(self, str(k))
        else:
            delattr(self, self._get_abs_string_index(idx))
        # To preserve numbering, self._modules is being reconstructed with modules after deletion
        str_indices = [str(i) for i in range(len(self._modules))]
        self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
    # @_copy_to_script_wrapper
    def __len__(self) -> int:
        return len(self._modules)
    # @_copy_to_script_wrapper
    def __iter__(self) -> typing.Iterator[Layer]:
        return iter(self._modules.values())
    def __iadd__(self: T, modules: Iterable[Layer]) -> T:
        return self.extend(modules)
    # @_copy_to_script_wrapper
    def __dir__(self):
        # Hide the purely numeric child names from dir().
        keys = super(ModuleList, self).__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys
    def insert(self, index: int, module: Layer) -> None:
        r"""Insert a given module before a given index in the list.
        Arguments:
            index (int): index to insert.
            module (nn.Module): module to insert
        """
        # Shift existing entries up by one to make room at `index`.
        for i in range(len(self._modules), index, -1):
            self._modules[str(i)] = self._modules[str(i - 1)]
        self._modules[str(index)] = module
    def append(self: T, module: Layer) -> T:
        r"""Appends a given module to the end of the list.
        Arguments:
            module (nn.Module): module to append
        """
        self.add_module(str(len(self)), module)
        return self
    def extend(self: T, modules: Iterable[Layer]) -> T:
        r"""Appends modules from a Python iterable to the end of the list.
        Arguments:
            modules (iterable): iterable of modules to append
        """
        if not isinstance(modules, abc.Iterable):
            raise TypeError("ModuleList.extend should be called with an "
                            "iterable, but got " + type(modules).__name__)
        offset = len(self)
        for i, module in enumerate(modules):
            self.add_module(str(offset + i), module)
        return self
    def forward(self):
        # A ModuleList is a container only; it has no forward computation.
        raise NotImplementedError()
class ModuleDict(Layer):
r"""Holds submodules in a dictionary.
:class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
:class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.ModuleDict.update`, the order of the merged ``OrderedDict``
or another :class:`~torch.nn.ModuleDict` (the argument to :meth:`~torch.nn.ModuleDict.update`).
Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping.
Arguments:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.