body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
6d696bf079093eec1bd87ef0ace66a0024ea838d2c8f7ab543ae557c166b4426 | def _inception(model_path, **kwargs):
'Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = InceptionI3d(400, in_channels=3)
if (model_path == ''):
return model
params = torch.load(model_path)
model.load_state_dict(params)
return model | Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | models/rgb_I3D.py | _inception | victor-gui/LateTemporalModeling3DCNN | 144 | python | def _inception(model_path, **kwargs):
'Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = InceptionI3d(400, in_channels=3)
if (model_path == ):
return model
params = torch.load(model_path)
model.load_state_dict(params)
return model | def _inception(model_path, **kwargs):
'Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = InceptionI3d(400, in_channels=3)
if (model_path == ):
return model
params = torch.load(model_path)
model.load_state_dict(params)
return model<|docstring|>Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet<|endoftext|> |
495e9478cb812190101f01ab53a3ee067bf110ada5784cfedd784480079fe2ac | def _inception_flow(model_path, **kwargs):
'Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = InceptionI3d(400, in_channels=2)
if (model_path == ''):
return model
params = torch.load(model_path)
model.load_state_dict(params)
return model | Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | models/rgb_I3D.py | _inception_flow | victor-gui/LateTemporalModeling3DCNN | 144 | python | def _inception_flow(model_path, **kwargs):
'Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = InceptionI3d(400, in_channels=2)
if (model_path == ):
return model
params = torch.load(model_path)
model.load_state_dict(params)
return model | def _inception_flow(model_path, **kwargs):
'Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = InceptionI3d(400, in_channels=2)
if (model_path == ):
return model
params = torch.load(model_path)
model.load_state_dict(params)
return model<|docstring|>Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet<|endoftext|> |
b9c3d149a5806708606392c77eb42ae986078bd06843aae8240fe20321f20414 | def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=True, use_bias=False, name='unit_3d'):
'Initializes Unit3D module.'
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self._output_channels, kernel_size=self._kernel_shape, stride=self._stride, padding=0, bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) | Initializes Unit3D module. | models/rgb_I3D.py | __init__ | victor-gui/LateTemporalModeling3DCNN | 144 | python | def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=True, use_bias=False, name='unit_3d'):
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self._output_channels, kernel_size=self._kernel_shape, stride=self._stride, padding=0, bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) | def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=True, use_bias=False, name='unit_3d'):
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self._output_channels, kernel_size=self._kernel_shape, stride=self._stride, padding=0, bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)<|docstring|>Initializes Unit3D module.<|endoftext|> |
569356b609e7aa39a47b7b3e64f31f4f0054e6156a0f8eafc54077cd16887f27 | def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
"Initializes I3D model instance.\n Args:\n num_classes: The number of outputs in the logit layer (default 400, which\n matches the Kinetics dataset).\n spatial_squeeze: Whether to squeeze the spatial dimensions for the logits\n before returning (default True).\n final_endpoint: The model contains many possible endpoints.\n `final_endpoint` specifies the last endpoint for the model to be built\n up to. In addition to the output at `final_endpoint`, all the outputs\n at endpoints up to `final_endpoint` will also be returned, in a\n dictionary. `final_endpoint` must be one of\n InceptionI3d.VALID_ENDPOINTS (default 'Logits').\n name: A string (optional). The name of this module.\n Raises:\n ValueError: if `final_endpoint` is not recognized.\n "
if (final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % final_endpoint))
super(InceptionI3d, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self.logits = None
if (self._final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
self.end_points = {}
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7], stride=(2, 2, 2), padding=(3, 3, 3), name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3b'
self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3c'
self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4b'
self.end_points[end_point] = InceptionModule((((128 + 192) + 96) + 64), [192, 96, 208, 16, 48, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4c'
self.end_points[end_point] = InceptionModule((((192 + 208) + 48) + 64), [160, 112, 224, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4d'
self.end_points[end_point] = InceptionModule((((160 + 224) + 64) + 64), [128, 128, 256, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4e'
self.end_points[end_point] = InceptionModule((((128 + 256) + 64) + 64), [112, 144, 288, 32, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4f'
self.end_points[end_point] = InceptionModule((((112 + 288) + 64) + 64), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5b'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5c'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [384, 192, 384, 48, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Logits'
self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], stride=(1, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')
self.build() | Initializes I3D model instance.
Args:
num_classes: The number of outputs in the logit layer (default 400, which
matches the Kinetics dataset).
spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
before returning (default True).
final_endpoint: The model contains many possible endpoints.
`final_endpoint` specifies the last endpoint for the model to be built
up to. In addition to the output at `final_endpoint`, all the outputs
at endpoints up to `final_endpoint` will also be returned, in a
dictionary. `final_endpoint` must be one of
InceptionI3d.VALID_ENDPOINTS (default 'Logits').
name: A string (optional). The name of this module.
Raises:
ValueError: if `final_endpoint` is not recognized. | models/rgb_I3D.py | __init__ | victor-gui/LateTemporalModeling3DCNN | 144 | python | def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
"Initializes I3D model instance.\n Args:\n num_classes: The number of outputs in the logit layer (default 400, which\n matches the Kinetics dataset).\n spatial_squeeze: Whether to squeeze the spatial dimensions for the logits\n before returning (default True).\n final_endpoint: The model contains many possible endpoints.\n `final_endpoint` specifies the last endpoint for the model to be built\n up to. In addition to the output at `final_endpoint`, all the outputs\n at endpoints up to `final_endpoint` will also be returned, in a\n dictionary. `final_endpoint` must be one of\n InceptionI3d.VALID_ENDPOINTS (default 'Logits').\n name: A string (optional). The name of this module.\n Raises:\n ValueError: if `final_endpoint` is not recognized.\n "
if (final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % final_endpoint))
super(InceptionI3d, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self.logits = None
if (self._final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
self.end_points = {}
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7], stride=(2, 2, 2), padding=(3, 3, 3), name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3b'
self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3c'
self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4b'
self.end_points[end_point] = InceptionModule((((128 + 192) + 96) + 64), [192, 96, 208, 16, 48, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4c'
self.end_points[end_point] = InceptionModule((((192 + 208) + 48) + 64), [160, 112, 224, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4d'
self.end_points[end_point] = InceptionModule((((160 + 224) + 64) + 64), [128, 128, 256, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4e'
self.end_points[end_point] = InceptionModule((((128 + 256) + 64) + 64), [112, 144, 288, 32, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4f'
self.end_points[end_point] = InceptionModule((((112 + 288) + 64) + 64), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5b'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5c'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [384, 192, 384, 48, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Logits'
self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], stride=(1, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')
self.build() | def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
"Initializes I3D model instance.\n Args:\n num_classes: The number of outputs in the logit layer (default 400, which\n matches the Kinetics dataset).\n spatial_squeeze: Whether to squeeze the spatial dimensions for the logits\n before returning (default True).\n final_endpoint: The model contains many possible endpoints.\n `final_endpoint` specifies the last endpoint for the model to be built\n up to. In addition to the output at `final_endpoint`, all the outputs\n at endpoints up to `final_endpoint` will also be returned, in a\n dictionary. `final_endpoint` must be one of\n InceptionI3d.VALID_ENDPOINTS (default 'Logits').\n name: A string (optional). The name of this module.\n Raises:\n ValueError: if `final_endpoint` is not recognized.\n "
if (final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % final_endpoint))
super(InceptionI3d, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self.logits = None
if (self._final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
self.end_points = {}
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7], stride=(2, 2, 2), padding=(3, 3, 3), name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3b'
self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3c'
self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4b'
self.end_points[end_point] = InceptionModule((((128 + 192) + 96) + 64), [192, 96, 208, 16, 48, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4c'
self.end_points[end_point] = InceptionModule((((192 + 208) + 48) + 64), [160, 112, 224, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4d'
self.end_points[end_point] = InceptionModule((((160 + 224) + 64) + 64), [128, 128, 256, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4e'
self.end_points[end_point] = InceptionModule((((128 + 256) + 64) + 64), [112, 144, 288, 32, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4f'
self.end_points[end_point] = InceptionModule((((112 + 288) + 64) + 64), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5b'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5c'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [384, 192, 384, 48, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Logits'
self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], stride=(1, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')
self.build()<|docstring|>Initializes I3D model instance.
Args:
num_classes: The number of outputs in the logit layer (default 400, which
matches the Kinetics dataset).
spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
before returning (default True).
final_endpoint: The model contains many possible endpoints.
`final_endpoint` specifies the last endpoint for the model to be built
up to. In addition to the output at `final_endpoint`, all the outputs
at endpoints up to `final_endpoint` will also be returned, in a
dictionary. `final_endpoint` must be one of
InceptionI3d.VALID_ENDPOINTS (default 'Logits').
name: A string (optional). The name of this module.
Raises:
ValueError: if `final_endpoint` is not recognized.<|endoftext|> |
31ca0c4bb06d2e46a6935908a57f3090dc69bb2f33115b7767d7fd18e0c9821e | def is_clockwise(x_triads, y_triads):
'Returns boolean array which tells whether the three points in 2D plane given by x_triads & y_triads are\n oriented clockwise. https://en.wikipedia.org/wiki/Curve_orientation#Orientation_of_a_simple_polygon\n\n Parameters\n ----------\n x_triads, y_triads : np.ndarray\n Array of 2D coordinates for triangles in a plane\n\n Returns\n -------\n np.ndarray\n '
return (cw_or_ccw(x_triads, y_triads) < 0) | Returns boolean array which tells whether the three points in 2D plane given by x_triads & y_triads are
oriented clockwise. https://en.wikipedia.org/wiki/Curve_orientation#Orientation_of_a_simple_polygon
Parameters
----------
x_triads, y_triads : np.ndarray
Array of 2D coordinates for triangles in a plane
Returns
-------
np.ndarray | src/matching/utils.py | is_clockwise | wdoppenberg/crater-detection | 8 | python | def is_clockwise(x_triads, y_triads):
'Returns boolean array which tells whether the three points in 2D plane given by x_triads & y_triads are\n oriented clockwise. https://en.wikipedia.org/wiki/Curve_orientation#Orientation_of_a_simple_polygon\n\n Parameters\n ----------\n x_triads, y_triads : np.ndarray\n Array of 2D coordinates for triangles in a plane\n\n Returns\n -------\n np.ndarray\n '
return (cw_or_ccw(x_triads, y_triads) < 0) | def is_clockwise(x_triads, y_triads):
'Returns boolean array which tells whether the three points in 2D plane given by x_triads & y_triads are\n oriented clockwise. https://en.wikipedia.org/wiki/Curve_orientation#Orientation_of_a_simple_polygon\n\n Parameters\n ----------\n x_triads, y_triads : np.ndarray\n Array of 2D coordinates for triangles in a plane\n\n Returns\n -------\n np.ndarray\n '
return (cw_or_ccw(x_triads, y_triads) < 0)<|docstring|>Returns boolean array which tells whether the three points in 2D plane given by x_triads & y_triads are
oriented clockwise. https://en.wikipedia.org/wiki/Curve_orientation#Orientation_of_a_simple_polygon
Parameters
----------
x_triads, y_triads : np.ndarray
Array of 2D coordinates for triangles in a plane
Returns
-------
np.ndarray<|endoftext|> |
2aea27e609fd5b333797650683c23c65eb8e22f2f9e279c37e6170bae3d735de | def cyclic_permutations(it, step=1):
'Returns cyclic permutations for iterable.\n\n Parameters\n ----------\n it : iterable object\n step : int, optional\n\n Yields\n -------\n Cyclic permutation of it\n '
(yield it)
for k in range(step, len(it), step):
if isinstance(it, list):
p = (it[k:] + it[:k])
if (p == it):
break
elif isinstance(it, np.ndarray):
p = np.concatenate((it[k:], it[:k]))
if (p == it).all():
break
else:
raise ValueError('Iterable has to be of type list or np.ndarray!')
(yield p) | Returns cyclic permutations for iterable.
Parameters
----------
it : iterable object
step : int, optional
Yields
-------
Cyclic permutation of it | src/matching/utils.py | cyclic_permutations | wdoppenberg/crater-detection | 8 | python | def cyclic_permutations(it, step=1):
'Returns cyclic permutations for iterable.\n\n Parameters\n ----------\n it : iterable object\n step : int, optional\n\n Yields\n -------\n Cyclic permutation of it\n '
(yield it)
for k in range(step, len(it), step):
if isinstance(it, list):
p = (it[k:] + it[:k])
if (p == it):
break
elif isinstance(it, np.ndarray):
p = np.concatenate((it[k:], it[:k]))
if (p == it).all():
break
else:
raise ValueError('Iterable has to be of type list or np.ndarray!')
(yield p) | def cyclic_permutations(it, step=1):
'Returns cyclic permutations for iterable.\n\n Parameters\n ----------\n it : iterable object\n step : int, optional\n\n Yields\n -------\n Cyclic permutation of it\n '
(yield it)
for k in range(step, len(it), step):
if isinstance(it, list):
p = (it[k:] + it[:k])
if (p == it):
break
elif isinstance(it, np.ndarray):
p = np.concatenate((it[k:], it[:k]))
if (p == it).all():
break
else:
raise ValueError('Iterable has to be of type list or np.ndarray!')
(yield p)<|docstring|>Returns cyclic permutations for iterable.
Parameters
----------
it : iterable object
step : int, optional
Yields
-------
Cyclic permutation of it<|endoftext|> |
9235841bae929892ed9b560a7e931404f3df9c884e7014cfc22851bc803abd77 | def get_cliques_by_length(G, length_clique):
' Return the list of all cliques in an undirected graph G with length\n equal to length_clique. '
cliques = []
for c in nx.enumerate_all_cliques(G):
if (len(c) <= length_clique):
if (len(c) == length_clique):
cliques.append(c)
else:
return cliques
return cliques | Return the list of all cliques in an undirected graph G with length
equal to length_clique. | src/matching/utils.py | get_cliques_by_length | wdoppenberg/crater-detection | 8 | python | def get_cliques_by_length(G, length_clique):
' Return the list of all cliques in an undirected graph G with length\n equal to length_clique. '
cliques = []
for c in nx.enumerate_all_cliques(G):
if (len(c) <= length_clique):
if (len(c) == length_clique):
cliques.append(c)
else:
return cliques
return cliques | def get_cliques_by_length(G, length_clique):
' Return the list of all cliques in an undirected graph G with length\n equal to length_clique. '
cliques = []
for c in nx.enumerate_all_cliques(G):
if (len(c) <= length_clique):
if (len(c) == length_clique):
cliques.append(c)
else:
return cliques
return cliques<|docstring|>Return the list of all cliques in an undirected graph G with length
equal to length_clique.<|endoftext|> |
86de9b1a039ba9be8ad35b9b59d57578d4e5300a36cdfbf15ee2827163bea362 | def latlong2cartesian(lat, long, alt=0, rad=1737.1):
'\n Calculate Cartesian coordinates from latitude + longitude information\n '
f = (1.0 / 825.0)
ls = np.arctan((((1 - f) ** 2) * np.tan(lat)))
x = (((rad * np.cos(ls)) * np.cos(long)) + ((alt * np.cos(lat)) * np.cos(long)))
y = (((rad * np.cos(ls)) * np.sin(long)) + ((alt * np.cos(lat)) * np.sin(long)))
z = ((rad * np.sin(ls)) + (alt * np.sin(lat)))
return (x, y, z) | Calculate Cartesian coordinates from latitude + longitude information | src/matching/utils.py | latlong2cartesian | wdoppenberg/crater-detection | 8 | python | def latlong2cartesian(lat, long, alt=0, rad=1737.1):
'\n \n '
f = (1.0 / 825.0)
ls = np.arctan((((1 - f) ** 2) * np.tan(lat)))
x = (((rad * np.cos(ls)) * np.cos(long)) + ((alt * np.cos(lat)) * np.cos(long)))
y = (((rad * np.cos(ls)) * np.sin(long)) + ((alt * np.cos(lat)) * np.sin(long)))
z = ((rad * np.sin(ls)) + (alt * np.sin(lat)))
return (x, y, z) | def latlong2cartesian(lat, long, alt=0, rad=1737.1):
'\n \n '
f = (1.0 / 825.0)
ls = np.arctan((((1 - f) ** 2) * np.tan(lat)))
x = (((rad * np.cos(ls)) * np.cos(long)) + ((alt * np.cos(lat)) * np.cos(long)))
y = (((rad * np.cos(ls)) * np.sin(long)) + ((alt * np.cos(lat)) * np.sin(long)))
z = ((rad * np.sin(ls)) + (alt * np.sin(lat)))
return (x, y, z)<|docstring|>Calculate Cartesian coordinates from latitude + longitude information<|endoftext|> |
35d1653ab680874fb50594cf3275d80237d33c677e911a80429fecab418d3bb1 | @njit
def enhanced_pattern_shifting(n, start_n=0) -> Tuple[(int, int, int)]:
'Generator function returning next crater triad according to Enhanced Pattern Shifting Method [1].\n\n Parameters\n ----------\n n : int\n Number of detected instances.\n start_n: int\n Iteration to start from, useful for batch processing of triads.\n\n Returns\n -------\n i, j, k : int\n\n References\n ----------\n .. [1] Arnas, D., Fialho, M. A. A., & Mortari, D. (2017). Fast and robust kernel generators for star trackers. Acta Astronautica, 134 (August 2016), 291–302. https://doi.org/10.1016/j.actaastro.2017.02.016\n '
if (n < 3):
raise ValueError('Number of detections must be equal or higher than 3!')
index = 0
for dj in range(1, (n - 1)):
for dk in range(1, (n - dj)):
for ii in range(1, 4):
for i in range(ii, (((n - dj) - dk) + 1), 3):
j = (i + dj)
k = (j + dk)
if (index >= start_n):
(yield ((i - 1), (j - 1), (k - 1)))
index += 1 | Generator function returning next crater triad according to Enhanced Pattern Shifting Method [1].
Parameters
----------
n : int
Number of detected instances.
start_n: int
Iteration to start from, useful for batch processing of triads.
Returns
-------
i, j, k : int
References
----------
.. [1] Arnas, D., Fialho, M. A. A., & Mortari, D. (2017). Fast and robust kernel generators for star trackers. Acta Astronautica, 134 (August 2016), 291–302. https://doi.org/10.1016/j.actaastro.2017.02.016 | src/matching/utils.py | enhanced_pattern_shifting | wdoppenberg/crater-detection | 8 | python | @njit
def enhanced_pattern_shifting(n, start_n=0) -> Tuple[(int, int, int)]:
'Generator function returning next crater triad according to Enhanced Pattern Shifting Method [1].\n\n Parameters\n ----------\n n : int\n Number of detected instances.\n start_n: int\n Iteration to start from, useful for batch processing of triads.\n\n Returns\n -------\n i, j, k : int\n\n References\n ----------\n .. [1] Arnas, D., Fialho, M. A. A., & Mortari, D. (2017). Fast and robust kernel generators for star trackers. Acta Astronautica, 134 (August 2016), 291–302. https://doi.org/10.1016/j.actaastro.2017.02.016\n '
if (n < 3):
raise ValueError('Number of detections must be equal or higher than 3!')
index = 0
for dj in range(1, (n - 1)):
for dk in range(1, (n - dj)):
for ii in range(1, 4):
for i in range(ii, (((n - dj) - dk) + 1), 3):
j = (i + dj)
k = (j + dk)
if (index >= start_n):
(yield ((i - 1), (j - 1), (k - 1)))
index += 1 | @njit
def enhanced_pattern_shifting(n, start_n=0) -> Tuple[(int, int, int)]:
'Generator function returning next crater triad according to Enhanced Pattern Shifting Method [1].\n\n Parameters\n ----------\n n : int\n Number of detected instances.\n start_n: int\n Iteration to start from, useful for batch processing of triads.\n\n Returns\n -------\n i, j, k : int\n\n References\n ----------\n .. [1] Arnas, D., Fialho, M. A. A., & Mortari, D. (2017). Fast and robust kernel generators for star trackers. Acta Astronautica, 134 (August 2016), 291–302. https://doi.org/10.1016/j.actaastro.2017.02.016\n '
if (n < 3):
raise ValueError('Number of detections must be equal or higher than 3!')
index = 0
for dj in range(1, (n - 1)):
for dk in range(1, (n - dj)):
for ii in range(1, 4):
for i in range(ii, (((n - dj) - dk) + 1), 3):
j = (i + dj)
k = (j + dk)
if (index >= start_n):
(yield ((i - 1), (j - 1), (k - 1)))
index += 1<|docstring|>Generator function returning next crater triad according to Enhanced Pattern Shifting Method [1].
Parameters
----------
n : int
Number of detected instances.
start_n: int
Iteration to start from, useful for batch processing of triads.
Returns
-------
i, j, k : int
References
----------
.. [1] Arnas, D., Fialho, M. A. A., & Mortari, D. (2017). Fast and robust kernel generators for star trackers. Acta Astronautica, 134 (August 2016), 291–302. https://doi.org/10.1016/j.actaastro.2017.02.016<|endoftext|> |
6c8e243967c37a061f1b6679d9efd34d08c4f54fd89eb7a4d4d877a5f19fce09 | def test_execution(self):
'Just checks that the visualizer at least runs without errors.'
self.result = self.plot(tree=self.tree, feature_table=self.table, sample_metadata=self.md, feature_metadata=self.fmd)
self.assertIsInstance(self.result, Results)
self.assertIsInstance(self.result.visualization, Visualization) | Just checks that the visualizer at least runs without errors. | tests/python/test_integration.py | test_execution | sjanssen2/empress | 0 | python | def test_execution(self):
self.result = self.plot(tree=self.tree, feature_table=self.table, sample_metadata=self.md, feature_metadata=self.fmd)
self.assertIsInstance(self.result, Results)
self.assertIsInstance(self.result.visualization, Visualization) | def test_execution(self):
self.result = self.plot(tree=self.tree, feature_table=self.table, sample_metadata=self.md, feature_metadata=self.fmd)
self.assertIsInstance(self.result, Results)
self.assertIsInstance(self.result.visualization, Visualization)<|docstring|>Just checks that the visualizer at least runs without errors.<|endoftext|> |
2ff8482cd78e86a304617d476c63ba6882305400ccd22cb88ae0c2d26a2c5e31 | def _get_measure(self, measure):
"Execute a 'get' measure command.\n\n :param measure: The measure command to execute.\n :type measure: str\n\n :returns: A list with the result of the executed measure command.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
while True:
self._master._execute(IOLinkProtocol.COMMAND_ICD, IOLinkProtocol.REQUEST_SLAVE)
self._master._execute((str((self._position - 1)) + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.REQUEST_SENSOR_COMMAND)
self._master._execute(IOLinkProtocol.COMMAND_MEAS1, IOLinkProtocol.REQUEST_MEASURE_TYPE)
if (not self._master._execute(measure, IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED)):
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
else:
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = self._master._get_answer()[len(IOLinkProtocol.TERMINATOR_SEQ):(((- 2) * len(IOLinkProtocol.TERMINATOR_SEQ)) - len(IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED))]
else:
info = self._master._get_answer()[:((- len(IOLinkProtocol.TERMINATOR_SEQ)) - len(IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED))]
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Execute a 'get' measure command.
:param measure: The measure command to execute.
:type measure: str
:returns: A list with the result of the executed measure command.
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | _get_measure | STMicroelectronics/WireSTSDK_Python | 9 | python | def _get_measure(self, measure):
"Execute a 'get' measure command.\n\n :param measure: The measure command to execute.\n :type measure: str\n\n :returns: A list with the result of the executed measure command.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
while True:
self._master._execute(IOLinkProtocol.COMMAND_ICD, IOLinkProtocol.REQUEST_SLAVE)
self._master._execute((str((self._position - 1)) + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.REQUEST_SENSOR_COMMAND)
self._master._execute(IOLinkProtocol.COMMAND_MEAS1, IOLinkProtocol.REQUEST_MEASURE_TYPE)
if (not self._master._execute(measure, IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED)):
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
else:
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = self._master._get_answer()[len(IOLinkProtocol.TERMINATOR_SEQ):(((- 2) * len(IOLinkProtocol.TERMINATOR_SEQ)) - len(IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED))]
else:
info = self._master._get_answer()[:((- len(IOLinkProtocol.TERMINATOR_SEQ)) - len(IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED))]
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def _get_measure(self, measure):
"Execute a 'get' measure command.\n\n :param measure: The measure command to execute.\n :type measure: str\n\n :returns: A list with the result of the executed measure command.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
while True:
self._master._execute(IOLinkProtocol.COMMAND_ICD, IOLinkProtocol.REQUEST_SLAVE)
self._master._execute((str((self._position - 1)) + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.REQUEST_SENSOR_COMMAND)
self._master._execute(IOLinkProtocol.COMMAND_MEAS1, IOLinkProtocol.REQUEST_MEASURE_TYPE)
if (not self._master._execute(measure, IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED)):
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
else:
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = self._master._get_answer()[len(IOLinkProtocol.TERMINATOR_SEQ):(((- 2) * len(IOLinkProtocol.TERMINATOR_SEQ)) - len(IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED))]
else:
info = self._master._get_answer()[:((- len(IOLinkProtocol.TERMINATOR_SEQ)) - len(IOLinkProtocol.MESSAGE_TRANSMISSION_COMPLETED))]
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Execute a 'get' measure command.
:param measure: The measure command to execute.
:type measure: str
:returns: A list with the result of the executed measure command.
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
8925d3f42770b42eb893d89615e7d0726261bfd7b15801cc489cf81014f34758 | def _bytes_to_floats(self, data, precision=0):
'Converts an array of bytes to floating point numbers in Little Endian\n order (four bytes per number).\n\n :param data: Input array of bytes that contains the values to convert.\n :type data: str\n\n :param precision: Number of digits after the decimal point.\n :type precision: int\n\n :returns: A list of floating point numbers.\n :rtype: list\n '
return [round(struct.unpack('<f', data[(i * 4):((i * 4) + 4)])[0], precision) for i in range(0, int((len(data) / 4)))] | Converts an array of bytes to floating point numbers in Little Endian
order (four bytes per number).
:param data: Input array of bytes that contains the values to convert.
:type data: str
:param precision: Number of digits after the decimal point.
:type precision: int
:returns: A list of floating point numbers.
:rtype: list | wire_st_sdk/iolink/iolink_sensor.py | _bytes_to_floats | STMicroelectronics/WireSTSDK_Python | 9 | python | def _bytes_to_floats(self, data, precision=0):
'Converts an array of bytes to floating point numbers in Little Endian\n order (four bytes per number).\n\n :param data: Input array of bytes that contains the values to convert.\n :type data: str\n\n :param precision: Number of digits after the decimal point.\n :type precision: int\n\n :returns: A list of floating point numbers.\n :rtype: list\n '
return [round(struct.unpack('<f', data[(i * 4):((i * 4) + 4)])[0], precision) for i in range(0, int((len(data) / 4)))] | def _bytes_to_floats(self, data, precision=0):
'Converts an array of bytes to floating point numbers in Little Endian\n order (four bytes per number).\n\n :param data: Input array of bytes that contains the values to convert.\n :type data: str\n\n :param precision: Number of digits after the decimal point.\n :type precision: int\n\n :returns: A list of floating point numbers.\n :rtype: list\n '
return [round(struct.unpack('<f', data[(i * 4):((i * 4) + 4)])[0], precision) for i in range(0, int((len(data) / 4)))]<|docstring|>Converts an array of bytes to floating point numbers in Little Endian
order (four bytes per number).
:param data: Input array of bytes that contains the values to convert.
:type data: str
:param precision: Number of digits after the decimal point.
:type precision: int
:returns: A list of floating point numbers.
:rtype: list<|endoftext|> |
73e415dd36430349fefe5df75a340e053758c95d1644b6aafddbb47e1fe2fc6d | def get_env(self):
'Get environmental data.\n\n :returns: A list with Pressure [mbar], Humidity [%], and Temperature [C]\n values.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_3)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
info = [i for i in info if (i != '')]
if (len(info) == IOLinkSensor._SIZE_OF_ENV):
break
elif (len(info) == (IOLinkSensor._SIZE_OF_ENV * IOLinkSensor._SIZE_OF_FLOAT_bytes)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = list(map((lambda s: float(s)), info))
else:
info = self._bytes_to_floats(info, self._FLOAT_PRECISION)
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Get environmental data.
:returns: A list with Pressure [mbar], Humidity [%], and Temperature [C]
values.
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | get_env | STMicroelectronics/WireSTSDK_Python | 9 | python | def get_env(self):
'Get environmental data.\n\n :returns: A list with Pressure [mbar], Humidity [%], and Temperature [C]\n values.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_3)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
info = [i for i in info if (i != )]
if (len(info) == IOLinkSensor._SIZE_OF_ENV):
break
elif (len(info) == (IOLinkSensor._SIZE_OF_ENV * IOLinkSensor._SIZE_OF_FLOAT_bytes)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = list(map((lambda s: float(s)), info))
else:
info = self._bytes_to_floats(info, self._FLOAT_PRECISION)
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def get_env(self):
'Get environmental data.\n\n :returns: A list with Pressure [mbar], Humidity [%], and Temperature [C]\n values.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_3)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
info = [i for i in info if (i != )]
if (len(info) == IOLinkSensor._SIZE_OF_ENV):
break
elif (len(info) == (IOLinkSensor._SIZE_OF_ENV * IOLinkSensor._SIZE_OF_FLOAT_bytes)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = list(map((lambda s: float(s)), info))
else:
info = self._bytes_to_floats(info, self._FLOAT_PRECISION)
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Get environmental data.
:returns: A list with Pressure [mbar], Humidity [%], and Temperature [C]
values.
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
ae2607833f4d2d14a336dbfd729740014326b75bf0bd7c7347e78618bd23d523 | def get_tdm(self):
'Get time domain data.\n\n :returns: A two-elements list, with a list of RMS Speed values on X,Y,Z\n axes [mm/s] as the first element, and a list of Peak Acceleration\n values on X,Y,Z axes [m/s2] as the second element.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_2)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
if (len(info) == IOLinkSensor._SIZE_OF_TDM):
break
elif (len(info) == (IOLinkSensor._SIZE_OF_TDM * IOLinkSensor._SIZE_OF_FLOAT_bytes)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = [list(map((lambda s: float(s)), info[0:3])), list(map((lambda s: float(s)), info[3:6]))]
else:
info = self._bytes_to_floats(info, self._FLOAT_PRECISION)
info = [info[0:3], info[3:6]]
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Get time domain data.
:returns: A two-elements list, with a list of RMS Speed values on X,Y,Z
axes [mm/s] as the first element, and a list of Peak Acceleration
values on X,Y,Z axes [m/s2] as the second element.
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | get_tdm | STMicroelectronics/WireSTSDK_Python | 9 | python | def get_tdm(self):
'Get time domain data.\n\n :returns: A two-elements list, with a list of RMS Speed values on X,Y,Z\n axes [mm/s] as the first element, and a list of Peak Acceleration\n values on X,Y,Z axes [m/s2] as the second element.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_2)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
if (len(info) == IOLinkSensor._SIZE_OF_TDM):
break
elif (len(info) == (IOLinkSensor._SIZE_OF_TDM * IOLinkSensor._SIZE_OF_FLOAT_bytes)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = [list(map((lambda s: float(s)), info[0:3])), list(map((lambda s: float(s)), info[3:6]))]
else:
info = self._bytes_to_floats(info, self._FLOAT_PRECISION)
info = [info[0:3], info[3:6]]
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def get_tdm(self):
'Get time domain data.\n\n :returns: A two-elements list, with a list of RMS Speed values on X,Y,Z\n axes [mm/s] as the first element, and a list of Peak Acceleration\n values on X,Y,Z axes [m/s2] as the second element.\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_2)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
if (len(info) == IOLinkSensor._SIZE_OF_TDM):
break
elif (len(info) == (IOLinkSensor._SIZE_OF_TDM * IOLinkSensor._SIZE_OF_FLOAT_bytes)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = [list(map((lambda s: float(s)), info[0:3])), list(map((lambda s: float(s)), info[3:6]))]
else:
info = self._bytes_to_floats(info, self._FLOAT_PRECISION)
info = [info[0:3], info[3:6]]
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Get time domain data.
:returns: A two-elements list, with a list of RMS Speed values on X,Y,Z
axes [mm/s] as the first element, and a list of Peak Acceleration
values on X,Y,Z axes [m/s2] as the second element.
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
7be130c8404479aca54ca0e47c99cfb14115f26ac9eb0f67be7d54f098920957 | def get_fft(self):
'Get Fast Fourier Transform of vibration data.\n\n :returns: A n-elements list, with each element being a list of four\n values: the first is a frequency [Hz] and the other three are the\n corresponding vibration values on the three axis [m/s2].\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
l = IOLinkSensor._SIZE_OF_FDM_LINES
n = (IOLinkSensor._SIZE_OF_FDM * IOLinkSensor._SIZE_OF_FLOAT_bytes)
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_4)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
if (len(info) == l):
break
elif (len(info) == (l * n)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
for i in range(0, len(info)):
info[i] = list(map((lambda s: float(s)), info[i].split('\t')[0:(- 1)]))
else:
info = [info[(i * n):((i * n) + n)] for i in range(0, l)]
info = list(map((lambda s: self._bytes_to_floats(s, self._FLOAT_PRECISION)), info))
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Get Fast Fourier Transform of vibration data.
:returns: A n-elements list, with each element being a list of four
values: the first is a frequency [Hz] and the other three are the
corresponding vibration values on the three axis [m/s2].
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | get_fft | STMicroelectronics/WireSTSDK_Python | 9 | python | def get_fft(self):
'Get Fast Fourier Transform of vibration data.\n\n :returns: A n-elements list, with each element being a list of four\n values: the first is a frequency [Hz] and the other three are the\n corresponding vibration values on the three axis [m/s2].\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
l = IOLinkSensor._SIZE_OF_FDM_LINES
n = (IOLinkSensor._SIZE_OF_FDM * IOLinkSensor._SIZE_OF_FLOAT_bytes)
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_4)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
if (len(info) == l):
break
elif (len(info) == (l * n)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
for i in range(0, len(info)):
info[i] = list(map((lambda s: float(s)), info[i].split('\t')[0:(- 1)]))
else:
info = [info[(i * n):((i * n) + n)] for i in range(0, l)]
info = list(map((lambda s: self._bytes_to_floats(s, self._FLOAT_PRECISION)), info))
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def get_fft(self):
'Get Fast Fourier Transform of vibration data.\n\n :returns: A n-elements list, with each element being a list of four\n values: the first is a frequency [Hz] and the other three are the\n corresponding vibration values on the three axis [m/s2].\n :rtype: list\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n '
try:
with lock_for_object(self._master):
l = IOLinkSensor._SIZE_OF_FDM_LINES
n = (IOLinkSensor._SIZE_OF_FDM * IOLinkSensor._SIZE_OF_FLOAT_bytes)
while True:
info = self._get_measure(IOLinkProtocol.COMMAND_MEAS1_4)
if (not IOLinkProtocol.BYTES_TRANSMISSION):
info = info.split(IOLinkProtocol.TERMINATOR_SEQ.encode('utf-8'))[2:]
if (len(info) == l):
break
elif (len(info) == (l * n)):
break
if (not IOLinkProtocol.BYTES_TRANSMISSION):
for i in range(0, len(info)):
info[i] = list(map((lambda s: float(s)), info[i].split('\t')[0:(- 1)]))
else:
info = [info[(i * n):((i * n) + n)] for i in range(0, l)]
info = list(map((lambda s: self._bytes_to_floats(s, self._FLOAT_PRECISION)), info))
return info
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Get Fast Fourier Transform of vibration data.
:returns: A n-elements list, with each element being a list of four
values: the first is a frequency [Hz] and the other three are the
corresponding vibration values on the three axis [m/s2].
:rtype: list
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
9f930e063403e8f80c4e5bc5e934994108378d0db7c281c44541f0f28249547e | def _set_parameter(self, parameter, value):
"Execute a 'set' parameter command.\n\n :param parameter: The parameter command to execute.\n :type parameter: str\n\n :param value: The parameter value to set.\n :type value: str\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
self._master._execute(IOLinkProtocol.COMMAND_ICD, IOLinkProtocol.REQUEST_SLAVE)
self._master._execute((str((self._position - 1)) + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.REQUEST_SENSOR_COMMAND)
self._master._execute(IOLinkProtocol.COMMAND_SET, IOLinkProtocol.REQUEST_PARAMETER_NAME)
self._master._execute(parameter, IOLinkProtocol.REQUEST_PARAMETER_VALUE)
self._master._execute((value + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.TERMINATOR_SEQ)
self._master._execute(None, IOLinkProtocol.TERMINATOR_SEQ)
info = self._master._get_answer()
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
return (True if (IOLinkProtocol.MESSAGE_PARAMETER_UPDATED in info) else False)
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Execute a 'set' parameter command.
:param parameter: The parameter command to execute.
:type parameter: str
:param value: The parameter value to set.
:type value: str
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | _set_parameter | STMicroelectronics/WireSTSDK_Python | 9 | python | def _set_parameter(self, parameter, value):
"Execute a 'set' parameter command.\n\n :param parameter: The parameter command to execute.\n :type parameter: str\n\n :param value: The parameter value to set.\n :type value: str\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
self._master._execute(IOLinkProtocol.COMMAND_ICD, IOLinkProtocol.REQUEST_SLAVE)
self._master._execute((str((self._position - 1)) + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.REQUEST_SENSOR_COMMAND)
self._master._execute(IOLinkProtocol.COMMAND_SET, IOLinkProtocol.REQUEST_PARAMETER_NAME)
self._master._execute(parameter, IOLinkProtocol.REQUEST_PARAMETER_VALUE)
self._master._execute((value + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.TERMINATOR_SEQ)
self._master._execute(None, IOLinkProtocol.TERMINATOR_SEQ)
info = self._master._get_answer()
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
return (True if (IOLinkProtocol.MESSAGE_PARAMETER_UPDATED in info) else False)
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def _set_parameter(self, parameter, value):
"Execute a 'set' parameter command.\n\n :param parameter: The parameter command to execute.\n :type parameter: str\n\n :param value: The parameter value to set.\n :type value: str\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
self._master._execute(IOLinkProtocol.COMMAND_ICD, IOLinkProtocol.REQUEST_SLAVE)
self._master._execute((str((self._position - 1)) + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.REQUEST_SENSOR_COMMAND)
self._master._execute(IOLinkProtocol.COMMAND_SET, IOLinkProtocol.REQUEST_PARAMETER_NAME)
self._master._execute(parameter, IOLinkProtocol.REQUEST_PARAMETER_VALUE)
self._master._execute((value + IOLinkProtocol.TERMINATOR_SEQ), IOLinkProtocol.TERMINATOR_SEQ)
self._master._execute(None, IOLinkProtocol.TERMINATOR_SEQ)
info = self._master._get_answer()
self._master._execute(IOLinkProtocol.COMMAND_END, IOLinkProtocol.REQUEST_MOD)
return (True if (IOLinkProtocol.MESSAGE_PARAMETER_UPDATED in info) else False)
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Execute a 'set' parameter command.
:param parameter: The parameter command to execute.
:type parameter: str
:param value: The parameter value to set.
:type value: str
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
55938d21e05be62ca87a99476be3046abcc315faba72f1fdbb9131a413f5206b | def set_odr(self, odr):
"Set accelerometer's output data rate.\n\n :param odr: Accelerometer's output data rate.\n :type odr: :class:`wire_st_sdk.iolink.iolink_protocol.ODR`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_ODR, '{:04d}'.format(odr.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Set accelerometer's output data rate.
:param odr: Accelerometer's output data rate.
:type odr: :class:`wire_st_sdk.iolink.iolink_protocol.ODR`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | set_odr | STMicroelectronics/WireSTSDK_Python | 9 | python | def set_odr(self, odr):
"Set accelerometer's output data rate.\n\n :param odr: Accelerometer's output data rate.\n :type odr: :class:`wire_st_sdk.iolink.iolink_protocol.ODR`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_ODR, '{:04d}'.format(odr.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def set_odr(self, odr):
"Set accelerometer's output data rate.\n\n :param odr: Accelerometer's output data rate.\n :type odr: :class:`wire_st_sdk.iolink.iolink_protocol.ODR`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_ODR, '{:04d}'.format(odr.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Set accelerometer's output data rate.
:param odr: Accelerometer's output data rate.
:type odr: :class:`wire_st_sdk.iolink.iolink_protocol.ODR`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
20c06e11e022dc8d0cc15e74835042657fd96cdd7798e5956242d5f388b45fd5 | def set_fls(self, fls):
"Set accelerometer's full scale.\n\n :param fls: Accelerometer's full scale.\n :type fls: :class:`wire_st_sdk.iolink.iolink_protocol.FLS`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_FLS, '{:02d}'.format(fls.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Set accelerometer's full scale.
:param fls: Accelerometer's full scale.
:type fls: :class:`wire_st_sdk.iolink.iolink_protocol.FLS`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | set_fls | STMicroelectronics/WireSTSDK_Python | 9 | python | def set_fls(self, fls):
"Set accelerometer's full scale.\n\n :param fls: Accelerometer's full scale.\n :type fls: :class:`wire_st_sdk.iolink.iolink_protocol.FLS`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_FLS, '{:02d}'.format(fls.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def set_fls(self, fls):
"Set accelerometer's full scale.\n\n :param fls: Accelerometer's full scale.\n :type fls: :class:`wire_st_sdk.iolink.iolink_protocol.FLS`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_FLS, '{:02d}'.format(fls.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Set accelerometer's full scale.
:param fls: Accelerometer's full scale.
:type fls: :class:`wire_st_sdk.iolink.iolink_protocol.FLS`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
4d04b6c41e2a858007bef9d5b2a11d69a5c8e084f0d5d12e9f4677149e6c4aa8 | def set_sze(self, sze):
"Set accelerometer's input array size for FFT.\n\n :param sze: Accelerometer's input array size for FFT.\n :type sze: :class:`wire_st_sdk.iolink.iolink_protocol.SZE`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_SZE, '{:04d}'.format(sze.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Set accelerometer's input array size for FFT.
:param sze: Accelerometer's input array size for FFT.
:type sze: :class:`wire_st_sdk.iolink.iolink_protocol.SZE`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | set_sze | STMicroelectronics/WireSTSDK_Python | 9 | python | def set_sze(self, sze):
"Set accelerometer's input array size for FFT.\n\n :param sze: Accelerometer's input array size for FFT.\n :type sze: :class:`wire_st_sdk.iolink.iolink_protocol.SZE`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_SZE, '{:04d}'.format(sze.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def set_sze(self, sze):
"Set accelerometer's input array size for FFT.\n\n :param sze: Accelerometer's input array size for FFT.\n :type sze: :class:`wire_st_sdk.iolink.iolink_protocol.SZE`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_SZE, '{:04d}'.format(sze.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Set accelerometer's input array size for FFT.
:param sze: Accelerometer's input array size for FFT.
:type sze: :class:`wire_st_sdk.iolink.iolink_protocol.SZE`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
ff33b2bf838daaf38c207ceab20197479edc871cc8b2b802280a3a30035f3920 | def set_sub(self, sub):
"Set accelerometer's number of subranges.\n\n :param sub: Number of subranges.\n :type sub: :class:`wire_st_sdk.iolink.iolink_protocol.SUB`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_SUB, '{:02d}'.format(sub.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Set accelerometer's number of subranges.
:param sub: Number of subranges.
:type sub: :class:`wire_st_sdk.iolink.iolink_protocol.SUB`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | set_sub | STMicroelectronics/WireSTSDK_Python | 9 | python | def set_sub(self, sub):
"Set accelerometer's number of subranges.\n\n :param sub: Number of subranges.\n :type sub: :class:`wire_st_sdk.iolink.iolink_protocol.SUB`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_SUB, '{:02d}'.format(sub.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def set_sub(self, sub):
"Set accelerometer's number of subranges.\n\n :param sub: Number of subranges.\n :type sub: :class:`wire_st_sdk.iolink.iolink_protocol.SUB`\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_SUB, '{:02d}'.format(sub.value))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Set accelerometer's number of subranges.
:param sub: Number of subranges.
:type sub: :class:`wire_st_sdk.iolink.iolink_protocol.SUB`
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
c6a90cd6da968b4db1e06eb544fc954160bfd553a7261039dbbb8348a40928f3 | def set_acq(self, acq):
"Set accelerometer's total acquisition time, which is valid for all\n types of analysis.\n\n :param acq: Accelerometer's total acquisition time (must be in the range\n [ACQ_MIN..ACQ_MAX]).\n :type acq: int\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
self._acq = acq
return self._set_parameter(IOLinkProtocol.COMMAND_ACQ, '{:05d}'.format(acq))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Set accelerometer's total acquisition time, which is valid for all
types of analysis.
:param acq: Accelerometer's total acquisition time (must be in the range
[ACQ_MIN..ACQ_MAX]).
:type acq: int
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | set_acq | STMicroelectronics/WireSTSDK_Python | 9 | python | def set_acq(self, acq):
"Set accelerometer's total acquisition time, which is valid for all\n types of analysis.\n\n :param acq: Accelerometer's total acquisition time (must be in the range\n [ACQ_MIN..ACQ_MAX]).\n :type acq: int\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
self._acq = acq
return self._set_parameter(IOLinkProtocol.COMMAND_ACQ, '{:05d}'.format(acq))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def set_acq(self, acq):
"Set accelerometer's total acquisition time, which is valid for all\n types of analysis.\n\n :param acq: Accelerometer's total acquisition time (must be in the range\n [ACQ_MIN..ACQ_MAX]).\n :type acq: int\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
self._acq = acq
return self._set_parameter(IOLinkProtocol.COMMAND_ACQ, '{:05d}'.format(acq))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Set accelerometer's total acquisition time, which is valid for all
types of analysis.
:param acq: Accelerometer's total acquisition time (must be in the range
[ACQ_MIN..ACQ_MAX]).
:type acq: int
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
89ec337d84680b1978a32c1001a3a420b9a006a4b6fd39b43ec6a8c88c051cd0 | def set_ovl(self, ovl):
"Set accelerometer's overlapping percentage between two consecutive\n FFT analysis.\n\n :param ovl: Accelerometer's overlapping percentage between two\n consecutive FFT analysis (must be in the range [OVL_MIN..OVL_MAX]).\n :type ovl: int\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_OVL, '{:02d}'.format(ovl))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | Set accelerometer's overlapping percentage between two consecutive
FFT analysis.
:param ovl: Accelerometer's overlapping percentage between two
consecutive FFT analysis (must be in the range [OVL_MIN..OVL_MAX]).
:type ovl: int
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully. | wire_st_sdk/iolink/iolink_sensor.py | set_ovl | STMicroelectronics/WireSTSDK_Python | 9 | python | def set_ovl(self, ovl):
"Set accelerometer's overlapping percentage between two consecutive\n FFT analysis.\n\n :param ovl: Accelerometer's overlapping percentage between two\n consecutive FFT analysis (must be in the range [OVL_MIN..OVL_MAX]).\n :type ovl: int\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_OVL, '{:02d}'.format(ovl))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e | def set_ovl(self, ovl):
"Set accelerometer's overlapping percentage between two consecutive\n FFT analysis.\n\n :param ovl: Accelerometer's overlapping percentage between two\n consecutive FFT analysis (must be in the range [OVL_MIN..OVL_MAX]).\n :type ovl: int\n\n :returns: True if the parameter has been set correctly, False otherwise.\n :rtype: bool\n\n :raises SerialException, SerialTimeoutException: are raised if something\n with the serial communication does not work.\n :raises WireSTInvalidOperationException: is raised if the command has\n not been executed successfully.\n "
try:
with lock_for_object(self._master):
return self._set_parameter(IOLinkProtocol.COMMAND_OVL, '{:02d}'.format(ovl))
except (SerialException, SerialTimeoutException, WireSTInvalidOperationException) as e:
raise e<|docstring|>Set accelerometer's overlapping percentage between two consecutive
FFT analysis.
:param ovl: Accelerometer's overlapping percentage between two
consecutive FFT analysis (must be in the range [OVL_MIN..OVL_MAX]).
:type ovl: int
:returns: True if the parameter has been set correctly, False otherwise.
:rtype: bool
:raises SerialException, SerialTimeoutException: are raised if something
with the serial communication does not work.
:raises WireSTInvalidOperationException: is raised if the command has
not been executed successfully.<|endoftext|> |
8bf68e32bc538376a22b072ea384100180ca0917f9ab50f79237b493fca5d030 | def epoch_to_text(ts):
'\n Convert an epoch timestamp to UK local time as text\n '
return datetime.fromtimestamp(ts, tz=timezone('Europe/London')).isoformat() | Convert an epoch timestamp to UK local time as text | tfc_web/api/extractors/util.py | epoch_to_text | SmartCambridge/tfc_web | 2 | python | def epoch_to_text(ts):
'\n \n '
return datetime.fromtimestamp(ts, tz=timezone('Europe/London')).isoformat() | def epoch_to_text(ts):
'\n \n '
return datetime.fromtimestamp(ts, tz=timezone('Europe/London')).isoformat()<|docstring|>Convert an epoch timestamp to UK local time as text<|endoftext|> |
5ec3d64ada9780a4b8aa4645ce518e3a6cb57deb044ab31764bad0edb582a04c | def __init__(self, dbcontext, indexing_root, catalog_key, doc_map_key, catalog_factory, metadata_factory):
'\n dbcontext\n IZODBNode instance.\n indexing_root\n Indexing root. defaults to self.dbcontext.\n catalog_key\n DB key of catalog.\n doc_map_key\n DB key of doc_map.\n catalog_factory\n Factory callback for creating catalog instance.\n metadata_factory\n Factory callback for creating node metadata.\n '
self.dbcontext = dbcontext
self.indexing_root = indexing_root
self.catalog_key = catalog_key
self.doc_map_key = doc_map_key
self.catalog_factory = catalog_factory
self.metadata_factory = metadata_factory | dbcontext
IZODBNode instance.
indexing_root
Indexing root. defaults to self.dbcontext.
catalog_key
DB key of catalog.
doc_map_key
DB key of doc_map.
catalog_factory
Factory callback for creating catalog instance.
metadata_factory
Factory callback for creating node metadata. | src/cone/zodb/catalog.py | __init__ | conestack/cone.zodb | 0 | python | def __init__(self, dbcontext, indexing_root, catalog_key, doc_map_key, catalog_factory, metadata_factory):
'\n dbcontext\n IZODBNode instance.\n indexing_root\n Indexing root. defaults to self.dbcontext.\n catalog_key\n DB key of catalog.\n doc_map_key\n DB key of doc_map.\n catalog_factory\n Factory callback for creating catalog instance.\n metadata_factory\n Factory callback for creating node metadata.\n '
self.dbcontext = dbcontext
self.indexing_root = indexing_root
self.catalog_key = catalog_key
self.doc_map_key = doc_map_key
self.catalog_factory = catalog_factory
self.metadata_factory = metadata_factory | def __init__(self, dbcontext, indexing_root, catalog_key, doc_map_key, catalog_factory, metadata_factory):
'\n dbcontext\n IZODBNode instance.\n indexing_root\n Indexing root. defaults to self.dbcontext.\n catalog_key\n DB key of catalog.\n doc_map_key\n DB key of doc_map.\n catalog_factory\n Factory callback for creating catalog instance.\n metadata_factory\n Factory callback for creating node metadata.\n '
self.dbcontext = dbcontext
self.indexing_root = indexing_root
self.catalog_key = catalog_key
self.doc_map_key = doc_map_key
self.catalog_factory = catalog_factory
self.metadata_factory = metadata_factory<|docstring|>dbcontext
IZODBNode instance.
indexing_root
Indexing root. defaults to self.dbcontext.
catalog_key
DB key of catalog.
doc_map_key
DB key of doc_map.
catalog_factory
Factory callback for creating catalog instance.
metadata_factory
Factory callback for creating node metadata.<|endoftext|> |
7155c56248199d490e840ed06b945bc1f324b30f1f737b3d7173e8d16f9e7507 | def validate_field_data(self, validation, data):
"\n Ensure we've been passed legitimate values, particularly for the audio URLs.\n "
if (data.sources is None):
validation.add(ValidationMessage(ValidationMessage.ERROR, _(u'You must specify at least one source URL!')))
else:
sources = filter(None, data.sources.split('\n'))
if (len(sources) == 0):
validation.add(ValidationMessage(ValidationMessage.ERROR, _(u'You must specify at least one source URL!')))
else:
for source in sources:
(_scheme, _netloc, path, _params, _qs, _fragment) = urlparse(source)
if (path is None):
validation.add(ValidationMessage(ValidationMessage.ERROR, ((_(u"Invalid URL '") + unicode(source)) + _(u"' entered.")))) | Ensure we've been passed legitimate values, particularly for the audio URLs. | audio/fields.py | validate_field_data | nuclearfurnace/xblock-audio | 0 | python | def validate_field_data(self, validation, data):
"\n \n "
if (data.sources is None):
validation.add(ValidationMessage(ValidationMessage.ERROR, _(u'You must specify at least one source URL!')))
else:
sources = filter(None, data.sources.split('\n'))
if (len(sources) == 0):
validation.add(ValidationMessage(ValidationMessage.ERROR, _(u'You must specify at least one source URL!')))
else:
for source in sources:
(_scheme, _netloc, path, _params, _qs, _fragment) = urlparse(source)
if (path is None):
validation.add(ValidationMessage(ValidationMessage.ERROR, ((_(u"Invalid URL '") + unicode(source)) + _(u"' entered.")))) | def validate_field_data(self, validation, data):
"\n \n "
if (data.sources is None):
validation.add(ValidationMessage(ValidationMessage.ERROR, _(u'You must specify at least one source URL!')))
else:
sources = filter(None, data.sources.split('\n'))
if (len(sources) == 0):
validation.add(ValidationMessage(ValidationMessage.ERROR, _(u'You must specify at least one source URL!')))
else:
for source in sources:
(_scheme, _netloc, path, _params, _qs, _fragment) = urlparse(source)
if (path is None):
validation.add(ValidationMessage(ValidationMessage.ERROR, ((_(u"Invalid URL '") + unicode(source)) + _(u"' entered."))))<|docstring|>Ensure we've been passed legitimate values, particularly for the audio URLs.<|endoftext|> |
e71ff5a9af01b3c6b13ebfec61c80562225561b9499413415fd7fbdec885b6a5 | def upgrade():
'Migrations for the upgrade.'
conn = op.get_bind()
statement = text("\n UPDATE db_dbnode SET type = 'data.code.Code.' WHERE type = 'code.Code.';\n ")
conn.execute(statement) | Migrations for the upgrade. | aiida/backends/sqlalchemy/migrations/versions/a603da2cc809_code_sub_class_of_data.py | upgrade | azadoks/aiida-core | 180 | python | def upgrade():
conn = op.get_bind()
statement = text("\n UPDATE db_dbnode SET type = 'data.code.Code.' WHERE type = 'code.Code.';\n ")
conn.execute(statement) | def upgrade():
conn = op.get_bind()
statement = text("\n UPDATE db_dbnode SET type = 'data.code.Code.' WHERE type = 'code.Code.';\n ")
conn.execute(statement)<|docstring|>Migrations for the upgrade.<|endoftext|> |
e77c1f8aca2a42899ef5114c40edd8b4b1985d0aa326cee757687484742c3db9 | def downgrade():
'Migrations for the downgrade.'
conn = op.get_bind()
statement = text("\n UPDATE db_dbnode SET type = 'code.Code.' WHERE type = 'data.code.Code.';\n ")
conn.execute(statement) | Migrations for the downgrade. | aiida/backends/sqlalchemy/migrations/versions/a603da2cc809_code_sub_class_of_data.py | downgrade | azadoks/aiida-core | 180 | python | def downgrade():
conn = op.get_bind()
statement = text("\n UPDATE db_dbnode SET type = 'code.Code.' WHERE type = 'data.code.Code.';\n ")
conn.execute(statement) | def downgrade():
conn = op.get_bind()
statement = text("\n UPDATE db_dbnode SET type = 'code.Code.' WHERE type = 'data.code.Code.';\n ")
conn.execute(statement)<|docstring|>Migrations for the downgrade.<|endoftext|> |
d7be571e3035ca1c2e09371a43c10239a1f320b92e10350142caabf74194c316 | @not_implemented_for('multigraph')
def closeness_centrality(G, weight=None, n_workers=None):
'Compute closeness centrality for nodes.\n\n .. math::\n\n C_{WF}(u) = \x0crac{n-1}{N-1} \x0crac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n \n Notice that the closeness distance function computes the \n outcoming distance to `u` for directed graphs. To use \n incoming distance, act on `G.reverse()`.\n\n Parameters\n ----------\n G : graph\n A easygraph graph\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with closeness centrality as the value.\n\n '
closeness = dict()
nodes = G.nodes
length = len(nodes)
import functools
if (weight is not None):
path_length = functools.partial(single_source_dijkstra, weight=weight)
else:
path_length = functools.partial(single_source_bfs)
if (n_workers is not None):
from multiprocessing import Pool
from functools import partial
import random
nodes = list(nodes)
random.shuffle(nodes)
if (len(nodes) > (n_workers * 30000)):
nodes = split_len(nodes, step=30000)
else:
nodes = split(nodes, n_workers)
local_function = partial(closeness_centrality_parallel, G=G, path_length=path_length)
with Pool(n_workers) as p:
ret = p.imap(local_function, nodes)
res = [x for i in ret for x in i]
closeness = dict(res)
else:
for node in nodes:
x = path_length(G, node)
dist = sum(x.values())
cnt = len(x)
if (dist == 0):
closeness[node] = 0
else:
closeness[node] = (((cnt - 1) * (cnt - 1)) / (dist * (length - 1)))
return closeness | Compute closeness centrality for nodes.
.. math::
C_{WF}(u) = rac{n-1}{N-1} rac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
Notice that the closeness distance function computes the
outcoming distance to `u` for directed graphs. To use
incoming distance, act on `G.reverse()`.
Parameters
----------
G : graph
A easygraph graph
weight : None or string, optional (default=None)
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
nodes : dictionary
Dictionary of nodes with closeness centrality as the value. | easygraph/functions/centrality/clossness.py | closeness_centrality | tddschn/Easy-Graph | 1 | python | @not_implemented_for('multigraph')
def closeness_centrality(G, weight=None, n_workers=None):
'Compute closeness centrality for nodes.\n\n .. math::\n\n C_{WF}(u) = \x0crac{n-1}{N-1} \x0crac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n \n Notice that the closeness distance function computes the \n outcoming distance to `u` for directed graphs. To use \n incoming distance, act on `G.reverse()`.\n\n Parameters\n ----------\n G : graph\n A easygraph graph\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with closeness centrality as the value.\n\n '
closeness = dict()
nodes = G.nodes
length = len(nodes)
import functools
if (weight is not None):
path_length = functools.partial(single_source_dijkstra, weight=weight)
else:
path_length = functools.partial(single_source_bfs)
if (n_workers is not None):
from multiprocessing import Pool
from functools import partial
import random
nodes = list(nodes)
random.shuffle(nodes)
if (len(nodes) > (n_workers * 30000)):
nodes = split_len(nodes, step=30000)
else:
nodes = split(nodes, n_workers)
local_function = partial(closeness_centrality_parallel, G=G, path_length=path_length)
with Pool(n_workers) as p:
ret = p.imap(local_function, nodes)
res = [x for i in ret for x in i]
closeness = dict(res)
else:
for node in nodes:
x = path_length(G, node)
dist = sum(x.values())
cnt = len(x)
if (dist == 0):
closeness[node] = 0
else:
closeness[node] = (((cnt - 1) * (cnt - 1)) / (dist * (length - 1)))
return closeness | @not_implemented_for('multigraph')
def closeness_centrality(G, weight=None, n_workers=None):
'Compute closeness centrality for nodes.\n\n .. math::\n\n C_{WF}(u) = \x0crac{n-1}{N-1} \x0crac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n \n Notice that the closeness distance function computes the \n outcoming distance to `u` for directed graphs. To use \n incoming distance, act on `G.reverse()`.\n\n Parameters\n ----------\n G : graph\n A easygraph graph\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with closeness centrality as the value.\n\n '
closeness = dict()
nodes = G.nodes
length = len(nodes)
import functools
if (weight is not None):
path_length = functools.partial(single_source_dijkstra, weight=weight)
else:
path_length = functools.partial(single_source_bfs)
if (n_workers is not None):
from multiprocessing import Pool
from functools import partial
import random
nodes = list(nodes)
random.shuffle(nodes)
if (len(nodes) > (n_workers * 30000)):
nodes = split_len(nodes, step=30000)
else:
nodes = split(nodes, n_workers)
local_function = partial(closeness_centrality_parallel, G=G, path_length=path_length)
with Pool(n_workers) as p:
ret = p.imap(local_function, nodes)
res = [x for i in ret for x in i]
closeness = dict(res)
else:
for node in nodes:
x = path_length(G, node)
dist = sum(x.values())
cnt = len(x)
if (dist == 0):
closeness[node] = 0
else:
closeness[node] = (((cnt - 1) * (cnt - 1)) / (dist * (length - 1)))
return closeness<|docstring|>Compute closeness centrality for nodes.
.. math::
C_{WF}(u) = rac{n-1}{N-1} rac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
Notice that the closeness distance function computes the
outcoming distance to `u` for directed graphs. To use
incoming distance, act on `G.reverse()`.
Parameters
----------
G : graph
A easygraph graph
weight : None or string, optional (default=None)
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
nodes : dictionary
Dictionary of nodes with closeness centrality as the value.<|endoftext|> |
8fa03785b0f609ff90356f694dc75387bf73e10427858b16aa6469ebad07b1b4 | def find_missing(list_a, list_b):
'\n it takes list_a and list_b, which defers by one item,\n and returns the extra number in the longer list\n :parameter list_a list containing the int items\n :parameter list_b list containing the int items\n return: the extra number in the longer list\n '
if (not isinstance(list_a, list)):
raise TypeError('Expected list_a to be a list')
if (not isinstance(list_b, list)):
raise TypeError('Expected list_b to be a list')
NUM_START_LIST = (- 1)
(len_a, len_b) = (len(list_a), len(list_b))
if (len_a == len_b):
return 0
(list_a, list_b) = (sorted(list_a), sorted(list_b))
list_a_is_longer = (len_a > len_b)
common_len = (len_b if list_a_is_longer else len_a)
first = 0
last = (common_len - 1)
dif_index = (- 1)
found = False
while ((first <= last) and (not found)):
midpoint = ((first + last) / 2)
if (list_a[midpoint] != list_b[midpoint]):
found = True
dif_index = midpoint
elif (list_a[midpoint] < list_b[midpoint]):
last = (midpoint - 1)
else:
first = (midpoint + 1)
if (dif_index > NUM_START_LIST):
if list_a_is_longer:
return list_a[0]
return list_b[0]
if list_a_is_longer:
return list_a[common_len]
return list_b[common_len] | it takes list_a and list_b, which defers by one item,
and returns the extra number in the longer list
:parameter list_a list containing the int items
:parameter list_b list containing the int items
return: the extra number in the longer list | andela_labs/Missing Number Lab (Programming Logic)/missing_numbers.py | find_missing | brotich/andela_bootcamp_X | 0 | python | def find_missing(list_a, list_b):
'\n it takes list_a and list_b, which defers by one item,\n and returns the extra number in the longer list\n :parameter list_a list containing the int items\n :parameter list_b list containing the int items\n return: the extra number in the longer list\n '
if (not isinstance(list_a, list)):
raise TypeError('Expected list_a to be a list')
if (not isinstance(list_b, list)):
raise TypeError('Expected list_b to be a list')
NUM_START_LIST = (- 1)
(len_a, len_b) = (len(list_a), len(list_b))
if (len_a == len_b):
return 0
(list_a, list_b) = (sorted(list_a), sorted(list_b))
list_a_is_longer = (len_a > len_b)
common_len = (len_b if list_a_is_longer else len_a)
first = 0
last = (common_len - 1)
dif_index = (- 1)
found = False
while ((first <= last) and (not found)):
midpoint = ((first + last) / 2)
if (list_a[midpoint] != list_b[midpoint]):
found = True
dif_index = midpoint
elif (list_a[midpoint] < list_b[midpoint]):
last = (midpoint - 1)
else:
first = (midpoint + 1)
if (dif_index > NUM_START_LIST):
if list_a_is_longer:
return list_a[0]
return list_b[0]
if list_a_is_longer:
return list_a[common_len]
return list_b[common_len] | def find_missing(list_a, list_b):
'\n it takes list_a and list_b, which defers by one item,\n and returns the extra number in the longer list\n :parameter list_a list containing the int items\n :parameter list_b list containing the int items\n return: the extra number in the longer list\n '
if (not isinstance(list_a, list)):
raise TypeError('Expected list_a to be a list')
if (not isinstance(list_b, list)):
raise TypeError('Expected list_b to be a list')
NUM_START_LIST = (- 1)
(len_a, len_b) = (len(list_a), len(list_b))
if (len_a == len_b):
return 0
(list_a, list_b) = (sorted(list_a), sorted(list_b))
list_a_is_longer = (len_a > len_b)
common_len = (len_b if list_a_is_longer else len_a)
first = 0
last = (common_len - 1)
dif_index = (- 1)
found = False
while ((first <= last) and (not found)):
midpoint = ((first + last) / 2)
if (list_a[midpoint] != list_b[midpoint]):
found = True
dif_index = midpoint
elif (list_a[midpoint] < list_b[midpoint]):
last = (midpoint - 1)
else:
first = (midpoint + 1)
if (dif_index > NUM_START_LIST):
if list_a_is_longer:
return list_a[0]
return list_b[0]
if list_a_is_longer:
return list_a[common_len]
return list_b[common_len]<|docstring|>it takes list_a and list_b, which defers by one item,
and returns the extra number in the longer list
:parameter list_a list containing the int items
:parameter list_b list containing the int items
return: the extra number in the longer list<|endoftext|> |
6bf082d2e97d5e6ac9e21bae08c1b77b5c43b19fad705c58ba8ee8f4e62007b2 | def build_spans(s, blocks):
's:string, blocks are pairs of (idx,len) of perfect matches'
if (not blocks):
return ([], 0, 0)
matched_indices = ([0] * len(s))
for (i, l) in blocks:
for idx in range(i, (i + l)):
matched_indices[idx] = max(matched_indices[idx], l)
spandata = []
for (c, matched_len) in zip(s, matched_indices):
if ((not spandata) or (spandata[(- 1)][1] != matched_len)):
spandata.append(([], matched_len))
spandata[(- 1)][0].append(c)
merged_spans = [(html.escape(''.join(chars)), matched_len) for (chars, matched_len) in spandata]
return (merged_spans, min(matched_indices), max(matched_indices)) | s:string, blocks are pairs of (idx,len) of perfect matches | paraanno/app.py | build_spans | TurkuNLP/rew-para-anno | 0 | python | def build_spans(s, blocks):
if (not blocks):
return ([], 0, 0)
matched_indices = ([0] * len(s))
for (i, l) in blocks:
for idx in range(i, (i + l)):
matched_indices[idx] = max(matched_indices[idx], l)
spandata = []
for (c, matched_len) in zip(s, matched_indices):
if ((not spandata) or (spandata[(- 1)][1] != matched_len)):
spandata.append(([], matched_len))
spandata[(- 1)][0].append(c)
merged_spans = [(html.escape(.join(chars)), matched_len) for (chars, matched_len) in spandata]
return (merged_spans, min(matched_indices), max(matched_indices)) | def build_spans(s, blocks):
if (not blocks):
return ([], 0, 0)
matched_indices = ([0] * len(s))
for (i, l) in blocks:
for idx in range(i, (i + l)):
matched_indices[idx] = max(matched_indices[idx], l)
spandata = []
for (c, matched_len) in zip(s, matched_indices):
if ((not spandata) or (spandata[(- 1)][1] != matched_len)):
spandata.append(([], matched_len))
spandata[(- 1)][0].append(c)
merged_spans = [(html.escape(.join(chars)), matched_len) for (chars, matched_len) in spandata]
return (merged_spans, min(matched_indices), max(matched_indices))<|docstring|>s:string, blocks are pairs of (idx,len) of perfect matches<|endoftext|> |
6f319ebd5d4c7854b7a2ccc8298dbd661cb69653af5468bc73ca56b3056e16a9 | def __init__(self, child1, child2, is_pure_python):
'Constructor for Conjunction node\n\n Parameters:\n child1 : stl.Node\n child2 : stl.Node\n '
super(Addition, self).__init__()
self.addChild(child1)
self.addChild(child2)
self.in_vars = (child1.in_vars + child2.in_vars)
self.out_vars = (child1.out_vars + child2.out_vars)
if is_pure_python:
name = 'rtamt.operation.arithmetic.addition_operation'
mod = __import__(name, fromlist=[''])
self.node = mod.AdditionOperation()
else:
name = 'rtamt.lib.rtamt_stl_library_wrapper.stl_node'
mod = __import__(name, fromlist=[''])
name = 'rtamt.lib.rtamt_stl_library_wrapper.stl_addition_node'
mod = __import__(name, fromlist=[''])
self.node = mod.StlAdditionNode() | Constructor for Conjunction node
Parameters:
child1 : stl.Node
child2 : stl.Node | rtamt/node/stl/addition.py | __init__ | BentleyJOakes/rtamt | 0 | python | def __init__(self, child1, child2, is_pure_python):
'Constructor for Conjunction node\n\n Parameters:\n child1 : stl.Node\n child2 : stl.Node\n '
super(Addition, self).__init__()
self.addChild(child1)
self.addChild(child2)
self.in_vars = (child1.in_vars + child2.in_vars)
self.out_vars = (child1.out_vars + child2.out_vars)
if is_pure_python:
name = 'rtamt.operation.arithmetic.addition_operation'
mod = __import__(name, fromlist=[])
self.node = mod.AdditionOperation()
else:
name = 'rtamt.lib.rtamt_stl_library_wrapper.stl_node'
mod = __import__(name, fromlist=[])
name = 'rtamt.lib.rtamt_stl_library_wrapper.stl_addition_node'
mod = __import__(name, fromlist=[])
self.node = mod.StlAdditionNode() | def __init__(self, child1, child2, is_pure_python):
'Constructor for Conjunction node\n\n Parameters:\n child1 : stl.Node\n child2 : stl.Node\n '
super(Addition, self).__init__()
self.addChild(child1)
self.addChild(child2)
self.in_vars = (child1.in_vars + child2.in_vars)
self.out_vars = (child1.out_vars + child2.out_vars)
if is_pure_python:
name = 'rtamt.operation.arithmetic.addition_operation'
mod = __import__(name, fromlist=[])
self.node = mod.AdditionOperation()
else:
name = 'rtamt.lib.rtamt_stl_library_wrapper.stl_node'
mod = __import__(name, fromlist=[])
name = 'rtamt.lib.rtamt_stl_library_wrapper.stl_addition_node'
mod = __import__(name, fromlist=[])
self.node = mod.StlAdditionNode()<|docstring|>Constructor for Conjunction node
Parameters:
child1 : stl.Node
child2 : stl.Node<|endoftext|> |
e10c4ee5a323226642487c4e30556be8083fad29819fe0bfa977bbded4a9ccfc | def main():
'\n NAME\n tk03.py\n\n DESCRIPTION\n generates set of vectors drawn from TK03.gad at given lat and\n rotated about vertical axis by given Dec\n\n INPUT (COMMAND LINE ENTRY)\n OUTPUT\n dec, inc, int\n\n SYNTAX\n tk03.py [command line options] [> OutputFileName]\n\n OPTIONS\n -n N specify N, default is 100\n -d D specify mean Dec, default is 0\n -lat LAT specify latitude, default is 0\n -rev include reversals\n -t INT truncates intensities to >INT uT\n -G2 FRAC specify average g_2^0 fraction (default is 0)\n -G3 FRAC specify average g_3^0 fraction (default is 0)\n '
(N, L, D, R) = (100, 0.0, 0.0, 0)
(G2, G3) = (0.0, 0.0)
cnt = 1
Imax = 0
if ((len(sys.argv) != 0) and ('-h' in sys.argv)):
print(main.__doc__)
sys.exit()
else:
if ('-n' in sys.argv):
ind = sys.argv.index('-n')
N = int(sys.argv[(ind + 1)])
if ('-d' in sys.argv):
ind = sys.argv.index('-d')
D = float(sys.argv[(ind + 1)])
if ('-lat' in sys.argv):
ind = sys.argv.index('-lat')
L = float(sys.argv[(ind + 1)])
if ('-t' in sys.argv):
ind = sys.argv.index('-t')
Imax = (1000.0 * float(sys.argv[(ind + 1)]))
if ('-rev' in sys.argv):
R = 1
if ('-G2' in sys.argv):
ind = sys.argv.index('-G2')
G2 = float(sys.argv[(ind + 1)])
if ('-G3' in sys.argv):
ind = sys.argv.index('-G3')
G3 = float(sys.argv[(ind + 1)])
for k in range(N):
gh = pmag.mktk03(8, k, G2, G3)
lon = random.randint(0, 360)
vec = pmag.getvec(gh, L, lon)
if (vec[2] >= Imax):
vec[0] += D
if (((k % 2) == 0) and (R == 1)):
vec[0] += 180.0
vec[1] = (- vec[1])
if (vec[0] >= 360.0):
vec[0] -= 360.0
print(('%7.1f %7.1f %8.2f ' % (vec[0], vec[1], vec[2]))) | NAME
tk03.py
DESCRIPTION
generates set of vectors drawn from TK03.gad at given lat and
rotated about vertical axis by given Dec
INPUT (COMMAND LINE ENTRY)
OUTPUT
dec, inc, int
SYNTAX
tk03.py [command line options] [> OutputFileName]
OPTIONS
-n N specify N, default is 100
-d D specify mean Dec, default is 0
-lat LAT specify latitude, default is 0
-rev include reversals
-t INT truncates intensities to >INT uT
-G2 FRAC specify average g_2^0 fraction (default is 0)
-G3 FRAC specify average g_3^0 fraction (default is 0) | programs/tk03.py | main | apivarunas/PmagPy | 2 | python | def main():
'\n NAME\n tk03.py\n\n DESCRIPTION\n generates set of vectors drawn from TK03.gad at given lat and\n rotated about vertical axis by given Dec\n\n INPUT (COMMAND LINE ENTRY)\n OUTPUT\n dec, inc, int\n\n SYNTAX\n tk03.py [command line options] [> OutputFileName]\n\n OPTIONS\n -n N specify N, default is 100\n -d D specify mean Dec, default is 0\n -lat LAT specify latitude, default is 0\n -rev include reversals\n -t INT truncates intensities to >INT uT\n -G2 FRAC specify average g_2^0 fraction (default is 0)\n -G3 FRAC specify average g_3^0 fraction (default is 0)\n '
(N, L, D, R) = (100, 0.0, 0.0, 0)
(G2, G3) = (0.0, 0.0)
cnt = 1
Imax = 0
if ((len(sys.argv) != 0) and ('-h' in sys.argv)):
print(main.__doc__)
sys.exit()
else:
if ('-n' in sys.argv):
ind = sys.argv.index('-n')
N = int(sys.argv[(ind + 1)])
if ('-d' in sys.argv):
ind = sys.argv.index('-d')
D = float(sys.argv[(ind + 1)])
if ('-lat' in sys.argv):
ind = sys.argv.index('-lat')
L = float(sys.argv[(ind + 1)])
if ('-t' in sys.argv):
ind = sys.argv.index('-t')
Imax = (1000.0 * float(sys.argv[(ind + 1)]))
if ('-rev' in sys.argv):
R = 1
if ('-G2' in sys.argv):
ind = sys.argv.index('-G2')
G2 = float(sys.argv[(ind + 1)])
if ('-G3' in sys.argv):
ind = sys.argv.index('-G3')
G3 = float(sys.argv[(ind + 1)])
for k in range(N):
gh = pmag.mktk03(8, k, G2, G3)
lon = random.randint(0, 360)
vec = pmag.getvec(gh, L, lon)
if (vec[2] >= Imax):
vec[0] += D
if (((k % 2) == 0) and (R == 1)):
vec[0] += 180.0
vec[1] = (- vec[1])
if (vec[0] >= 360.0):
vec[0] -= 360.0
print(('%7.1f %7.1f %8.2f ' % (vec[0], vec[1], vec[2]))) | def main():
'\n NAME\n tk03.py\n\n DESCRIPTION\n generates set of vectors drawn from TK03.gad at given lat and\n rotated about vertical axis by given Dec\n\n INPUT (COMMAND LINE ENTRY)\n OUTPUT\n dec, inc, int\n\n SYNTAX\n tk03.py [command line options] [> OutputFileName]\n\n OPTIONS\n -n N specify N, default is 100\n -d D specify mean Dec, default is 0\n -lat LAT specify latitude, default is 0\n -rev include reversals\n -t INT truncates intensities to >INT uT\n -G2 FRAC specify average g_2^0 fraction (default is 0)\n -G3 FRAC specify average g_3^0 fraction (default is 0)\n '
(N, L, D, R) = (100, 0.0, 0.0, 0)
(G2, G3) = (0.0, 0.0)
cnt = 1
Imax = 0
if ((len(sys.argv) != 0) and ('-h' in sys.argv)):
print(main.__doc__)
sys.exit()
else:
if ('-n' in sys.argv):
ind = sys.argv.index('-n')
N = int(sys.argv[(ind + 1)])
if ('-d' in sys.argv):
ind = sys.argv.index('-d')
D = float(sys.argv[(ind + 1)])
if ('-lat' in sys.argv):
ind = sys.argv.index('-lat')
L = float(sys.argv[(ind + 1)])
if ('-t' in sys.argv):
ind = sys.argv.index('-t')
Imax = (1000.0 * float(sys.argv[(ind + 1)]))
if ('-rev' in sys.argv):
R = 1
if ('-G2' in sys.argv):
ind = sys.argv.index('-G2')
G2 = float(sys.argv[(ind + 1)])
if ('-G3' in sys.argv):
ind = sys.argv.index('-G3')
G3 = float(sys.argv[(ind + 1)])
for k in range(N):
gh = pmag.mktk03(8, k, G2, G3)
lon = random.randint(0, 360)
vec = pmag.getvec(gh, L, lon)
if (vec[2] >= Imax):
vec[0] += D
if (((k % 2) == 0) and (R == 1)):
vec[0] += 180.0
vec[1] = (- vec[1])
if (vec[0] >= 360.0):
vec[0] -= 360.0
print(('%7.1f %7.1f %8.2f ' % (vec[0], vec[1], vec[2])))<|docstring|>NAME
tk03.py
DESCRIPTION
generates set of vectors drawn from TK03.gad at given lat and
rotated about vertical axis by given Dec
INPUT (COMMAND LINE ENTRY)
OUTPUT
dec, inc, int
SYNTAX
tk03.py [command line options] [> OutputFileName]
OPTIONS
-n N specify N, default is 100
-d D specify mean Dec, default is 0
-lat LAT specify latitude, default is 0
-rev include reversals
-t INT truncates intensities to >INT uT
-G2 FRAC specify average g_2^0 fraction (default is 0)
-G3 FRAC specify average g_3^0 fraction (default is 0)<|endoftext|> |
ce7c087cd4c33a4803ac36f76769897d845a6537f71d2fe811e19e4849825931 | def straceback():
'Returns a string with the traceback.'
import traceback
return traceback.format_exc() | Returns a string with the traceback. | pymatgen/io/abinitio/events.py | straceback | jmflorez/pymatgen | 1 | python | def straceback():
import traceback
return traceback.format_exc() | def straceback():
import traceback
return traceback.format_exc()<|docstring|>Returns a string with the traceback.<|endoftext|> |
ed9e2777fde8458626d9c9fa84d83fc953221c9dce461a408a0e1fd59cc8f4a8 | def __init__(self, message, src_file, src_line):
'\n Basic constructor for `AbinitEvent`. \n\n Args:\n message:\n String with human-readable message providing info on the event.\n src_file:\n String with the name of the Fortran file where the event is raised.\n src_line\n Integer giving the line number in src_file.\n '
self.message = message
self.src_file = src_file
self.src_line = src_line | Basic constructor for `AbinitEvent`.
Args:
message:
String with human-readable message providing info on the event.
src_file:
String with the name of the Fortran file where the event is raised.
src_line
Integer giving the line number in src_file. | pymatgen/io/abinitio/events.py | __init__ | jmflorez/pymatgen | 1 | python | def __init__(self, message, src_file, src_line):
'\n Basic constructor for `AbinitEvent`. \n\n Args:\n message:\n String with human-readable message providing info on the event.\n src_file:\n String with the name of the Fortran file where the event is raised.\n src_line\n Integer giving the line number in src_file.\n '
self.message = message
self.src_file = src_file
self.src_line = src_line | def __init__(self, message, src_file, src_line):
'\n Basic constructor for `AbinitEvent`. \n\n Args:\n message:\n String with human-readable message providing info on the event.\n src_file:\n String with the name of the Fortran file where the event is raised.\n src_line\n Integer giving the line number in src_file.\n '
self.message = message
self.src_file = src_file
self.src_line = src_line<|docstring|>Basic constructor for `AbinitEvent`.
Args:
message:
String with human-readable message providing info on the event.
src_file:
String with the name of the Fortran file where the event is raised.
src_line
Integer giving the line number in src_file.<|endoftext|> |
95b07f5950e76e57ee66ccfa68cbff6bd7e16c6da26ac587b997afe900ad51c8 | @property
def name(self):
'Name of the event (class name)'
return self.__class__.__name__ | Name of the event (class name) | pymatgen/io/abinitio/events.py | name | jmflorez/pymatgen | 1 | python | @property
def name(self):
return self.__class__.__name__ | @property
def name(self):
return self.__class__.__name__<|docstring|>Name of the event (class name)<|endoftext|> |
fc178a961ba7255f23e7f30afe889540d1ed8039f94980b8819307a0771618d1 | @property
def baseclass(self):
'The baseclass of self.'
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
err_msg = ('Cannot determine the base class of %s' % self.__class__.__name__)
raise ValueError(err_msg) | The baseclass of self. | pymatgen/io/abinitio/events.py | baseclass | jmflorez/pymatgen | 1 | python | @property
def baseclass(self):
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
err_msg = ('Cannot determine the base class of %s' % self.__class__.__name__)
raise ValueError(err_msg) | @property
def baseclass(self):
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
err_msg = ('Cannot determine the base class of %s' % self.__class__.__name__)
raise ValueError(err_msg)<|docstring|>The baseclass of self.<|endoftext|> |
e7fb6c09f6300708ee3bfef72d115305a15a23d5cea2fab8c3caee063032268d | def action(self):
'\n Returns a dictionary whose values that can be used to decide\n which actions should be performed e.g the SCF data at the last\n iteration can be used to decide whether the calculations should\n be restarted or not.\n '
return {} | Returns a dictionary whose values that can be used to decide
which actions should be performed e.g the SCF data at the last
iteration can be used to decide whether the calculations should
be restarted or not. | pymatgen/io/abinitio/events.py | action | jmflorez/pymatgen | 1 | python | def action(self):
'\n Returns a dictionary whose values that can be used to decide\n which actions should be performed e.g the SCF data at the last\n iteration can be used to decide whether the calculations should\n be restarted or not.\n '
return {} | def action(self):
'\n Returns a dictionary whose values that can be used to decide\n which actions should be performed e.g the SCF data at the last\n iteration can be used to decide whether the calculations should\n be restarted or not.\n '
return {}<|docstring|>Returns a dictionary whose values that can be used to decide
which actions should be performed e.g the SCF data at the last
iteration can be used to decide whether the calculations should
be restarted or not.<|endoftext|> |
62c3a0748c6fa862bf99c4381262d87b449f89bfff473a4fbf98eac87d1135b5 | def __init__(self, filename, events=None):
'\n Args:\n filename:\n Name of the file\n events:\n List of Event objects\n '
self.filename = os.path.abspath(filename)
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if (events is not None):
for ev in events:
self.append(ev) | Args:
filename:
Name of the file
events:
List of Event objects | pymatgen/io/abinitio/events.py | __init__ | jmflorez/pymatgen | 1 | python | def __init__(self, filename, events=None):
'\n Args:\n filename:\n Name of the file\n events:\n List of Event objects\n '
self.filename = os.path.abspath(filename)
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if (events is not None):
for ev in events:
self.append(ev) | def __init__(self, filename, events=None):
'\n Args:\n filename:\n Name of the file\n events:\n List of Event objects\n '
self.filename = os.path.abspath(filename)
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if (events is not None):
for ev in events:
self.append(ev)<|docstring|>Args:
filename:
Name of the file
events:
List of Event objects<|endoftext|> |
9326bffb83fffc0ba8a51168d52f71699cd3a7dd53a53ba8334211ad91f15d9a | def append(self, event):
'Add an event to the list.'
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event) | Add an event to the list. | pymatgen/io/abinitio/events.py | append | jmflorez/pymatgen | 1 | python | def append(self, event):
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event) | def append(self, event):
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event)<|docstring|>Add an event to the list.<|endoftext|> |
4cc04c8a4c4b270fe3ed3d26a488cc4c495ec27488d18c8df4dab0fc456e9c18 | def set_run_completed(self, bool_value):
'Set the value of _run_completed.'
self._run_completed = bool_value | Set the value of _run_completed. | pymatgen/io/abinitio/events.py | set_run_completed | jmflorez/pymatgen | 1 | python | def set_run_completed(self, bool_value):
self._run_completed = bool_value | def set_run_completed(self, bool_value):
self._run_completed = bool_value<|docstring|>Set the value of _run_completed.<|endoftext|> |
9fe1c43b90261e04a7d8bd153671077d70652e3d3bc5e26d77f2dd16fa2fcd76 | @property
def run_completed(self):
'\n Returns True if the calculation terminated.\n '
try:
return self._run_completed
except AttributeError:
return False | Returns True if the calculation terminated. | pymatgen/io/abinitio/events.py | run_completed | jmflorez/pymatgen | 1 | python | @property
def run_completed(self):
'\n \n '
try:
return self._run_completed
except AttributeError:
return False | @property
def run_completed(self):
'\n \n '
try:
return self._run_completed
except AttributeError:
return False<|docstring|>Returns True if the calculation terminated.<|endoftext|> |
5f5090c8980aaeaeff1a6ee02c5baf932f27fbcbab34a50f41c9c0d0de2807d2 | @property
def comments(self):
'List of comments found.'
return self.select(AbinitComment) | List of comments found. | pymatgen/io/abinitio/events.py | comments | jmflorez/pymatgen | 1 | python | @property
def comments(self):
return self.select(AbinitComment) | @property
def comments(self):
return self.select(AbinitComment)<|docstring|>List of comments found.<|endoftext|> |
9e13a51d461f121c83c4bc4f0a1cde332b0fcdbcc2f3e4ac7925ad796b019b0a | @property
def errors(self):
'List of errors found.'
return self.select(AbinitError) | List of errors found. | pymatgen/io/abinitio/events.py | errors | jmflorez/pymatgen | 1 | python | @property
def errors(self):
return self.select(AbinitError) | @property
def errors(self):
return self.select(AbinitError)<|docstring|>List of errors found.<|endoftext|> |
c63f190827d1e67c7f6535a0279ee5e97f09113b6c65d1f7b20a8af8a152c006 | @property
def bugs(self):
'List of bugs found.'
return self.select(AbinitBug) | List of bugs found. | pymatgen/io/abinitio/events.py | bugs | jmflorez/pymatgen | 1 | python | @property
def bugs(self):
return self.select(AbinitBug) | @property
def bugs(self):
return self.select(AbinitBug)<|docstring|>List of bugs found.<|endoftext|> |
3761ed524a374dfd156d4046895a6043031a51dd63049dc6afbff3d44f3d00b6 | @property
def warnings(self):
'List of warnings found.'
return self.select(AbinitWarning) | List of warnings found. | pymatgen/io/abinitio/events.py | warnings | jmflorez/pymatgen | 1 | python | @property
def warnings(self):
return self.select(AbinitWarning) | @property
def warnings(self):
return self.select(AbinitWarning)<|docstring|>List of warnings found.<|endoftext|> |
d824f626c62253221af91c01b03f95715647d26f620362b3118858335c07a30f | @property
def num_warnings(self):
'Number of warnings reported.'
return len(self.warnings) | Number of warnings reported. | pymatgen/io/abinitio/events.py | num_warnings | jmflorez/pymatgen | 1 | python | @property
def num_warnings(self):
return len(self.warnings) | @property
def num_warnings(self):
return len(self.warnings)<|docstring|>Number of warnings reported.<|endoftext|> |
d16954be1a932a3155076573700414a067ac21918221bd540b2871b02eca52fe | @property
def num_errors(self):
'Number of errors reported.'
return len(self.errors) | Number of errors reported. | pymatgen/io/abinitio/events.py | num_errors | jmflorez/pymatgen | 1 | python | @property
def num_errors(self):
return len(self.errors) | @property
def num_errors(self):
return len(self.errors)<|docstring|>Number of errors reported.<|endoftext|> |
384d9a8903ccb870b1b35e99129381b432923044f022ba612476861ed6e894a0 | @property
def num_comments(self):
'Number of comments reported.'
return len(self.comments) | Number of comments reported. | pymatgen/io/abinitio/events.py | num_comments | jmflorez/pymatgen | 1 | python | @property
def num_comments(self):
return len(self.comments) | @property
def num_comments(self):
return len(self.comments)<|docstring|>Number of comments reported.<|endoftext|> |
09996027b4aa16452d1da89a72c859f5e026551a0e234fe224bad1de9756f351 | def select(self, base_class):
'\n Return the list of events that inherits from class base_class\n\n Args:\n only_critical:\n if True, only critical events are returned.\n '
return self._events_by_baseclass[base_class][:] | Return the list of events that inherits from class base_class
Args:
only_critical:
if True, only critical events are returned. | pymatgen/io/abinitio/events.py | select | jmflorez/pymatgen | 1 | python | def select(self, base_class):
'\n Return the list of events that inherits from class base_class\n\n Args:\n only_critical:\n if True, only critical events are returned.\n '
return self._events_by_baseclass[base_class][:] | def select(self, base_class):
'\n Return the list of events that inherits from class base_class\n\n Args:\n only_critical:\n if True, only critical events are returned.\n '
return self._events_by_baseclass[base_class][:]<|docstring|>Return the list of events that inherits from class base_class
Args:
only_critical:
if True, only critical events are returned.<|endoftext|> |
d8f8aad3fd5d30f1eb497c6573237ee1c9a5148097a80a1e91a53a6efd32dafd | @staticmethod
def parse(filename):
'\n This is the new parser, it will be used when we implement\n the new format in abinit.\n '
run_completed = False
filename = os.path.abspath(filename)
report = EventReport(filename)
w = WildCard('*Error|*Warning|*Comment|*ERROR|*WARNING|*COMMENT')
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
try:
event = yaml.load(doc.text)
except:
message = ('Malformatted YAML document at line: %d\n' % doc.lineno)
message += doc.text
message += ('Traceback:\n %s' % straceback())
if ('error' in doc.tag.lower()):
print('seems an error', doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
if (doc.tag == '!FinalSummary'):
run_completed = True
report.set_run_completed(run_completed)
return report | This is the new parser, it will be used when we implement
the new format in abinit. | pymatgen/io/abinitio/events.py | parse | jmflorez/pymatgen | 1 | python | @staticmethod
def parse(filename):
'\n This is the new parser, it will be used when we implement\n the new format in abinit.\n '
run_completed = False
filename = os.path.abspath(filename)
report = EventReport(filename)
w = WildCard('*Error|*Warning|*Comment|*ERROR|*WARNING|*COMMENT')
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
try:
event = yaml.load(doc.text)
except:
message = ('Malformatted YAML document at line: %d\n' % doc.lineno)
message += doc.text
message += ('Traceback:\n %s' % straceback())
if ('error' in doc.tag.lower()):
print('seems an error', doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
if (doc.tag == '!FinalSummary'):
run_completed = True
report.set_run_completed(run_completed)
return report | @staticmethod
def parse(filename):
'\n This is the new parser, it will be used when we implement\n the new format in abinit.\n '
run_completed = False
filename = os.path.abspath(filename)
report = EventReport(filename)
w = WildCard('*Error|*Warning|*Comment|*ERROR|*WARNING|*COMMENT')
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
try:
event = yaml.load(doc.text)
except:
message = ('Malformatted YAML document at line: %d\n' % doc.lineno)
message += doc.text
message += ('Traceback:\n %s' % straceback())
if ('error' in doc.tag.lower()):
print('seems an error', doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
if (doc.tag == '!FinalSummary'):
run_completed = True
report.set_run_completed(run_completed)
return report<|docstring|>This is the new parser, it will be used when we implement
the new format in abinit.<|endoftext|> |
2e7864739976233f9fc1139e2dec462f68a821dd89b2eb3b30c0383330d38918 | def report_exception(self, filename, exc):
'\n This method is used when self.parser raises an Exception so that\n we can report a customized `EventReport` object with info the exception.\n '
return EventReport(filename, events=[Error(str(exc))]) | This method is used when self.parser raises an Exception so that
we can report a customized `EventReport` object with info the exception. | pymatgen/io/abinitio/events.py | report_exception | jmflorez/pymatgen | 1 | python | def report_exception(self, filename, exc):
'\n This method is used when self.parser raises an Exception so that\n we can report a customized `EventReport` object with info the exception.\n '
return EventReport(filename, events=[Error(str(exc))]) | def report_exception(self, filename, exc):
'\n This method is used when self.parser raises an Exception so that\n we can report a customized `EventReport` object with info the exception.\n '
return EventReport(filename, events=[Error(str(exc))])<|docstring|>This method is used when self.parser raises an Exception so that
we can report a customized `EventReport` object with info the exception.<|endoftext|> |
ed2400ff362355000dcd60aa3e2acf2b9b137c1c20b2b6689609aa3278ef6da0 | def UT2datetime(indate, inut):
'\n\tConverts date of the format YYYYMMDD and time in floating point \n\thours to a datetime object.\n\t\n\t'
if (np.size(indate) == 1):
date = (np.zeros(np.size(inut), dtype='int32') + indate)
else:
date = np.int32(indate)
if (np.size(inut) == 1):
ut = np.float32([inut])
else:
ut = np.float32(inut)
for i in range(0, np.size(ut)):
if (ut[i] < 0):
ut[i] += 24.0
date[i] = MinusDay(date[i])
yr = np.int32((date / 10000))
mn = np.int32(((date % 10000) / 100))
dy = (date % 100)
hh = np.int32(ut)
mmf = ((ut - hh) * 60)
mm = np.int32(mmf)
ssf = ((mmf - mm) * 60.0)
ss = np.int32(ssf)
ms = np.int32(((ssf - ss) * 1000000.0))
dt = np.array([datetime.datetime(yr[i], mn[i], dy[i], hh[i], mm[i], ss[i], ms[i]) for i in range(0, np.size(ut))])
return dt | Converts date of the format YYYYMMDD and time in floating point
hours to a datetime object. | build/lib/DateTimeTools/UT2datetime.py | UT2datetime | pshustov/DateTimeTools | 0 | python | def UT2datetime(indate, inut):
'\n\tConverts date of the format YYYYMMDD and time in floating point \n\thours to a datetime object.\n\t\n\t'
if (np.size(indate) == 1):
date = (np.zeros(np.size(inut), dtype='int32') + indate)
else:
date = np.int32(indate)
if (np.size(inut) == 1):
ut = np.float32([inut])
else:
ut = np.float32(inut)
for i in range(0, np.size(ut)):
if (ut[i] < 0):
ut[i] += 24.0
date[i] = MinusDay(date[i])
yr = np.int32((date / 10000))
mn = np.int32(((date % 10000) / 100))
dy = (date % 100)
hh = np.int32(ut)
mmf = ((ut - hh) * 60)
mm = np.int32(mmf)
ssf = ((mmf - mm) * 60.0)
ss = np.int32(ssf)
ms = np.int32(((ssf - ss) * 1000000.0))
dt = np.array([datetime.datetime(yr[i], mn[i], dy[i], hh[i], mm[i], ss[i], ms[i]) for i in range(0, np.size(ut))])
return dt | def UT2datetime(indate, inut):
'\n\tConverts date of the format YYYYMMDD and time in floating point \n\thours to a datetime object.\n\t\n\t'
if (np.size(indate) == 1):
date = (np.zeros(np.size(inut), dtype='int32') + indate)
else:
date = np.int32(indate)
if (np.size(inut) == 1):
ut = np.float32([inut])
else:
ut = np.float32(inut)
for i in range(0, np.size(ut)):
if (ut[i] < 0):
ut[i] += 24.0
date[i] = MinusDay(date[i])
yr = np.int32((date / 10000))
mn = np.int32(((date % 10000) / 100))
dy = (date % 100)
hh = np.int32(ut)
mmf = ((ut - hh) * 60)
mm = np.int32(mmf)
ssf = ((mmf - mm) * 60.0)
ss = np.int32(ssf)
ms = np.int32(((ssf - ss) * 1000000.0))
dt = np.array([datetime.datetime(yr[i], mn[i], dy[i], hh[i], mm[i], ss[i], ms[i]) for i in range(0, np.size(ut))])
return dt<|docstring|>Converts date of the format YYYYMMDD and time in floating point
hours to a datetime object.<|endoftext|> |
c67c21d1f2f6c6702da8b0432c80b45dfe0c45ea10f37b5b514fcbff7443e7a3 | def datetime2UT(DT):
'\n\tConverts datetime objects to arrays of dates with the format \n\tYYYYMMDD and times in floating point hours. \n\t'
if hasattr(DT, '__iter__'):
n = np.size(DT)
ut = np.zeros(n, dtype='float32')
date = np.zeros(n, dtype='int32')
for i in range(0, n):
ut[i] = ((np.float32(DT[i].hour) + (np.float32(DT[i].minute) / 60.0)) + (np.float32(DT[i].second) / 3600.0))
date[i] = (((np.int32(DT[i].year) * 10000) + (np.int32(DT[i].month) * 100)) + np.int32(DT[i].day))
else:
ut = ((np.float32(DT.hour) + (np.float32(DT.minute) / 60.0)) + (np.float32(DT.second) / 3600.0))
date = (((np.int32(DT.year) * 10000) + (np.int32(DT.month) * 100)) + np.int32(DT.day))
return (date, ut) | Converts datetime objects to arrays of dates with the format
YYYYMMDD and times in floating point hours. | build/lib/DateTimeTools/UT2datetime.py | datetime2UT | pshustov/DateTimeTools | 0 | python | def datetime2UT(DT):
'\n\tConverts datetime objects to arrays of dates with the format \n\tYYYYMMDD and times in floating point hours. \n\t'
if hasattr(DT, '__iter__'):
n = np.size(DT)
ut = np.zeros(n, dtype='float32')
date = np.zeros(n, dtype='int32')
for i in range(0, n):
ut[i] = ((np.float32(DT[i].hour) + (np.float32(DT[i].minute) / 60.0)) + (np.float32(DT[i].second) / 3600.0))
date[i] = (((np.int32(DT[i].year) * 10000) + (np.int32(DT[i].month) * 100)) + np.int32(DT[i].day))
else:
ut = ((np.float32(DT.hour) + (np.float32(DT.minute) / 60.0)) + (np.float32(DT.second) / 3600.0))
date = (((np.int32(DT.year) * 10000) + (np.int32(DT.month) * 100)) + np.int32(DT.day))
return (date, ut) | def datetime2UT(DT):
'\n\tConverts datetime objects to arrays of dates with the format \n\tYYYYMMDD and times in floating point hours. \n\t'
if hasattr(DT, '__iter__'):
n = np.size(DT)
ut = np.zeros(n, dtype='float32')
date = np.zeros(n, dtype='int32')
for i in range(0, n):
ut[i] = ((np.float32(DT[i].hour) + (np.float32(DT[i].minute) / 60.0)) + (np.float32(DT[i].second) / 3600.0))
date[i] = (((np.int32(DT[i].year) * 10000) + (np.int32(DT[i].month) * 100)) + np.int32(DT[i].day))
else:
ut = ((np.float32(DT.hour) + (np.float32(DT.minute) / 60.0)) + (np.float32(DT.second) / 3600.0))
date = (((np.int32(DT.year) * 10000) + (np.int32(DT.month) * 100)) + np.int32(DT.day))
return (date, ut)<|docstring|>Converts datetime objects to arrays of dates with the format
YYYYMMDD and times in floating point hours.<|endoftext|> |
2f5e72daef8d2ce6bb81ce086d641a6b264aa7c50d75b576ff606f0e843c818b | def get_args(func):
'Given a function, returns a tuple (*required*, *optional*), tuples of\n non-keyword and keyword arguments respectively. If a function contains\n splats (\\* or \\**), a :exc:`~cosmic.exceptions.SpecError` will be raised.\n '
(args, varargs, keywords, defaults) = inspect.getargspec(func)
if (varargs or keywords):
raise SpecError('Cannot define action with splats (* or **)')
if (len(args) == 0):
return ((), ())
numargs = (len(args) - (len(defaults) if defaults else 0))
required_args = tuple(args[:numargs])
optional_args = tuple(args[numargs:])
return (required_args, optional_args) | Given a function, returns a tuple (*required*, *optional*), tuples of
non-keyword and keyword arguments respectively. If a function contains
splats (\* or \**), a :exc:`~cosmic.exceptions.SpecError` will be raised. | cosmic/tools.py | get_args | cosmic-api/cosmic.py | 1 | python | def get_args(func):
'Given a function, returns a tuple (*required*, *optional*), tuples of\n non-keyword and keyword arguments respectively. If a function contains\n splats (\\* or \\**), a :exc:`~cosmic.exceptions.SpecError` will be raised.\n '
(args, varargs, keywords, defaults) = inspect.getargspec(func)
if (varargs or keywords):
raise SpecError('Cannot define action with splats (* or **)')
if (len(args) == 0):
return ((), ())
numargs = (len(args) - (len(defaults) if defaults else 0))
required_args = tuple(args[:numargs])
optional_args = tuple(args[numargs:])
return (required_args, optional_args) | def get_args(func):
'Given a function, returns a tuple (*required*, *optional*), tuples of\n non-keyword and keyword arguments respectively. If a function contains\n splats (\\* or \\**), a :exc:`~cosmic.exceptions.SpecError` will be raised.\n '
(args, varargs, keywords, defaults) = inspect.getargspec(func)
if (varargs or keywords):
raise SpecError('Cannot define action with splats (* or **)')
if (len(args) == 0):
return ((), ())
numargs = (len(args) - (len(defaults) if defaults else 0))
required_args = tuple(args[:numargs])
optional_args = tuple(args[numargs:])
return (required_args, optional_args)<|docstring|>Given a function, returns a tuple (*required*, *optional*), tuples of
non-keyword and keyword arguments respectively. If a function contains
splats (\* or \**), a :exc:`~cosmic.exceptions.SpecError` will be raised.<|endoftext|> |
578cb96f8a1b03294f09ec3bf6e6112388e287d26fac8004109f07bcb2af3983 | def args_to_datum(*args, **kwargs):
'Takes arbitrary args and kwargs and packs them into a dict if there are\n more than one. Returns `None` if there are no arguments. Must be called\n with either a single argument or multiple keyword arguments.\n '
if ((len(args) == 1) and (len(kwargs) == 0)):
return args[0]
if ((len(args) == 0) and (len(kwargs) > 0)):
return kwargs
if ((len(args) == 0) and (len(kwargs) == 0)):
return None
raise SpecError('Action must be called either with one argument or with one or more keyword arguments') | Takes arbitrary args and kwargs and packs them into a dict if there are
more than one. Returns `None` if there are no arguments. Must be called
with either a single argument or multiple keyword arguments. | cosmic/tools.py | args_to_datum | cosmic-api/cosmic.py | 1 | python | def args_to_datum(*args, **kwargs):
'Takes arbitrary args and kwargs and packs them into a dict if there are\n more than one. Returns `None` if there are no arguments. Must be called\n with either a single argument or multiple keyword arguments.\n '
if ((len(args) == 1) and (len(kwargs) == 0)):
return args[0]
if ((len(args) == 0) and (len(kwargs) > 0)):
return kwargs
if ((len(args) == 0) and (len(kwargs) == 0)):
return None
raise SpecError('Action must be called either with one argument or with one or more keyword arguments') | def args_to_datum(*args, **kwargs):
'Takes arbitrary args and kwargs and packs them into a dict if there are\n more than one. Returns `None` if there are no arguments. Must be called\n with either a single argument or multiple keyword arguments.\n '
if ((len(args) == 1) and (len(kwargs) == 0)):
return args[0]
if ((len(args) == 0) and (len(kwargs) > 0)):
return kwargs
if ((len(args) == 0) and (len(kwargs) == 0)):
return None
raise SpecError('Action must be called either with one argument or with one or more keyword arguments')<|docstring|>Takes arbitrary args and kwargs and packs them into a dict if there are
more than one. Returns `None` if there are no arguments. Must be called
with either a single argument or multiple keyword arguments.<|endoftext|> |
4385e08b9faac316e0dbaca0e1454d36e30e3974cb403ff3b2e3bbe6cbc7e71b | def assert_is_compatible(schema, required_args, optional_args):
'Raises a :exc:`~cosmic.exceptions.SpecError` if function argument spec\n (as returned by :func:`get_args`) is incompatible with the given schema.\n By incompatible, it is meant that there exists such a piece of data that\n is valid according to the schema, but that could not be applied to the\n function by :func:`apply_to_func`.\n '
if (len((required_args + optional_args)) == 0):
raise SpecError('Function needs to accept arguments')
if (len((required_args + optional_args)) == 1):
return
if (not isinstance(schema, Struct)):
raise SpecError('For a function that takes arguments, accepts schema is expected to be a Struct')
for r in required_args:
if ((r not in schema.param.keys()) or (not schema.param[r]['required'])):
raise SpecError(("Action argument '%s' must have a corresponding required field in the accepts schema" % r))
for f in schema.param.keys():
if (f not in set((required_args + optional_args))):
raise SpecError(("The '%s' field must have a corresponding function argument" % f)) | Raises a :exc:`~cosmic.exceptions.SpecError` if function argument spec
(as returned by :func:`get_args`) is incompatible with the given schema.
By incompatible, it is meant that there exists such a piece of data that
is valid according to the schema, but that could not be applied to the
function by :func:`apply_to_func`. | cosmic/tools.py | assert_is_compatible | cosmic-api/cosmic.py | 1 | python | def assert_is_compatible(schema, required_args, optional_args):
'Raises a :exc:`~cosmic.exceptions.SpecError` if function argument spec\n (as returned by :func:`get_args`) is incompatible with the given schema.\n By incompatible, it is meant that there exists such a piece of data that\n is valid according to the schema, but that could not be applied to the\n function by :func:`apply_to_func`.\n '
if (len((required_args + optional_args)) == 0):
raise SpecError('Function needs to accept arguments')
if (len((required_args + optional_args)) == 1):
return
if (not isinstance(schema, Struct)):
raise SpecError('For a function that takes arguments, accepts schema is expected to be a Struct')
for r in required_args:
if ((r not in schema.param.keys()) or (not schema.param[r]['required'])):
raise SpecError(("Action argument '%s' must have a corresponding required field in the accepts schema" % r))
for f in schema.param.keys():
if (f not in set((required_args + optional_args))):
raise SpecError(("The '%s' field must have a corresponding function argument" % f)) | def assert_is_compatible(schema, required_args, optional_args):
'Raises a :exc:`~cosmic.exceptions.SpecError` if function argument spec\n (as returned by :func:`get_args`) is incompatible with the given schema.\n By incompatible, it is meant that there exists such a piece of data that\n is valid according to the schema, but that could not be applied to the\n function by :func:`apply_to_func`.\n '
if (len((required_args + optional_args)) == 0):
raise SpecError('Function needs to accept arguments')
if (len((required_args + optional_args)) == 1):
return
if (not isinstance(schema, Struct)):
raise SpecError('For a function that takes arguments, accepts schema is expected to be a Struct')
for r in required_args:
if ((r not in schema.param.keys()) or (not schema.param[r]['required'])):
raise SpecError(("Action argument '%s' must have a corresponding required field in the accepts schema" % r))
for f in schema.param.keys():
if (f not in set((required_args + optional_args))):
raise SpecError(("The '%s' field must have a corresponding function argument" % f))<|docstring|>Raises a :exc:`~cosmic.exceptions.SpecError` if function argument spec
(as returned by :func:`get_args`) is incompatible with the given schema.
By incompatible, it is meant that there exists such a piece of data that
is valid according to the schema, but that could not be applied to the
function by :func:`apply_to_func`.<|endoftext|> |
395181d993a4faee8ba601971e4b729a49aadaeed2446cdf5e19737e30a87e0b | def s3_import_csv(r):
'\n Import CSV file into database\n\n Args:\n r: the S3Request\n\n Note:\n Called by S3CRUD.create\n '
import cgi
import csv
csv.field_size_limit(1000000000)
infile = r.post_vars.filename
if (isinstance(infile, cgi.FieldStorage) and infile.filename):
infile = infile.file
else:
try:
infile = open(infile, 'rb')
except IOError:
current.session.error = (current.T('Cannot read from file: %(filename)s') % {'filename': infile})
redirect(r.url(method='', representation='html'))
table = r.resource.table
if table:
table.import_from_csv_file(stream)
else:
db = current.db
db.import_from_csv_file(stream)
db.commit() | Import CSV file into database
Args:
r: the S3Request
Note:
Called by S3CRUD.create | modules/s3/s3import.py | s3_import_csv | annehaley/eden | 205 | python | def s3_import_csv(r):
'\n Import CSV file into database\n\n Args:\n r: the S3Request\n\n Note:\n Called by S3CRUD.create\n '
import cgi
import csv
csv.field_size_limit(1000000000)
infile = r.post_vars.filename
if (isinstance(infile, cgi.FieldStorage) and infile.filename):
infile = infile.file
else:
try:
infile = open(infile, 'rb')
except IOError:
current.session.error = (current.T('Cannot read from file: %(filename)s') % {'filename': infile})
redirect(r.url(method=, representation='html'))
table = r.resource.table
if table:
table.import_from_csv_file(stream)
else:
db = current.db
db.import_from_csv_file(stream)
db.commit() | def s3_import_csv(r):
'\n Import CSV file into database\n\n Args:\n r: the S3Request\n\n Note:\n Called by S3CRUD.create\n '
import cgi
import csv
csv.field_size_limit(1000000000)
infile = r.post_vars.filename
if (isinstance(infile, cgi.FieldStorage) and infile.filename):
infile = infile.file
else:
try:
infile = open(infile, 'rb')
except IOError:
current.session.error = (current.T('Cannot read from file: %(filename)s') % {'filename': infile})
redirect(r.url(method=, representation='html'))
table = r.resource.table
if table:
table.import_from_csv_file(stream)
else:
db = current.db
db.import_from_csv_file(stream)
db.commit()<|docstring|>Import CSV file into database
Args:
r: the S3Request
Note:
Called by S3CRUD.create<|endoftext|> |
bc0c8040f86eedee582004d4e816bbdd343b1edcb5ffc89c1157b3f3c56483cf | def s3_import_url(r):
'\n Import data from vars in URL query\n\n Args:\n r: the S3Request\n\n Note:\n Can only update single records (no mass-update)\n Called by S3CRUD.create / S3CRUD.update\n '
xml = current.xml
table = r.target()[2]
record = r.record
resource = r.resource
if (record and r.component):
resource = resource.components[r.component_name]
resource.load()
if (len(resource) == 1):
record = resource.records()[0]
else:
record = None
r.vars.update({resource.fkey: r.record[resource.pkey]})
elif ((not record) and r.component):
item = xml.json_message(False, 400, 'Invalid Request!')
return {'item': item}
if (record and (xml.UID in table.fields)):
r.vars.update({xml.UID: xml.export_uid(record[xml.UID])})
element = etree.Element(xml.TAG.resource)
element.set(xml.ATTRIBUTE.name, resource.tablename)
for var in r.vars:
if (var.find('.') != (- 1)):
continue
elif (var in table.fields):
field = table[var]
value = s3_str(r.vars[var])
if (var in xml.FIELDS_TO_ATTRIBUTES):
element.set(var, value)
else:
data = etree.Element(xml.TAG.data)
data.set(xml.ATTRIBUTE.field, var)
if (field.type == 'upload'):
data.set(xml.ATTRIBUTE.filename, value)
else:
data.text = value
element.append(data)
tree = xml.tree([element], domain=xml.domain)
result = Storage(committed=False)
def log(item):
result['item'] = item
resource.configure(oncommit_import_item=log)
try:
success = resource.import_xml(tree)
except SyntaxError:
pass
if result.item:
result = result.item
if (success and result.committed):
r.id = result.id
method = result.method
if (method == result.METHOD.CREATE):
item = xml.json_message(True, 201, ('Created as %s?%s.id=%s' % (str(r.url(method='', representation='html', vars={})), r.name, result.id)))
else:
item = xml.json_message(True, 200, 'Record updated')
else:
item = xml.json_message(False, 403, (('Could not create/update record: %s' % resource.error) or xml.error), tree=xml.tree2json(tree))
return {'item': item} | Import data from vars in URL query
Args:
r: the S3Request
Note:
Can only update single records (no mass-update)
Called by S3CRUD.create / S3CRUD.update | modules/s3/s3import.py | s3_import_url | annehaley/eden | 205 | python | def s3_import_url(r):
'\n Import data from vars in URL query\n\n Args:\n r: the S3Request\n\n Note:\n Can only update single records (no mass-update)\n Called by S3CRUD.create / S3CRUD.update\n '
xml = current.xml
table = r.target()[2]
record = r.record
resource = r.resource
if (record and r.component):
resource = resource.components[r.component_name]
resource.load()
if (len(resource) == 1):
record = resource.records()[0]
else:
record = None
r.vars.update({resource.fkey: r.record[resource.pkey]})
elif ((not record) and r.component):
item = xml.json_message(False, 400, 'Invalid Request!')
return {'item': item}
if (record and (xml.UID in table.fields)):
r.vars.update({xml.UID: xml.export_uid(record[xml.UID])})
element = etree.Element(xml.TAG.resource)
element.set(xml.ATTRIBUTE.name, resource.tablename)
for var in r.vars:
if (var.find('.') != (- 1)):
continue
elif (var in table.fields):
field = table[var]
value = s3_str(r.vars[var])
if (var in xml.FIELDS_TO_ATTRIBUTES):
element.set(var, value)
else:
data = etree.Element(xml.TAG.data)
data.set(xml.ATTRIBUTE.field, var)
if (field.type == 'upload'):
data.set(xml.ATTRIBUTE.filename, value)
else:
data.text = value
element.append(data)
tree = xml.tree([element], domain=xml.domain)
result = Storage(committed=False)
def log(item):
result['item'] = item
resource.configure(oncommit_import_item=log)
try:
success = resource.import_xml(tree)
except SyntaxError:
pass
if result.item:
result = result.item
if (success and result.committed):
r.id = result.id
method = result.method
if (method == result.METHOD.CREATE):
item = xml.json_message(True, 201, ('Created as %s?%s.id=%s' % (str(r.url(method=, representation='html', vars={})), r.name, result.id)))
else:
item = xml.json_message(True, 200, 'Record updated')
else:
item = xml.json_message(False, 403, (('Could not create/update record: %s' % resource.error) or xml.error), tree=xml.tree2json(tree))
return {'item': item} | def s3_import_url(r):
'\n Import data from vars in URL query\n\n Args:\n r: the S3Request\n\n Note:\n Can only update single records (no mass-update)\n Called by S3CRUD.create / S3CRUD.update\n '
xml = current.xml
table = r.target()[2]
record = r.record
resource = r.resource
if (record and r.component):
resource = resource.components[r.component_name]
resource.load()
if (len(resource) == 1):
record = resource.records()[0]
else:
record = None
r.vars.update({resource.fkey: r.record[resource.pkey]})
elif ((not record) and r.component):
item = xml.json_message(False, 400, 'Invalid Request!')
return {'item': item}
if (record and (xml.UID in table.fields)):
r.vars.update({xml.UID: xml.export_uid(record[xml.UID])})
element = etree.Element(xml.TAG.resource)
element.set(xml.ATTRIBUTE.name, resource.tablename)
for var in r.vars:
if (var.find('.') != (- 1)):
continue
elif (var in table.fields):
field = table[var]
value = s3_str(r.vars[var])
if (var in xml.FIELDS_TO_ATTRIBUTES):
element.set(var, value)
else:
data = etree.Element(xml.TAG.data)
data.set(xml.ATTRIBUTE.field, var)
if (field.type == 'upload'):
data.set(xml.ATTRIBUTE.filename, value)
else:
data.text = value
element.append(data)
tree = xml.tree([element], domain=xml.domain)
result = Storage(committed=False)
def log(item):
result['item'] = item
resource.configure(oncommit_import_item=log)
try:
success = resource.import_xml(tree)
except SyntaxError:
pass
if result.item:
result = result.item
if (success and result.committed):
r.id = result.id
method = result.method
if (method == result.METHOD.CREATE):
item = xml.json_message(True, 201, ('Created as %s?%s.id=%s' % (str(r.url(method=, representation='html', vars={})), r.name, result.id)))
else:
item = xml.json_message(True, 200, 'Record updated')
else:
item = xml.json_message(False, 403, (('Could not create/update record: %s' % resource.error) or xml.error), tree=xml.tree2json(tree))
return {'item': item}<|docstring|>Import data from vars in URL query
Args:
r: the S3Request
Note:
Can only update single records (no mass-update)
Called by S3CRUD.create / S3CRUD.update<|endoftext|> |
83c4cfa3cd33d99d8a79f237e30b75e8cf534a4958146123cce6b7c9974a791f | def apply_method(self, r, **attr):
'\n Args\n r: the S3Request\n attr: dictionary of parameters for the method handler\n\n Returns:\n output object to send to the view\n\n Known means of communicating with this module:\n\n It expects a URL of the form: /prefix/name/import\n\n It will interpret the http requests as follows:\n\n GET will trigger the upload\n POST will trigger either commits or display the import details\n DELETE will trigger deletes\n\n It will accept one of the following control vars:\n item: to specify a single item in the import job\n job: to specify a job\n It should not receive both so job takes precedent over item\n\n For CSV imports, the calling controller can add extra fields\n to the upload form to add columns to each row in the CSV. To add\n the extra fields, pass a named parameter "csv_extra_fields" to the\n s3_rest_controller call (or the S3Request call, respectively):\n\n s3_rest_controller(module, resourcename,\n csv_extra_fields = {"label": "ColumnLabelInTheCSV",\n "field": field_instance,\n }\n ])\n\n The Field instance "field" will be added to the upload form, and\n the user input will be added to each row of the CSV under the\n label as specified. If the "field" validator has options, the\n input value will be translated into the option representation,\n otherwise the value will be used as-is.\n\n Note that the "label" in the dict is the column label in the CSV,\n whereas the field label for the form is to be set in the Field\n instance passed as "field".\n\n You can add any arbitrary number of csv_extra_fields to the list.\n\n Additionally, you may want to allow the user to choose whether\n the import shall first remove all existing data in the target\n table. 
To do so, pass a label for the "replace_option" to the\n request:\n\n s3_rest_controller(module, resourcename,\n replace_option = T("Remove existing data before import"),\n )\n\n This will add the respective checkbox to the upload form.\n\n You may also want to provide a link to download a CSV template from\n the upload form. To do that, add the resource name to the request\n attributes:\n\n s3_rest_controller(module, resourcename,\n csv_template = "<resourcename>",\n )\n\n This will provide a link to:\n - static/formats/s3csv/<controller>/<resourcename>.csv\n at the top of the upload form.\n\n '
T = current.T
self.messages = messages = Messages(T)
messages.download_template = 'Download Template'
messages.invalid_file_format = 'Invalid File Format'
messages.unsupported_file_type = 'Unsupported file type of %s'
messages.stylesheet_not_found = 'No Stylesheet %s could be found to manage the import file.'
messages.no_file = 'No file submitted'
messages.file_open_error = 'Unable to open the file %s'
messages.file_not_found = 'The file to upload is missing'
messages.no_records_to_import = 'No records to import'
messages.no_job_to_delete = 'No job to delete, maybe it has already been deleted.'
messages.title_job_read = 'Details of the selected import job'
messages.title_job_list = 'List of import items'
messages.file_uploaded = 'Import file uploaded'
messages.upload_submit_btn = 'Upload Data File'
messages.open_btn = 'Open'
messages.view_btn = 'View'
messages.delete_btn = 'Delete'
messages.item_show_details = 'Display Details'
messages.job_total_records = 'Total records in the Import Job'
messages.job_records_selected = 'Records selected'
messages.job_deleted = 'Import job deleted'
messages.job_completed = 'Job run on %s. With result of (%s)'
messages.import_file = 'Import File'
messages.import_file_comment = 'Upload a file formatted according to the Template.'
messages.user_name = 'User Name'
messages.commit_total_records_imported = '%s records imported'
messages.commit_total_records_ignored = '%s records ignored'
messages.commit_total_errors = '%s records in error'
tablename = self.tablename
self.__define_table()
permitted = current.auth.s3_has_permission
authorised = (permitted('create', self.upload_tablename) and permitted('create', tablename))
if (not authorised):
if (r.method is not None):
r.unauthorised()
else:
return {'form': None}
self.controller_resource = self.resource
self.controller_table = self.table
self.controller_tablename = tablename
self.upload_resource = None
self.item_resource = None
self.controller = r.controller
self.function = r.function
try:
self.upload_title = (current.response.s3.crud_strings[tablename].title_upload or T('Import'))
except (KeyError, AttributeError):
self.upload_title = T('Import')
current.session.s3.ocr_enabled = False
self.error = None
self.warning = None
if ('csv_stylesheet' in attr):
self.csv_stylesheet = attr['csv_stylesheet']
else:
self.csv_stylesheet = None
self.csv_extra_fields = None
self.csv_extra_data = None
self.xslt_path = os.path.join(r.folder, r.XSLT_PATH)
self.xslt_extension = r.XSLT_EXTENSION
get_vars = r.get_vars
transform = get_vars.get('transform', None)
source = get_vars.get('filename', None)
if ('job' in r.post_vars):
upload_id = r.post_vars['job']
else:
upload_id = get_vars.get('job')
items = self._process_item_list(upload_id, r.vars)
if ('delete' in get_vars):
r.http = 'DELETE'
self.upload_id = upload_id
upload_job = current.db((self.upload_table.id == upload_id)).select(limitby=(0, 1)).first()
if upload_job:
self.job_id = upload_job.job_id
self.upload_job = upload_job
else:
self.job_id = None
self.upload_job = None
self.ajax = (current.request.ajax and (r.post_vars.approach == 'ajax'))
if (r.http == 'GET'):
if (source != None):
self.commit(source, transform)
if (upload_id != None):
return self.display_job(upload_id)
else:
return self.upload(r, **attr)
elif (r.http == 'POST'):
if (items != None):
return self.commit_items(upload_id, items)
else:
return self.generate_job(r, **attr)
elif (r.http == 'DELETE'):
if (upload_id != None):
return self.delete_job(upload_id)
r.error(405, current.ERROR.BAD_METHOD) | Args
r: the S3Request
attr: dictionary of parameters for the method handler
Returns:
output object to send to the view
Known means of communicating with this module:
It expects a URL of the form: /prefix/name/import
It will interpret the http requests as follows:
GET will trigger the upload
POST will trigger either commits or display the import details
DELETE will trigger deletes
It will accept one of the following control vars:
item: to specify a single item in the import job
job: to specify a job
It should not receive both so job takes precedent over item
For CSV imports, the calling controller can add extra fields
to the upload form to add columns to each row in the CSV. To add
the extra fields, pass a named parameter "csv_extra_fields" to the
s3_rest_controller call (or the S3Request call, respectively):
s3_rest_controller(module, resourcename,
csv_extra_fields = {"label": "ColumnLabelInTheCSV",
"field": field_instance,
}
])
The Field instance "field" will be added to the upload form, and
the user input will be added to each row of the CSV under the
label as specified. If the "field" validator has options, the
input value will be translated into the option representation,
otherwise the value will be used as-is.
Note that the "label" in the dict is the column label in the CSV,
whereas the field label for the form is to be set in the Field
instance passed as "field".
You can add any arbitrary number of csv_extra_fields to the list.
Additionally, you may want to allow the user to choose whether
the import shall first remove all existing data in the target
table. To do so, pass a label for the "replace_option" to the
request:
s3_rest_controller(module, resourcename,
replace_option = T("Remove existing data before import"),
)
This will add the respective checkbox to the upload form.
You may also want to provide a link to download a CSV template from
the upload form. To do that, add the resource name to the request
attributes:
s3_rest_controller(module, resourcename,
csv_template = "<resourcename>",
)
This will provide a link to:
- static/formats/s3csv/<controller>/<resourcename>.csv
at the top of the upload form. | modules/s3/s3import.py | apply_method | annehaley/eden | 205 | python | def apply_method(self, r, **attr):
'\n Args\n r: the S3Request\n attr: dictionary of parameters for the method handler\n\n Returns:\n output object to send to the view\n\n Known means of communicating with this module:\n\n It expects a URL of the form: /prefix/name/import\n\n It will interpret the http requests as follows:\n\n GET will trigger the upload\n POST will trigger either commits or display the import details\n DELETE will trigger deletes\n\n It will accept one of the following control vars:\n item: to specify a single item in the import job\n job: to specify a job\n It should not receive both so job takes precedent over item\n\n For CSV imports, the calling controller can add extra fields\n to the upload form to add columns to each row in the CSV. To add\n the extra fields, pass a named parameter "csv_extra_fields" to the\n s3_rest_controller call (or the S3Request call, respectively):\n\n s3_rest_controller(module, resourcename,\n csv_extra_fields = {"label": "ColumnLabelInTheCSV",\n "field": field_instance,\n }\n ])\n\n The Field instance "field" will be added to the upload form, and\n the user input will be added to each row of the CSV under the\n label as specified. If the "field" validator has options, the\n input value will be translated into the option representation,\n otherwise the value will be used as-is.\n\n Note that the "label" in the dict is the column label in the CSV,\n whereas the field label for the form is to be set in the Field\n instance passed as "field".\n\n You can add any arbitrary number of csv_extra_fields to the list.\n\n Additionally, you may want to allow the user to choose whether\n the import shall first remove all existing data in the target\n table. 
To do so, pass a label for the "replace_option" to the\n request:\n\n s3_rest_controller(module, resourcename,\n replace_option = T("Remove existing data before import"),\n )\n\n This will add the respective checkbox to the upload form.\n\n You may also want to provide a link to download a CSV template from\n the upload form. To do that, add the resource name to the request\n attributes:\n\n s3_rest_controller(module, resourcename,\n csv_template = "<resourcename>",\n )\n\n This will provide a link to:\n - static/formats/s3csv/<controller>/<resourcename>.csv\n at the top of the upload form.\n\n '
T = current.T
self.messages = messages = Messages(T)
messages.download_template = 'Download Template'
messages.invalid_file_format = 'Invalid File Format'
messages.unsupported_file_type = 'Unsupported file type of %s'
messages.stylesheet_not_found = 'No Stylesheet %s could be found to manage the import file.'
messages.no_file = 'No file submitted'
messages.file_open_error = 'Unable to open the file %s'
messages.file_not_found = 'The file to upload is missing'
messages.no_records_to_import = 'No records to import'
messages.no_job_to_delete = 'No job to delete, maybe it has already been deleted.'
messages.title_job_read = 'Details of the selected import job'
messages.title_job_list = 'List of import items'
messages.file_uploaded = 'Import file uploaded'
messages.upload_submit_btn = 'Upload Data File'
messages.open_btn = 'Open'
messages.view_btn = 'View'
messages.delete_btn = 'Delete'
messages.item_show_details = 'Display Details'
messages.job_total_records = 'Total records in the Import Job'
messages.job_records_selected = 'Records selected'
messages.job_deleted = 'Import job deleted'
messages.job_completed = 'Job run on %s. With result of (%s)'
messages.import_file = 'Import File'
messages.import_file_comment = 'Upload a file formatted according to the Template.'
messages.user_name = 'User Name'
messages.commit_total_records_imported = '%s records imported'
messages.commit_total_records_ignored = '%s records ignored'
messages.commit_total_errors = '%s records in error'
tablename = self.tablename
self.__define_table()
permitted = current.auth.s3_has_permission
authorised = (permitted('create', self.upload_tablename) and permitted('create', tablename))
if (not authorised):
if (r.method is not None):
r.unauthorised()
else:
return {'form': None}
self.controller_resource = self.resource
self.controller_table = self.table
self.controller_tablename = tablename
self.upload_resource = None
self.item_resource = None
self.controller = r.controller
self.function = r.function
try:
self.upload_title = (current.response.s3.crud_strings[tablename].title_upload or T('Import'))
except (KeyError, AttributeError):
self.upload_title = T('Import')
current.session.s3.ocr_enabled = False
self.error = None
self.warning = None
if ('csv_stylesheet' in attr):
self.csv_stylesheet = attr['csv_stylesheet']
else:
self.csv_stylesheet = None
self.csv_extra_fields = None
self.csv_extra_data = None
self.xslt_path = os.path.join(r.folder, r.XSLT_PATH)
self.xslt_extension = r.XSLT_EXTENSION
get_vars = r.get_vars
transform = get_vars.get('transform', None)
source = get_vars.get('filename', None)
if ('job' in r.post_vars):
upload_id = r.post_vars['job']
else:
upload_id = get_vars.get('job')
items = self._process_item_list(upload_id, r.vars)
if ('delete' in get_vars):
r.http = 'DELETE'
self.upload_id = upload_id
upload_job = current.db((self.upload_table.id == upload_id)).select(limitby=(0, 1)).first()
if upload_job:
self.job_id = upload_job.job_id
self.upload_job = upload_job
else:
self.job_id = None
self.upload_job = None
self.ajax = (current.request.ajax and (r.post_vars.approach == 'ajax'))
if (r.http == 'GET'):
if (source != None):
self.commit(source, transform)
if (upload_id != None):
return self.display_job(upload_id)
else:
return self.upload(r, **attr)
elif (r.http == 'POST'):
if (items != None):
return self.commit_items(upload_id, items)
else:
return self.generate_job(r, **attr)
elif (r.http == 'DELETE'):
if (upload_id != None):
return self.delete_job(upload_id)
r.error(405, current.ERROR.BAD_METHOD) | def apply_method(self, r, **attr):
'\n Args\n r: the S3Request\n attr: dictionary of parameters for the method handler\n\n Returns:\n output object to send to the view\n\n Known means of communicating with this module:\n\n It expects a URL of the form: /prefix/name/import\n\n It will interpret the http requests as follows:\n\n GET will trigger the upload\n POST will trigger either commits or display the import details\n DELETE will trigger deletes\n\n It will accept one of the following control vars:\n item: to specify a single item in the import job\n job: to specify a job\n It should not receive both so job takes precedent over item\n\n For CSV imports, the calling controller can add extra fields\n to the upload form to add columns to each row in the CSV. To add\n the extra fields, pass a named parameter "csv_extra_fields" to the\n s3_rest_controller call (or the S3Request call, respectively):\n\n s3_rest_controller(module, resourcename,\n csv_extra_fields = {"label": "ColumnLabelInTheCSV",\n "field": field_instance,\n }\n ])\n\n The Field instance "field" will be added to the upload form, and\n the user input will be added to each row of the CSV under the\n label as specified. If the "field" validator has options, the\n input value will be translated into the option representation,\n otherwise the value will be used as-is.\n\n Note that the "label" in the dict is the column label in the CSV,\n whereas the field label for the form is to be set in the Field\n instance passed as "field".\n\n You can add any arbitrary number of csv_extra_fields to the list.\n\n Additionally, you may want to allow the user to choose whether\n the import shall first remove all existing data in the target\n table. 
To do so, pass a label for the "replace_option" to the\n request:\n\n s3_rest_controller(module, resourcename,\n replace_option = T("Remove existing data before import"),\n )\n\n This will add the respective checkbox to the upload form.\n\n You may also want to provide a link to download a CSV template from\n the upload form. To do that, add the resource name to the request\n attributes:\n\n s3_rest_controller(module, resourcename,\n csv_template = "<resourcename>",\n )\n\n This will provide a link to:\n - static/formats/s3csv/<controller>/<resourcename>.csv\n at the top of the upload form.\n\n '
T = current.T
self.messages = messages = Messages(T)
messages.download_template = 'Download Template'
messages.invalid_file_format = 'Invalid File Format'
messages.unsupported_file_type = 'Unsupported file type of %s'
messages.stylesheet_not_found = 'No Stylesheet %s could be found to manage the import file.'
messages.no_file = 'No file submitted'
messages.file_open_error = 'Unable to open the file %s'
messages.file_not_found = 'The file to upload is missing'
messages.no_records_to_import = 'No records to import'
messages.no_job_to_delete = 'No job to delete, maybe it has already been deleted.'
messages.title_job_read = 'Details of the selected import job'
messages.title_job_list = 'List of import items'
messages.file_uploaded = 'Import file uploaded'
messages.upload_submit_btn = 'Upload Data File'
messages.open_btn = 'Open'
messages.view_btn = 'View'
messages.delete_btn = 'Delete'
messages.item_show_details = 'Display Details'
messages.job_total_records = 'Total records in the Import Job'
messages.job_records_selected = 'Records selected'
messages.job_deleted = 'Import job deleted'
messages.job_completed = 'Job run on %s. With result of (%s)'
messages.import_file = 'Import File'
messages.import_file_comment = 'Upload a file formatted according to the Template.'
messages.user_name = 'User Name'
messages.commit_total_records_imported = '%s records imported'
messages.commit_total_records_ignored = '%s records ignored'
messages.commit_total_errors = '%s records in error'
tablename = self.tablename
self.__define_table()
permitted = current.auth.s3_has_permission
authorised = (permitted('create', self.upload_tablename) and permitted('create', tablename))
if (not authorised):
if (r.method is not None):
r.unauthorised()
else:
return {'form': None}
self.controller_resource = self.resource
self.controller_table = self.table
self.controller_tablename = tablename
self.upload_resource = None
self.item_resource = None
self.controller = r.controller
self.function = r.function
try:
self.upload_title = (current.response.s3.crud_strings[tablename].title_upload or T('Import'))
except (KeyError, AttributeError):
self.upload_title = T('Import')
current.session.s3.ocr_enabled = False
self.error = None
self.warning = None
if ('csv_stylesheet' in attr):
self.csv_stylesheet = attr['csv_stylesheet']
else:
self.csv_stylesheet = None
self.csv_extra_fields = None
self.csv_extra_data = None
self.xslt_path = os.path.join(r.folder, r.XSLT_PATH)
self.xslt_extension = r.XSLT_EXTENSION
get_vars = r.get_vars
transform = get_vars.get('transform', None)
source = get_vars.get('filename', None)
if ('job' in r.post_vars):
upload_id = r.post_vars['job']
else:
upload_id = get_vars.get('job')
items = self._process_item_list(upload_id, r.vars)
if ('delete' in get_vars):
r.http = 'DELETE'
self.upload_id = upload_id
upload_job = current.db((self.upload_table.id == upload_id)).select(limitby=(0, 1)).first()
if upload_job:
self.job_id = upload_job.job_id
self.upload_job = upload_job
else:
self.job_id = None
self.upload_job = None
self.ajax = (current.request.ajax and (r.post_vars.approach == 'ajax'))
if (r.http == 'GET'):
if (source != None):
self.commit(source, transform)
if (upload_id != None):
return self.display_job(upload_id)
else:
return self.upload(r, **attr)
elif (r.http == 'POST'):
if (items != None):
return self.commit_items(upload_id, items)
else:
return self.generate_job(r, **attr)
elif (r.http == 'DELETE'):
if (upload_id != None):
return self.delete_job(upload_id)
r.error(405, current.ERROR.BAD_METHOD)<|docstring|>Args
r: the S3Request
attr: dictionary of parameters for the method handler
Returns:
output object to send to the view
Known means of communicating with this module:
It expects a URL of the form: /prefix/name/import
It will interpret the http requests as follows:
GET will trigger the upload
POST will trigger either commits or display the import details
DELETE will trigger deletes
It will accept one of the following control vars:
item: to specify a single item in the import job
job: to specify a job
It should not receive both so job takes precedent over item
For CSV imports, the calling controller can add extra fields
to the upload form to add columns to each row in the CSV. To add
the extra fields, pass a named parameter "csv_extra_fields" to the
s3_rest_controller call (or the S3Request call, respectively):
s3_rest_controller(module, resourcename,
csv_extra_fields = {"label": "ColumnLabelInTheCSV",
"field": field_instance,
}
])
The Field instance "field" will be added to the upload form, and
the user input will be added to each row of the CSV under the
label as specified. If the "field" validator has options, the
input value will be translated into the option representation,
otherwise the value will be used as-is.
Note that the "label" in the dict is the column label in the CSV,
whereas the field label for the form is to be set in the Field
instance passed as "field".
You can add any arbitrary number of csv_extra_fields to the list.
Additionally, you may want to allow the user to choose whether
the import shall first remove all existing data in the target
table. To do so, pass a label for the "replace_option" to the
request:
s3_rest_controller(module, resourcename,
replace_option = T("Remove existing data before import"),
)
This will add the respective checkbox to the upload form.
You may also want to provide a link to download a CSV template from
the upload form. To do that, add the resource name to the request
attributes:
s3_rest_controller(module, resourcename,
csv_template = "<resourcename>",
)
This will provide a link to:
- static/formats/s3csv/<controller>/<resourcename>.csv
at the top of the upload form.<|endoftext|> |
7c64370dd6947d8f115471a332312c1bc797c4e36d6e56bc23f585435c130abb | def upload(self, r, **attr):
'\n This will display the upload form\n It will ask for a file to be uploaded or for a job to be selected.\n\n If a file is uploaded then it will guess at the file type and\n ask for the transform file to be used. The transform files will\n be in a dataTable with the module specific files shown first and\n after those all other known transform files. Once the transform\n file is selected the import process can be started which will\n generate an importJob, and a "POST" method will occur\n\n If a job is selected it will have two actions, open and delete.\n Open will mean that a "GET" method will occur, with the job details\n passed in.\n Whilst the delete action will trigger a "DELETE" method.\n '
output = self._create_upload_dataTable()
if (r.representation == 'aadata'):
return output
form = self._upload_form(r, **attr)
output.update(form=form, title=self.upload_title)
return output | This will display the upload form
It will ask for a file to be uploaded or for a job to be selected.
If a file is uploaded then it will guess at the file type and
ask for the transform file to be used. The transform files will
be in a dataTable with the module specific files shown first and
after those all other known transform files. Once the transform
file is selected the import process can be started which will
generate an importJob, and a "POST" method will occur
If a job is selected it will have two actions, open and delete.
Open will mean that a "GET" method will occur, with the job details
passed in.
Whilst the delete action will trigger a "DELETE" method. | modules/s3/s3import.py | upload | annehaley/eden | 205 | python | def upload(self, r, **attr):
'\n This will display the upload form\n It will ask for a file to be uploaded or for a job to be selected.\n\n If a file is uploaded then it will guess at the file type and\n ask for the transform file to be used. The transform files will\n be in a dataTable with the module specific files shown first and\n after those all other known transform files. Once the transform\n file is selected the import process can be started which will\n generate an importJob, and a "POST" method will occur\n\n If a job is selected it will have two actions, open and delete.\n Open will mean that a "GET" method will occur, with the job details\n passed in.\n Whilst the delete action will trigger a "DELETE" method.\n '
output = self._create_upload_dataTable()
if (r.representation == 'aadata'):
return output
form = self._upload_form(r, **attr)
output.update(form=form, title=self.upload_title)
return output | def upload(self, r, **attr):
'\n This will display the upload form\n It will ask for a file to be uploaded or for a job to be selected.\n\n If a file is uploaded then it will guess at the file type and\n ask for the transform file to be used. The transform files will\n be in a dataTable with the module specific files shown first and\n after those all other known transform files. Once the transform\n file is selected the import process can be started which will\n generate an importJob, and a "POST" method will occur\n\n If a job is selected it will have two actions, open and delete.\n Open will mean that a "GET" method will occur, with the job details\n passed in.\n Whilst the delete action will trigger a "DELETE" method.\n '
output = self._create_upload_dataTable()
if (r.representation == 'aadata'):
return output
form = self._upload_form(r, **attr)
output.update(form=form, title=self.upload_title)
return output<|docstring|>This will display the upload form
It will ask for a file to be uploaded or for a job to be selected.
If a file is uploaded then it will guess at the file type and
ask for the transform file to be used. The transform files will
be in a dataTable with the module specific files shown first and
after those all other known transform files. Once the transform
file is selected the import process can be started which will
generate an importJob, and a "POST" method will occur
If a job is selected it will have two actions, open and delete.
Open will mean that a "GET" method will occur, with the job details
passed in.
Whilst the delete action will trigger a "DELETE" method.<|endoftext|> |
e55c5ac3bf297a7beb676b385adce5e048d8b2f2fcf5fd814f5d9e2a1f1acc73 | def generate_job(self, r, **attr):
'\n Generate an ImportJob from the submitted upload form\n '
db = current.db
response = current.response
s3 = response.s3
ajax = self.ajax
table = self.upload_table
if ajax:
sfilename = ofilename = r.post_vars['file'].filename
upload_id = table.insert(controller=self.controller, function=self.function, filename=ofilename, file=sfilename, user_id=current.session.auth.user.id)
else:
title = self.upload_title
form = self._upload_form(r, **attr)
r = self.request
r.read_body()
sfilename = form.vars.file
try:
ofilename = r.post_vars['file'].filename
except (KeyError, AttributeError):
form.errors.file = self.messages.no_file
if form.errors:
response.flash = ''
output = self._create_upload_dataTable()
output.update(form=form, title=title)
return output
elif ((not sfilename) or (ofilename not in r.files) or (r.files[ofilename] is None)):
response.flash = ''
response.error = self.messages.file_not_found
output = self._create_upload_dataTable()
output.update(form=form, title=title)
return output
query = (table.file == sfilename)
db(query).update(controller=self.controller, function=self.function, filename=ofilename, user_id=current.session.auth.user.id)
row = db(query).select(table.id, limitby=(0, 1)).first()
upload_id = row.id
extension = ofilename.rsplit('.', 1).pop()
if (extension not in ('csv', 'xls', 'xlsx', 'xlsm')):
if ajax:
return {'Error': self.messages.invalid_file_format}
response.flash = None
response.error = self.messages.invalid_file_format
return self.upload(r, **attr)
db.commit()
if ajax:
upload_file = r.post_vars.file.file
else:
upload_file = r.files[ofilename]
if (extension == 'xls'):
if ('xls_parser' in s3):
upload_file.seek(0)
upload_file = s3.xls_parser(upload_file.read())
extension = 'csv'
if (upload_file is None):
response.flash = None
response.error = self.messages.file_not_found
return self.upload(r, **attr)
else:
upload_file.seek(0)
if ('single_pass' in r.vars):
single_pass = r.vars['single_pass']
else:
single_pass = None
self._generate_import_job(upload_id, upload_file, extension, commit_job=single_pass)
if (upload_id is None):
row = db(query).update(status=2)
if (self.error != None):
response.error = self.error
if (self.warning != None):
response.warning = self.warning
response.flash = ''
return self.upload(r, **attr)
if single_pass:
current.session.confirmation = self.messages.file_uploaded
next_URL = URL(r=self.request, f=self.function, args=['import'], vars=current.request.get_vars)
redirect(next_URL)
s3.dataTable_vars = {'job': upload_id}
return self.display_job(upload_id) | Generate an ImportJob from the submitted upload form | modules/s3/s3import.py | generate_job | annehaley/eden | 205 | python | def generate_job(self, r, **attr):
'\n \n '
db = current.db
response = current.response
s3 = response.s3
ajax = self.ajax
table = self.upload_table
if ajax:
sfilename = ofilename = r.post_vars['file'].filename
upload_id = table.insert(controller=self.controller, function=self.function, filename=ofilename, file=sfilename, user_id=current.session.auth.user.id)
else:
title = self.upload_title
form = self._upload_form(r, **attr)
r = self.request
r.read_body()
sfilename = form.vars.file
try:
ofilename = r.post_vars['file'].filename
except (KeyError, AttributeError):
form.errors.file = self.messages.no_file
if form.errors:
response.flash =
output = self._create_upload_dataTable()
output.update(form=form, title=title)
return output
elif ((not sfilename) or (ofilename not in r.files) or (r.files[ofilename] is None)):
response.flash =
response.error = self.messages.file_not_found
output = self._create_upload_dataTable()
output.update(form=form, title=title)
return output
query = (table.file == sfilename)
db(query).update(controller=self.controller, function=self.function, filename=ofilename, user_id=current.session.auth.user.id)
row = db(query).select(table.id, limitby=(0, 1)).first()
upload_id = row.id
extension = ofilename.rsplit('.', 1).pop()
if (extension not in ('csv', 'xls', 'xlsx', 'xlsm')):
if ajax:
return {'Error': self.messages.invalid_file_format}
response.flash = None
response.error = self.messages.invalid_file_format
return self.upload(r, **attr)
db.commit()
if ajax:
upload_file = r.post_vars.file.file
else:
upload_file = r.files[ofilename]
if (extension == 'xls'):
if ('xls_parser' in s3):
upload_file.seek(0)
upload_file = s3.xls_parser(upload_file.read())
extension = 'csv'
if (upload_file is None):
response.flash = None
response.error = self.messages.file_not_found
return self.upload(r, **attr)
else:
upload_file.seek(0)
if ('single_pass' in r.vars):
single_pass = r.vars['single_pass']
else:
single_pass = None
self._generate_import_job(upload_id, upload_file, extension, commit_job=single_pass)
if (upload_id is None):
row = db(query).update(status=2)
if (self.error != None):
response.error = self.error
if (self.warning != None):
response.warning = self.warning
response.flash =
return self.upload(r, **attr)
if single_pass:
current.session.confirmation = self.messages.file_uploaded
next_URL = URL(r=self.request, f=self.function, args=['import'], vars=current.request.get_vars)
redirect(next_URL)
s3.dataTable_vars = {'job': upload_id}
return self.display_job(upload_id) | def generate_job(self, r, **attr):
'\n \n '
db = current.db
response = current.response
s3 = response.s3
ajax = self.ajax
table = self.upload_table
if ajax:
sfilename = ofilename = r.post_vars['file'].filename
upload_id = table.insert(controller=self.controller, function=self.function, filename=ofilename, file=sfilename, user_id=current.session.auth.user.id)
else:
title = self.upload_title
form = self._upload_form(r, **attr)
r = self.request
r.read_body()
sfilename = form.vars.file
try:
ofilename = r.post_vars['file'].filename
except (KeyError, AttributeError):
form.errors.file = self.messages.no_file
if form.errors:
response.flash =
output = self._create_upload_dataTable()
output.update(form=form, title=title)
return output
elif ((not sfilename) or (ofilename not in r.files) or (r.files[ofilename] is None)):
response.flash =
response.error = self.messages.file_not_found
output = self._create_upload_dataTable()
output.update(form=form, title=title)
return output
query = (table.file == sfilename)
db(query).update(controller=self.controller, function=self.function, filename=ofilename, user_id=current.session.auth.user.id)
row = db(query).select(table.id, limitby=(0, 1)).first()
upload_id = row.id
extension = ofilename.rsplit('.', 1).pop()
if (extension not in ('csv', 'xls', 'xlsx', 'xlsm')):
if ajax:
return {'Error': self.messages.invalid_file_format}
response.flash = None
response.error = self.messages.invalid_file_format
return self.upload(r, **attr)
db.commit()
if ajax:
upload_file = r.post_vars.file.file
else:
upload_file = r.files[ofilename]
if (extension == 'xls'):
if ('xls_parser' in s3):
upload_file.seek(0)
upload_file = s3.xls_parser(upload_file.read())
extension = 'csv'
if (upload_file is None):
response.flash = None
response.error = self.messages.file_not_found
return self.upload(r, **attr)
else:
upload_file.seek(0)
if ('single_pass' in r.vars):
single_pass = r.vars['single_pass']
else:
single_pass = None
self._generate_import_job(upload_id, upload_file, extension, commit_job=single_pass)
if (upload_id is None):
row = db(query).update(status=2)
if (self.error != None):
response.error = self.error
if (self.warning != None):
response.warning = self.warning
response.flash =
return self.upload(r, **attr)
if single_pass:
current.session.confirmation = self.messages.file_uploaded
next_URL = URL(r=self.request, f=self.function, args=['import'], vars=current.request.get_vars)
redirect(next_URL)
s3.dataTable_vars = {'job': upload_id}
return self.display_job(upload_id)<|docstring|>Generate an ImportJob from the submitted upload form<|endoftext|> |
82b062255b72f5e00731cd46fdc435e628114de9155c1d1f64ec96b9e71a5e75 | def commit(self, source, transform):
'\n Import a source\n\n Args:\n source: the source\n transform: the stylesheet path\n '
session = current.session
try:
user = session.auth.user.id
except AttributeError:
user = None
extension = source.rsplit('.', 1).pop()
if (extension not in ('csv, xls', 'xlsx', 'xlsm')):
file_format = 'csv'
else:
file_format = extension
try:
with open(source, 'r') as infile:
upload_id = self.upload_table.insert(controller=self.controller, function=self.function, filename=source, user_id=user, status=1)
current.db.commit()
result = self._generate_import_job(upload_id, infile, file_format, stylesheet=transform)
except IOError:
session.error = (self.messages.file_open_error % source)
redirect(URL(r=self.request, f=self.function))
if (result is None):
if (self.error != None):
if (session.error is None):
session.error = self.error
else:
session.error += self.error
if (self.warning != None):
if (session.warning is None):
session.warning = self.warning
else:
session.warning += self.warning
else:
items = self._get_all_items(upload_id, True)
self._commit_import_job(upload_id, items)
result = self._update_upload_job(upload_id)
messages = self.messages
msg = ('%s : %s %s %s' % (source, messages.commit_total_records_imported, messages.commit_total_errors, messages.commit_total_records_ignored))
msg = (msg % result)
confirmation = session.confirmation
if (confirmation is None):
confirmation = msg
else:
confirmation += msg | Import a source
Args:
source: the source
transform: the stylesheet path | modules/s3/s3import.py | commit | annehaley/eden | 205 | python | def commit(self, source, transform):
'\n Import a source\n\n Args:\n source: the source\n transform: the stylesheet path\n '
session = current.session
try:
user = session.auth.user.id
except AttributeError:
user = None
extension = source.rsplit('.', 1).pop()
if (extension not in ('csv, xls', 'xlsx', 'xlsm')):
file_format = 'csv'
else:
file_format = extension
try:
with open(source, 'r') as infile:
upload_id = self.upload_table.insert(controller=self.controller, function=self.function, filename=source, user_id=user, status=1)
current.db.commit()
result = self._generate_import_job(upload_id, infile, file_format, stylesheet=transform)
except IOError:
session.error = (self.messages.file_open_error % source)
redirect(URL(r=self.request, f=self.function))
if (result is None):
if (self.error != None):
if (session.error is None):
session.error = self.error
else:
session.error += self.error
if (self.warning != None):
if (session.warning is None):
session.warning = self.warning
else:
session.warning += self.warning
else:
items = self._get_all_items(upload_id, True)
self._commit_import_job(upload_id, items)
result = self._update_upload_job(upload_id)
messages = self.messages
msg = ('%s : %s %s %s' % (source, messages.commit_total_records_imported, messages.commit_total_errors, messages.commit_total_records_ignored))
msg = (msg % result)
confirmation = session.confirmation
if (confirmation is None):
confirmation = msg
else:
confirmation += msg | def commit(self, source, transform):
'\n Import a source\n\n Args:\n source: the source\n transform: the stylesheet path\n '
session = current.session
try:
user = session.auth.user.id
except AttributeError:
user = None
extension = source.rsplit('.', 1).pop()
if (extension not in ('csv, xls', 'xlsx', 'xlsm')):
file_format = 'csv'
else:
file_format = extension
try:
with open(source, 'r') as infile:
upload_id = self.upload_table.insert(controller=self.controller, function=self.function, filename=source, user_id=user, status=1)
current.db.commit()
result = self._generate_import_job(upload_id, infile, file_format, stylesheet=transform)
except IOError:
session.error = (self.messages.file_open_error % source)
redirect(URL(r=self.request, f=self.function))
if (result is None):
if (self.error != None):
if (session.error is None):
session.error = self.error
else:
session.error += self.error
if (self.warning != None):
if (session.warning is None):
session.warning = self.warning
else:
session.warning += self.warning
else:
items = self._get_all_items(upload_id, True)
self._commit_import_job(upload_id, items)
result = self._update_upload_job(upload_id)
messages = self.messages
msg = ('%s : %s %s %s' % (source, messages.commit_total_records_imported, messages.commit_total_errors, messages.commit_total_records_ignored))
msg = (msg % result)
confirmation = session.confirmation
if (confirmation is None):
confirmation = msg
else:
confirmation += msg<|docstring|>Import a source
Args:
source: the source
transform: the stylesheet path<|endoftext|> |
622dc3da16474f65f688cf099618858dbe56e8e40c718c091fe5306144f739a5 | def delete_job(self, upload_id):
'\n Delete an uploaded file and the corresponding import job\n\n Args:\n upload_id: the upload ID\n '
db = current.db
request = self.request
resource = request.resource
job_id = self.job_id
if job_id:
result = resource.import_xml(None, id=None, tree=None, job_id=job_id, delete_job=True)
count = db((self.upload_table.id == upload_id)).delete()
db.commit()
result = count
if (result == False):
current.response.warning = self.messages.no_job_to_delete
else:
current.response.confirmation = self.messages.job_deleted
self.next = request.url(vars={}) | Delete an uploaded file and the corresponding import job
Args:
upload_id: the upload ID | modules/s3/s3import.py | delete_job | annehaley/eden | 205 | python | def delete_job(self, upload_id):
'\n Delete an uploaded file and the corresponding import job\n\n Args:\n upload_id: the upload ID\n '
db = current.db
request = self.request
resource = request.resource
job_id = self.job_id
if job_id:
result = resource.import_xml(None, id=None, tree=None, job_id=job_id, delete_job=True)
count = db((self.upload_table.id == upload_id)).delete()
db.commit()
result = count
if (result == False):
current.response.warning = self.messages.no_job_to_delete
else:
current.response.confirmation = self.messages.job_deleted
self.next = request.url(vars={}) | def delete_job(self, upload_id):
'\n Delete an uploaded file and the corresponding import job\n\n Args:\n upload_id: the upload ID\n '
db = current.db
request = self.request
resource = request.resource
job_id = self.job_id
if job_id:
result = resource.import_xml(None, id=None, tree=None, job_id=job_id, delete_job=True)
count = db((self.upload_table.id == upload_id)).delete()
db.commit()
result = count
if (result == False):
current.response.warning = self.messages.no_job_to_delete
else:
current.response.confirmation = self.messages.job_deleted
self.next = request.url(vars={})<|docstring|>Delete an uploaded file and the corresponding import job
Args:
upload_id: the upload ID<|endoftext|> |
def _upload_form(self, r, **attr):
    """
    Create and process the upload form, including csv_extra_fields

    Args:
        r: the S3Request
        attr: controller attributes (csv_template, csv_extra_fields,
              replace_option, replace_option_help)

    Returns:
        the upload FORM
    """
    EXTRA_FIELDS = 'csv_extra_fields'
    TEMPLATE = 'csv_template'
    REPLACE_OPTION = 'replace_option'

    response = current.response
    s3 = response.s3
    request = self.request
    table = self.upload_table

    formstyle = s3.crud.formstyle
    response.view = self._view(request, 'list_filter.html')

    # Expose the replace-option checkbox if configured by the controller
    if REPLACE_OPTION in attr:
        replace_option = attr[REPLACE_OPTION]
        if replace_option is not None:
            field = table.replace_option
            field.readable = field.writable = True
            field.label = replace_option
            replace_option_help = attr.get('replace_option_help', current.T('Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.'))
            field.comment = DIV(_class='tooltip',
                                _title=('%s|%s' % (replace_option,
                                                   replace_option_help)))

    fields = [f for f in table if f.readable or (f.writable and not f.compute)]

    # Append any csv_extra_fields configured by the controller
    if EXTRA_FIELDS in attr:
        extra_fields = attr[EXTRA_FIELDS]
        if extra_fields is not None:
            fields.extend([f['field'] for f in extra_fields if 'field' in f])
        self.csv_extra_fields = extra_fields

    labels, required = s3_mark_required(fields)
    if required:
        s3.has_required = True

    form = SQLFORM.factory(*fields,
                           table_name=self.UPLOAD_TABLE_NAME,
                           labels=labels,
                           formstyle=formstyle,
                           upload=os.path.join(request.folder, 'uploads', 'imports'),
                           separator='',
                           message=self.messages.file_uploaded)

    # Add a link to download the spreadsheet template, if one exists
    args = ['s3csv']
    template = attr.get(TEMPLATE, True)
    if template is True:
        # Default: template named after the controller function
        args.extend([self.controller, '%s.csv' % self.function])
    elif isinstance(template, str):
        if os.path.splitext(template)[1] not in KNOWN_SPREADSHEET_EXTENSIONS:
            template = '%s.csv' % template
        args.extend([self.controller, template])
    elif isinstance(template, (tuple, list)):
        args.extend(template[:-1])
        template = template[-1]
        if os.path.splitext(template)[1] not in KNOWN_SPREADSHEET_EXTENSIONS:
            template = '%s.csv' % template
        args.append(template)
    else:
        template = None
    if template is not None:
        url = URL(r=request, c='static', f='formats', args=args)
        # Only advertise the template if the file actually exists; use
        # os.path.exists rather than open() so no file handle is leaked
        if os.path.exists('%s/../%s' % (r.folder, url)):
            form[0][0].insert(0, TR(TD(A(self.messages.download_template,
                                         _href=url)),
                                    _id='template__row'))

    # Process the form
    if form.accepts(r.post_vars, current.session, formname='upload_form'):
        form_vars = form.vars
        table.insert(file=form_vars.file,
                     replace_option=form_vars.get('replace_option'))
        if self.csv_extra_fields:
            # Collect the extra field values as a label:value dict
            extra_data = self.csv_extra_data = Storage()
            for f in self.csv_extra_fields:
                label = f.get('label')
                if not label:
                    continue
                field = f.get('field')
                value = f.get('value')
                if field:
                    if field.name in form_vars:
                        data = form_vars[field.name]
                    else:
                        data = field.default
                    value = data
                    # Try to resolve the raw value against the field's options
                    requires = field.requires
                    if not isinstance(requires, (list, tuple)):
                        requires = [requires]
                    if requires:
                        requires = requires[0]
                        if isinstance(requires, IS_EMPTY_OR):
                            requires = requires.other
                        try:
                            options = requires.options()
                        except Exception:
                            # Validator without options => use the raw value
                            pass
                        else:
                            for (k, v) in options:
                                if k == str(data):
                                    value = v
                                    break
                if hasattr(value, 'm'):
                    # Presumably a lazyT => use the untranslated string
                    # NOTE(review): confirm .m is the intended attribute
                    value = value.m
                elif value is None:
                    continue
                extra_data[label] = value

    s3.no_formats = True
    return form
def _create_upload_dataTable(self):
    """
    List of previous Import jobs
    """
    db = current.db
    request = self.request
    controller = self.controller
    function = self.function
    s3 = current.response.s3
    table = self.upload_table

    # Restrict the list to jobs of this controller/function
    s3.filter = (table.controller == controller) & \
                (table.function == function)

    self._use_upload_table()
    output = {}
    self._use_controller_table()

    if request.representation == 'aadata':
        # AJAX data request => no action configuration required
        return output

    def status_ids(query):
        # Helper: job IDs (as strings) matching a status query
        return [str(job.id) for job in db(query).select(table.id)]

    # Unfinished jobs can be opened, finished jobs (status 3) only viewed
    restrict_open = status_ids(table.status != 3)
    restrict_view = status_ids(table.status == 3)

    job_url = URL(r=request, c=controller, f=function,
                  args=['import'],
                  vars={'job': '[id]'})
    delete_url = URL(r=request, c=controller, f=function,
                     args=['import'],
                     vars={'job': '[id]', 'delete': 'True'})
    s3.actions = [{'label': s3_str(self.messages.open_btn),
                   'restrict': restrict_open,
                   'url': job_url,
                   '_class': 'action-btn',
                   },
                  {'label': s3_str(self.messages.view_btn),
                   'restrict': restrict_view,
                   'url': job_url,
                   '_class': 'action-btn',
                   },
                  {'label': s3_str(self.messages.delete_btn),
                   'url': delete_url,
                   '_class': 'delete-btn',
                   },
                  ]

    # Row styling by job status (1 => alert, 2 => warning)
    s3.dataTableStyleAlert = status_ids(table.status == 1)
    s3.dataTableStyleWarning = status_ids(table.status == 2)

    return output
def _create_import_item_dataTable(self, upload_id, job_id):
    """
    Build the dataTable of import items for an import job

    Args:
        upload_id: the upload record ID
        job_id: the import job ID
    """
    s3 = current.response.s3

    represent = {'s3_import_item.element': self._item_element_represent}

    self._use_import_item_table(job_id)
    table = self.table

    # All items of this job belonging to the controller resource
    query = (table.job_id == job_id) & \
            (table.tablename == self.controller_tablename)

    # Partition the items: error-free items are pre-selected for import,
    # items with errors are collected for row highlighting
    selectable = []
    erroneous = []
    for item in current.db(query).select(table.id, table.error):
        if item.error:
            erroneous.append(str(item.id))
        else:
            selectable.append('%s' % item.id)

    s3.filter = query

    if self.ajax:
        # AJAX request => return the raw item data
        resource = self.resource
        resource.add_filter(query)
        data = resource.select(['id', 'element', 'error'], limit=None)['rows']
        return (upload_id, selectable, data)

    s3.actions = [{'label': s3_str(self.messages.item_show_details),
                   '_class': 'action-btn toggle-item',
                   },
                  ]
    s3.jquery_ready.append("\n$('#import-items').on('click','.toggle-item',function(){$('.importItem.item-'+$(this).attr('db_id')).toggle();})")

    output = self._dataTable(['id', 'element', 'error'],
                             represent=represent,
                             ajax_item_id=upload_id,
                             dt_bulk_select=selectable)

    self._use_controller_table()

    if self.request.representation == 'aadata':
        return output

    # Highlight items with errors
    s3.dataTableStyleWarning = erroneous

    # Attach the upload ID so the item selection can be committed
    form = output['items']
    form.append(INPUT(_type='hidden',
                      _id='importUploadID',
                      _name='job',
                      _value='%s' % upload_id))

    return output
def _generate_import_job(self, upload_id, infile, file_format, stylesheet=None, commit_job=False):
    """
    Take an s3_import_upload record and generate the import job

    Args:
        upload_id: the upload record ID
        infile: the uploaded file
        file_format: the announced source file format
        stylesheet: path to the transformation stylesheet
                    (auto-detected if None)
        commit_job: whether to commit the job immediately

    Returns:
        True on success, otherwise None (with self.error or
        self.warning set)
    """
    # Map the announced file format to the importable source format
    formats = {'csv': 'csv',
               'comma-separated-values': 'csv',
               'xls': 'xls',
               'xlsx': 'xlsx',
               'xlsm': 'xlsx',
               }
    fmt = formats.get(file_format)
    if fmt is None:
        msg = self.messages.unsupported_file_type % file_format
        self.error = msg
        current.log.debug(msg)
        return None
    src = infile

    # Get the stylesheet
    if stylesheet is None:
        stylesheet = self._get_stylesheet()
    if stylesheet is None:
        return None

    request = self.request
    resource = request.resource

    # Import into the controller table
    self.table = self.controller_table
    self.tablename = self.controller_tablename

    # Pass the xsltmode URL variable through to the transformation
    args = {}
    mode = request.get_vars.get('xsltmode', None)
    if mode is not None:
        args['mode'] = mode

    # Generate (and optionally commit) the import job
    resource.import_xml(src,
                        format=fmt,
                        extra_data=self.csv_extra_data,
                        stylesheet=stylesheet,
                        ignore_errors=True,
                        commit_job=commit_job,
                        **args)

    job = resource.job
    if job is None:
        if resource.error:
            # Error in the source => fail
            self.error = resource.error
        else:
            # Nothing to import => warn
            self.warning = self.messages.no_records_to_import
        return None

    db = current.db
    job_id = job.job_id

    # Collect any element-level errors for the error report
    errors = current.xml.collect_errors(job)
    if errors:
        current.response.s3.error_report = errors

    # Link the upload record to the generated job
    db(self.upload_table.id == upload_id).update(job_id=job_id)
    db.commit()

    self.job_id = job_id
    return True
def _get_stylesheet(self, file_format='csv'):
    """
    Get the stylesheet for transformation of the import

    Args:
        file_format: the import source file format

    Returns:
        the path to the stylesheet, or None if no stylesheet was
        found (with self.error set)
    """
    if file_format == 'csv':
        xslt_path = os.path.join(self.xslt_path, 's3csv')
    else:
        # Non-CSV formats use the generic import stylesheet of the format
        return os.path.join(self.xslt_path, file_format, 'import.xsl')

    # Determine the CSV stylesheet path
    if self.csv_stylesheet:
        if isinstance(self.csv_stylesheet, (tuple, list)):
            stylesheet = os.path.join(xslt_path, *self.csv_stylesheet)
        else:
            stylesheet = os.path.join(xslt_path,
                                      self.controller,
                                      self.csv_stylesheet)
    else:
        # Default: stylesheet named after the controller function
        xslt_filename = '%s.%s' % (self.function, self.xslt_extension)
        stylesheet = os.path.join(xslt_path,
                                  self.controller,
                                  xslt_filename)

    if not os.path.exists(stylesheet):
        msg = self.messages.stylesheet_not_found % stylesheet
        self.error = msg
        current.log.debug(msg)
        return None

    return stylesheet
def _commit_import_job(self, upload_id, items):
    """
    Save all of the selected import items

    Args:
        upload_id: the upload record ID
        items: the IDs (as strings) of the items selected for import

    Returns:
        True if the import completed without errors, else False
    """
    db = current.db
    resource = self.request.resource

    self.importDetails = {}

    # Look up the import job for this upload
    table = self.upload_table
    row = db(table.id == upload_id).select(table.job_id,
                                           table.replace_option,
                                           limitby=(0, 1)).first()
    if row is None:
        return False
    job_id = row.job_id
    current.response.s3.import_replace = row.replace_option

    itemTable = S3ImportJob.define_item_table()
    if itemTable is not None:
        # Remove all items that were not selected for import
        rows = self._get_all_items(upload_id, as_string=True)
        self._store_import_details(job_id, 'preDelete')
        for _id in rows:
            if str(_id) not in items:
                db(itemTable.id == _id).delete()

        # Commit the remaining items to the controller table
        self.table = self.controller_table
        self.tablename = self.controller_tablename
        self._store_import_details(job_id, 'preImportTree')
        resource.import_xml(None, job_id=job_id, ignore_errors=True)

    return resource.error is None
def _store_import_details(self, job_id, key):
    """
    Store the details from an import job

    Args:
        job_id: the import job ID
        key: the key under which to store the details
    """
    item_table = S3ImportJob.define_item_table()

    query = (item_table.job_id == job_id) & \
            (item_table.tablename == self.controller_tablename)

    details = []
    for record in current.db(query).select(item_table.data,
                                           item_table.error):
        details.append({'data': record.data, 'error': record.error})

    self.importDetails[key] = details
def _update_upload_job(self, upload_id):
    """
    Record the results from the import and change the status of
    the upload job

    Args:
        upload_id: the upload record ID

    Returns:
        tuple (records imported, errors, ignored)

    TODO:
        report errors in referenced records, too
    """
    resource = self.request.resource
    db = current.db

    details = self.importDetails
    total_pre_delete = len(details['preDelete'])
    total_pre_import = len(details['preImportTree'])
    total_ignored = total_pre_delete - total_pre_import

    error_tree = resource.error_tree
    if error_tree is None:
        total_errors = 0
    else:
        expr = "resource[@name='%s']" % resource.tablename
        total_errors = len(error_tree.findall(expr))

    total_records = max(total_pre_import - total_errors, 0)

    # Update the job record with the summary counts and final status
    db(self.upload_table.id == upload_id).update(summary_added=total_records,
                                                 summary_error=total_errors,
                                                 summary_ignored=total_ignored,
                                                 status=3)
    db.commit()

    return (total_records, total_errors, total_ignored)
def _display_completed_job(self, totals, timestmp=None):
    """
    Generate a summary flash message for a completed import job

    Args:
        totals: the job totals as tuple
                (total imported, total errors, total ignored)
        timestmp: the timestamp of the completion
    """
    messages = self.messages
    msg = '%s - %s - %s' % (messages.commit_total_records_imported,
                            messages.commit_total_errors,
                            messages.commit_total_records_ignored)
    msg = msg % totals

    session = current.session
    if timestmp is not None:
        session.flash = messages.job_completed % \
                        (self.date_represent(timestmp), msg)
    elif totals[1] != 0:
        # Errors occurred => error notification
        session.error = msg
    elif totals[2] != 0:
        # Records were ignored => warning notification
        session.warning = msg
    else:
        session.flash = msg
76bf3daddb2b3959441426e499bb11a46c6a14ef44c7085644601d00097c8ab1 | def _dataTable(self, list_fields, represent=None, ajax_item_id=None, dt_bulk_select=None):
"""
Get the data for the dataTable, either as the initial HTML
representation or as an Ajax ("aadata") update; additional data
is cached to limit calls back to the server.

Args:
    list_fields: list of field names
    represent: dict of field callback functions, keyed on the
               field identifier, to change how the data will be
               displayed
    ajax_item_id: the upload/job ID for the Ajax update URL
    dt_bulk_select: the initially selected items for bulk action

Returns:
    a dict; in HTML representation it holds the items table,
    in Ajax representation the JSON response

Note:
    recordsTotal is the number of records in the filtered data
    set and recordsFiltered the number of records to display;
    start/limit describe the window into the ordered set
    (limit - recordsFiltered = total cached)
"""
from .s3data import S3DataTable
request = self.request
resource = self.resource
s3 = current.response.s3
# Apply any response-level filter before counting/selecting
if (s3.filter is not None):
self.resource.add_filter(s3.filter)
representation = request.representation
totalrows = None
# Ajax update: apply the dataTable search/sort parameters as filter
if (representation == 'aadata'):
(searchq, orderby, left) = resource.datatable_filter(list_fields, request.get_vars)
if (searchq is not None):
# The unfiltered total must be counted before adding the search filter
totalrows = resource.count()
resource.add_filter(searchq)
else:
(orderby, left) = (None, None)
# Pagination window: taken from the request for Ajax updates,
# otherwise the first page with the standard page length
if (representation == 'aadata'):
get_vars = request.get_vars
start = get_vars.get('displayStart', None)
limit = get_vars.get('pageLength', None)
# "draw" is the DataTables request sequence number, echoed back
draw = int((get_vars.draw or 0))
else:
start = 0
limit = s3.ROWSPERPAGE
if (limit is not None):
try:
start = int(start)
limit = int(limit)
except ValueError:
# Invalid pagination parameters => extract all matching records
start = None
limit = None
else:
start = None
# Default order: items with errors first
if (not orderby):
orderby = (~ resource.table.error)
data = resource.select(list_fields, start=start, limit=limit, count=True, orderby=orderby, left=left)
rows = data['rows']
displayrows = data['numrows']
if (totalrows is None):
totalrows = displayrows
# Apply the per-field representation callbacks, if any
if represent:
_represent = list(represent.items())
for row in rows:
record_id = row['s3_import_item.id']
for (column, method) in _represent:
if (column in row):
row[column] = method(record_id, row[column])
# Build the data table
rfields = resource.resolve_selectors(list_fields)[0]
dt = S3DataTable(rfields, rows, orderby=orderby)
datatable_id = 'import-items'
if (representation == 'aadata'):
# JSON response for server-side pagination
output = dt.json(totalrows, displayrows, datatable_id, draw, dt_bulk_actions=[current.T('Import')])
else:
# Initial HTML representation, with the Ajax-URL for updates
url = ('/%s/%s/%s/import.aadata?job=%s' % (request.application, request.controller, request.function, ajax_item_id))
items = dt.html(totalrows, displayrows, datatable_id, dt_ajax_url=url, dt_bulk_actions=[current.T('Import')], dt_bulk_selected=dt_bulk_select)
output = {'items': items}
current.response.s3.dataTableID = [datatable_id]
return output | Method to get the data for the dataTable
This can be either a raw html representation or
an Ajax call update
Additional data will be cached to limit calls back to the server
Args:
list_fields: list of field names
sort_by: list of sort by columns
represent: a dict of field callback functions used
to change how the data will be displayed
keyed on the field identifier
Returns:
a dict()
In html representations this will be a table of the data
plus the sortby instructions
In ajax this will be a json response
In addition the following values will be made available:
recordsTotal Number of records in the filtered data set
recordsFiltered Number of records to display
start Start point in the ordered data set
limit Number of records in the ordered set
NOTE: limit - recordsFiltered = total cached | modules/s3/s3import.py | _dataTable | annehaley/eden | 205 | python | def _dataTable(self, list_fields, represent=None, ajax_item_id=None, dt_bulk_select=None):
'\n Method to get the data for the dataTable\n This can be either a raw html representation or\n and ajax call update\n Additional data will be cached to limit calls back to the server\n\n Args:\n list_fields: list of field names\n sort_by: list of sort by columns\n represent: a dict of field callback functions used\n to change how the data will be displayed\n keyed on the field identifier\n\n Returns:\n a dict()\n In html representations this will be a table of the data\n plus the sortby instructions\n In ajax this will be a json response\n\n In addition the following values will be made available:\n recordsTotal Number of records in the filtered data set\n recordsFiltered Number of records to display\n start Start point in the ordered data set\n limit Number of records in the ordered set\n NOTE: limit - recordsFiltered = total cached\n '
from .s3data import S3DataTable
request = self.request
resource = self.resource
s3 = current.response.s3
if (s3.filter is not None):
self.resource.add_filter(s3.filter)
representation = request.representation
totalrows = None
if (representation == 'aadata'):
(searchq, orderby, left) = resource.datatable_filter(list_fields, request.get_vars)
if (searchq is not None):
totalrows = resource.count()
resource.add_filter(searchq)
else:
(orderby, left) = (None, None)
if (representation == 'aadata'):
get_vars = request.get_vars
start = get_vars.get('displayStart', None)
limit = get_vars.get('pageLength', None)
draw = int((get_vars.draw or 0))
else:
start = 0
limit = s3.ROWSPERPAGE
if (limit is not None):
try:
start = int(start)
limit = int(limit)
except ValueError:
start = None
limit = None
else:
start = None
if (not orderby):
orderby = (~ resource.table.error)
data = resource.select(list_fields, start=start, limit=limit, count=True, orderby=orderby, left=left)
rows = data['rows']
displayrows = data['numrows']
if (totalrows is None):
totalrows = displayrows
if represent:
_represent = list(represent.items())
for row in rows:
record_id = row['s3_import_item.id']
for (column, method) in _represent:
if (column in row):
row[column] = method(record_id, row[column])
rfields = resource.resolve_selectors(list_fields)[0]
dt = S3DataTable(rfields, rows, orderby=orderby)
datatable_id = 'import-items'
if (representation == 'aadata'):
output = dt.json(totalrows, displayrows, datatable_id, draw, dt_bulk_actions=[current.T('Import')])
else:
url = ('/%s/%s/%s/import.aadata?job=%s' % (request.application, request.controller, request.function, ajax_item_id))
items = dt.html(totalrows, displayrows, datatable_id, dt_ajax_url=url, dt_bulk_actions=[current.T('Import')], dt_bulk_selected=dt_bulk_select)
output = {'items': items}
current.response.s3.dataTableID = [datatable_id]
return output | def _dataTable(self, list_fields, represent=None, ajax_item_id=None, dt_bulk_select=None):
'\n Method to get the data for the dataTable\n This can be either a raw html representation or\n and ajax call update\n Additional data will be cached to limit calls back to the server\n\n Args:\n list_fields: list of field names\n sort_by: list of sort by columns\n represent: a dict of field callback functions used\n to change how the data will be displayed\n keyed on the field identifier\n\n Returns:\n a dict()\n In html representations this will be a table of the data\n plus the sortby instructions\n In ajax this will be a json response\n\n In addition the following values will be made available:\n recordsTotal Number of records in the filtered data set\n recordsFiltered Number of records to display\n start Start point in the ordered data set\n limit Number of records in the ordered set\n NOTE: limit - recordsFiltered = total cached\n '
from .s3data import S3DataTable
request = self.request
resource = self.resource
s3 = current.response.s3
if (s3.filter is not None):
self.resource.add_filter(s3.filter)
representation = request.representation
totalrows = None
if (representation == 'aadata'):
(searchq, orderby, left) = resource.datatable_filter(list_fields, request.get_vars)
if (searchq is not None):
totalrows = resource.count()
resource.add_filter(searchq)
else:
(orderby, left) = (None, None)
if (representation == 'aadata'):
get_vars = request.get_vars
start = get_vars.get('displayStart', None)
limit = get_vars.get('pageLength', None)
draw = int((get_vars.draw or 0))
else:
start = 0
limit = s3.ROWSPERPAGE
if (limit is not None):
try:
start = int(start)
limit = int(limit)
except ValueError:
start = None
limit = None
else:
start = None
if (not orderby):
orderby = (~ resource.table.error)
data = resource.select(list_fields, start=start, limit=limit, count=True, orderby=orderby, left=left)
rows = data['rows']
displayrows = data['numrows']
if (totalrows is None):
totalrows = displayrows
if represent:
_represent = list(represent.items())
for row in rows:
record_id = row['s3_import_item.id']
for (column, method) in _represent:
if (column in row):
row[column] = method(record_id, row[column])
rfields = resource.resolve_selectors(list_fields)[0]
dt = S3DataTable(rfields, rows, orderby=orderby)
datatable_id = 'import-items'
if (representation == 'aadata'):
output = dt.json(totalrows, displayrows, datatable_id, draw, dt_bulk_actions=[current.T('Import')])
else:
url = ('/%s/%s/%s/import.aadata?job=%s' % (request.application, request.controller, request.function, ajax_item_id))
items = dt.html(totalrows, displayrows, datatable_id, dt_ajax_url=url, dt_bulk_actions=[current.T('Import')], dt_bulk_selected=dt_bulk_select)
output = {'items': items}
current.response.s3.dataTableID = [datatable_id]
return output<|docstring|>Method to get the data for the dataTable
This can be either a raw html representation or
and ajax call update
Additional data will be cached to limit calls back to the server
Args:
list_fields: list of field names
sort_by: list of sort by columns
represent: a dict of field callback functions used
to change how the data will be displayed
keyed on the field identifier
Returns:
a dict()
In html representations this will be a table of the data
plus the sortby instructions
In ajax this will be a json response
In addition the following values will be made available:
recordsTotal Number of records in the filtered data set
recordsFiltered Number of records to display
start Start point in the ordered data set
limit Number of records in the ordered set
NOTE: limit - recordsFiltered = total cached<|endoftext|> |
fff6ac03b27d4300c787c36743c614573c9c326128a863f11fa389308142564b | def _item_element_represent(self, item_id, value):
'\n Represent the element in an import item for dataTable display\n\n Args:\n value: the string containing the element\n '
try:
element = etree.fromstring(value)
except:
return DIV(value)
db = current.db
tablename = element.get('name')
table = db[tablename]
output = DIV()
details = TABLE(_class=('importItem item-%s' % item_id))
(header, rows) = self._add_item_details(element.findall('data'), table)
if (header is not None):
output.append(header)
components = element.findall('resource')
s3db = current.s3db
for component in components:
ctablename = component.get('name')
ctable = s3db.table(ctablename)
if (not ctable):
continue
self._add_item_details(component.findall('data'), ctable, details=rows, prefix=True)
if rows:
details.append(TBODY(rows))
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH(('%s:' % current.T('Errors'))), TD(UL([LI(e) for e in errors])))))
if ((rows == []) and (components == [])):
refdetail = TABLE(_class=('importItem item-%s' % item_id))
references = element.findall('reference')
for reference in references:
tuid = reference.get('tuid')
resource = reference.get('resource')
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
return output | Represent the element in an import item for dataTable display
Args:
value: the string containing the element | modules/s3/s3import.py | _item_element_represent | annehaley/eden | 205 | python | def _item_element_represent(self, item_id, value):
'\n Represent the element in an import item for dataTable display\n\n Args:\n value: the string containing the element\n '
try:
element = etree.fromstring(value)
except:
return DIV(value)
db = current.db
tablename = element.get('name')
table = db[tablename]
output = DIV()
details = TABLE(_class=('importItem item-%s' % item_id))
(header, rows) = self._add_item_details(element.findall('data'), table)
if (header is not None):
output.append(header)
components = element.findall('resource')
s3db = current.s3db
for component in components:
ctablename = component.get('name')
ctable = s3db.table(ctablename)
if (not ctable):
continue
self._add_item_details(component.findall('data'), ctable, details=rows, prefix=True)
if rows:
details.append(TBODY(rows))
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH(('%s:' % current.T('Errors'))), TD(UL([LI(e) for e in errors])))))
if ((rows == []) and (components == [])):
refdetail = TABLE(_class=('importItem item-%s' % item_id))
references = element.findall('reference')
for reference in references:
tuid = reference.get('tuid')
resource = reference.get('resource')
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
return output | def _item_element_represent(self, item_id, value):
'\n Represent the element in an import item for dataTable display\n\n Args:\n value: the string containing the element\n '
try:
element = etree.fromstring(value)
except:
return DIV(value)
db = current.db
tablename = element.get('name')
table = db[tablename]
output = DIV()
details = TABLE(_class=('importItem item-%s' % item_id))
(header, rows) = self._add_item_details(element.findall('data'), table)
if (header is not None):
output.append(header)
components = element.findall('resource')
s3db = current.s3db
for component in components:
ctablename = component.get('name')
ctable = s3db.table(ctablename)
if (not ctable):
continue
self._add_item_details(component.findall('data'), ctable, details=rows, prefix=True)
if rows:
details.append(TBODY(rows))
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH(('%s:' % current.T('Errors'))), TD(UL([LI(e) for e in errors])))))
if ((rows == []) and (components == [])):
refdetail = TABLE(_class=('importItem item-%s' % item_id))
references = element.findall('reference')
for reference in references:
tuid = reference.get('tuid')
resource = reference.get('resource')
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
return output<|docstring|>Represent the element in an import item for dataTable display
Args:
value: the string containing the element<|endoftext|> |
8102c72de8eecdda425593f689a5a6cf5fdc8fe60a3923cc2d5a372e45ee92f6 | @staticmethod
def _add_item_details(data, table, details=None, prefix=False):
    """
    Add details of the item element

    Args:
        data: the list of data elements in the item element
        table: the table for the data
        details: the existing details rows list (to append to)
        prefix: prefix the field name with the table name

    Returns:
        tuple (header, details): a representative header element
        and the list of detail rows
    """
    tablename = table._tablename
    if details is None:
        details = []
    first = None
    firstString = None
    header = None
    for child in data:
        f = child.get('field', None)
        if f not in table.fields:
            continue
        elif f == 'wkt':
            # Skip bulky WKT geometry data
            continue
        field = table[f]
        ftype = str(field.type)
        value = child.get('value', None)
        if not value:
            value = current.xml.xml_decode(child.text)
        try:
            value = S3Importer._decode_data(field, value)
        except (TypeError, ValueError):
            # Keep the raw value if it cannot be decoded
            pass
        if value:
            value = s3_str(value)
        else:
            value = ''
        # Fixed: identity comparisons with None (were !=)
        if f is not None and value is not None:
            headerText = P(B('%s: ' % f), value)
            # Remember the first row as fall-back header...
            if not first:
                first = headerText
            # ...prefer the first string-type field...
            if ftype == 'string' and not firstString:
                firstString = headerText
            # ...but a "name" field beats everything
            if f == 'name':
                header = headerText
            if prefix:
                details.append(TR(TH('%s.%s:' % (tablename, f)), TD(value)))
            else:
                details.append(TR(TH('%s:' % f), TD(value)))
    if not header:
        header = firstString if firstString else first
    return (header, details)
Args:
data: the list of data elements in the item element
table: the table for the data
details: the existing details rows list (to append to) | modules/s3/s3import.py | _add_item_details | annehaley/eden | 205 | python | @staticmethod
def _add_item_details(data, table, details=None, prefix=False):
'\n Add details of the item element\n\n Args:\n data: the list of data elements in the item element\n table: the table for the data\n details: the existing details rows list (to append to)\n '
tablename = table._tablename
if (details is None):
details = []
first = None
firstString = None
header = None
for child in data:
f = child.get('field', None)
if (f not in table.fields):
continue
elif (f == 'wkt'):
continue
field = table[f]
ftype = str(field.type)
value = child.get('value', None)
if (not value):
value = current.xml.xml_decode(child.text)
try:
value = S3Importer._decode_data(field, value)
except (TypeError, ValueError):
pass
if value:
value = s3_str(value)
else:
value =
if ((f != None) and (value != None)):
headerText = P(B(('%s: ' % f)), value)
if (not first):
first = headerText
if ((ftype == 'string') and (not firstString)):
firstString = headerText
if (f == 'name'):
header = headerText
if prefix:
details.append(TR(TH(('%s.%s:' % (tablename, f))), TD(value)))
else:
details.append(TR(TH(('%s:' % f)), TD(value)))
if (not header):
if firstString:
header = firstString
else:
header = first
return (header, details) | @staticmethod
def _add_item_details(data, table, details=None, prefix=False):
'\n Add details of the item element\n\n Args:\n data: the list of data elements in the item element\n table: the table for the data\n details: the existing details rows list (to append to)\n '
tablename = table._tablename
if (details is None):
details = []
first = None
firstString = None
header = None
for child in data:
f = child.get('field', None)
if (f not in table.fields):
continue
elif (f == 'wkt'):
continue
field = table[f]
ftype = str(field.type)
value = child.get('value', None)
if (not value):
value = current.xml.xml_decode(child.text)
try:
value = S3Importer._decode_data(field, value)
except (TypeError, ValueError):
pass
if value:
value = s3_str(value)
else:
value =
if ((f != None) and (value != None)):
headerText = P(B(('%s: ' % f)), value)
if (not first):
first = headerText
if ((ftype == 'string') and (not firstString)):
firstString = headerText
if (f == 'name'):
header = headerText
if prefix:
details.append(TR(TH(('%s.%s:' % (tablename, f))), TD(value)))
else:
details.append(TR(TH(('%s:' % f)), TD(value)))
if (not header):
if firstString:
header = firstString
else:
header = first
return (header, details)<|docstring|>Add details of the item element
Args:
data: the list of data elements in the item element
table: the table for the data
details: the existing details rows list (to append to)<|endoftext|> |
285913e9e5307ef6897e57bc53b51c31ba887d4d754cf323924cee05999bd3c3 | @staticmethod
def _decode_data(field, value):
'\n Try to decode string data into their original type\n\n Args:\n field: the Field instance\n value: the stringified value\n\n TODO:\n Replace this by ordinary decoder\n '
if ((field.type == 'string') or (field.type == 'password') or (field.type == 'upload') or (field.type == 'text')):
return value
elif ((field.type == 'integer') or (field.type == 'id')):
return int(value)
elif ((field.type == 'double') or (field.type == 'decimal')):
return float(value)
elif (field.type == 'boolean'):
if (value and (not (str(value)[:1].upper() in ['F', '0']))):
return 'T'
else:
return 'F'
elif (field.type == 'date'):
return value
elif (field.type == 'time'):
return value
elif (field.type == 'datetime'):
return value
else:
return value | Try to decode string data into their original type
Args:
field: the Field instance
value: the stringified value
TODO:
Replace this by ordinary decoder | modules/s3/s3import.py | _decode_data | annehaley/eden | 205 | python | @staticmethod
def _decode_data(field, value):
'\n Try to decode string data into their original type\n\n Args:\n field: the Field instance\n value: the stringified value\n\n TODO:\n Replace this by ordinary decoder\n '
if ((field.type == 'string') or (field.type == 'password') or (field.type == 'upload') or (field.type == 'text')):
return value
elif ((field.type == 'integer') or (field.type == 'id')):
return int(value)
elif ((field.type == 'double') or (field.type == 'decimal')):
return float(value)
elif (field.type == 'boolean'):
if (value and (not (str(value)[:1].upper() in ['F', '0']))):
return 'T'
else:
return 'F'
elif (field.type == 'date'):
return value
elif (field.type == 'time'):
return value
elif (field.type == 'datetime'):
return value
else:
return value | @staticmethod
def _decode_data(field, value):
'\n Try to decode string data into their original type\n\n Args:\n field: the Field instance\n value: the stringified value\n\n TODO:\n Replace this by ordinary decoder\n '
if ((field.type == 'string') or (field.type == 'password') or (field.type == 'upload') or (field.type == 'text')):
return value
elif ((field.type == 'integer') or (field.type == 'id')):
return int(value)
elif ((field.type == 'double') or (field.type == 'decimal')):
return float(value)
elif (field.type == 'boolean'):
if (value and (not (str(value)[:1].upper() in ['F', '0']))):
return 'T'
else:
return 'F'
elif (field.type == 'date'):
return value
elif (field.type == 'time'):
return value
elif (field.type == 'datetime'):
return value
else:
return value<|docstring|>Try to decode string data into their original type
Args:
field: the Field instance
value: the stringified value
TODO:
Replace this by ordinary decoder<|endoftext|> |
98ec711806e5930a986cca4c99e0a05bd7f86700a0069190ed182735ab59ba74 | @staticmethod
def date_represent(date_obj, fmt='%d %B %Y, %I:%M%p'):
    """
    Represent a datetime object as string

    Args:
        date_obj: the datetime object
        fmt: the strftime() format string (new optional
             parameter; defaults to the previous fixed format,
             so existing callers are unaffected)

    Returns:
        the formatted date/time string

    TODO:
        Replace by S3DateTime method?
    """
    return date_obj.strftime(fmt)
Args:
date_obj: the datetime object
TODO:
Replace by S3DateTime method? | modules/s3/s3import.py | date_represent | annehaley/eden | 205 | python | @staticmethod
def date_represent(date_obj):
'\n Represent a datetime object as string\n\n Args:\n date_obj: the datetime object\n\n TODO:\n Replace by S3DateTime method?\n '
return date_obj.strftime('%d %B %Y, %I:%M%p') | @staticmethod
def date_represent(date_obj):
'\n Represent a datetime object as string\n\n Args:\n date_obj: the datetime object\n\n TODO:\n Replace by S3DateTime method?\n '
return date_obj.strftime('%d %B %Y, %I:%M%p')<|docstring|>Represent a datetime object as string
Args:
date_obj: the datetime object
TODO:
Replace by S3DateTime method?<|endoftext|> |
fff7cfb1106e955ff15ad4b44dfa7b70c1b481823204d4946e888776487bd45b | def _process_item_list(self, upload_id, req_vars):
'\n Get the list of IDs for the selected items from the "mode"\n and "selected" request variables\n\n Args:\n upload_id: the upload_id\n vars: the request variables\n '
items = None
if ('mode' in req_vars):
mode = req_vars['mode']
selected = req_vars.get('selected', [])
if (mode == 'Inclusive'):
items = selected
elif (mode == 'Exclusive'):
all_items = self._get_all_items(upload_id, as_string=True)
items = [i for i in all_items if (i not in selected)]
return items | Get the list of IDs for the selected items from the "mode"
and "selected" request variables
Args:
upload_id: the upload_id
vars: the request variables | modules/s3/s3import.py | _process_item_list | annehaley/eden | 205 | python | def _process_item_list(self, upload_id, req_vars):
'\n Get the list of IDs for the selected items from the "mode"\n and "selected" request variables\n\n Args:\n upload_id: the upload_id\n vars: the request variables\n '
items = None
if ('mode' in req_vars):
mode = req_vars['mode']
selected = req_vars.get('selected', [])
if (mode == 'Inclusive'):
items = selected
elif (mode == 'Exclusive'):
all_items = self._get_all_items(upload_id, as_string=True)
items = [i for i in all_items if (i not in selected)]
return items | def _process_item_list(self, upload_id, req_vars):
'\n Get the list of IDs for the selected items from the "mode"\n and "selected" request variables\n\n Args:\n upload_id: the upload_id\n vars: the request variables\n '
items = None
if ('mode' in req_vars):
mode = req_vars['mode']
selected = req_vars.get('selected', [])
if (mode == 'Inclusive'):
items = selected
elif (mode == 'Exclusive'):
all_items = self._get_all_items(upload_id, as_string=True)
items = [i for i in all_items if (i not in selected)]
return items<|docstring|>Get the list of IDs for the selected items from the "mode"
and "selected" request variables
Args:
upload_id: the upload_id
vars: the request variables<|endoftext|> |
b135d3a513c43063a496cb8fced492def87f4cead0749cd1d0f1a83a52e08ce2 | def _get_all_items(self, upload_id, as_string=False):
"""
Get a list of the record IDs of all import items for
the given upload ID

Args:
    upload_id: the upload ID
    as_string: represent each ID as string

Returns:
    list of import item record IDs
"""
item_table = S3ImportJob.define_item_table()
upload_table = self.upload_table
# Join the upload record to its job's import items
query = (((upload_table.id == upload_id) & (item_table.job_id == upload_table.job_id)) & (item_table.tablename == self.controller_tablename))
rows = current.db(query).select(item_table.id)
if as_string:
items = [str(row.id) for row in rows]
else:
items = [row.id for row in rows]
return items | Get a list of the record IDs of all import items for
the given upload ID
Args:
upload_id: the upload ID
as_string: represent each ID as string | modules/s3/s3import.py | _get_all_items | annehaley/eden | 205 | python | def _get_all_items(self, upload_id, as_string=False):
'\n Get a list of the record IDs of all import items for\n the the given upload ID\n\n Args:\n upload_id: the upload ID\n as_string: represent each ID as string\n '
item_table = S3ImportJob.define_item_table()
upload_table = self.upload_table
query = (((upload_table.id == upload_id) & (item_table.job_id == upload_table.job_id)) & (item_table.tablename == self.controller_tablename))
rows = current.db(query).select(item_table.id)
if as_string:
items = [str(row.id) for row in rows]
else:
items = [row.id for row in rows]
return items | def _get_all_items(self, upload_id, as_string=False):
'\n Get a list of the record IDs of all import items for\n the the given upload ID\n\n Args:\n upload_id: the upload ID\n as_string: represent each ID as string\n '
item_table = S3ImportJob.define_item_table()
upload_table = self.upload_table
query = (((upload_table.id == upload_id) & (item_table.job_id == upload_table.job_id)) & (item_table.tablename == self.controller_tablename))
rows = current.db(query).select(item_table.id)
if as_string:
items = [str(row.id) for row in rows]
else:
items = [row.id for row in rows]
return items<|docstring|>Get a list of the record IDs of all import items for
the the given upload ID
Args:
upload_id: the upload ID
as_string: represent each ID as string<|endoftext|> |
ec4eb4ae28df75b2abb14890e2d9709d0dafe1d0cc1987e94b204f13443c268d | def _use_upload_table(self):
"""
Set the resource and the table to the import upload table
(s3_import_upload)
"""
self.tablename = self.upload_tablename
# Instantiate the upload resource only once, then reuse it
if (self.upload_resource is None):
self.upload_resource = current.s3db.resource(self.tablename)
self.resource = self.upload_resource
self.table = self.upload_table | Set the resource and the table to being s3_import_upload | modules/s3/s3import.py | _use_upload_table | annehaley/eden | 205 | python | def _use_upload_table(self):
'\n \n '
self.tablename = self.upload_tablename
if (self.upload_resource is None):
self.upload_resource = current.s3db.resource(self.tablename)
self.resource = self.upload_resource
self.table = self.upload_table | def _use_upload_table(self):
'\n \n '
self.tablename = self.upload_tablename
if (self.upload_resource is None):
self.upload_resource = current.s3db.resource(self.tablename)
self.resource = self.upload_resource
self.table = self.upload_table<|docstring|>Set the resource and the table to being s3_import_upload<|endoftext|> |
897b7f1ef59dc96446be52115ccbc078fa94677e2207ef3217dce257727873b5 | def _use_controller_table(self):
"""
Set the resource and the table to be the imported resource
(the controller resource/table)
"""
self.resource = self.controller_resource
self.table = self.controller_table
self.tablename = self.controller_tablename | Set the resource and the table to be the imported resource | modules/s3/s3import.py | _use_controller_table | annehaley/eden | 205 | python | def _use_controller_table(self):
'\n \n '
self.resource = self.controller_resource
self.table = self.controller_table
self.tablename = self.controller_tablename | def _use_controller_table(self):
'\n \n '
self.resource = self.controller_resource
self.table = self.controller_table
self.tablename = self.controller_tablename<|docstring|>Set the resource and the table to be the imported resource<|endoftext|> |
b162809af2a1efdadef35dd94d3124f1d6f8e8baf41552adf82ad8cef9a0d949 | def _use_import_item_table(self, job_id):
"""
Set the resource and the table to s3_import_item

Note: the job_id parameter is not used by this body
"""
self.table = S3ImportJob.define_item_table()
self.tablename = S3ImportJob.ITEM_TABLE_NAME
# Fixed: identity comparison with None (was ==); instantiate
# the item resource only once, then reuse it
if self.item_resource is None:
    self.item_resource = current.s3db.resource(self.tablename)
self.resource = self.item_resource
'\n \n '
self.table = S3ImportJob.define_item_table()
self.tablename = S3ImportJob.ITEM_TABLE_NAME
if (self.item_resource == None):
self.item_resource = current.s3db.resource(self.tablename)
self.resource = self.item_resource | def _use_import_item_table(self, job_id):
'\n \n '
self.table = S3ImportJob.define_item_table()
self.tablename = S3ImportJob.ITEM_TABLE_NAME
if (self.item_resource == None):
self.item_resource = current.s3db.resource(self.tablename)
self.resource = self.item_resource<|docstring|>Set the resource and the table to being s3_import_item<|endoftext|> |
16093ba8501827e1e233f1cbfb40ba61c87ac30bfa765e33d4322e07a425e87f | def __define_table(self):
""" Configures the upload table (labels, validators, representations) """
T = current.T
request = current.request
self.upload_tablename = self.UPLOAD_TABLE_NAME
# Status codes for import uploads
import_upload_status = {1: T('Pending'), 2: T('In error'), 3: T('Completed')}
now = request.utcnow
# Instantiate the table, then configure its fields
table = self.define_upload_table()
table.file.upload_folder = os.path.join(request.folder, 'uploads')
messages = self.messages
table.file.comment = DIV(_class='tooltip', _title=('%s|%s' % (messages.import_file, messages.import_file_comment)))
table.file.label = messages.import_file
table.status.requires = IS_IN_SET(import_upload_status, zero=None)
table.status.represent = s3_options_represent(import_upload_status)
table.user_id.label = messages.user_name
table.user_id.represent = current.s3db.auth_UserRepresent(show_email=False, show_link=False)
# Timestamps: created_on fixed at creation, modified_on updated on write
table.created_on.default = now
table.created_on.represent = self.date_represent
table.modified_on.default = now
table.modified_on.update = now
table.modified_on.represent = self.date_represent
table.replace_option.label = T('Replace')
self.upload_table = current.db[self.UPLOAD_TABLE_NAME] | Configures the upload table | modules/s3/s3import.py | __define_table | annehaley/eden | 205 | python | def __define_table(self):
' '
T = current.T
request = current.request
self.upload_tablename = self.UPLOAD_TABLE_NAME
import_upload_status = {1: T('Pending'), 2: T('In error'), 3: T('Completed')}
now = request.utcnow
table = self.define_upload_table()
table.file.upload_folder = os.path.join(request.folder, 'uploads')
messages = self.messages
table.file.comment = DIV(_class='tooltip', _title=('%s|%s' % (messages.import_file, messages.import_file_comment)))
table.file.label = messages.import_file
table.status.requires = IS_IN_SET(import_upload_status, zero=None)
table.status.represent = s3_options_represent(import_upload_status)
table.user_id.label = messages.user_name
table.user_id.represent = current.s3db.auth_UserRepresent(show_email=False, show_link=False)
table.created_on.default = now
table.created_on.represent = self.date_represent
table.modified_on.default = now
table.modified_on.update = now
table.modified_on.represent = self.date_represent
table.replace_option.label = T('Replace')
self.upload_table = current.db[self.UPLOAD_TABLE_NAME] | def __define_table(self):
' '
T = current.T
request = current.request
self.upload_tablename = self.UPLOAD_TABLE_NAME
import_upload_status = {1: T('Pending'), 2: T('In error'), 3: T('Completed')}
now = request.utcnow
table = self.define_upload_table()
table.file.upload_folder = os.path.join(request.folder, 'uploads')
messages = self.messages
table.file.comment = DIV(_class='tooltip', _title=('%s|%s' % (messages.import_file, messages.import_file_comment)))
table.file.label = messages.import_file
table.status.requires = IS_IN_SET(import_upload_status, zero=None)
table.status.represent = s3_options_represent(import_upload_status)
table.user_id.label = messages.user_name
table.user_id.represent = current.s3db.auth_UserRepresent(show_email=False, show_link=False)
table.created_on.default = now
table.created_on.represent = self.date_represent
table.modified_on.default = now
table.modified_on.update = now
table.modified_on.represent = self.date_represent
table.replace_option.label = T('Replace')
self.upload_table = current.db[self.UPLOAD_TABLE_NAME]<|docstring|>Configures the upload table<|endoftext|> |
44852323f610fa9005900631b4a29e4aac9d79837fd59942da6880bc9707d480 | @classmethod
def define_upload_table(cls):
' Defines the upload table '
db = current.db
UPLOAD_TABLE_NAME = cls.UPLOAD_TABLE_NAME
if (UPLOAD_TABLE_NAME not in db):
db.define_table(UPLOAD_TABLE_NAME, Field('controller', readable=False, writable=False), Field('function', readable=False, writable=False), Field('file', 'upload', length=current.MAX_FILENAME_LENGTH, uploadfolder=os.path.join(current.request.folder, 'uploads', 'imports'), autodelete=True), Field('filename', readable=False, writable=False), Field('status', 'integer', default=1, readable=False, writable=False), Field('extra_data', readable=False, writable=False), Field('replace_option', 'boolean', default=False, readable=False, writable=False), Field('job_id', length=128, readable=False, writable=False), Field('user_id', 'integer', readable=False, writable=False), Field('created_on', 'datetime', readable=False, writable=False), Field('modified_on', 'datetime', readable=False, writable=False), Field('summary_added', 'integer', readable=False, writable=False), Field('summary_error', 'integer', readable=False, writable=False), Field('summary_ignored', 'integer', readable=False, writable=False), Field('completed_details', 'text', readable=False, writable=False))
return db[UPLOAD_TABLE_NAME] | Defines the upload table | modules/s3/s3import.py | define_upload_table | annehaley/eden | 205 | python | @classmethod
def define_upload_table(cls):
' '
db = current.db
UPLOAD_TABLE_NAME = cls.UPLOAD_TABLE_NAME
if (UPLOAD_TABLE_NAME not in db):
db.define_table(UPLOAD_TABLE_NAME, Field('controller', readable=False, writable=False), Field('function', readable=False, writable=False), Field('file', 'upload', length=current.MAX_FILENAME_LENGTH, uploadfolder=os.path.join(current.request.folder, 'uploads', 'imports'), autodelete=True), Field('filename', readable=False, writable=False), Field('status', 'integer', default=1, readable=False, writable=False), Field('extra_data', readable=False, writable=False), Field('replace_option', 'boolean', default=False, readable=False, writable=False), Field('job_id', length=128, readable=False, writable=False), Field('user_id', 'integer', readable=False, writable=False), Field('created_on', 'datetime', readable=False, writable=False), Field('modified_on', 'datetime', readable=False, writable=False), Field('summary_added', 'integer', readable=False, writable=False), Field('summary_error', 'integer', readable=False, writable=False), Field('summary_ignored', 'integer', readable=False, writable=False), Field('completed_details', 'text', readable=False, writable=False))
return db[UPLOAD_TABLE_NAME] | @classmethod
def define_upload_table(cls):
' '
db = current.db
UPLOAD_TABLE_NAME = cls.UPLOAD_TABLE_NAME
if (UPLOAD_TABLE_NAME not in db):
db.define_table(UPLOAD_TABLE_NAME, Field('controller', readable=False, writable=False), Field('function', readable=False, writable=False), Field('file', 'upload', length=current.MAX_FILENAME_LENGTH, uploadfolder=os.path.join(current.request.folder, 'uploads', 'imports'), autodelete=True), Field('filename', readable=False, writable=False), Field('status', 'integer', default=1, readable=False, writable=False), Field('extra_data', readable=False, writable=False), Field('replace_option', 'boolean', default=False, readable=False, writable=False), Field('job_id', length=128, readable=False, writable=False), Field('user_id', 'integer', readable=False, writable=False), Field('created_on', 'datetime', readable=False, writable=False), Field('modified_on', 'datetime', readable=False, writable=False), Field('summary_added', 'integer', readable=False, writable=False), Field('summary_error', 'integer', readable=False, writable=False), Field('summary_ignored', 'integer', readable=False, writable=False), Field('completed_details', 'text', readable=False, writable=False))
return db[UPLOAD_TABLE_NAME]<|docstring|>Defines the upload table<|endoftext|> |
7f5b90dd9acf95b7585175cedeb15e6f9265e81e5430b5ebdd7949ac9d1116c2 | def __init__(self, job):
'\n Constructor\n\n Args:\n job: the import job this item belongs to\n '
self.job = job
self.lock = False
self.error = None
self.item_id = uuid.uuid4()
self.id = None
self.uid = None
self.table = None
self.tablename = None
self.element = None
self.data = None
self.original = None
self.components = []
self.references = []
self.load_components = []
self.load_references = []
self.parent = None
self.skip = False
self.mci = 2
self.mtime = datetime.datetime.utcnow()
self.modified = True
self.conflict = False
self.strategy = job.strategy
self.update_policy = job.update_policy
self.conflict_policy = job.conflict_policy
self.method = None
self.onvalidation = None
self.onaccept = None
self.accepted = None
self.permitted = False
self.committed = False
self.update = [] | Constructor
Args:
job: the import job this item belongs to | modules/s3/s3import.py | __init__ | annehaley/eden | 205 | python | def __init__(self, job):
'\n Constructor\n\n Args:\n job: the import job this item belongs to\n '
self.job = job
self.lock = False
self.error = None
self.item_id = uuid.uuid4()
self.id = None
self.uid = None
self.table = None
self.tablename = None
self.element = None
self.data = None
self.original = None
self.components = []
self.references = []
self.load_components = []
self.load_references = []
self.parent = None
self.skip = False
self.mci = 2
self.mtime = datetime.datetime.utcnow()
self.modified = True
self.conflict = False
self.strategy = job.strategy
self.update_policy = job.update_policy
self.conflict_policy = job.conflict_policy
self.method = None
self.onvalidation = None
self.onaccept = None
self.accepted = None
self.permitted = False
self.committed = False
self.update = [] | def __init__(self, job):
'\n Constructor\n\n Args:\n job: the import job this item belongs to\n '
self.job = job
self.lock = False
self.error = None
self.item_id = uuid.uuid4()
self.id = None
self.uid = None
self.table = None
self.tablename = None
self.element = None
self.data = None
self.original = None
self.components = []
self.references = []
self.load_components = []
self.load_references = []
self.parent = None
self.skip = False
self.mci = 2
self.mtime = datetime.datetime.utcnow()
self.modified = True
self.conflict = False
self.strategy = job.strategy
self.update_policy = job.update_policy
self.conflict_policy = job.conflict_policy
self.method = None
self.onvalidation = None
self.onaccept = None
self.accepted = None
self.permitted = False
self.committed = False
self.update = []<|docstring|>Constructor
Args:
job: the import job this item belongs to<|endoftext|> |
050163082962a386171159e54a42d41c3e5365dd6ba05079f2c551a447c8ff3f | def __repr__(self):
' Helper method for debugging '
_str = ('<S3ImportItem %s {item_id=%s uid=%s id=%s error=%s data=%s}>' % (self.table, self.item_id, self.uid, self.id, self.error, self.data))
return _str | Helper method for debugging | modules/s3/s3import.py | __repr__ | annehaley/eden | 205 | python | def __repr__(self):
' '
_str = ('<S3ImportItem %s {item_id=%s uid=%s id=%s error=%s data=%s}>' % (self.table, self.item_id, self.uid, self.id, self.error, self.data))
return _str | def __repr__(self):
' '
_str = ('<S3ImportItem %s {item_id=%s uid=%s id=%s error=%s data=%s}>' % (self.table, self.item_id, self.uid, self.id, self.error, self.data))
return _str<|docstring|>Helper method for debugging<|endoftext|> |
e19c41de098f79eaf7edb82bce7d82c3de602ac730d6ee67b9b0774751af8b4b | def parse(self, element, original=None, table=None, tree=None, files=None):
'\n Read data from a <resource> element\n\n Args:\n element: the element\n table: the DB table\n tree: the import tree\n files: uploaded files\n\n Returns:\n True if successful, False if not (sets self.error)\n '
s3db = current.s3db
xml = current.xml
ERROR = xml.ATTRIBUTE['error']
self.element = element
if (table is None):
tablename = element.get(xml.ATTRIBUTE['name'])
table = s3db.table(tablename)
if (table is None):
self.error = current.ERROR.BAD_RESOURCE
element.set(ERROR, s3_str(self.error))
return False
else:
tablename = table._tablename
self.table = table
self.tablename = tablename
UID = xml.UID
if (original is None):
original = S3Resource.original(table, element, mandatory=self._mandatory_fields())
elif (isinstance(original, str) and (UID in table.fields)):
query = (table[UID] == original)
pkeys = set((fname for fname in table.fields if table[fname].unique))
fields = S3Resource.import_fields(table, pkeys, mandatory=self._mandatory_fields())
original = current.db(query).select(*fields, limitby=(0, 1)).first()
else:
original = None
postprocess = s3db.get_config(tablename, 'xml_post_parse')
data = xml.record(table, element, files=files, original=original, postprocess=postprocess)
if (data is None):
self.error = current.ERROR.VALIDATION_ERROR
self.accepted = False
if (not element.get(ERROR, False)):
element.set(ERROR, s3_str(self.error))
return False
self.data = data
MCI = xml.MCI
MTIME = xml.MTIME
self.uid = data.get(UID)
if (original is not None):
self.original = original
self.id = original[table._id.name]
if ((not current.response.s3.synchronise_uuids) and (UID in original)):
self.uid = self.data[UID] = original[UID]
if (MTIME in data):
self.mtime = data[MTIME]
if (MCI in data):
self.mci = data[MCI]
return True | Read data from a <resource> element
Args:
element: the element
table: the DB table
tree: the import tree
files: uploaded files
Returns:
True if successful, False if not (sets self.error) | modules/s3/s3import.py | parse | annehaley/eden | 205 | python | def parse(self, element, original=None, table=None, tree=None, files=None):
'\n Read data from a <resource> element\n\n Args:\n element: the element\n table: the DB table\n tree: the import tree\n files: uploaded files\n\n Returns:\n True if successful, False if not (sets self.error)\n '
s3db = current.s3db
xml = current.xml
ERROR = xml.ATTRIBUTE['error']
self.element = element
if (table is None):
tablename = element.get(xml.ATTRIBUTE['name'])
table = s3db.table(tablename)
if (table is None):
self.error = current.ERROR.BAD_RESOURCE
element.set(ERROR, s3_str(self.error))
return False
else:
tablename = table._tablename
self.table = table
self.tablename = tablename
UID = xml.UID
if (original is None):
original = S3Resource.original(table, element, mandatory=self._mandatory_fields())
elif (isinstance(original, str) and (UID in table.fields)):
query = (table[UID] == original)
pkeys = set((fname for fname in table.fields if table[fname].unique))
fields = S3Resource.import_fields(table, pkeys, mandatory=self._mandatory_fields())
original = current.db(query).select(*fields, limitby=(0, 1)).first()
else:
original = None
postprocess = s3db.get_config(tablename, 'xml_post_parse')
data = xml.record(table, element, files=files, original=original, postprocess=postprocess)
if (data is None):
self.error = current.ERROR.VALIDATION_ERROR
self.accepted = False
if (not element.get(ERROR, False)):
element.set(ERROR, s3_str(self.error))
return False
self.data = data
MCI = xml.MCI
MTIME = xml.MTIME
self.uid = data.get(UID)
if (original is not None):
self.original = original
self.id = original[table._id.name]
if ((not current.response.s3.synchronise_uuids) and (UID in original)):
self.uid = self.data[UID] = original[UID]
if (MTIME in data):
self.mtime = data[MTIME]
if (MCI in data):
self.mci = data[MCI]
return True | def parse(self, element, original=None, table=None, tree=None, files=None):
'\n Read data from a <resource> element\n\n Args:\n element: the element\n table: the DB table\n tree: the import tree\n files: uploaded files\n\n Returns:\n True if successful, False if not (sets self.error)\n '
s3db = current.s3db
xml = current.xml
ERROR = xml.ATTRIBUTE['error']
self.element = element
if (table is None):
tablename = element.get(xml.ATTRIBUTE['name'])
table = s3db.table(tablename)
if (table is None):
self.error = current.ERROR.BAD_RESOURCE
element.set(ERROR, s3_str(self.error))
return False
else:
tablename = table._tablename
self.table = table
self.tablename = tablename
UID = xml.UID
if (original is None):
original = S3Resource.original(table, element, mandatory=self._mandatory_fields())
elif (isinstance(original, str) and (UID in table.fields)):
query = (table[UID] == original)
pkeys = set((fname for fname in table.fields if table[fname].unique))
fields = S3Resource.import_fields(table, pkeys, mandatory=self._mandatory_fields())
original = current.db(query).select(*fields, limitby=(0, 1)).first()
else:
original = None
postprocess = s3db.get_config(tablename, 'xml_post_parse')
data = xml.record(table, element, files=files, original=original, postprocess=postprocess)
if (data is None):
self.error = current.ERROR.VALIDATION_ERROR
self.accepted = False
if (not element.get(ERROR, False)):
element.set(ERROR, s3_str(self.error))
return False
self.data = data
MCI = xml.MCI
MTIME = xml.MTIME
self.uid = data.get(UID)
if (original is not None):
self.original = original
self.id = original[table._id.name]
if ((not current.response.s3.synchronise_uuids) and (UID in original)):
self.uid = self.data[UID] = original[UID]
if (MTIME in data):
self.mtime = data[MTIME]
if (MCI in data):
self.mci = data[MCI]
return True<|docstring|>Read data from a <resource> element
Args:
element: the element
table: the DB table
tree: the import tree
files: uploaded files
Returns:
True if successful, False if not (sets self.error)<|endoftext|> |
53e7355c75c6b6da453d2385e3cf5ef7200aa77f52a851a7fe5c7e7b56e6f486 | def deduplicate(self):
'\n Detect whether this is an update or a new record\n '
table = self.table
if ((table is None) or self.id):
return
METHOD = self.METHOD
CREATE = METHOD['CREATE']
UPDATE = METHOD['UPDATE']
DELETE = METHOD['DELETE']
MERGE = METHOD['MERGE']
xml = current.xml
UID = xml.UID
data = self.data
if (self.job.second_pass and (UID in table.fields)):
uid = data.get(UID)
if (uid and (not self.element.get(UID)) and (not self.original)):
del data[UID]
mandatory = self._mandatory_fields()
if (self.original is not None):
original = self.original
elif self.data:
original = S3Resource.original(table, self.data, mandatory=mandatory)
else:
original = None
synchronise_uuids = current.response.s3.synchronise_uuids
deleted = data[xml.DELETED]
if deleted:
if data[xml.REPLACEDBY]:
self.method = MERGE
else:
self.method = DELETE
self.uid = data.get(UID)
if (original is not None):
self.id = original[table._id.name]
if (not deleted):
self.method = UPDATE
else:
if ((UID in data) and (not synchronise_uuids)):
self.id = None
if (not deleted):
self.method = CREATE
else:
self.method = DELETE
self.skip = True
else:
resolve = current.s3db.get_config(self.tablename, 'deduplicate')
if (data and resolve):
resolve(self)
if (self.id and (self.method in (UPDATE, DELETE, MERGE))):
fields = S3Resource.import_fields(table, data, mandatory=mandatory)
original = current.db((table._id == self.id)).select(*fields, limitby=(0, 1)).first()
if (original and (not synchronise_uuids) and (UID in original)):
self.uid = data[UID] = original[UID]
self.original = original | Detect whether this is an update or a new record | modules/s3/s3import.py | deduplicate | annehaley/eden | 205 | python | def deduplicate(self):
'\n \n '
table = self.table
if ((table is None) or self.id):
return
METHOD = self.METHOD
CREATE = METHOD['CREATE']
UPDATE = METHOD['UPDATE']
DELETE = METHOD['DELETE']
MERGE = METHOD['MERGE']
xml = current.xml
UID = xml.UID
data = self.data
if (self.job.second_pass and (UID in table.fields)):
uid = data.get(UID)
if (uid and (not self.element.get(UID)) and (not self.original)):
del data[UID]
mandatory = self._mandatory_fields()
if (self.original is not None):
original = self.original
elif self.data:
original = S3Resource.original(table, self.data, mandatory=mandatory)
else:
original = None
synchronise_uuids = current.response.s3.synchronise_uuids
deleted = data[xml.DELETED]
if deleted:
if data[xml.REPLACEDBY]:
self.method = MERGE
else:
self.method = DELETE
self.uid = data.get(UID)
if (original is not None):
self.id = original[table._id.name]
if (not deleted):
self.method = UPDATE
else:
if ((UID in data) and (not synchronise_uuids)):
self.id = None
if (not deleted):
self.method = CREATE
else:
self.method = DELETE
self.skip = True
else:
resolve = current.s3db.get_config(self.tablename, 'deduplicate')
if (data and resolve):
resolve(self)
if (self.id and (self.method in (UPDATE, DELETE, MERGE))):
fields = S3Resource.import_fields(table, data, mandatory=mandatory)
original = current.db((table._id == self.id)).select(*fields, limitby=(0, 1)).first()
if (original and (not synchronise_uuids) and (UID in original)):
self.uid = data[UID] = original[UID]
self.original = original | def deduplicate(self):
'\n \n '
table = self.table
if ((table is None) or self.id):
return
METHOD = self.METHOD
CREATE = METHOD['CREATE']
UPDATE = METHOD['UPDATE']
DELETE = METHOD['DELETE']
MERGE = METHOD['MERGE']
xml = current.xml
UID = xml.UID
data = self.data
if (self.job.second_pass and (UID in table.fields)):
uid = data.get(UID)
if (uid and (not self.element.get(UID)) and (not self.original)):
del data[UID]
mandatory = self._mandatory_fields()
if (self.original is not None):
original = self.original
elif self.data:
original = S3Resource.original(table, self.data, mandatory=mandatory)
else:
original = None
synchronise_uuids = current.response.s3.synchronise_uuids
deleted = data[xml.DELETED]
if deleted:
if data[xml.REPLACEDBY]:
self.method = MERGE
else:
self.method = DELETE
self.uid = data.get(UID)
if (original is not None):
self.id = original[table._id.name]
if (not deleted):
self.method = UPDATE
else:
if ((UID in data) and (not synchronise_uuids)):
self.id = None
if (not deleted):
self.method = CREATE
else:
self.method = DELETE
self.skip = True
else:
resolve = current.s3db.get_config(self.tablename, 'deduplicate')
if (data and resolve):
resolve(self)
if (self.id and (self.method in (UPDATE, DELETE, MERGE))):
fields = S3Resource.import_fields(table, data, mandatory=mandatory)
original = current.db((table._id == self.id)).select(*fields, limitby=(0, 1)).first()
if (original and (not synchronise_uuids) and (UID in original)):
self.uid = data[UID] = original[UID]
self.original = original<|docstring|>Detect whether this is an update or a new record<|endoftext|> |
59d3c13cb6a853e1c0cf3b4a8e6e11887ee1837ac12c6947550c06db10012789 | def authorize(self):
'\n Authorize the import of this item, sets self.permitted\n '
if (not self.table):
return False
auth = current.auth
tablename = self.tablename
if ((not auth.override) and (tablename.split('_', 1)[0] in auth.PROTECTED)):
return False
METHOD = self.METHOD
if (self.data.deleted is True):
if self.data.deleted_rb:
self.method = METHOD['MERGE']
else:
self.method = METHOD['DELETE']
self.accepted = (True if self.id else False)
elif self.id:
if (not self.original):
fields = S3Resource.import_fields(self.table, self.data, mandatory=self._mandatory_fields())
query = (self.table.id == self.id)
self.original = current.db(query).select(*fields, limitby=(0, 1)).first()
if self.original:
self.method = METHOD['UPDATE']
else:
self.method = METHOD['CREATE']
else:
self.method = METHOD['CREATE']
if (self.method == METHOD['CREATE']):
self.id = 0
has_permission = current.auth.s3_has_permission
if has_permission:
self.permitted = has_permission(self.method, tablename, record_id=self.id)
else:
self.permitted = True
return self.permitted | Authorize the import of this item, sets self.permitted | modules/s3/s3import.py | authorize | annehaley/eden | 205 | python | def authorize(self):
'\n \n '
if (not self.table):
return False
auth = current.auth
tablename = self.tablename
if ((not auth.override) and (tablename.split('_', 1)[0] in auth.PROTECTED)):
return False
METHOD = self.METHOD
if (self.data.deleted is True):
if self.data.deleted_rb:
self.method = METHOD['MERGE']
else:
self.method = METHOD['DELETE']
self.accepted = (True if self.id else False)
elif self.id:
if (not self.original):
fields = S3Resource.import_fields(self.table, self.data, mandatory=self._mandatory_fields())
query = (self.table.id == self.id)
self.original = current.db(query).select(*fields, limitby=(0, 1)).first()
if self.original:
self.method = METHOD['UPDATE']
else:
self.method = METHOD['CREATE']
else:
self.method = METHOD['CREATE']
if (self.method == METHOD['CREATE']):
self.id = 0
has_permission = current.auth.s3_has_permission
if has_permission:
self.permitted = has_permission(self.method, tablename, record_id=self.id)
else:
self.permitted = True
return self.permitted | def authorize(self):
'\n \n '
if (not self.table):
return False
auth = current.auth
tablename = self.tablename
if ((not auth.override) and (tablename.split('_', 1)[0] in auth.PROTECTED)):
return False
METHOD = self.METHOD
if (self.data.deleted is True):
if self.data.deleted_rb:
self.method = METHOD['MERGE']
else:
self.method = METHOD['DELETE']
self.accepted = (True if self.id else False)
elif self.id:
if (not self.original):
fields = S3Resource.import_fields(self.table, self.data, mandatory=self._mandatory_fields())
query = (self.table.id == self.id)
self.original = current.db(query).select(*fields, limitby=(0, 1)).first()
if self.original:
self.method = METHOD['UPDATE']
else:
self.method = METHOD['CREATE']
else:
self.method = METHOD['CREATE']
if (self.method == METHOD['CREATE']):
self.id = 0
has_permission = current.auth.s3_has_permission
if has_permission:
self.permitted = has_permission(self.method, tablename, record_id=self.id)
else:
self.permitted = True
return self.permitted<|docstring|>Authorize the import of this item, sets self.permitted<|endoftext|> |
6cb87dfb5fe7ec5c714abf64d63a82ba8cdd24e95eb4415f3ccf9b6a2378701e | def validate(self):
'\n Validate this item (=record onvalidation), sets self.accepted\n '
data = self.data
if (self.accepted is not None):
return self.accepted
if ((data is None) or (not self.table)):
self.accepted = False
return False
xml = current.xml
ERROR = xml.ATTRIBUTE['error']
METHOD = self.METHOD
DELETE = METHOD.DELETE
MERGE = METHOD.MERGE
if (not self.id):
self.deduplicate()
if (self.accepted is False):
return False
if (self.skip or (self.method in (DELETE, MERGE))):
self.accepted = (True if self.id else False)
return True
if (not self.id):
self._dynamic_defaults(data)
required_fields = self._mandatory_fields()
all_fields = list(data.keys())
failed_references = []
items = self.job.items
for reference in self.references:
resolvable = resolved = True
entry = reference.entry
if (entry and (not entry.id)):
if entry.item_id:
item = items[entry.item_id]
if item.error:
relement = reference.element
if (relement is not None):
msg = '; '.join(xml.collect_errors(entry.element))
relement.set(ERROR, msg)
else:
resolvable = False
resolved = False
else:
resolvable = resolved = False
field = reference.field
if isinstance(field, (tuple, list)):
field = field[1]
if resolved:
all_fields.append(field)
elif resolvable:
if (field not in required_fields):
required_fields.append(field)
if (field not in failed_references):
failed_references.append(field)
missing = [fname for fname in required_fields if (fname not in all_fields)]
original = self.original
if missing:
if original:
missing = [fname for fname in missing if (fname not in original)]
if missing:
fields = [f for f in missing if (f not in failed_references)]
if fields:
errors = [('%s: value(s) required' % ', '.join(fields))]
else:
errors = []
if failed_references:
fields = ', '.join(failed_references)
errors.append(('%s: reference import(s) failed' % ', '.join(failed_references)))
self.error = '; '.join(errors)
self.element.set(ERROR, self.error)
self.accepted = False
return False
form = Storage(method=self.method, vars=data, request_vars=data)
if self.id:
form.vars.id = self.id
form.errors = Storage()
tablename = self.tablename
key = ('%s_onvalidation' % self.method)
get_config = current.s3db.get_config
onvalidation = get_config(tablename, key, get_config(tablename, 'onvalidation'))
if onvalidation:
try:
callback(onvalidation, form)
except:
from traceback import format_exc
current.log.error(('S3Import %s onvalidation exception:' % tablename))
current.log.debug(format_exc(10))
accepted = True
if form.errors:
element = self.element
for k in form.errors:
e = element.findall(("data[@field='%s']" % k))
if (not e):
e = element.findall(("reference[@field='%s']" % k))
if (not e):
e = element
form.errors[k] = ('[%s] %s' % (k, form.errors[k]))
else:
e = e[0]
e.set(ERROR, s3_str(form.errors[k]))
self.error = current.ERROR.VALIDATION_ERROR
accepted = False
self.accepted = accepted
return accepted | Validate this item (=record onvalidation), sets self.accepted | modules/s3/s3import.py | validate | annehaley/eden | 205 | python | def validate(self):
'\n \n '
data = self.data
if (self.accepted is not None):
return self.accepted
if ((data is None) or (not self.table)):
self.accepted = False
return False
xml = current.xml
ERROR = xml.ATTRIBUTE['error']
METHOD = self.METHOD
DELETE = METHOD.DELETE
MERGE = METHOD.MERGE
if (not self.id):
self.deduplicate()
if (self.accepted is False):
return False
if (self.skip or (self.method in (DELETE, MERGE))):
self.accepted = (True if self.id else False)
return True
if (not self.id):
self._dynamic_defaults(data)
required_fields = self._mandatory_fields()
all_fields = list(data.keys())
failed_references = []
items = self.job.items
for reference in self.references:
resolvable = resolved = True
entry = reference.entry
if (entry and (not entry.id)):
if entry.item_id:
item = items[entry.item_id]
if item.error:
relement = reference.element
if (relement is not None):
msg = '; '.join(xml.collect_errors(entry.element))
relement.set(ERROR, msg)
else:
resolvable = False
resolved = False
else:
resolvable = resolved = False
field = reference.field
if isinstance(field, (tuple, list)):
field = field[1]
if resolved:
all_fields.append(field)
elif resolvable:
if (field not in required_fields):
required_fields.append(field)
if (field not in failed_references):
failed_references.append(field)
missing = [fname for fname in required_fields if (fname not in all_fields)]
original = self.original
if missing:
if original:
missing = [fname for fname in missing if (fname not in original)]
if missing:
fields = [f for f in missing if (f not in failed_references)]
if fields:
errors = [('%s: value(s) required' % ', '.join(fields))]
else:
errors = []
if failed_references:
fields = ', '.join(failed_references)
errors.append(('%s: reference import(s) failed' % ', '.join(failed_references)))
self.error = '; '.join(errors)
self.element.set(ERROR, self.error)
self.accepted = False
return False
form = Storage(method=self.method, vars=data, request_vars=data)
if self.id:
form.vars.id = self.id
form.errors = Storage()
tablename = self.tablename
key = ('%s_onvalidation' % self.method)
get_config = current.s3db.get_config
onvalidation = get_config(tablename, key, get_config(tablename, 'onvalidation'))
if onvalidation:
try:
callback(onvalidation, form)
except:
from traceback import format_exc
current.log.error(('S3Import %s onvalidation exception:' % tablename))
current.log.debug(format_exc(10))
accepted = True
if form.errors:
element = self.element
for k in form.errors:
e = element.findall(("data[@field='%s']" % k))
if (not e):
e = element.findall(("reference[@field='%s']" % k))
if (not e):
e = element
form.errors[k] = ('[%s] %s' % (k, form.errors[k]))
else:
e = e[0]
e.set(ERROR, s3_str(form.errors[k]))
self.error = current.ERROR.VALIDATION_ERROR
accepted = False
self.accepted = accepted
return accepted | def validate(self):
'\n \n '
data = self.data
if (self.accepted is not None):
return self.accepted
if ((data is None) or (not self.table)):
self.accepted = False
return False
xml = current.xml
ERROR = xml.ATTRIBUTE['error']
METHOD = self.METHOD
DELETE = METHOD.DELETE
MERGE = METHOD.MERGE
if (not self.id):
self.deduplicate()
if (self.accepted is False):
return False
if (self.skip or (self.method in (DELETE, MERGE))):
self.accepted = (True if self.id else False)
return True
if (not self.id):
self._dynamic_defaults(data)
required_fields = self._mandatory_fields()
all_fields = list(data.keys())
failed_references = []
items = self.job.items
for reference in self.references:
resolvable = resolved = True
entry = reference.entry
if (entry and (not entry.id)):
if entry.item_id:
item = items[entry.item_id]
if item.error:
relement = reference.element
if (relement is not None):
msg = '; '.join(xml.collect_errors(entry.element))
relement.set(ERROR, msg)
else:
resolvable = False
resolved = False
else:
resolvable = resolved = False
field = reference.field
if isinstance(field, (tuple, list)):
field = field[1]
if resolved:
all_fields.append(field)
elif resolvable:
if (field not in required_fields):
required_fields.append(field)
if (field not in failed_references):
failed_references.append(field)
missing = [fname for fname in required_fields if (fname not in all_fields)]
original = self.original
if missing:
if original:
missing = [fname for fname in missing if (fname not in original)]
if missing:
fields = [f for f in missing if (f not in failed_references)]
if fields:
errors = [('%s: value(s) required' % ', '.join(fields))]
else:
errors = []
if failed_references:
fields = ', '.join(failed_references)
errors.append(('%s: reference import(s) failed' % ', '.join(failed_references)))
self.error = '; '.join(errors)
self.element.set(ERROR, self.error)
self.accepted = False
return False
form = Storage(method=self.method, vars=data, request_vars=data)
if self.id:
form.vars.id = self.id
form.errors = Storage()
tablename = self.tablename
key = ('%s_onvalidation' % self.method)
get_config = current.s3db.get_config
onvalidation = get_config(tablename, key, get_config(tablename, 'onvalidation'))
if onvalidation:
try:
callback(onvalidation, form)
except:
from traceback import format_exc
current.log.error(('S3Import %s onvalidation exception:' % tablename))
current.log.debug(format_exc(10))
accepted = True
if form.errors:
element = self.element
for k in form.errors:
e = element.findall(("data[@field='%s']" % k))
if (not e):
e = element.findall(("reference[@field='%s']" % k))
if (not e):
e = element
form.errors[k] = ('[%s] %s' % (k, form.errors[k]))
else:
e = e[0]
e.set(ERROR, s3_str(form.errors[k]))
self.error = current.ERROR.VALIDATION_ERROR
accepted = False
self.accepted = accepted
return accepted<|docstring|>Validate this item (=record onvalidation), sets self.accepted<|endoftext|> |
0337b2cbd9c8624d427d30110a88a70fdfb1d5b42e5a560484c385682f191983 | def commit(self, ignore_errors=False):
'\n Commit this item to the database\n\n Args:\n ignore_errors: skip invalid components\n (still reports errors)\n '
if self.committed:
return True
if ((self.parent is not None) and self.parent.skip):
return True
db = current.db
s3db = current.s3db
xml = current.xml
ATTRIBUTE = xml.ATTRIBUTE
METHOD = self.METHOD
CREATE = METHOD.CREATE
UPDATE = METHOD.UPDATE
DELETE = METHOD.DELETE
MERGE = METHOD.MERGE
POLICY = self.POLICY
THIS = POLICY['THIS']
NEWER = POLICY['NEWER']
MASTER = POLICY['MASTER']
UID = xml.UID
MCI = xml.MCI
MTIME = xml.MTIME
VALIDATION_ERROR = current.ERROR.VALIDATION_ERROR
self.mtime = s3_utc(self.mtime)
self._resolve_references()
if (not self.validate()):
self.skip = True
parent = self.parent
if (parent is not None):
parent.error = VALIDATION_ERROR
element = parent.element
if (not element.get(ATTRIBUTE.error, False)):
element.set(ATTRIBUTE.error, s3_str(parent.error))
return ignore_errors
elif ((self.method not in (MERGE, DELETE)) and self.components):
for component in self.components:
if ((component.accepted is False) or (component.data is None)):
component.skip = True
self.skip = True
self.error = VALIDATION_ERROR
return ignore_errors
elif ((self.method in (MERGE, DELETE)) and (not self.accepted)):
self.skip = True
return True
if (not self.authorize()):
self.error = ('%s: %s, %s, %s' % (current.ERROR.NOT_PERMITTED, self.method, self.tablename, self.id))
self.skip = True
return ignore_errors
method = self.method
strategy = self.strategy
if (not isinstance(strategy, (list, tuple))):
strategy = [strategy]
if (method not in strategy):
self.error = current.ERROR.NOT_PERMITTED
self.skip = True
return True
table = self.table
original = self.original
original_mtime = None
original_mci = 0
if original:
if hasattr(table, MTIME):
original_mtime = s3_utc(original[MTIME])
if hasattr(table, MCI):
original_mci = original[MCI]
original_deleted = (('deleted' in original) and original.deleted)
else:
original_deleted = False
job = self.job
original_modified = True
self.modified = True
self.conflict = False
last_sync = s3_utc(job.last_sync)
if last_sync:
if (original_mtime and (original_mtime < last_sync)):
original_modified = False
if (self.mtime and (self.mtime < last_sync)):
self.modified = False
if (self.modified and original_modified):
self.conflict = True
if (self.conflict and (method in (UPDATE, DELETE, MERGE))):
if job.onconflict:
job.onconflict(self)
if (self.data is not None):
data = table._filter_fields(self.data, id=True)
else:
data = Storage()
if isinstance(self.update_policy, dict):
def update_policy(f):
setting = self.update_policy
p = setting.get(f, setting.get('__default__', THIS))
if (p not in POLICY):
return THIS
return p
else:
def update_policy(f):
p = self.update_policy
if (p not in POLICY):
return THIS
return p
if callable(job.log):
job.log(self)
tablename = self.tablename
enforce_realm_update = False
if (method == UPDATE):
if original:
if original_deleted:
policy = update_policy(None)
if (((policy == NEWER) and original_mtime and (original_mtime > self.mtime)) or ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1)))):
self.skip = True
return True
for f in list(data.keys()):
if (f in original):
if (type(original[f]) is datetime.datetime):
if (s3_utc(data[f]) == s3_utc(original[f])):
del data[f]
continue
elif (data[f] == original[f]):
del data[f]
continue
remove = False
policy = update_policy(f)
if (policy == THIS):
remove = True
elif (policy == NEWER):
if (original_mtime and (original_mtime > self.mtime)):
remove = True
elif (policy == MASTER):
if ((original_mci == 0) or (self.mci != 1)):
remove = True
if remove:
del data[f]
if original_deleted:
data['deleted'] = False
if hasattr(table, 'deleted_fk'):
data['deleted_fk'] = ''
if hasattr(table, 'created_by'):
data['created_by'] = table.created_by.default
if hasattr(table, 'modified_by'):
data['modified_by'] = table.modified_by.default
for fieldname in table.fields:
field = table[fieldname]
default = field.default
if ((str(field.type)[:9] == 'reference') and (fieldname not in data) and (default is not None)):
data[fieldname] = default
enforce_realm_update = True
if ((not self.skip) and (not self.conflict) and (len(data) or self.components or self.references)):
if (self.uid and hasattr(table, UID)):
data[UID] = self.uid
if (MTIME in table):
data[MTIME] = self.mtime
if (MCI in data):
del data[MCI]
query = (table._id == self.id)
try:
db(query).update(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
else:
self.committed = True
else:
self.committed = True
elif (method == CREATE):
if (UID in data):
del data[UID]
if (MCI in data):
del data[MCI]
for f in data:
if ((update_policy(f) == MASTER) and (self.mci != 1)):
del data[f]
if self.skip:
return True
elif (len(data) or self.components or self.references):
if (self.uid and (UID in table.fields)):
data[UID] = self.uid
if (MCI in table.fields):
data[MCI] = self.mci
try:
success = table.insert(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
if success:
self.id = success
self.committed = True
else:
self.skip = True
return True
elif (method == DELETE):
if original:
if original_deleted:
self.skip = True
policy = update_policy(None)
if (policy == THIS):
self.skip = True
elif ((policy == NEWER) and (original_mtime and (original_mtime > self.mtime))):
self.skip = True
elif ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1))):
self.skip = True
else:
self.skip = True
if ((not self.skip) and (not self.conflict)):
resource = s3db.resource(tablename, id=self.id)
success = resource.delete(cascade=True)
if resource.error:
self.error = resource.error
self.skip = True
return ignore_errors
return True
elif (method == MERGE):
if (UID not in table.fields):
self.skip = True
elif original:
if original_deleted:
self.skip = True
policy = update_policy(None)
if (policy == THIS):
self.skip = True
elif ((policy == NEWER) and (original_mtime and (original_mtime > self.mtime))):
self.skip = True
elif ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1))):
self.skip = True
else:
self.skip = True
if ((not self.skip) and (not self.conflict)):
row = db((table[UID] == data[xml.REPLACEDBY])).select(table._id, limitby=(0, 1)).first()
if row:
original_id = row[table._id]
resource = s3db.resource(tablename, id=[original_id, self.id])
try:
success = resource.merge(original_id, self.id)
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
if success:
self.committed = True
else:
self.skip = True
return True
else:
raise RuntimeError(('unknown import method: %s' % method))
if self.committed:
form = Storage()
form.method = method
form.table = table
form.vars = self.data
(prefix, name) = tablename.split('_', 1)
if self.id:
form.vars.id = self.id
current.audit(method, prefix, name, form=form, record=self.id, representation='xml')
if (MTIME in table.fields):
modified_on = table[MTIME]
modified_on_update = modified_on.update
modified_on.update = None
else:
modified_on_update = None
get_config = s3db.get_config
s3db.update_super(table, form.vars)
if (method == CREATE):
current.auth.s3_set_record_owner(table, self.id)
elif (method == UPDATE):
update_realm = (enforce_realm_update or get_config(table, 'update_realm'))
if update_realm:
current.auth.set_realm_entity(table, self.id, force_update=True)
key = ('%s_onaccept' % method)
onaccept = (get_config(tablename, key) or get_config(tablename, 'onaccept'))
if onaccept:
callback(onaccept, form)
if (modified_on_update is not None):
modified_on.update = modified_on_update
if (self.update and self.id):
for u in self.update:
item = u.get('item')
if (not item):
continue
field = u.get('field')
if isinstance(field, (list, tuple)):
(pkey, fkey) = field
row = db((table.id == self.id)).select(table[pkey], limitby=(0, 1)).first()
ref_id = row[pkey]
else:
(pkey, fkey) = (None, field)
ref_id = self.id
if ('refkey' in u):
item._update_objref(fkey, u['refkey'], ref_id)
else:
item._update_reference(fkey, ref_id)
return True | Commit this item to the database
Args:
ignore_errors: skip invalid components
(still reports errors) | modules/s3/s3import.py | commit | annehaley/eden | 205 | python | def commit(self, ignore_errors=False):
'\n Commit this item to the database\n\n Args:\n ignore_errors: skip invalid components\n (still reports errors)\n '
if self.committed:
return True
if ((self.parent is not None) and self.parent.skip):
return True
db = current.db
s3db = current.s3db
xml = current.xml
ATTRIBUTE = xml.ATTRIBUTE
METHOD = self.METHOD
CREATE = METHOD.CREATE
UPDATE = METHOD.UPDATE
DELETE = METHOD.DELETE
MERGE = METHOD.MERGE
POLICY = self.POLICY
THIS = POLICY['THIS']
NEWER = POLICY['NEWER']
MASTER = POLICY['MASTER']
UID = xml.UID
MCI = xml.MCI
MTIME = xml.MTIME
VALIDATION_ERROR = current.ERROR.VALIDATION_ERROR
self.mtime = s3_utc(self.mtime)
self._resolve_references()
if (not self.validate()):
self.skip = True
parent = self.parent
if (parent is not None):
parent.error = VALIDATION_ERROR
element = parent.element
if (not element.get(ATTRIBUTE.error, False)):
element.set(ATTRIBUTE.error, s3_str(parent.error))
return ignore_errors
elif ((self.method not in (MERGE, DELETE)) and self.components):
for component in self.components:
if ((component.accepted is False) or (component.data is None)):
component.skip = True
self.skip = True
self.error = VALIDATION_ERROR
return ignore_errors
elif ((self.method in (MERGE, DELETE)) and (not self.accepted)):
self.skip = True
return True
if (not self.authorize()):
self.error = ('%s: %s, %s, %s' % (current.ERROR.NOT_PERMITTED, self.method, self.tablename, self.id))
self.skip = True
return ignore_errors
method = self.method
strategy = self.strategy
if (not isinstance(strategy, (list, tuple))):
strategy = [strategy]
if (method not in strategy):
self.error = current.ERROR.NOT_PERMITTED
self.skip = True
return True
table = self.table
original = self.original
original_mtime = None
original_mci = 0
if original:
if hasattr(table, MTIME):
original_mtime = s3_utc(original[MTIME])
if hasattr(table, MCI):
original_mci = original[MCI]
original_deleted = (('deleted' in original) and original.deleted)
else:
original_deleted = False
job = self.job
original_modified = True
self.modified = True
self.conflict = False
last_sync = s3_utc(job.last_sync)
if last_sync:
if (original_mtime and (original_mtime < last_sync)):
original_modified = False
if (self.mtime and (self.mtime < last_sync)):
self.modified = False
if (self.modified and original_modified):
self.conflict = True
if (self.conflict and (method in (UPDATE, DELETE, MERGE))):
if job.onconflict:
job.onconflict(self)
if (self.data is not None):
data = table._filter_fields(self.data, id=True)
else:
data = Storage()
if isinstance(self.update_policy, dict):
def update_policy(f):
setting = self.update_policy
p = setting.get(f, setting.get('__default__', THIS))
if (p not in POLICY):
return THIS
return p
else:
def update_policy(f):
p = self.update_policy
if (p not in POLICY):
return THIS
return p
if callable(job.log):
job.log(self)
tablename = self.tablename
enforce_realm_update = False
if (method == UPDATE):
if original:
if original_deleted:
policy = update_policy(None)
if (((policy == NEWER) and original_mtime and (original_mtime > self.mtime)) or ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1)))):
self.skip = True
return True
for f in list(data.keys()):
if (f in original):
if (type(original[f]) is datetime.datetime):
if (s3_utc(data[f]) == s3_utc(original[f])):
del data[f]
continue
elif (data[f] == original[f]):
del data[f]
continue
remove = False
policy = update_policy(f)
if (policy == THIS):
remove = True
elif (policy == NEWER):
if (original_mtime and (original_mtime > self.mtime)):
remove = True
elif (policy == MASTER):
if ((original_mci == 0) or (self.mci != 1)):
remove = True
if remove:
del data[f]
if original_deleted:
data['deleted'] = False
if hasattr(table, 'deleted_fk'):
data['deleted_fk'] =
if hasattr(table, 'created_by'):
data['created_by'] = table.created_by.default
if hasattr(table, 'modified_by'):
data['modified_by'] = table.modified_by.default
for fieldname in table.fields:
field = table[fieldname]
default = field.default
if ((str(field.type)[:9] == 'reference') and (fieldname not in data) and (default is not None)):
data[fieldname] = default
enforce_realm_update = True
if ((not self.skip) and (not self.conflict) and (len(data) or self.components or self.references)):
if (self.uid and hasattr(table, UID)):
data[UID] = self.uid
if (MTIME in table):
data[MTIME] = self.mtime
if (MCI in data):
del data[MCI]
query = (table._id == self.id)
try:
db(query).update(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
else:
self.committed = True
else:
self.committed = True
elif (method == CREATE):
if (UID in data):
del data[UID]
if (MCI in data):
del data[MCI]
for f in data:
if ((update_policy(f) == MASTER) and (self.mci != 1)):
del data[f]
if self.skip:
return True
elif (len(data) or self.components or self.references):
if (self.uid and (UID in table.fields)):
data[UID] = self.uid
if (MCI in table.fields):
data[MCI] = self.mci
try:
success = table.insert(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
if success:
self.id = success
self.committed = True
else:
self.skip = True
return True
elif (method == DELETE):
if original:
if original_deleted:
self.skip = True
policy = update_policy(None)
if (policy == THIS):
self.skip = True
elif ((policy == NEWER) and (original_mtime and (original_mtime > self.mtime))):
self.skip = True
elif ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1))):
self.skip = True
else:
self.skip = True
if ((not self.skip) and (not self.conflict)):
resource = s3db.resource(tablename, id=self.id)
success = resource.delete(cascade=True)
if resource.error:
self.error = resource.error
self.skip = True
return ignore_errors
return True
elif (method == MERGE):
if (UID not in table.fields):
self.skip = True
elif original:
if original_deleted:
self.skip = True
policy = update_policy(None)
if (policy == THIS):
self.skip = True
elif ((policy == NEWER) and (original_mtime and (original_mtime > self.mtime))):
self.skip = True
elif ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1))):
self.skip = True
else:
self.skip = True
if ((not self.skip) and (not self.conflict)):
row = db((table[UID] == data[xml.REPLACEDBY])).select(table._id, limitby=(0, 1)).first()
if row:
original_id = row[table._id]
resource = s3db.resource(tablename, id=[original_id, self.id])
try:
success = resource.merge(original_id, self.id)
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
if success:
self.committed = True
else:
self.skip = True
return True
else:
raise RuntimeError(('unknown import method: %s' % method))
if self.committed:
form = Storage()
form.method = method
form.table = table
form.vars = self.data
(prefix, name) = tablename.split('_', 1)
if self.id:
form.vars.id = self.id
current.audit(method, prefix, name, form=form, record=self.id, representation='xml')
if (MTIME in table.fields):
modified_on = table[MTIME]
modified_on_update = modified_on.update
modified_on.update = None
else:
modified_on_update = None
get_config = s3db.get_config
s3db.update_super(table, form.vars)
if (method == CREATE):
current.auth.s3_set_record_owner(table, self.id)
elif (method == UPDATE):
update_realm = (enforce_realm_update or get_config(table, 'update_realm'))
if update_realm:
current.auth.set_realm_entity(table, self.id, force_update=True)
key = ('%s_onaccept' % method)
onaccept = (get_config(tablename, key) or get_config(tablename, 'onaccept'))
if onaccept:
callback(onaccept, form)
if (modified_on_update is not None):
modified_on.update = modified_on_update
if (self.update and self.id):
for u in self.update:
item = u.get('item')
if (not item):
continue
field = u.get('field')
if isinstance(field, (list, tuple)):
(pkey, fkey) = field
row = db((table.id == self.id)).select(table[pkey], limitby=(0, 1)).first()
ref_id = row[pkey]
else:
(pkey, fkey) = (None, field)
ref_id = self.id
if ('refkey' in u):
item._update_objref(fkey, u['refkey'], ref_id)
else:
item._update_reference(fkey, ref_id)
return True | def commit(self, ignore_errors=False):
'\n Commit this item to the database\n\n Args:\n ignore_errors: skip invalid components\n (still reports errors)\n '
if self.committed:
return True
if ((self.parent is not None) and self.parent.skip):
return True
db = current.db
s3db = current.s3db
xml = current.xml
ATTRIBUTE = xml.ATTRIBUTE
METHOD = self.METHOD
CREATE = METHOD.CREATE
UPDATE = METHOD.UPDATE
DELETE = METHOD.DELETE
MERGE = METHOD.MERGE
POLICY = self.POLICY
THIS = POLICY['THIS']
NEWER = POLICY['NEWER']
MASTER = POLICY['MASTER']
UID = xml.UID
MCI = xml.MCI
MTIME = xml.MTIME
VALIDATION_ERROR = current.ERROR.VALIDATION_ERROR
self.mtime = s3_utc(self.mtime)
self._resolve_references()
if (not self.validate()):
self.skip = True
parent = self.parent
if (parent is not None):
parent.error = VALIDATION_ERROR
element = parent.element
if (not element.get(ATTRIBUTE.error, False)):
element.set(ATTRIBUTE.error, s3_str(parent.error))
return ignore_errors
elif ((self.method not in (MERGE, DELETE)) and self.components):
for component in self.components:
if ((component.accepted is False) or (component.data is None)):
component.skip = True
self.skip = True
self.error = VALIDATION_ERROR
return ignore_errors
elif ((self.method in (MERGE, DELETE)) and (not self.accepted)):
self.skip = True
return True
if (not self.authorize()):
self.error = ('%s: %s, %s, %s' % (current.ERROR.NOT_PERMITTED, self.method, self.tablename, self.id))
self.skip = True
return ignore_errors
method = self.method
strategy = self.strategy
if (not isinstance(strategy, (list, tuple))):
strategy = [strategy]
if (method not in strategy):
self.error = current.ERROR.NOT_PERMITTED
self.skip = True
return True
table = self.table
original = self.original
original_mtime = None
original_mci = 0
if original:
if hasattr(table, MTIME):
original_mtime = s3_utc(original[MTIME])
if hasattr(table, MCI):
original_mci = original[MCI]
original_deleted = (('deleted' in original) and original.deleted)
else:
original_deleted = False
job = self.job
original_modified = True
self.modified = True
self.conflict = False
last_sync = s3_utc(job.last_sync)
if last_sync:
if (original_mtime and (original_mtime < last_sync)):
original_modified = False
if (self.mtime and (self.mtime < last_sync)):
self.modified = False
if (self.modified and original_modified):
self.conflict = True
if (self.conflict and (method in (UPDATE, DELETE, MERGE))):
if job.onconflict:
job.onconflict(self)
if (self.data is not None):
data = table._filter_fields(self.data, id=True)
else:
data = Storage()
if isinstance(self.update_policy, dict):
def update_policy(f):
setting = self.update_policy
p = setting.get(f, setting.get('__default__', THIS))
if (p not in POLICY):
return THIS
return p
else:
def update_policy(f):
p = self.update_policy
if (p not in POLICY):
return THIS
return p
if callable(job.log):
job.log(self)
tablename = self.tablename
enforce_realm_update = False
if (method == UPDATE):
if original:
if original_deleted:
policy = update_policy(None)
if (((policy == NEWER) and original_mtime and (original_mtime > self.mtime)) or ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1)))):
self.skip = True
return True
for f in list(data.keys()):
if (f in original):
if (type(original[f]) is datetime.datetime):
if (s3_utc(data[f]) == s3_utc(original[f])):
del data[f]
continue
elif (data[f] == original[f]):
del data[f]
continue
remove = False
policy = update_policy(f)
if (policy == THIS):
remove = True
elif (policy == NEWER):
if (original_mtime and (original_mtime > self.mtime)):
remove = True
elif (policy == MASTER):
if ((original_mci == 0) or (self.mci != 1)):
remove = True
if remove:
del data[f]
if original_deleted:
data['deleted'] = False
if hasattr(table, 'deleted_fk'):
data['deleted_fk'] =
if hasattr(table, 'created_by'):
data['created_by'] = table.created_by.default
if hasattr(table, 'modified_by'):
data['modified_by'] = table.modified_by.default
for fieldname in table.fields:
field = table[fieldname]
default = field.default
if ((str(field.type)[:9] == 'reference') and (fieldname not in data) and (default is not None)):
data[fieldname] = default
enforce_realm_update = True
if ((not self.skip) and (not self.conflict) and (len(data) or self.components or self.references)):
if (self.uid and hasattr(table, UID)):
data[UID] = self.uid
if (MTIME in table):
data[MTIME] = self.mtime
if (MCI in data):
del data[MCI]
query = (table._id == self.id)
try:
db(query).update(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
else:
self.committed = True
else:
self.committed = True
elif (method == CREATE):
if (UID in data):
del data[UID]
if (MCI in data):
del data[MCI]
for f in data:
if ((update_policy(f) == MASTER) and (self.mci != 1)):
del data[f]
if self.skip:
return True
elif (len(data) or self.components or self.references):
if (self.uid and (UID in table.fields)):
data[UID] = self.uid
if (MCI in table.fields):
data[MCI] = self.mci
try:
success = table.insert(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
if success:
self.id = success
self.committed = True
else:
self.skip = True
return True
elif (method == DELETE):
if original:
if original_deleted:
self.skip = True
policy = update_policy(None)
if (policy == THIS):
self.skip = True
elif ((policy == NEWER) and (original_mtime and (original_mtime > self.mtime))):
self.skip = True
elif ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1))):
self.skip = True
else:
self.skip = True
if ((not self.skip) and (not self.conflict)):
resource = s3db.resource(tablename, id=self.id)
success = resource.delete(cascade=True)
if resource.error:
self.error = resource.error
self.skip = True
return ignore_errors
return True
elif (method == MERGE):
if (UID not in table.fields):
self.skip = True
elif original:
if original_deleted:
self.skip = True
policy = update_policy(None)
if (policy == THIS):
self.skip = True
elif ((policy == NEWER) and (original_mtime and (original_mtime > self.mtime))):
self.skip = True
elif ((policy == MASTER) and ((original_mci == 0) or (self.mci != 1))):
self.skip = True
else:
self.skip = True
if ((not self.skip) and (not self.conflict)):
row = db((table[UID] == data[xml.REPLACEDBY])).select(table._id, limitby=(0, 1)).first()
if row:
original_id = row[table._id]
resource = s3db.resource(tablename, id=[original_id, self.id])
try:
success = resource.merge(original_id, self.id)
except:
self.error = sys.exc_info()[1]
self.skip = True
return ignore_errors
if success:
self.committed = True
else:
self.skip = True
return True
else:
raise RuntimeError(('unknown import method: %s' % method))
if self.committed:
form = Storage()
form.method = method
form.table = table
form.vars = self.data
(prefix, name) = tablename.split('_', 1)
if self.id:
form.vars.id = self.id
current.audit(method, prefix, name, form=form, record=self.id, representation='xml')
if (MTIME in table.fields):
modified_on = table[MTIME]
modified_on_update = modified_on.update
modified_on.update = None
else:
modified_on_update = None
get_config = s3db.get_config
s3db.update_super(table, form.vars)
if (method == CREATE):
current.auth.s3_set_record_owner(table, self.id)
elif (method == UPDATE):
update_realm = (enforce_realm_update or get_config(table, 'update_realm'))
if update_realm:
current.auth.set_realm_entity(table, self.id, force_update=True)
key = ('%s_onaccept' % method)
onaccept = (get_config(tablename, key) or get_config(tablename, 'onaccept'))
if onaccept:
callback(onaccept, form)
if (modified_on_update is not None):
modified_on.update = modified_on_update
if (self.update and self.id):
for u in self.update:
item = u.get('item')
if (not item):
continue
field = u.get('field')
if isinstance(field, (list, tuple)):
(pkey, fkey) = field
row = db((table.id == self.id)).select(table[pkey], limitby=(0, 1)).first()
ref_id = row[pkey]
else:
(pkey, fkey) = (None, field)
ref_id = self.id
if ('refkey' in u):
item._update_objref(fkey, u['refkey'], ref_id)
else:
item._update_reference(fkey, ref_id)
return True<|docstring|>Commit this item to the database
Args:
ignore_errors: skip invalid components
(still reports errors)<|endoftext|> |
2e4ed6e6b1c60ad8e8318658722352053c4861603b1493c284809d86e486ae8b | def _dynamic_defaults(self, data):
'\n Applies dynamic defaults from any keys in data that start with\n an underscore, used only for new records and only if the respective\n field is not populated yet.\n\n Args:\n data: the data dict\n '
for (k, v) in list(data.items()):
if (k[0] == '_'):
fn = k[1:]
if ((fn in self.table.fields) and (fn not in data)):
data[fn] = v | Applies dynamic defaults from any keys in data that start with
an underscore, used only for new records and only if the respective
field is not populated yet.
Args:
data: the data dict | modules/s3/s3import.py | _dynamic_defaults | annehaley/eden | 205 | python | def _dynamic_defaults(self, data):
'\n Applies dynamic defaults from any keys in data that start with\n an underscore, used only for new records and only if the respective\n field is not populated yet.\n\n Args:\n data: the data dict\n '
for (k, v) in list(data.items()):
if (k[0] == '_'):
fn = k[1:]
if ((fn in self.table.fields) and (fn not in data)):
data[fn] = v | def _dynamic_defaults(self, data):
'\n Applies dynamic defaults from any keys in data that start with\n an underscore, used only for new records and only if the respective\n field is not populated yet.\n\n Args:\n data: the data dict\n '
for (k, v) in list(data.items()):
if (k[0] == '_'):
fn = k[1:]
if ((fn in self.table.fields) and (fn not in data)):
data[fn] = v<|docstring|>Applies dynamic defaults from any keys in data that start with
an underscore, used only for new records and only if the respective
field is not populated yet.
Args:
data: the data dict<|endoftext|> |
42643b242406090c287d3c7e28d47fcbceee68528d1dca6b3fdde0a7722ad483 | def _resolve_references(self):
'\n Resolve the references of this item (=look up all foreign\n keys from other items of the same job). If a foreign key\n is not yet available, it will be scheduled for later update.\n '
table = self.table
if (not table):
return
db = current.db
items = self.job.items
for reference in self.references:
entry = reference.entry
if (not entry):
continue
field = reference.field
if isinstance(field, (list, tuple)):
(pkey, fkey) = field
else:
(pkey, fkey) = ('id', field)
f = table[fkey]
if (f.type == 'json'):
is_json = True
objref = reference.objref
if (not objref):
objref = S3ObjectReferences(self.data.get(fkey))
refkey = reference.refkey
if (not refkey):
continue
else:
is_json = False
refkey = objref = None
(ktablename, _, multiple) = s3_get_foreign_key(f)
if (not ktablename):
continue
if entry.tablename:
ktablename = entry.tablename
try:
ktable = current.s3db[ktablename]
except AttributeError:
continue
item = None
fk = entry.id
if entry.item_id:
item = items[entry.item_id]
if item:
if (item.original and item.original.get('deleted') and (not item.committed)):
fk = None
else:
fk = item.id
if (fk and (pkey != 'id')):
row = db((ktable._id == fk)).select(ktable[pkey], limitby=(0, 1)).first()
if (not row):
fk = None
continue
else:
fk = row[pkey]
if fk:
if is_json:
objref.resolve(refkey[0], refkey[1], refkey[2], fk)
elif multiple:
val = self.data.get(fkey, [])
if (fk not in val):
val.append(fk)
self.data[fkey] = val
else:
self.data[fkey] = fk
else:
if ((fkey in self.data) and (not multiple) and (not is_json)):
del self.data[fkey]
if item:
update = {'item': self, 'field': fkey}
if is_json:
update['refkey'] = refkey
item.update.append(update) | Resolve the references of this item (=look up all foreign
keys from other items of the same job). If a foreign key
is not yet available, it will be scheduled for later update. | modules/s3/s3import.py | _resolve_references | annehaley/eden | 205 | python | def _resolve_references(self):
'\n Resolve the references of this item (=look up all foreign\n keys from other items of the same job). If a foreign key\n is not yet available, it will be scheduled for later update.\n '
table = self.table
if (not table):
return
db = current.db
items = self.job.items
for reference in self.references:
entry = reference.entry
if (not entry):
continue
field = reference.field
if isinstance(field, (list, tuple)):
(pkey, fkey) = field
else:
(pkey, fkey) = ('id', field)
f = table[fkey]
if (f.type == 'json'):
is_json = True
objref = reference.objref
if (not objref):
objref = S3ObjectReferences(self.data.get(fkey))
refkey = reference.refkey
if (not refkey):
continue
else:
is_json = False
refkey = objref = None
(ktablename, _, multiple) = s3_get_foreign_key(f)
if (not ktablename):
continue
if entry.tablename:
ktablename = entry.tablename
try:
ktable = current.s3db[ktablename]
except AttributeError:
continue
item = None
fk = entry.id
if entry.item_id:
item = items[entry.item_id]
if item:
if (item.original and item.original.get('deleted') and (not item.committed)):
fk = None
else:
fk = item.id
if (fk and (pkey != 'id')):
row = db((ktable._id == fk)).select(ktable[pkey], limitby=(0, 1)).first()
if (not row):
fk = None
continue
else:
fk = row[pkey]
if fk:
if is_json:
objref.resolve(refkey[0], refkey[1], refkey[2], fk)
elif multiple:
val = self.data.get(fkey, [])
if (fk not in val):
val.append(fk)
self.data[fkey] = val
else:
self.data[fkey] = fk
else:
if ((fkey in self.data) and (not multiple) and (not is_json)):
del self.data[fkey]
if item:
update = {'item': self, 'field': fkey}
if is_json:
update['refkey'] = refkey
item.update.append(update) | def _resolve_references(self):
'\n Resolve the references of this item (=look up all foreign\n keys from other items of the same job). If a foreign key\n is not yet available, it will be scheduled for later update.\n '
table = self.table
if (not table):
return
db = current.db
items = self.job.items
for reference in self.references:
entry = reference.entry
if (not entry):
continue
field = reference.field
if isinstance(field, (list, tuple)):
(pkey, fkey) = field
else:
(pkey, fkey) = ('id', field)
f = table[fkey]
if (f.type == 'json'):
is_json = True
objref = reference.objref
if (not objref):
objref = S3ObjectReferences(self.data.get(fkey))
refkey = reference.refkey
if (not refkey):
continue
else:
is_json = False
refkey = objref = None
(ktablename, _, multiple) = s3_get_foreign_key(f)
if (not ktablename):
continue
if entry.tablename:
ktablename = entry.tablename
try:
ktable = current.s3db[ktablename]
except AttributeError:
continue
item = None
fk = entry.id
if entry.item_id:
item = items[entry.item_id]
if item:
if (item.original and item.original.get('deleted') and (not item.committed)):
fk = None
else:
fk = item.id
if (fk and (pkey != 'id')):
row = db((ktable._id == fk)).select(ktable[pkey], limitby=(0, 1)).first()
if (not row):
fk = None
continue
else:
fk = row[pkey]
if fk:
if is_json:
objref.resolve(refkey[0], refkey[1], refkey[2], fk)
elif multiple:
val = self.data.get(fkey, [])
if (fk not in val):
val.append(fk)
self.data[fkey] = val
else:
self.data[fkey] = fk
else:
if ((fkey in self.data) and (not multiple) and (not is_json)):
del self.data[fkey]
if item:
update = {'item': self, 'field': fkey}
if is_json:
update['refkey'] = refkey
item.update.append(update)<|docstring|>Resolve the references of this item (=look up all foreign
keys from other items of the same job). If a foreign key
is not yet available, it will be scheduled for later update.<|endoftext|> |
539649048fda0e3701c505e0e68f5aed7b439eebf16ccc968d6a728f1286ba33 | def _update_reference(self, field, value):
'\n Helper method to update a foreign key in an already written\n record. Will be called by the referenced item after (and only\n if) it has been committed. This is only needed if the reference\n could not be resolved before commit due to circular references.\n\n Args:\n field: the field name of the foreign key\n value: the value of the foreign key\n '
table = self.table
record_id = self.id
if ((not value) or (not table) or (not record_id) or (not self.permitted)):
return
db = current.db
update = None
fieldtype = str(table[field].type)
if fieldtype.startswith('list:reference'):
query = (table._id == record_id)
record = db(query).select(table[field], limitby=(0, 1)).first()
if record:
values = record[field]
if (value not in values):
values.append(value)
update = {field: values}
else:
update = {field: value}
if update:
if ('modified_on' in table.fields):
update['modified_on'] = table.modified_on
if ('modified_by' in table.fields):
update['modified_by'] = table.modified_by
db((table._id == record_id)).update(**update) | Helper method to update a foreign key in an already written
record. Will be called by the referenced item after (and only
if) it has been committed. This is only needed if the reference
could not be resolved before commit due to circular references.
Args:
field: the field name of the foreign key
value: the value of the foreign key | modules/s3/s3import.py | _update_reference | annehaley/eden | 205 | python | def _update_reference(self, field, value):
'\n Helper method to update a foreign key in an already written\n record. Will be called by the referenced item after (and only\n if) it has been committed. This is only needed if the reference\n could not be resolved before commit due to circular references.\n\n Args:\n field: the field name of the foreign key\n value: the value of the foreign key\n '
table = self.table
record_id = self.id
if ((not value) or (not table) or (not record_id) or (not self.permitted)):
return
db = current.db
update = None
fieldtype = str(table[field].type)
if fieldtype.startswith('list:reference'):
query = (table._id == record_id)
record = db(query).select(table[field], limitby=(0, 1)).first()
if record:
values = record[field]
if (value not in values):
values.append(value)
update = {field: values}
else:
update = {field: value}
if update:
if ('modified_on' in table.fields):
update['modified_on'] = table.modified_on
if ('modified_by' in table.fields):
update['modified_by'] = table.modified_by
db((table._id == record_id)).update(**update) | def _update_reference(self, field, value):
'\n Helper method to update a foreign key in an already written\n record. Will be called by the referenced item after (and only\n if) it has been committed. This is only needed if the reference\n could not be resolved before commit due to circular references.\n\n Args:\n field: the field name of the foreign key\n value: the value of the foreign key\n '
table = self.table
record_id = self.id
if ((not value) or (not table) or (not record_id) or (not self.permitted)):
return
db = current.db
update = None
fieldtype = str(table[field].type)
if fieldtype.startswith('list:reference'):
query = (table._id == record_id)
record = db(query).select(table[field], limitby=(0, 1)).first()
if record:
values = record[field]
if (value not in values):
values.append(value)
update = {field: values}
else:
update = {field: value}
if update:
if ('modified_on' in table.fields):
update['modified_on'] = table.modified_on
if ('modified_by' in table.fields):
update['modified_by'] = table.modified_by
db((table._id == record_id)).update(**update)<|docstring|>Helper method to update a foreign key in an already written
record. Will be called by the referenced item after (and only
if) it has been committed. This is only needed if the reference
could not be resolved before commit due to circular references.
Args:
field: the field name of the foreign key
value: the value of the foreign key<|endoftext|> |
f75497e9c2022f55e245fb021ea6593619d25fce16c7823f999c3a4de140cb89 | def _update_objref(self, field, refkey, value):
'\n Update object references in a JSON field\n\n Args:\n fieldname: the name of the JSON field\n refkey: the reference key, a tuple (tablename, uidtype, uid)\n value: the foreign key value\n '
table = self.table
record_id = self.id
if ((not value) or (not table) or (not record_id) or (not self.permitted)):
return
db = current.db
query = (table._id == record_id)
record = db(query).select(table._id, table[field], limitby=(0, 1)).first()
if record:
obj = record[field]
(tn, uidtype, uid) = refkey
S3ObjectReferences(obj).resolve(tn, uidtype, uid, value)
update = {field: obj}
if ('modified_on' in table.fields):
update['modified_on'] = table.modified_on
if ('modified_by' in table.fields):
update['modified_by'] = table.modified_by
record.update_record(**update) | Update object references in a JSON field
Args:
fieldname: the name of the JSON field
refkey: the reference key, a tuple (tablename, uidtype, uid)
value: the foreign key value | modules/s3/s3import.py | _update_objref | annehaley/eden | 205 | python | def _update_objref(self, field, refkey, value):
'\n Update object references in a JSON field\n\n Args:\n fieldname: the name of the JSON field\n refkey: the reference key, a tuple (tablename, uidtype, uid)\n value: the foreign key value\n '
table = self.table
record_id = self.id
if ((not value) or (not table) or (not record_id) or (not self.permitted)):
return
db = current.db
query = (table._id == record_id)
record = db(query).select(table._id, table[field], limitby=(0, 1)).first()
if record:
obj = record[field]
(tn, uidtype, uid) = refkey
S3ObjectReferences(obj).resolve(tn, uidtype, uid, value)
update = {field: obj}
if ('modified_on' in table.fields):
update['modified_on'] = table.modified_on
if ('modified_by' in table.fields):
update['modified_by'] = table.modified_by
record.update_record(**update) | def _update_objref(self, field, refkey, value):
'\n Update object references in a JSON field\n\n Args:\n fieldname: the name of the JSON field\n refkey: the reference key, a tuple (tablename, uidtype, uid)\n value: the foreign key value\n '
table = self.table
record_id = self.id
if ((not value) or (not table) or (not record_id) or (not self.permitted)):
return
db = current.db
query = (table._id == record_id)
record = db(query).select(table._id, table[field], limitby=(0, 1)).first()
if record:
obj = record[field]
(tn, uidtype, uid) = refkey
S3ObjectReferences(obj).resolve(tn, uidtype, uid, value)
update = {field: obj}
if ('modified_on' in table.fields):
update['modified_on'] = table.modified_on
if ('modified_by' in table.fields):
update['modified_by'] = table.modified_by
record.update_record(**update)<|docstring|>Update object references in a JSON field
Args:
fieldname: the name of the JSON field
refkey: the reference key, a tuple (tablename, uidtype, uid)
value: the foreign key value<|endoftext|> |
7920143cbee40a50d47a184382fc9991abc9bc1192fd26b4d28f1b31b14f6b35 | def store(self, item_table=None):
'\n Store this item in the DB\n '
if (item_table is None):
return None
item_id = self.item_id
db = current.db
row = db((item_table.item_id == item_id)).select(item_table.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job.job_id, item_id=item_id, tablename=self.tablename, record_uid=self.uid, skip=self.skip, error=(self.error or ''))
if (self.element is not None):
element_str = current.xml.tostring(self.element, xml_declaration=False)
record.update(element=element_str)
self_data = self.data
if (self_data is not None):
table = self.table
fields = table.fields
data = Storage()
for f in self_data.keys():
if (f not in fields):
continue
field = table[f]
field_type = str(field.type)
if ((field_type == 'id') or s3_has_foreign_key(field)):
continue
data_ = self_data[f]
if isinstance(data_, Field):
continue
data.update({f: data_})
record['data'] = pickle.dumps(data)
ritems = []
for reference in self.references:
field = reference.field
entry = reference.entry
store_entry = None
if entry:
if (entry.item_id is not None):
store_entry = {'field': field, 'item_id': str(entry.item_id)}
elif (entry.uid is not None):
store_entry = {'field': field, 'tablename': entry.tablename, 'uid': str(entry.uid)}
if (store_entry is not None):
ritems.append(json.dumps(store_entry))
if ritems:
record.update(ritems=ritems)
citems = [c.item_id for c in self.components]
if citems:
record.update(citems=citems)
if self.parent:
record.update(parent=self.parent.item_id)
if record_id:
db((item_table.id == record_id)).update(**record)
else:
record_id = item_table.insert(**record)
return record_id | Store this item in the DB | modules/s3/s3import.py | store | annehaley/eden | 205 | python | def store(self, item_table=None):
'\n \n '
if (item_table is None):
return None
item_id = self.item_id
db = current.db
row = db((item_table.item_id == item_id)).select(item_table.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job.job_id, item_id=item_id, tablename=self.tablename, record_uid=self.uid, skip=self.skip, error=(self.error or ))
if (self.element is not None):
element_str = current.xml.tostring(self.element, xml_declaration=False)
record.update(element=element_str)
self_data = self.data
if (self_data is not None):
table = self.table
fields = table.fields
data = Storage()
for f in self_data.keys():
if (f not in fields):
continue
field = table[f]
field_type = str(field.type)
if ((field_type == 'id') or s3_has_foreign_key(field)):
continue
data_ = self_data[f]
if isinstance(data_, Field):
continue
data.update({f: data_})
record['data'] = pickle.dumps(data)
ritems = []
for reference in self.references:
field = reference.field
entry = reference.entry
store_entry = None
if entry:
if (entry.item_id is not None):
store_entry = {'field': field, 'item_id': str(entry.item_id)}
elif (entry.uid is not None):
store_entry = {'field': field, 'tablename': entry.tablename, 'uid': str(entry.uid)}
if (store_entry is not None):
ritems.append(json.dumps(store_entry))
if ritems:
record.update(ritems=ritems)
citems = [c.item_id for c in self.components]
if citems:
record.update(citems=citems)
if self.parent:
record.update(parent=self.parent.item_id)
if record_id:
db((item_table.id == record_id)).update(**record)
else:
record_id = item_table.insert(**record)
return record_id | def store(self, item_table=None):
'\n \n '
if (item_table is None):
return None
item_id = self.item_id
db = current.db
row = db((item_table.item_id == item_id)).select(item_table.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job.job_id, item_id=item_id, tablename=self.tablename, record_uid=self.uid, skip=self.skip, error=(self.error or ))
if (self.element is not None):
element_str = current.xml.tostring(self.element, xml_declaration=False)
record.update(element=element_str)
self_data = self.data
if (self_data is not None):
table = self.table
fields = table.fields
data = Storage()
for f in self_data.keys():
if (f not in fields):
continue
field = table[f]
field_type = str(field.type)
if ((field_type == 'id') or s3_has_foreign_key(field)):
continue
data_ = self_data[f]
if isinstance(data_, Field):
continue
data.update({f: data_})
record['data'] = pickle.dumps(data)
ritems = []
for reference in self.references:
field = reference.field
entry = reference.entry
store_entry = None
if entry:
if (entry.item_id is not None):
store_entry = {'field': field, 'item_id': str(entry.item_id)}
elif (entry.uid is not None):
store_entry = {'field': field, 'tablename': entry.tablename, 'uid': str(entry.uid)}
if (store_entry is not None):
ritems.append(json.dumps(store_entry))
if ritems:
record.update(ritems=ritems)
citems = [c.item_id for c in self.components]
if citems:
record.update(citems=citems)
if self.parent:
record.update(parent=self.parent.item_id)
if record_id:
db((item_table.id == record_id)).update(**record)
else:
record_id = item_table.insert(**record)
return record_id<|docstring|>Store this item in the DB<|endoftext|> |
75ff8580c4985f56b0413feab22e2ef7fc100dfeaa44d3be9488768c9069f27b | def restore(self, row):
'\n Restore an item from a item table row. This does not restore\n the references (since this can not be done before all items\n are restored), must call job.restore_references() to do that\n\n Args:\n row: the item table row\n '
xml = current.xml
self.item_id = row.item_id
self.accepted = None
self.permitted = False
self.committed = False
tablename = row.tablename
self.id = None
self.uid = row.record_uid
self.skip = row.skip
if (row.data is not None):
self.data = pickle.loads(row.data)
else:
self.data = Storage()
data = self.data
if (xml.MTIME in data):
self.mtime = data[xml.MTIME]
if (xml.MCI in data):
self.mci = data[xml.MCI]
UID = xml.UID
if (UID in data):
self.uid = data[UID]
self.element = etree.fromstring(row.element)
if row.citems:
self.load_components = row.citems
if row.ritems:
self.load_references = [json.loads(ritem) for ritem in row.ritems]
self.load_parent = row.parent
s3db = current.s3db
try:
table = s3db[tablename]
except AttributeError:
self.error = current.ERROR.BAD_RESOURCE
return False
else:
self.table = table
self.tablename = tablename
original = S3Resource.original(table, self.data, mandatory=self._mandatory_fields())
if (original is not None):
self.original = original
self.id = original[table._id.name]
if ((not current.response.s3.synchronise_uuids) and (UID in original)):
self.uid = self.data[UID] = original[UID]
self.error = row.error
postprocess = s3db.get_config(self.tablename, 'xml_post_parse')
if postprocess:
postprocess(self.element, self.data)
if (self.error and (not self.data)):
return False
return True | Restore an item from a item table row. This does not restore
the references (since this can not be done before all items
are restored), must call job.restore_references() to do that
Args:
row: the item table row | modules/s3/s3import.py | restore | annehaley/eden | 205 | python | def restore(self, row):
'\n Restore an item from a item table row. This does not restore\n the references (since this can not be done before all items\n are restored), must call job.restore_references() to do that\n\n Args:\n row: the item table row\n '
xml = current.xml
self.item_id = row.item_id
self.accepted = None
self.permitted = False
self.committed = False
tablename = row.tablename
self.id = None
self.uid = row.record_uid
self.skip = row.skip
if (row.data is not None):
self.data = pickle.loads(row.data)
else:
self.data = Storage()
data = self.data
if (xml.MTIME in data):
self.mtime = data[xml.MTIME]
if (xml.MCI in data):
self.mci = data[xml.MCI]
UID = xml.UID
if (UID in data):
self.uid = data[UID]
self.element = etree.fromstring(row.element)
if row.citems:
self.load_components = row.citems
if row.ritems:
self.load_references = [json.loads(ritem) for ritem in row.ritems]
self.load_parent = row.parent
s3db = current.s3db
try:
table = s3db[tablename]
except AttributeError:
self.error = current.ERROR.BAD_RESOURCE
return False
else:
self.table = table
self.tablename = tablename
original = S3Resource.original(table, self.data, mandatory=self._mandatory_fields())
if (original is not None):
self.original = original
self.id = original[table._id.name]
if ((not current.response.s3.synchronise_uuids) and (UID in original)):
self.uid = self.data[UID] = original[UID]
self.error = row.error
postprocess = s3db.get_config(self.tablename, 'xml_post_parse')
if postprocess:
postprocess(self.element, self.data)
if (self.error and (not self.data)):
return False
return True | def restore(self, row):
'\n Restore an item from a item table row. This does not restore\n the references (since this can not be done before all items\n are restored), must call job.restore_references() to do that\n\n Args:\n row: the item table row\n '
xml = current.xml
self.item_id = row.item_id
self.accepted = None
self.permitted = False
self.committed = False
tablename = row.tablename
self.id = None
self.uid = row.record_uid
self.skip = row.skip
if (row.data is not None):
self.data = pickle.loads(row.data)
else:
self.data = Storage()
data = self.data
if (xml.MTIME in data):
self.mtime = data[xml.MTIME]
if (xml.MCI in data):
self.mci = data[xml.MCI]
UID = xml.UID
if (UID in data):
self.uid = data[UID]
self.element = etree.fromstring(row.element)
if row.citems:
self.load_components = row.citems
if row.ritems:
self.load_references = [json.loads(ritem) for ritem in row.ritems]
self.load_parent = row.parent
s3db = current.s3db
try:
table = s3db[tablename]
except AttributeError:
self.error = current.ERROR.BAD_RESOURCE
return False
else:
self.table = table
self.tablename = tablename
original = S3Resource.original(table, self.data, mandatory=self._mandatory_fields())
if (original is not None):
self.original = original
self.id = original[table._id.name]
if ((not current.response.s3.synchronise_uuids) and (UID in original)):
self.uid = self.data[UID] = original[UID]
self.error = row.error
postprocess = s3db.get_config(self.tablename, 'xml_post_parse')
if postprocess:
postprocess(self.element, self.data)
if (self.error and (not self.data)):
return False
return True<|docstring|>Restore an item from a item table row. This does not restore
the references (since this can not be done before all items
are restored), must call job.restore_references() to do that
Args:
row: the item table row<|endoftext|> |
ec0e9cd5fe4b992911c549c7f3894b7eea6e1bab3efb56fee6f85756c001c286 | def __init__(self, table, tree=None, files=None, job_id=None, strategy=None, update_policy=None, conflict_policy=None, last_sync=None, onconflict=None):
'\n Args:\n tree: the element tree to import\n files: files attached to the import (for upload fields)\n job_id: restore job from database (record ID or job_id)\n strategy: the import strategy\n update_policy: the update policy\n conflict_policy: the conflict resolution policy\n last_sync: the last synchronization time stamp (datetime)\n onconflict: custom conflict resolver function\n '
self.error = None
self.error_tree = etree.Element(current.xml.TAG.root)
self.table = table
self.tree = tree
self.files = files
self.directory = Storage()
self._uidmap = None
self.mandatory_fields = Storage()
self.elements = Storage()
self.items = Storage()
self.references = []
self.job_table = None
self.item_table = None
self.count = 0
self.created = []
self.updated = []
self.deleted = []
self.log = None
if (strategy is None):
METHOD = S3ImportItem.METHOD
strategy = (METHOD.CREATE, METHOD.UPDATE, METHOD.DELETE, METHOD.MERGE)
if (not isinstance(strategy, (tuple, list))):
strategy = [strategy]
self.strategy = strategy
if update_policy:
self.update_policy = update_policy
else:
self.update_policy = S3ImportItem.POLICY.OTHER
if conflict_policy:
self.conflict_policy = conflict_policy
else:
self.conflict_policy = S3ImportItem.POLICY.OTHER
self.mtime = None
self.last_sync = last_sync
self.onconflict = onconflict
if job_id:
self.__define_tables()
jobtable = self.job_table
if str(job_id).isdigit():
query = (jobtable.id == job_id)
else:
query = (jobtable.job_id == job_id)
row = current.db(query).select(jobtable.job_id, jobtable.tablename, limitby=(0, 1)).first()
if (not row):
raise SyntaxError('Job record not found')
self.job_id = row.job_id
self.second_pass = True
if (not self.table):
tablename = row.tablename
try:
table = current.s3db[tablename]
except AttributeError:
pass
else:
self.job_id = uuid.uuid4()
self.second_pass = False | Args:
tree: the element tree to import
files: files attached to the import (for upload fields)
job_id: restore job from database (record ID or job_id)
strategy: the import strategy
update_policy: the update policy
conflict_policy: the conflict resolution policy
last_sync: the last synchronization time stamp (datetime)
onconflict: custom conflict resolver function | modules/s3/s3import.py | __init__ | annehaley/eden | 205 | python | def __init__(self, table, tree=None, files=None, job_id=None, strategy=None, update_policy=None, conflict_policy=None, last_sync=None, onconflict=None):
'\n Args:\n tree: the element tree to import\n files: files attached to the import (for upload fields)\n job_id: restore job from database (record ID or job_id)\n strategy: the import strategy\n update_policy: the update policy\n conflict_policy: the conflict resolution policy\n last_sync: the last synchronization time stamp (datetime)\n onconflict: custom conflict resolver function\n '
self.error = None
self.error_tree = etree.Element(current.xml.TAG.root)
self.table = table
self.tree = tree
self.files = files
self.directory = Storage()
self._uidmap = None
self.mandatory_fields = Storage()
self.elements = Storage()
self.items = Storage()
self.references = []
self.job_table = None
self.item_table = None
self.count = 0
self.created = []
self.updated = []
self.deleted = []
self.log = None
if (strategy is None):
METHOD = S3ImportItem.METHOD
strategy = (METHOD.CREATE, METHOD.UPDATE, METHOD.DELETE, METHOD.MERGE)
if (not isinstance(strategy, (tuple, list))):
strategy = [strategy]
self.strategy = strategy
if update_policy:
self.update_policy = update_policy
else:
self.update_policy = S3ImportItem.POLICY.OTHER
if conflict_policy:
self.conflict_policy = conflict_policy
else:
self.conflict_policy = S3ImportItem.POLICY.OTHER
self.mtime = None
self.last_sync = last_sync
self.onconflict = onconflict
if job_id:
self.__define_tables()
jobtable = self.job_table
if str(job_id).isdigit():
query = (jobtable.id == job_id)
else:
query = (jobtable.job_id == job_id)
row = current.db(query).select(jobtable.job_id, jobtable.tablename, limitby=(0, 1)).first()
if (not row):
raise SyntaxError('Job record not found')
self.job_id = row.job_id
self.second_pass = True
if (not self.table):
tablename = row.tablename
try:
table = current.s3db[tablename]
except AttributeError:
pass
else:
self.job_id = uuid.uuid4()
self.second_pass = False | def __init__(self, table, tree=None, files=None, job_id=None, strategy=None, update_policy=None, conflict_policy=None, last_sync=None, onconflict=None):
'\n Args:\n tree: the element tree to import\n files: files attached to the import (for upload fields)\n job_id: restore job from database (record ID or job_id)\n strategy: the import strategy\n update_policy: the update policy\n conflict_policy: the conflict resolution policy\n last_sync: the last synchronization time stamp (datetime)\n onconflict: custom conflict resolver function\n '
self.error = None
self.error_tree = etree.Element(current.xml.TAG.root)
self.table = table
self.tree = tree
self.files = files
self.directory = Storage()
self._uidmap = None
self.mandatory_fields = Storage()
self.elements = Storage()
self.items = Storage()
self.references = []
self.job_table = None
self.item_table = None
self.count = 0
self.created = []
self.updated = []
self.deleted = []
self.log = None
if (strategy is None):
METHOD = S3ImportItem.METHOD
strategy = (METHOD.CREATE, METHOD.UPDATE, METHOD.DELETE, METHOD.MERGE)
if (not isinstance(strategy, (tuple, list))):
strategy = [strategy]
self.strategy = strategy
if update_policy:
self.update_policy = update_policy
else:
self.update_policy = S3ImportItem.POLICY.OTHER
if conflict_policy:
self.conflict_policy = conflict_policy
else:
self.conflict_policy = S3ImportItem.POLICY.OTHER
self.mtime = None
self.last_sync = last_sync
self.onconflict = onconflict
if job_id:
self.__define_tables()
jobtable = self.job_table
if str(job_id).isdigit():
query = (jobtable.id == job_id)
else:
query = (jobtable.job_id == job_id)
row = current.db(query).select(jobtable.job_id, jobtable.tablename, limitby=(0, 1)).first()
if (not row):
raise SyntaxError('Job record not found')
self.job_id = row.job_id
self.second_pass = True
if (not self.table):
tablename = row.tablename
try:
table = current.s3db[tablename]
except AttributeError:
pass
else:
self.job_id = uuid.uuid4()
self.second_pass = False<|docstring|>Args:
tree: the element tree to import
files: files attached to the import (for upload fields)
job_id: restore job from database (record ID or job_id)
strategy: the import strategy
update_policy: the update policy
conflict_policy: the conflict resolution policy
last_sync: the last synchronization time stamp (datetime)
onconflict: custom conflict resolver function<|endoftext|> |
90fb07ed096c8bc50e408e1ad6a9ca38cb1b3b0d6539892d0233cfd198855bc7 | @property
def uidmap(self):
'\n Map uuid/tuid => element, for faster reference lookups\n '
uidmap = self._uidmap
tree = self.tree
if ((uidmap is None) and (tree is not None)):
root = (tree if isinstance(tree, etree._Element) else tree.getroot())
xml = current.xml
UUID = xml.UID
TUID = xml.ATTRIBUTE.tuid
NAME = xml.ATTRIBUTE.name
elements = root.xpath(('.//%s' % xml.TAG.resource))
self._uidmap = uidmap = {UUID: {}, TUID: {}}
uuidmap = uidmap[UUID]
tuidmap = uidmap[TUID]
for element in elements:
name = element.get(NAME)
r_uuid = element.get(UUID)
if (r_uuid and (r_uuid not in uuidmap)):
uuidmap[(name, r_uuid)] = element
r_tuid = element.get(TUID)
if (r_tuid and (r_tuid not in tuidmap)):
tuidmap[(name, r_tuid)] = element
return uidmap | Map uuid/tuid => element, for faster reference lookups | modules/s3/s3import.py | uidmap | annehaley/eden | 205 | python | @property
def uidmap(self):
'\n \n '
uidmap = self._uidmap
tree = self.tree
if ((uidmap is None) and (tree is not None)):
root = (tree if isinstance(tree, etree._Element) else tree.getroot())
xml = current.xml
UUID = xml.UID
TUID = xml.ATTRIBUTE.tuid
NAME = xml.ATTRIBUTE.name
elements = root.xpath(('.//%s' % xml.TAG.resource))
self._uidmap = uidmap = {UUID: {}, TUID: {}}
uuidmap = uidmap[UUID]
tuidmap = uidmap[TUID]
for element in elements:
name = element.get(NAME)
r_uuid = element.get(UUID)
if (r_uuid and (r_uuid not in uuidmap)):
uuidmap[(name, r_uuid)] = element
r_tuid = element.get(TUID)
if (r_tuid and (r_tuid not in tuidmap)):
tuidmap[(name, r_tuid)] = element
return uidmap | @property
def uidmap(self):
'\n \n '
uidmap = self._uidmap
tree = self.tree
if ((uidmap is None) and (tree is not None)):
root = (tree if isinstance(tree, etree._Element) else tree.getroot())
xml = current.xml
UUID = xml.UID
TUID = xml.ATTRIBUTE.tuid
NAME = xml.ATTRIBUTE.name
elements = root.xpath(('.//%s' % xml.TAG.resource))
self._uidmap = uidmap = {UUID: {}, TUID: {}}
uuidmap = uidmap[UUID]
tuidmap = uidmap[TUID]
for element in elements:
name = element.get(NAME)
r_uuid = element.get(UUID)
if (r_uuid and (r_uuid not in uuidmap)):
uuidmap[(name, r_uuid)] = element
r_tuid = element.get(TUID)
if (r_tuid and (r_tuid not in tuidmap)):
tuidmap[(name, r_tuid)] = element
return uidmap<|docstring|>Map uuid/tuid => element, for faster reference lookups<|endoftext|> |
c97bc4ac351bd87933f93fa2a04ece875045517730cd0958ca845eee6b59cc03 | def add_item(self, element=None, original=None, components=None, parent=None, joinby=None):
'\n Parse and validate an XML element and add it as new item\n to the job.\n\n Args:\n element: the element\n original: the original DB record (if already available,\n will otherwise be looked-up by this function)\n components: a dictionary of components (as in S3Resource)\n to include in the job (defaults to all\n defined components)\n parent: the parent item (if this is a component)\n joinby: the component join key(s) (if this is a component)\n\n Returns:\n A unique identifier for the new item, or None if there\n was an error. self.error contains the last error, and\n self.error_tree an element tree with all failing elements\n including error attributes.\n '
if (element in self.elements):
return self.elements[element]
item = S3ImportItem(self)
item_id = item.item_id
self.items[item_id] = item
if (element is not None):
self.elements[element] = item_id
if (not item.parse(element, original=original, files=self.files)):
self.error = item.error
item.accepted = False
if (parent is None):
self.error_tree.append(deepcopy(item.element))
else:
table = item.table
s3db = current.s3db
components = s3db.get_components(table, names=components)
super_keys = s3db.get_super_keys(table)
cnames = Storage()
cinfos = Storage()
for alias in components:
component = components[alias]
ctable = component.table
if ((ctable._id != 'id') and ('instance_type' in ctable.fields)):
continue
pkey = component.pkey
if ((pkey != table._id.name) and (pkey not in super_keys)):
continue
if component.linktable:
ctable = component.linktable
fkey = component.lkey
else:
fkey = component.fkey
ctablename = ctable._tablename
if (ctablename in cnames):
cnames[ctablename].append(alias)
else:
cnames[ctablename] = [alias]
cinfos[(ctablename, alias)] = Storage(component=component, ctable=ctable, pkey=pkey, fkey=fkey, first=True)
add_item = self.add_item
xml = current.xml
UID = xml.UID
for celement in xml.components(element, names=list(cnames.keys())):
ctablename = celement.get(xml.ATTRIBUTE.name, None)
if ((not ctablename) or (ctablename not in cnames)):
continue
calias = celement.get(xml.ATTRIBUTE.alias, None)
if (calias is None):
aliases = cnames[ctablename]
if (len(aliases) == 1):
calias = aliases[0]
else:
calias = ctablename.split('_', 1)[1]
if ((ctablename, calias) not in cinfos):
continue
else:
cinfo = cinfos[(ctablename, calias)]
component = cinfo.component
ctable = cinfo.ctable
pkey = cinfo.pkey
fkey = cinfo.fkey
original = None
if (not component.multiple):
if (not cinfo.first):
continue
cinfo.first = False
if item.id:
db = current.db
query = ((table.id == item.id) & (table[pkey] == ctable[fkey]))
if (UID in ctable.fields):
row = db(query).select(ctable[UID], limitby=(0, 1)).first()
if row:
original = row[UID]
else:
original = db(query).select(ctable.ALL, limitby=(0, 1)).first()
item_id = add_item(element=celement, original=original, parent=item, joinby=(pkey, fkey))
if (item_id is None):
item.error = self.error
self.error_tree.append(deepcopy(item.element))
else:
citem = self.items[item_id]
citem.parent = item
item.components.append(citem)
lookahead = self.lookahead
directory = self.directory
table = item.table
data = item.data
tree = self.tree
def schedule(reference):
' Schedule a referenced item for implicit import '
entry = reference.entry
if (entry and (entry.element is not None) and (not entry.item_id)):
item_id = add_item(element=entry.element)
if item_id:
entry.item_id = item_id
if (tree is not None):
fields = [table[f] for f in table.fields]
rfields = [f for f in fields if s3_has_foreign_key(f)]
item.references = lookahead(element, table=table, fields=rfields, tree=tree, directory=directory)
for reference in item.references:
schedule(reference)
references = item.references
rappend = references.append
if (parent is not None):
entry = Storage(item_id=parent.item_id, element=parent.element, tablename=parent.tablename)
rappend(Storage(field=joinby, entry=entry))
json_references = s3db.get_config(table, 'json_references')
if json_references:
if (json_references is True):
fields = table.fields
else:
fields = json_references
if (not isinstance(fields, (tuple, list))):
fields = [fields]
for fieldname in fields:
value = data.get(fieldname)
field = table[fieldname]
if (value and (field.type == 'json')):
objref = S3ObjectReferences(value)
for ref in objref.refs:
rl = lookahead(None, tree=tree, directory=directory, lookup=ref)
if rl:
reference = rl[0]
schedule(reference)
rappend(Storage(field=fieldname, objref=objref, refkey=ref, entry=reference.entry))
deleted = data.get(xml.DELETED, False)
if deleted:
fieldname = xml.REPLACEDBY
replaced_by = data.get(fieldname)
if replaced_by:
rl = lookahead(element, tree=tree, directory=directory, lookup=(table, replaced_by))
if rl:
reference = rl[0]
schedule(reference)
rappend(Storage(field=fieldname, entry=reference.entry))
return item.item_id | Parse and validate an XML element and add it as new item
to the job.
Args:
element: the element
original: the original DB record (if already available,
will otherwise be looked-up by this function)
components: a dictionary of components (as in S3Resource)
to include in the job (defaults to all
defined components)
parent: the parent item (if this is a component)
joinby: the component join key(s) (if this is a component)
Returns:
A unique identifier for the new item, or None if there
was an error. self.error contains the last error, and
self.error_tree an element tree with all failing elements
including error attributes. | modules/s3/s3import.py | add_item | annehaley/eden | 205 | python | def add_item(self, element=None, original=None, components=None, parent=None, joinby=None):
'\n Parse and validate an XML element and add it as new item\n to the job.\n\n Args:\n element: the element\n original: the original DB record (if already available,\n will otherwise be looked-up by this function)\n components: a dictionary of components (as in S3Resource)\n to include in the job (defaults to all\n defined components)\n parent: the parent item (if this is a component)\n joinby: the component join key(s) (if this is a component)\n\n Returns:\n A unique identifier for the new item, or None if there\n was an error. self.error contains the last error, and\n self.error_tree an element tree with all failing elements\n including error attributes.\n '
if (element in self.elements):
return self.elements[element]
item = S3ImportItem(self)
item_id = item.item_id
self.items[item_id] = item
if (element is not None):
self.elements[element] = item_id
if (not item.parse(element, original=original, files=self.files)):
self.error = item.error
item.accepted = False
if (parent is None):
self.error_tree.append(deepcopy(item.element))
else:
table = item.table
s3db = current.s3db
components = s3db.get_components(table, names=components)
super_keys = s3db.get_super_keys(table)
cnames = Storage()
cinfos = Storage()
for alias in components:
component = components[alias]
ctable = component.table
if ((ctable._id != 'id') and ('instance_type' in ctable.fields)):
continue
pkey = component.pkey
if ((pkey != table._id.name) and (pkey not in super_keys)):
continue
if component.linktable:
ctable = component.linktable
fkey = component.lkey
else:
fkey = component.fkey
ctablename = ctable._tablename
if (ctablename in cnames):
cnames[ctablename].append(alias)
else:
cnames[ctablename] = [alias]
cinfos[(ctablename, alias)] = Storage(component=component, ctable=ctable, pkey=pkey, fkey=fkey, first=True)
add_item = self.add_item
xml = current.xml
UID = xml.UID
for celement in xml.components(element, names=list(cnames.keys())):
ctablename = celement.get(xml.ATTRIBUTE.name, None)
if ((not ctablename) or (ctablename not in cnames)):
continue
calias = celement.get(xml.ATTRIBUTE.alias, None)
if (calias is None):
aliases = cnames[ctablename]
if (len(aliases) == 1):
calias = aliases[0]
else:
calias = ctablename.split('_', 1)[1]
if ((ctablename, calias) not in cinfos):
continue
else:
cinfo = cinfos[(ctablename, calias)]
component = cinfo.component
ctable = cinfo.ctable
pkey = cinfo.pkey
fkey = cinfo.fkey
original = None
if (not component.multiple):
if (not cinfo.first):
continue
cinfo.first = False
if item.id:
db = current.db
query = ((table.id == item.id) & (table[pkey] == ctable[fkey]))
if (UID in ctable.fields):
row = db(query).select(ctable[UID], limitby=(0, 1)).first()
if row:
original = row[UID]
else:
original = db(query).select(ctable.ALL, limitby=(0, 1)).first()
item_id = add_item(element=celement, original=original, parent=item, joinby=(pkey, fkey))
if (item_id is None):
item.error = self.error
self.error_tree.append(deepcopy(item.element))
else:
citem = self.items[item_id]
citem.parent = item
item.components.append(citem)
lookahead = self.lookahead
directory = self.directory
table = item.table
data = item.data
tree = self.tree
def schedule(reference):
' Schedule a referenced item for implicit import '
entry = reference.entry
if (entry and (entry.element is not None) and (not entry.item_id)):
item_id = add_item(element=entry.element)
if item_id:
entry.item_id = item_id
if (tree is not None):
fields = [table[f] for f in table.fields]
rfields = [f for f in fields if s3_has_foreign_key(f)]
item.references = lookahead(element, table=table, fields=rfields, tree=tree, directory=directory)
for reference in item.references:
schedule(reference)
references = item.references
rappend = references.append
if (parent is not None):
entry = Storage(item_id=parent.item_id, element=parent.element, tablename=parent.tablename)
rappend(Storage(field=joinby, entry=entry))
json_references = s3db.get_config(table, 'json_references')
if json_references:
if (json_references is True):
fields = table.fields
else:
fields = json_references
if (not isinstance(fields, (tuple, list))):
fields = [fields]
for fieldname in fields:
value = data.get(fieldname)
field = table[fieldname]
if (value and (field.type == 'json')):
objref = S3ObjectReferences(value)
for ref in objref.refs:
rl = lookahead(None, tree=tree, directory=directory, lookup=ref)
if rl:
reference = rl[0]
schedule(reference)
rappend(Storage(field=fieldname, objref=objref, refkey=ref, entry=reference.entry))
deleted = data.get(xml.DELETED, False)
if deleted:
fieldname = xml.REPLACEDBY
replaced_by = data.get(fieldname)
if replaced_by:
rl = lookahead(element, tree=tree, directory=directory, lookup=(table, replaced_by))
if rl:
reference = rl[0]
schedule(reference)
rappend(Storage(field=fieldname, entry=reference.entry))
return item.item_id | def add_item(self, element=None, original=None, components=None, parent=None, joinby=None):
'\n Parse and validate an XML element and add it as new item\n to the job.\n\n Args:\n element: the element\n original: the original DB record (if already available,\n will otherwise be looked-up by this function)\n components: a dictionary of components (as in S3Resource)\n to include in the job (defaults to all\n defined components)\n parent: the parent item (if this is a component)\n joinby: the component join key(s) (if this is a component)\n\n Returns:\n A unique identifier for the new item, or None if there\n was an error. self.error contains the last error, and\n self.error_tree an element tree with all failing elements\n including error attributes.\n '
if (element in self.elements):
return self.elements[element]
item = S3ImportItem(self)
item_id = item.item_id
self.items[item_id] = item
if (element is not None):
self.elements[element] = item_id
if (not item.parse(element, original=original, files=self.files)):
self.error = item.error
item.accepted = False
if (parent is None):
self.error_tree.append(deepcopy(item.element))
else:
table = item.table
s3db = current.s3db
components = s3db.get_components(table, names=components)
super_keys = s3db.get_super_keys(table)
cnames = Storage()
cinfos = Storage()
for alias in components:
component = components[alias]
ctable = component.table
if ((ctable._id != 'id') and ('instance_type' in ctable.fields)):
continue
pkey = component.pkey
if ((pkey != table._id.name) and (pkey not in super_keys)):
continue
if component.linktable:
ctable = component.linktable
fkey = component.lkey
else:
fkey = component.fkey
ctablename = ctable._tablename
if (ctablename in cnames):
cnames[ctablename].append(alias)
else:
cnames[ctablename] = [alias]
cinfos[(ctablename, alias)] = Storage(component=component, ctable=ctable, pkey=pkey, fkey=fkey, first=True)
add_item = self.add_item
xml = current.xml
UID = xml.UID
for celement in xml.components(element, names=list(cnames.keys())):
ctablename = celement.get(xml.ATTRIBUTE.name, None)
if ((not ctablename) or (ctablename not in cnames)):
continue
calias = celement.get(xml.ATTRIBUTE.alias, None)
if (calias is None):
aliases = cnames[ctablename]
if (len(aliases) == 1):
calias = aliases[0]
else:
calias = ctablename.split('_', 1)[1]
if ((ctablename, calias) not in cinfos):
continue
else:
cinfo = cinfos[(ctablename, calias)]
component = cinfo.component
ctable = cinfo.ctable
pkey = cinfo.pkey
fkey = cinfo.fkey
original = None
if (not component.multiple):
if (not cinfo.first):
continue
cinfo.first = False
if item.id:
db = current.db
query = ((table.id == item.id) & (table[pkey] == ctable[fkey]))
if (UID in ctable.fields):
row = db(query).select(ctable[UID], limitby=(0, 1)).first()
if row:
original = row[UID]
else:
original = db(query).select(ctable.ALL, limitby=(0, 1)).first()
item_id = add_item(element=celement, original=original, parent=item, joinby=(pkey, fkey))
if (item_id is None):
item.error = self.error
self.error_tree.append(deepcopy(item.element))
else:
citem = self.items[item_id]
citem.parent = item
item.components.append(citem)
lookahead = self.lookahead
directory = self.directory
table = item.table
data = item.data
tree = self.tree
def schedule(reference):
' Schedule a referenced item for implicit import '
entry = reference.entry
if (entry and (entry.element is not None) and (not entry.item_id)):
item_id = add_item(element=entry.element)
if item_id:
entry.item_id = item_id
if (tree is not None):
fields = [table[f] for f in table.fields]
rfields = [f for f in fields if s3_has_foreign_key(f)]
item.references = lookahead(element, table=table, fields=rfields, tree=tree, directory=directory)
for reference in item.references:
schedule(reference)
references = item.references
rappend = references.append
if (parent is not None):
entry = Storage(item_id=parent.item_id, element=parent.element, tablename=parent.tablename)
rappend(Storage(field=joinby, entry=entry))
json_references = s3db.get_config(table, 'json_references')
if json_references:
if (json_references is True):
fields = table.fields
else:
fields = json_references
if (not isinstance(fields, (tuple, list))):
fields = [fields]
for fieldname in fields:
value = data.get(fieldname)
field = table[fieldname]
if (value and (field.type == 'json')):
objref = S3ObjectReferences(value)
for ref in objref.refs:
rl = lookahead(None, tree=tree, directory=directory, lookup=ref)
if rl:
reference = rl[0]
schedule(reference)
rappend(Storage(field=fieldname, objref=objref, refkey=ref, entry=reference.entry))
deleted = data.get(xml.DELETED, False)
if deleted:
fieldname = xml.REPLACEDBY
replaced_by = data.get(fieldname)
if replaced_by:
rl = lookahead(element, tree=tree, directory=directory, lookup=(table, replaced_by))
if rl:
reference = rl[0]
schedule(reference)
rappend(Storage(field=fieldname, entry=reference.entry))
return item.item_id<|docstring|>Parse and validate an XML element and add it as new item
to the job.
Args:
element: the element
original: the original DB record (if already available,
will otherwise be looked-up by this function)
components: a dictionary of components (as in S3Resource)
to include in the job (defaults to all
defined components)
parent: the parent item (if this is a component)
joinby: the component join key(s) (if this is a component)
Returns:
A unique identifier for the new item, or None if there
was an error. self.error contains the last error, and
self.error_tree an element tree with all failing elements
including error attributes.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.