hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c34f71509c416f907057a5acc2be974f7718754 | 233 | py | Python | napari/_qt/layers/qt_pyramid_layer.py | arokem/napari | e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/layers/qt_pyramid_layer.py | arokem/napari | e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd | [
"BSD-3-Clause"
] | 1 | 2019-09-18T22:59:55.000Z | 2019-09-23T16:41:08.000Z | napari/_qt/layers/qt_pyramid_layer.py | arokem/napari | e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd | [
"BSD-3-Clause"
] | null | null | null | from ...layers import Pyramid
from .qt_image_layer import QtImageControls, QtImageProperties
class QtPyramidControls(QtImageControls, layer=Pyramid):
pass
class QtPyramidProperties(QtImageProperties, layer=Pyramid):
pass
| 21.181818 | 62 | 0.811159 | from ...layers import Pyramid
from .qt_image_layer import QtImageControls, QtImageProperties
class QtPyramidControls(QtImageControls, layer=Pyramid):
pass
class QtPyramidProperties(QtImageProperties, layer=Pyramid):
pass
| true | true |
1c34f7365ae533c7a016c65aaf031f4281d86efd | 1,955 | py | Python | PyObjCTest/test_nsanimation.py | Khan/pyobjc-framework-Cocoa | f8b015ea2a72d8d78be6084fb12925c4785b8f1f | [
"MIT"
] | 132 | 2015-01-01T10:02:42.000Z | 2022-03-09T12:51:01.000Z | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsanimation.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 6 | 2015-01-06T08:23:19.000Z | 2019-03-14T12:22:06.000Z | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsanimation.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 27 | 2015-02-23T11:51:43.000Z | 2022-03-07T02:34:18.000Z |
from PyObjCTools.TestSupport import *
from AppKit import *
try:
unicode
except NameError:
unicode = str
class TestNSAnimationHelper (NSObject):
def animationShouldStart_(self, animation): return 1
def animation_valueForProgress_(self, a, b): return 1
def animation_didReachProgressMark_(self, a, b): return 1
class TestNSAnimation (TestCase):
def testConstants(self):
self.assertEqual(NSAnimationEaseInOut, 0)
self.assertEqual(NSAnimationEaseIn, 1)
self.assertEqual(NSAnimationEaseOut, 2)
self.assertEqual(NSAnimationLinear, 3)
self.assertEqual(NSAnimationBlocking, 0)
self.assertEqual(NSAnimationNonblocking, 1)
self.assertEqual(NSAnimationNonblockingThreaded, 2)
self.assertIsInstance(NSAnimationProgressMarkNotification, unicode)
self.assertIsInstance(NSAnimationProgressMark, unicode)
self.assertIsInstance(NSViewAnimationTargetKey, unicode)
self.assertIsInstance(NSViewAnimationStartFrameKey, unicode)
self.assertIsInstance(NSViewAnimationEndFrameKey, unicode)
self.assertIsInstance(NSViewAnimationEffectKey, unicode)
self.assertIsInstance(NSViewAnimationFadeInEffect, unicode)
self.assertIsInstance(NSViewAnimationFadeOutEffect, unicode)
self.assertIsInstance(NSAnimationTriggerOrderIn, unicode)
self.assertIsInstance(NSAnimationTriggerOrderOut, unicode)
def testMethods(self):
self.assertResultIsBOOL(NSAnimation.isAnimating)
def testProtocol(self):
self.assertResultIsBOOL(TestNSAnimationHelper.animationShouldStart_)
self.assertResultHasType(TestNSAnimationHelper.animation_valueForProgress_, objc._C_FLT)
self.assertArgHasType(TestNSAnimationHelper.animation_valueForProgress_, 1, objc._C_FLT)
self.assertArgHasType(TestNSAnimationHelper.animation_didReachProgressMark_, 1, objc._C_FLT)
if __name__ == "__main__":
main()
| 38.333333 | 100 | 0.767263 |
from PyObjCTools.TestSupport import *
from AppKit import *
try:
unicode
except NameError:
unicode = str
class TestNSAnimationHelper (NSObject):
def animationShouldStart_(self, animation): return 1
def animation_valueForProgress_(self, a, b): return 1
def animation_didReachProgressMark_(self, a, b): return 1
class TestNSAnimation (TestCase):
def testConstants(self):
self.assertEqual(NSAnimationEaseInOut, 0)
self.assertEqual(NSAnimationEaseIn, 1)
self.assertEqual(NSAnimationEaseOut, 2)
self.assertEqual(NSAnimationLinear, 3)
self.assertEqual(NSAnimationBlocking, 0)
self.assertEqual(NSAnimationNonblocking, 1)
self.assertEqual(NSAnimationNonblockingThreaded, 2)
self.assertIsInstance(NSAnimationProgressMarkNotification, unicode)
self.assertIsInstance(NSAnimationProgressMark, unicode)
self.assertIsInstance(NSViewAnimationTargetKey, unicode)
self.assertIsInstance(NSViewAnimationStartFrameKey, unicode)
self.assertIsInstance(NSViewAnimationEndFrameKey, unicode)
self.assertIsInstance(NSViewAnimationEffectKey, unicode)
self.assertIsInstance(NSViewAnimationFadeInEffect, unicode)
self.assertIsInstance(NSViewAnimationFadeOutEffect, unicode)
self.assertIsInstance(NSAnimationTriggerOrderIn, unicode)
self.assertIsInstance(NSAnimationTriggerOrderOut, unicode)
def testMethods(self):
self.assertResultIsBOOL(NSAnimation.isAnimating)
def testProtocol(self):
self.assertResultIsBOOL(TestNSAnimationHelper.animationShouldStart_)
self.assertResultHasType(TestNSAnimationHelper.animation_valueForProgress_, objc._C_FLT)
self.assertArgHasType(TestNSAnimationHelper.animation_valueForProgress_, 1, objc._C_FLT)
self.assertArgHasType(TestNSAnimationHelper.animation_didReachProgressMark_, 1, objc._C_FLT)
if __name__ == "__main__":
main()
| true | true |
1c34f80eecfcae2605ef92175008884d8d327424 | 7,303 | py | Python | research/slim/nets/nets_factory.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 153 | 2020-10-25T13:58:04.000Z | 2022-03-07T06:01:54.000Z | research/slim/nets/nets_factory.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 11 | 2020-07-13T08:29:00.000Z | 2022-03-24T07:21:09.000Z | research/slim/nets/nets_factory.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 23 | 2020-10-25T14:44:47.000Z | 2021-03-31T02:12:13.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import slim as contrib_slim
from nets import alexnet
from nets import cifarnet
from nets import i3d
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import s3dg
from nets import vgg
from nets.mobilenet import mobilenet_v2
from nets.mobilenet import mobilenet_v3
from nets.nasnet import nasnet
from nets.nasnet import pnasnet
slim = contrib_slim
networks_map = {
'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'i3d': i3d.i3d,
's3dg': s3dg.s3dg,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
'mobilenet_v2': mobilenet_v2.mobilenet,
'mobilenet_v2_140': mobilenet_v2.mobilenet_v2_140,
'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
'mobilenet_v3_small': mobilenet_v3.small,
'mobilenet_v3_large': mobilenet_v3.large,
'mobilenet_v3_small_minimalistic': mobilenet_v3.small_minimalistic,
'mobilenet_v3_large_minimalistic': mobilenet_v3.large_minimalistic,
'mobilenet_edgetpu': mobilenet_v3.edge_tpu,
'mobilenet_edgetpu_075': mobilenet_v3.edge_tpu_075,
'nasnet_cifar': nasnet.build_nasnet_cifar,
'nasnet_mobile': nasnet.build_nasnet_mobile,
'nasnet_large': nasnet.build_nasnet_large,
'pnasnet_large': pnasnet.build_pnasnet_large,
'pnasnet_mobile': pnasnet.build_pnasnet_mobile,
}
arg_scopes_map = {
'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2': inception.inception_resnet_v2_arg_scope,
'i3d': i3d.i3d_arg_scope,
's3dg': s3dg.s3dg_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v2': mobilenet_v2.training_scope,
'mobilenet_v2_035': mobilenet_v2.training_scope,
'mobilenet_v2_140': mobilenet_v2.training_scope,
'mobilenet_v3_small': mobilenet_v3.training_scope,
'mobilenet_v3_large': mobilenet_v3.training_scope,
'mobilenet_v3_small_minimalistic': mobilenet_v3.training_scope,
'mobilenet_v3_large_minimalistic': mobilenet_v3.training_scope,
'mobilenet_edgetpu': mobilenet_v3.training_scope,
'mobilenet_edgetpu_075': mobilenet_v3.training_scope,
'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
'nasnet_large': nasnet.nasnet_large_arg_scope,
'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification. If 0 or None,
the logits layer is omitted and its input features are returned instead.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
net, end_points = network_fn(images)
The `images` input is a tensor of shape [batch_size, height, width, 3 or
1] with height = width = network_fn.default_image_size. (The
permissibility and treatment of other sizes depends on the network_fn.)
The returned `end_points` are a dictionary of intermediate activations.
The returned `net` is the topmost layer, depending on `num_classes`:
If `num_classes` was a non-zero integer, `net` is a logits tensor
of shape [batch_size, num_classes].
If `num_classes` was 0 or `None`, `net` is a tensor with the input
to the logits layer of shape [batch_size, 1, 1, num_features] or
[batch_size, num_features]. Dropout has not been applied to this
(even if the network's original classification does); it remains for
the caller to do this or not.
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
raise ValueError('Name of network unknown %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes=num_classes, is_training=is_training,
**kwargs)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
| 42.213873 | 80 | 0.751061 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import slim as contrib_slim
from nets import alexnet
from nets import cifarnet
from nets import i3d
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import s3dg
from nets import vgg
from nets.mobilenet import mobilenet_v2
from nets.mobilenet import mobilenet_v3
from nets.nasnet import nasnet
from nets.nasnet import pnasnet
slim = contrib_slim
networks_map = {
'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'i3d': i3d.i3d,
's3dg': s3dg.s3dg,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
'mobilenet_v2': mobilenet_v2.mobilenet,
'mobilenet_v2_140': mobilenet_v2.mobilenet_v2_140,
'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
'mobilenet_v3_small': mobilenet_v3.small,
'mobilenet_v3_large': mobilenet_v3.large,
'mobilenet_v3_small_minimalistic': mobilenet_v3.small_minimalistic,
'mobilenet_v3_large_minimalistic': mobilenet_v3.large_minimalistic,
'mobilenet_edgetpu': mobilenet_v3.edge_tpu,
'mobilenet_edgetpu_075': mobilenet_v3.edge_tpu_075,
'nasnet_cifar': nasnet.build_nasnet_cifar,
'nasnet_mobile': nasnet.build_nasnet_mobile,
'nasnet_large': nasnet.build_nasnet_large,
'pnasnet_large': pnasnet.build_pnasnet_large,
'pnasnet_mobile': pnasnet.build_pnasnet_mobile,
}
arg_scopes_map = {
'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2': inception.inception_resnet_v2_arg_scope,
'i3d': i3d.i3d_arg_scope,
's3dg': s3dg.s3dg_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v2': mobilenet_v2.training_scope,
'mobilenet_v2_035': mobilenet_v2.training_scope,
'mobilenet_v2_140': mobilenet_v2.training_scope,
'mobilenet_v3_small': mobilenet_v3.training_scope,
'mobilenet_v3_large': mobilenet_v3.training_scope,
'mobilenet_v3_small_minimalistic': mobilenet_v3.training_scope,
'mobilenet_v3_large_minimalistic': mobilenet_v3.training_scope,
'mobilenet_edgetpu': mobilenet_v3.training_scope,
'mobilenet_edgetpu_075': mobilenet_v3.training_scope,
'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
'nasnet_large': nasnet.nasnet_large_arg_scope,
'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
if name not in networks_map:
raise ValueError('Name of network unknown %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes=num_classes, is_training=is_training,
**kwargs)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
| true | true |
1c34f8473405ca66b16aa4edaae04cd633afb328 | 8,327 | py | Python | bin/redsea/tidal_api.py | SultanSGillani/dotfiles | 17705501de7c72399656f909f06746700d5f04cd | [
"0BSD"
] | 7 | 2020-02-10T03:07:05.000Z | 2022-02-03T20:50:31.000Z | bin/redsea/tidal_api.py | SultanSGillani/dotfiles | 17705501de7c72399656f909f06746700d5f04cd | [
"0BSD"
] | 2 | 2020-07-18T14:42:36.000Z | 2022-01-20T14:41:03.000Z | bin/redsea/tidal_api.py | SultanSGillani/dotfiles | 17705501de7c72399656f909f06746700d5f04cd | [
"0BSD"
] | 1 | 2018-11-27T16:51:10.000Z | 2018-11-27T16:51:10.000Z | import pickle
import uuid
import os
import requests
class TidalRequestError(Exception):
def __init__(self, payload):
sf = '{subStatus}: {userMessage} (HTTP {status})'.format(**payload)
self.payload = payload
super(TidalRequestError, self).__init__(sf)
class TidalError(Exception):
def __init__(self, message):
self.message = message
super(TidalError, self).__init__(message)
class TidalApi(object):
TIDAL_API_BASE = 'https://api.tidalhifi.com/v1/'
TIDAL_CLIENT_VERSION = '1.9.1'
def __init__(self, session):
self.session = session
def _get(self, url, params={}):
params['countryCode'] = self.session.country_code
resp = requests.get(
self.TIDAL_API_BASE + url,
headers={
'X-Tidal-SessionId': self.session.session_id
},
params=params).json()
if 'status' in resp and resp['status'] == 404 and resp['subStatus'] == 2001:
raise TidalError('Error: {}. This might be region-locked.'.format(resp['userMessage']))
if 'status' in resp and not resp['status'] == 200:
raise TidalRequestError(resp)
return resp
def get_stream_url(self, track_id, quality):
return self._get('tracks/' + str(track_id) + '/streamUrl',
{'soundQuality': quality})
def get_playlist_items(self, playlist_id):
return self._get('playlists/' + playlist_id + '/items', {
'offset': 0,
'limit': 100
})
def get_album_tracks(self, album_id):
return self._get('albums/' + str(album_id) + '/tracks')
def get_artist_tracks(self, artist_id):
return self._get('artists/' + str(artist_id) + '/toptracks')
def get_track(self, track_id):
return self._get('tracks/' + str(track_id))
def get_album(self, album_id):
return self._get('albums/' + str(album_id))
def get_video(self, video_id):
return self._get('videos/' + str(video_id))
def get_favorite_tracks(self, user_id):
return self._get('users/' + str(user_id) + '/favorites/tracks',
{'limit': 9999})
def get_track_contributors(self, track_id):
return self._get('tracks/' + str(track_id) + '/contributors')
def get_video_stream_url(self, video_id):
return self._get('videos/' + str(video_id) + '/streamurl')
@classmethod
def get_album_artwork_url(cls, album_id, size=1280):
return 'https://resources.tidal.com/images/{0}/{1}x{1}.jpg'.format(
album_id.replace('-', '/'), size)
class TidalSession(object):
'''
Tidal session object which can be used to communicate with Tidal servers
'''
def __init__(self, username, password, token='4zx46pyr9o8qZNRw'):
'''
Initiate a new session
'''
self.TIDAL_CLIENT_VERSION = '1.9.1'
self.TIDAL_API_BASE = 'https://api.tidalhifi.com/v1/'
self.username = username
self.token = token
self.unique_id = str(uuid.uuid4()).replace('-', '')[16:]
self.auth(password)
password = None
def auth(self, password):
'''
Attempts to authorize and create a new valid session
'''
postParams = {
'username': self.username,
'password': password,
'token': self.token,
'clientUniqueKey': self.unique_id,
'clientVersion': self.TIDAL_CLIENT_VERSION
}
r = requests.post(self.TIDAL_API_BASE + 'login/username', data=postParams).json()
password = None
if 'status' in r and not r['status'] == 200:
raise TidalRequestError(r)
self.session_id = r['sessionId']
self.user_id = r['userId']
self.country_code = r['countryCode']
assert self.valid(), 'This session has an invalid sessionId. Please re-authenticate'
def session_type(self):
'''
Returns the type of token used to create the session
'''
if self.token == '4zx46pyr9o8qZNRw':
return 'Desktop'
elif self.token == 'kgsOOmYk3zShYrNP':
return 'Mobile'
else:
return 'Other/Unknown'
def valid(self):
'''
Checks if session is still valid and returns True/False
'''
r = requests.get(self.TIDAL_API_BASE + 'users/' + str(self.user_id),
params={'sessionId': self.session_id}).json()
if 'status' in r and not r['status'] == 200:
return False
else:
return True
class TidalSessionFile(object):
'''
Tidal session storage file which can save/load
'''
def __init__(self, session_file):
self.VERSION = '1.0'
self.session_file = session_file # Session file path
self.session_store = {} # Will contain data from session file
self.sessions = {} # Will contain sessions from session_store['sessions']
self.default = None # Specifies the name of the default session to use
if os.path.isfile(self.session_file):
with open(self.session_file, 'rb') as f:
self.session_store = pickle.load(f)
if 'version' in self.session_store and self.session_store['version'] == self.VERSION:
self.sessions = self.session_store['sessions']
self.default = self.session_store['default']
elif 'version' in self.session_store:
raise ValueError(
'Session file is version {} while redsea expects version {}'.
format(self.session_store['version'], self.VERSION))
else:
raise ValueError('Existing session file is malformed. Please delete/rebuild session file.')
f.close()
else:
self._save()
self = TidalSessionFile(session_file=self.session_file)
def _save(self):
'''
Attempts to write current session store to file
'''
self.session_store['version'] = self.VERSION
self.session_store['sessions'] = self.sessions
self.session_store['default'] = self.default
with open(self.session_file, 'wb') as f:
pickle.dump(self.session_store, f)
def new_session(self, session_name, username, password, token='4zx46pyr9o8qZNRw'):
'''
Create a new TidalSession object and auth with Tidal server
'''
if session_name not in self.sessions:
self.sessions[session_name] = TidalSession(username, password, token=token)
password = None
if len(self.sessions) == 1:
self.default = session_name
else:
password = None
raise ValueError('Session "{}" already exists in sessions file!'.format(session_name))
self._save()
def remove(self, session_name):
'''
Removes a session from the session store and saves the session file
'''
if session_name not in self.sessions:
raise ValueError('Session "{}" does not exist in session store.'.format(session_name))
self.sessions.pop(session_name)
self._save()
def load(self, session_name=None):
'''
Returns a session from the session store
'''
if len(self.sessions) == 0:
raise ValueError('There are no sessions in session file!')
if session_name is None:
session_name = self.default
if session_name in self.sessions:
assert self.sessions[session_name].valid(), '{} has an invalid sessionId. Please re-authenticate'.format(session_name)
return self.sessions[session_name]
raise ValueError('Session "{}" could not be found.'.format(session_name))
def set_default(self, session_name):
'''
Set a default session to return when
load() is called without a session name
'''
if session_name in self.sessions:
assert self.sessions[session_name].valid(), '{} has an invalid sessionId. Please re-authenticate'.format(session_name)
self.default = session_name
self._save() | 33.175299 | 130 | 0.593491 | import pickle
import uuid
import os
import requests
class TidalRequestError(Exception):
def __init__(self, payload):
sf = '{subStatus}: {userMessage} (HTTP {status})'.format(**payload)
self.payload = payload
super(TidalRequestError, self).__init__(sf)
class TidalError(Exception):
def __init__(self, message):
self.message = message
super(TidalError, self).__init__(message)
class TidalApi(object):
TIDAL_API_BASE = 'https://api.tidalhifi.com/v1/'
TIDAL_CLIENT_VERSION = '1.9.1'
def __init__(self, session):
self.session = session
def _get(self, url, params={}):
params['countryCode'] = self.session.country_code
resp = requests.get(
self.TIDAL_API_BASE + url,
headers={
'X-Tidal-SessionId': self.session.session_id
},
params=params).json()
if 'status' in resp and resp['status'] == 404 and resp['subStatus'] == 2001:
raise TidalError('Error: {}. This might be region-locked.'.format(resp['userMessage']))
if 'status' in resp and not resp['status'] == 200:
raise TidalRequestError(resp)
return resp
def get_stream_url(self, track_id, quality):
return self._get('tracks/' + str(track_id) + '/streamUrl',
{'soundQuality': quality})
def get_playlist_items(self, playlist_id):
return self._get('playlists/' + playlist_id + '/items', {
'offset': 0,
'limit': 100
})
def get_album_tracks(self, album_id):
return self._get('albums/' + str(album_id) + '/tracks')
def get_artist_tracks(self, artist_id):
return self._get('artists/' + str(artist_id) + '/toptracks')
def get_track(self, track_id):
return self._get('tracks/' + str(track_id))
def get_album(self, album_id):
return self._get('albums/' + str(album_id))
def get_video(self, video_id):
return self._get('videos/' + str(video_id))
def get_favorite_tracks(self, user_id):
return self._get('users/' + str(user_id) + '/favorites/tracks',
{'limit': 9999})
def get_track_contributors(self, track_id):
return self._get('tracks/' + str(track_id) + '/contributors')
def get_video_stream_url(self, video_id):
return self._get('videos/' + str(video_id) + '/streamurl')
@classmethod
def get_album_artwork_url(cls, album_id, size=1280):
return 'https://resources.tidal.com/images/{0}/{1}x{1}.jpg'.format(
album_id.replace('-', '/'), size)
class TidalSession(object):
def __init__(self, username, password, token='4zx46pyr9o8qZNRw'):
self.TIDAL_CLIENT_VERSION = '1.9.1'
self.TIDAL_API_BASE = 'https://api.tidalhifi.com/v1/'
self.username = username
self.token = token
self.unique_id = str(uuid.uuid4()).replace('-', '')[16:]
self.auth(password)
password = None
def auth(self, password):
postParams = {
'username': self.username,
'password': password,
'token': self.token,
'clientUniqueKey': self.unique_id,
'clientVersion': self.TIDAL_CLIENT_VERSION
}
r = requests.post(self.TIDAL_API_BASE + 'login/username', data=postParams).json()
password = None
if 'status' in r and not r['status'] == 200:
raise TidalRequestError(r)
self.session_id = r['sessionId']
self.user_id = r['userId']
self.country_code = r['countryCode']
assert self.valid(), 'This session has an invalid sessionId. Please re-authenticate'
def session_type(self):
if self.token == '4zx46pyr9o8qZNRw':
return 'Desktop'
elif self.token == 'kgsOOmYk3zShYrNP':
return 'Mobile'
else:
return 'Other/Unknown'
def valid(self):
r = requests.get(self.TIDAL_API_BASE + 'users/' + str(self.user_id),
params={'sessionId': self.session_id}).json()
if 'status' in r and not r['status'] == 200:
return False
else:
return True
class TidalSessionFile(object):
def __init__(self, session_file):
self.VERSION = '1.0'
self.session_file = session_file
self.session_store = {}
self.sessions = {}
self.default = None
if os.path.isfile(self.session_file):
with open(self.session_file, 'rb') as f:
self.session_store = pickle.load(f)
if 'version' in self.session_store and self.session_store['version'] == self.VERSION:
self.sessions = self.session_store['sessions']
self.default = self.session_store['default']
elif 'version' in self.session_store:
raise ValueError(
'Session file is version {} while redsea expects version {}'.
format(self.session_store['version'], self.VERSION))
else:
raise ValueError('Existing session file is malformed. Please delete/rebuild session file.')
f.close()
else:
self._save()
self = TidalSessionFile(session_file=self.session_file)
def _save(self):
self.session_store['version'] = self.VERSION
self.session_store['sessions'] = self.sessions
self.session_store['default'] = self.default
with open(self.session_file, 'wb') as f:
pickle.dump(self.session_store, f)
def new_session(self, session_name, username, password, token='4zx46pyr9o8qZNRw'):
if session_name not in self.sessions:
self.sessions[session_name] = TidalSession(username, password, token=token)
password = None
if len(self.sessions) == 1:
self.default = session_name
else:
password = None
raise ValueError('Session "{}" already exists in sessions file!'.format(session_name))
self._save()
def remove(self, session_name):
if session_name not in self.sessions:
raise ValueError('Session "{}" does not exist in session store.'.format(session_name))
self.sessions.pop(session_name)
self._save()
def load(self, session_name=None):
if len(self.sessions) == 0:
raise ValueError('There are no sessions in session file!')
if session_name is None:
session_name = self.default
if session_name in self.sessions:
assert self.sessions[session_name].valid(), '{} has an invalid sessionId. Please re-authenticate'.format(session_name)
return self.sessions[session_name]
raise ValueError('Session "{}" could not be found.'.format(session_name))
def set_default(self, session_name):
if session_name in self.sessions:
assert self.sessions[session_name].valid(), '{} has an invalid sessionId. Please re-authenticate'.format(session_name)
self.default = session_name
self._save() | true | true |
1c34f8a2fde5ba30697a03d4b7f7701e4cce5000 | 455 | py | Python | beer/cli/subcommands/hmm/phonelist.py | RobinAlgayres/beer | 15ad0dad5a49f98e658e948724e05df347ffe3b8 | [
"MIT"
] | 46 | 2018-02-27T18:15:08.000Z | 2022-02-16T22:10:55.000Z | beer/cli/subcommands/hmm/phonelist.py | RobinAlgayres/beer | 15ad0dad5a49f98e658e948724e05df347ffe3b8 | [
"MIT"
] | 16 | 2018-01-26T14:18:51.000Z | 2021-02-05T09:34:00.000Z | beer/cli/subcommands/hmm/phonelist.py | RobinAlgayres/beer | 15ad0dad5a49f98e658e948724e05df347ffe3b8 | [
"MIT"
] | 26 | 2018-03-12T14:03:26.000Z | 2021-05-24T21:15:01.000Z |
'print the list of phones from a set of phones\' HMM'
import argparse
import pickle
from natsort import natsorted
def setup(parser):
parser.add_argument('hmms', help='phones\' hmms')
def main(args, logger):
    """Load the units dictionary pickled in `args.hmms` and print each unit name.

    Names are printed one per line in natural sort order, case-insensitively.
    """
    logger.debug('loading the HMMs...')
    with open(args.hmms, 'rb') as fileobj:
        units, _ = pickle.load(fileobj)
    for name in natsorted(units.keys(), key=lambda item: item.lower()):
        print(name)
if __name__ == "__main__":
    # Bug fix: the guard used to call main() with no arguments, but
    # main(args, logger) requires two -- running the module directly
    # always raised TypeError.  Build the CLI args and a logger here.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    _parser = argparse.ArgumentParser(description=__doc__)
    setup(_parser)
    main(_parser.parse_args(), logging.getLogger(__name__))
| 17.5 | 64 | 0.643956 |
import argparse
import pickle
from natsort import natsorted
def setup(parser):
parser.add_argument('hmms', help='phones\' hmms')
def main(args, logger):
logger.debug('loading the HMMs...')
with open(args.hmms, 'rb') as f:
units, _ = pickle.load(f)
for key in natsorted(units.keys(), key=lambda x: x.lower()):
print(key)
if __name__ == "__main__":
main()
| true | true |
1c34f94acc478114b177c4f1e4daaf1402f8de8f | 27 | py | Python | testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | [
"MIT"
] | 9,225 | 2015-06-15T21:56:14.000Z | 2022-03-31T20:47:38.000Z | testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | [
"MIT"
] | 7,794 | 2015-06-15T21:06:34.000Z | 2022-03-31T10:56:54.000Z | testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | [
"MIT"
] | 2,598 | 2015-06-15T21:42:39.000Z | 2022-03-29T13:48:22.000Z | def test_2(arg2):
pass
| 9 | 17 | 0.62963 | def test_2(arg2):
pass
| true | true |
1c34fcb4d73d6021d2acb6af49703d7f1211b96d | 1,634 | py | Python | content/migrations/0046_auto_20210401_0016.py | bikramtuladhar/covid-19-procurement-explorer-admin | 9bba473c8b83c8651e3178b6fba01af74d8b27dc | [
"BSD-3-Clause"
] | null | null | null | content/migrations/0046_auto_20210401_0016.py | bikramtuladhar/covid-19-procurement-explorer-admin | 9bba473c8b83c8651e3178b6fba01af74d8b27dc | [
"BSD-3-Clause"
] | null | null | null | content/migrations/0046_auto_20210401_0016.py | bikramtuladhar/covid-19-procurement-explorer-admin | 9bba473c8b83c8651e3178b6fba01af74d8b27dc | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-01 00:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2021-04-01 00:16).

    Re-declares several foreign keys with explicit `on_delete` behavior:
    required relations (country/language) use PROTECT, while the optional
    nullable `topics` relations use SET_NULL.
    """

    dependencies = [
        ('country', '0030_auto_20210331_1526'),
        ('content', '0045_auto_20210331_1526'),
    ]

    operations = [
        # Required relation: deleting the referenced country is blocked.
        migrations.AlterField(
            model_name='dataimport',
            name='country',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.country'),
        ),
        migrations.AlterField(
            model_name='insightspage',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.language'),
        ),
        # Optional relation: the pointer is cleared when the topic is deleted.
        migrations.AlterField(
            model_name='insightspage',
            name='topics',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='country.topic'),
        ),
        migrations.AlterField(
            model_name='resourcespage',
            name='lang',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.language'),
        ),
        migrations.AlterField(
            model_name='resourcespage',
            name='topics',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='country.topic'),
        ),
        migrations.AlterField(
            model_name='staticpage',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.language'),
        ),
    ]
| 35.521739 | 125 | 0.617503 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('country', '0030_auto_20210331_1526'),
('content', '0045_auto_20210331_1526'),
]
operations = [
migrations.AlterField(
model_name='dataimport',
name='country',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.country'),
),
migrations.AlterField(
model_name='insightspage',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.language'),
),
migrations.AlterField(
model_name='insightspage',
name='topics',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='country.topic'),
),
migrations.AlterField(
model_name='resourcespage',
name='lang',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.language'),
),
migrations.AlterField(
model_name='resourcespage',
name='topics',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='country.topic'),
),
migrations.AlterField(
model_name='staticpage',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='country.language'),
),
]
| true | true |
1c34fce2b3dc9d70b87850dfff00fb1971f911c0 | 1,568 | py | Python | python/zenhub-backup.py | open-cluster-management/zenhub-backup | b6811cbb01879e9a5de6d1490ac25d2a3c734fea | [
"Apache-2.0"
] | 4 | 2021-05-21T22:06:50.000Z | 2021-06-21T14:43:32.000Z | python/zenhub-backup.py | open-cluster-management/zenhub-backup | b6811cbb01879e9a5de6d1490ac25d2a3c734fea | [
"Apache-2.0"
] | null | null | null | python/zenhub-backup.py | open-cluster-management/zenhub-backup | b6811cbb01879e9a5de6d1490ac25d2a3c734fea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from zenhub import Zenhub # the move issue API failed; fellback on requests library
import json
import os
# Update these variables with the project settings
ZENHUB_API = 'https://api.zenhub.com'
ZENHUB_API_TOKEN = os.getenv('ZENHUB_API_TOKEN')
GITHUB_REPO_ID = os.getenv('GITHUB_REPO_ID')
ZENHUB_WORKSPACE_ID = os.getenv('ZENHUB_WORKSPACE_ID')
AUTH_HEADER = {'X-Authentication-Token': '%s' % ZENHUB_API_TOKEN}
ZENHUB_BOARD_JSON_FILE = 'zenhub-board.json' if os.getenv('ZENHUB_BOARD_JSON_FILE') is None else os.getenv('ZENHUB_BOARD_JSON_FILE')
PIPELINE_MAPPING_JSON_FILE = 'pipeline-map.json' if os.getenv('PIPELINE_MAPPING_JSON_FILE') is None else os.getenv('PIPELINE_MAPPING_JSON_FILE')
if ZENHUB_API_TOKEN is None or GITHUB_REPO_ID is None or ZENHUB_WORKSPACE_ID is None:
print("ERROR: One or more of the required environment variables were not found: ZENHUB_API_TOKEN, GITHUB_REPO_ID, ZENHUB_WORKSPACE_ID")
exit(1)
# Details on how to find each of the required parameters
# https://github.com/ZenHubIO/API#move-an-issue-between-pipelines
ISSUES_API = '%s/p2/workspaces/%s/repositories/%s/issues' % (ZENHUB_API, ZENHUB_WORKSPACE_ID, GITHUB_REPO_ID)
def print_board():
zh = Zenhub(ZENHUB_API_TOKEN)
print('\nRetrieving Zenhub board...')
board = zh.get_repository_board(ZENHUB_WORKSPACE_ID, GITHUB_REPO_ID)
print('Writing "%s"...' % ZENHUB_BOARD_JSON_FILE)
f = open(ZENHUB_BOARD_JSON_FILE, 'w')
f.write(json.dumps(board))
f.close()
print('Backup complete.\n')
def main():
print_board()
main()
| 39.2 | 144 | 0.765306 |
from zenhub import Zenhub
import json
import os
ZENHUB_API = 'https://api.zenhub.com'
ZENHUB_API_TOKEN = os.getenv('ZENHUB_API_TOKEN')
GITHUB_REPO_ID = os.getenv('GITHUB_REPO_ID')
ZENHUB_WORKSPACE_ID = os.getenv('ZENHUB_WORKSPACE_ID')
AUTH_HEADER = {'X-Authentication-Token': '%s' % ZENHUB_API_TOKEN}
ZENHUB_BOARD_JSON_FILE = 'zenhub-board.json' if os.getenv('ZENHUB_BOARD_JSON_FILE') is None else os.getenv('ZENHUB_BOARD_JSON_FILE')
PIPELINE_MAPPING_JSON_FILE = 'pipeline-map.json' if os.getenv('PIPELINE_MAPPING_JSON_FILE') is None else os.getenv('PIPELINE_MAPPING_JSON_FILE')
if ZENHUB_API_TOKEN is None or GITHUB_REPO_ID is None or ZENHUB_WORKSPACE_ID is None:
print("ERROR: One or more of the required environment variables were not found: ZENHUB_API_TOKEN, GITHUB_REPO_ID, ZENHUB_WORKSPACE_ID")
exit(1)
%s/repositories/%s/issues' % (ZENHUB_API, ZENHUB_WORKSPACE_ID, GITHUB_REPO_ID)
def print_board():
zh = Zenhub(ZENHUB_API_TOKEN)
print('\nRetrieving Zenhub board...')
board = zh.get_repository_board(ZENHUB_WORKSPACE_ID, GITHUB_REPO_ID)
print('Writing "%s"...' % ZENHUB_BOARD_JSON_FILE)
f = open(ZENHUB_BOARD_JSON_FILE, 'w')
f.write(json.dumps(board))
f.close()
print('Backup complete.\n')
def main():
print_board()
main()
| true | true |
1c34fcfaddccb2b4050adc99174f6ef631ed2ae8 | 26,232 | py | Python | pyclustering/nnet/hhn.py | JosephChataignon/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 1,013 | 2015-01-26T19:50:14.000Z | 2022-03-31T07:38:48.000Z | pyclustering/nnet/hhn.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 542 | 2015-01-20T16:44:32.000Z | 2022-01-29T14:57:20.000Z | pyclustering/nnet/hhn.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 262 | 2015-03-19T07:28:12.000Z | 2022-03-30T07:28:24.000Z | """!
@brief Oscillatory Neural Network based on Hodgkin-Huxley Neuron Model
@details Implementation based on paper @cite article::nnet::hnn::1.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
from scipy.integrate import odeint
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.hhn_wrapper as wrapper
from pyclustering.nnet import *
from pyclustering.utils import allocate_sync_ensembles
import numpy
import random
class hhn_parameters:
    """!
    @brief Parameter set of the Hodgkin-Huxley oscillatory network.

    @see hhn_network

    """

    def __init__(self):
        """!
        @brief Initializes every parameter with a non-zero default suitable
                for a simple simulation run.

        """
        # Intrinsic noise in [-1, 1); jitters the channel conductivities.
        self.nu = random.random() * 2.0 - 1.0

        # Maximal channel conductivities, each perturbed by +-2% * nu.
        self.gNa = 120.0 * (1 + 0.02 * self.nu)   # sodium current
        self.gK = 36.0 * (1 + 0.02 * self.nu)     # potassium current
        self.gL = 0.3 * (1 + 0.02 * self.nu)      # leakage current

        # Reversal potentials [mV] of the ionic currents and the rest potential.
        self.vNa = 50.0
        self.vK = -77.0
        self.vL = -54.4
        self.vRest = -65.0

        # External currents [mV] driving the two central neurons.
        self.Icn1 = 5.0
        self.Icn2 = 30.0

        # Synaptic reversal potentials [mV] for inhibitory/excitatory effects.
        self.Vsyninh = -80.0
        self.Vsynexc = 0.0

        # Alfa-function coefficients for the inhibitory impact.
        self.alfa_inhibitory = 6.0
        self.betta_inhibitory = 0.3

        # Alfa-function coefficients for the excitatory impact.
        self.alfa_excitatory = 40.0
        self.betta_excitatory = 2.0

        # Synaptic weights: PN->CN1, CN1->PN and CN2->PN respectively.
        self.w1 = 0.1
        self.w2 = 9.0
        self.w3 = 5.0

        # Period [ms] during which the strong CN2->PN link stays active.
        self.deltah = 650.0

        # Membrane potential threshold for counting an oscillator as active.
        self.threshold = -10

        # Sensitivity of the pulse counter.
        self.eps = 0.16
class central_element:
    """!
    @brief State holder for one neuron of the central element; its dynamic
            differs slightly from the peripheral neurons.

    @see hhn_network

    """

    def __init__(self):
        """!
        @brief Initializes the central neuron with zeroed dynamics.

        """
        self.membrane_potential = 0.0        # membrane potential (V)
        self.active_cond_sodium = 0.0        # sodium activation gate (m)
        self.inactive_cond_sodium = 0.0      # sodium inactivation gate (h)
        self.active_cond_potassium = 0.0     # potassium activation gate (n)
        self.pulse_generation = False        # True while a spike is being emitted
        self.pulse_generation_time = []      # timestamps of emitted spikes

    def __repr__(self):
        """!
        @brief Returns a readable summary: membrane potential and spike times.

        """
        return "%s, %s" % (self.membrane_potential, self.pulse_generation_time)
class hhn_network(network):
"""!
@brief Oscillatory Neural Network with central element based on Hodgkin-Huxley neuron model.
@details Interaction between oscillators is performed via central element (no connection between oscillators that
are called as peripheral). Peripheral oscillators receive external stimulus. Central element consist of
two oscillators: the first is used for synchronization some ensemble of oscillators and the second
controls synchronization of the first central oscillator with various ensembles.
Usage example where oscillatory network with 6 oscillators is used for simulation. The first two oscillators
have the same stimulus, as well as the third and fourth oscillators and the last two. Thus three synchronous
ensembles are expected after simulation.
@code
from pyclustering.nnet.hhn import hhn_network, hhn_parameters
from pyclustering.nnet.dynamic_visualizer import dynamic_visualizer
# Change period of time when high strength value of synaptic connection exists from CN2 to PN.
params = hhn_parameters()
params.deltah = 400
# Create Hodgkin-Huxley oscillatory network with stimulus.
net = hhn_network(6, [0, 0, 25, 25, 47, 47], params)
# Simulate network.
(t, dyn_peripheral, dyn_central) = net.simulate(2400, 600)
# Visualize network's output (membrane potential of peripheral and central neurons).
amount_canvases = 6 + 2 # 6 peripheral oscillator + 2 central elements
visualizer = dynamic_visualizer(amount_canvases, x_title="Time", y_title="V", y_labels=False)
visualizer.append_dynamics(t, dyn_peripheral, 0, True)
visualizer.append_dynamics(t, dyn_central, amount_canvases - 2, True)
visualizer.show()
@endcode
There is visualized result of simulation where three synchronous ensembles of oscillators can be observed. The
first and the second oscillators form the first ensemble, the third and the fourth form the second ensemble and
the last two oscillators form the third ensemble.
@image html hhn_three_ensembles.png
"""
def __init__(self, num_osc, stimulus=None, parameters=None, type_conn=None, type_conn_represent=conn_represent.MATRIX, ccore=True):
    """!
    @brief Creates the oscillatory network based on Hodgkin-Huxley neuron model.

    @param[in] num_osc (uint): Number of peripheral oscillators.
    @param[in] stimulus (list): Stimulus per peripheral oscillator (zeros when None).
    @param[in] parameters (hhn_parameters): Network parameters (defaults when None).
    @param[in] type_conn (conn_type): Ignored - peripheral neurons are not interconnected.
    @param[in] type_conn_represent (conn_represent): Internal connection representation.
    @param[in] ccore (bool): Use the C/C++ core implementation when available.

    """
    super().__init__(num_osc, conn_type.NONE, type_conn_represent)

    self._stimulus = [0.0] * num_osc if stimulus is None else stimulus
    self._params = hhn_parameters() if parameters is None else parameters

    self.__ccore_hhn_pointer = None
    self.__ccore_hhn_dynamic_pointer = None

    if (ccore is True) and ccore_library.workable():
        # Native path: the whole simulation state lives inside the C/C++ core.
        self.__ccore_hhn_pointer = wrapper.hhn_create(num_osc, self._params)
    else:
        # Python path: allocate one state slot per peripheral neuron.
        self._membrane_dynamic_pointer = None  # final result is stored here by simulate_static()
        self._membrane_potential = [0.0] * self._num_osc
        self._active_cond_sodium = [0.0] * self._num_osc
        self._inactive_cond_sodium = [0.0] * self._num_osc
        self._active_cond_potassium = [0.0] * self._num_osc
        self._link_activation_time = [0.0] * self._num_osc
        self._link_pulse_counter = [0.0] * self._num_osc
        self._link_weight3 = [0.0] * self._num_osc
        self._pulse_generation_time = [[] for _ in range(self._num_osc)]
        self._pulse_generation = [False] * self._num_osc
        self._noise = [random.random() * 2.0 - 1.0 for _ in range(self._num_osc)]
        self._central_element = [central_element(), central_element()]
def __del__(self):
    """!
    @brief Frees the native (CCORE) network instance when one was allocated.

    """
    if not self.__ccore_hhn_pointer:
        return
    wrapper.hhn_destroy(self.__ccore_hhn_pointer)
def simulate(self, steps, time, solution=solve_type.RK4):
    """!
    @brief Performs static simulation of the network (alias for simulate_static()).

    @param[in] steps (uint): Number of simulation steps.
    @param[in] time (double): Total simulated time.
    @param[in] solution (solve_type): Differential equation solver to use.

    @return (tuple) (time points, peripheral neurons dynamic, central elements dynamic)
             with types (list, list, list).

    """
    return self.simulate_static(steps, time, solution)
def simulate_static(self, steps, time, solution=solve_type.RK4):
    """!
    @brief Performs static simulation of the network.
    @details Accuracy depends on the number of steps and on the solver: the
              Python path integrates with scipy's `odeint`, the CCORE path uses
              RK4/RKF45 and generally needs more steps.

    @param[in] steps (uint): Number of simulation steps.
    @param[in] time (double): Total simulated time.
    @param[in] solution (solve_type): Differential equation solver to use.

    @return (tuple) (time points, peripheral neurons dynamic, central elements dynamic)
             with types (list, list, list).

    """
    if solution == solve_type.FAST:
        raise NameError("Solver FAST is not support due to low accuracy that leads to huge error.")

    self._membrane_dynamic_pointer = None

    if self.__ccore_hhn_pointer is not None:
        # Native path: run the whole simulation inside the C/C++ core.
        self.__ccore_hhn_dynamic_pointer = wrapper.hhn_dynamic_create(True, False, False, False)
        wrapper.hhn_simulate(self.__ccore_hhn_pointer, steps, time, solution, self._stimulus, self.__ccore_hhn_dynamic_pointer)

        peripheral_potentials = wrapper.hhn_dynamic_get_peripheral_evolution(self.__ccore_hhn_dynamic_pointer, 0)
        central_potentials = wrapper.hhn_dynamic_get_central_evolution(self.__ccore_hhn_dynamic_pointer, 0)
        time_points = wrapper.hhn_dynamic_get_time(self.__ccore_hhn_dynamic_pointer)

        self._membrane_dynamic_pointer = peripheral_potentials
        wrapper.hhn_dynamic_destroy(self.__ccore_hhn_dynamic_pointer)

        return time_points, peripheral_potentials, central_potentials

    if solution == solve_type.RKF45:
        raise NameError("Solver RKF45 is not support in python version.")

    # Python path: integrate step by step and record every intermediate state.
    dyn_peripheral = [self._membrane_potential[:]]
    dyn_central = [[0.0, 0.0]]
    dyn_time = [0.0]

    step = time / steps
    int_step = step / 10.0

    for t in numpy.arange(step, time + step, step):
        (memb_peripheral, memb_central) = self._calculate_states(solution, t, step, int_step)

        dyn_peripheral.append(memb_peripheral)
        dyn_central.append(memb_central)
        dyn_time.append(t)

    self._membrane_dynamic_pointer = dyn_peripheral
    return dyn_time, dyn_peripheral, dyn_central
def _calculate_states(self, solution, t, step, int_step):
    """!
    @brief Integrates every neuron over one step and commits the new states.

    @param[in] solution (solve_type): Solver type (the Python path always uses odeint).
    @param[in] t (double): Current simulation time.
    @param[in] step (uint): Duration of the step at whose end states are calculated.
    @param[in] int_step (double): Integration sub-step of the ODE solver.

    @return (list) New membrane potentials as (peripheral neurons, central elements).

    """
    count = self._num_osc
    next_membrane = [0.0] * count
    next_active_sodium = [0.0] * count
    next_inactive_sodium = [0.0] * count
    next_active_potassium = [0.0] * count

    # Integrate each peripheral neuron independently over [t - step, t).
    for index in range(count):
        initial_state = [self._membrane_potential[index],
                         self._active_cond_sodium[index],
                         self._inactive_cond_sodium[index],
                         self._active_cond_potassium[index]]
        result = odeint(self.hnn_state, initial_state, numpy.arange(t - step, t, int_step), (index,))
        (next_membrane[index],
         next_active_sodium[index],
         next_inactive_sodium[index],
         next_active_potassium[index]) = result[-1][0:4]

    next_cn_membrane = [0.0, 0.0]
    next_cn_active_sodium = [0.0, 0.0]
    next_cn_inactive_sodium = [0.0, 0.0]
    next_cn_active_potassium = [0.0, 0.0]

    # Integrate the two central neurons; their indexes continue after the PNs.
    for index in range(len(self._central_element)):
        element = self._central_element[index]
        initial_state = [element.membrane_potential,
                         element.active_cond_sodium,
                         element.inactive_cond_sodium,
                         element.active_cond_potassium]
        result = odeint(self.hnn_state, initial_state, numpy.arange(t - step, t, int_step), (self._num_osc + index,))
        (next_cn_membrane[index],
         next_cn_active_sodium[index],
         next_cn_inactive_sodium[index],
         next_cn_active_potassium[index]) = result[-1][0:4]

    # Refresh the multiplicative noise applied to the external stimulus.
    self._noise = [1.0 + 0.01 * (random.random() * 2.0 - 1.0) for _ in range(count)]

    # Commit the freshly integrated states.
    self.__update_peripheral_neurons(t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium)
    self.__update_central_neurons(t, next_cn_membrane, next_cn_active_sodium, next_cn_inactive_sodium, next_cn_active_potassium)

    return (next_membrane, next_cn_membrane)
def __update_peripheral_neurons(self, t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium):
    """!
    @brief Commits new channel states of peripheral neurons and tracks spikes.

    @param[in] t (double): Current simulation time.
    @param[in] step (uint): Duration of the simulation step that was computed.
    @param[in] next_membrane (list): New membrane potentials.
    @param[in] next_active_sodium (list): New sodium activation conductances.
    @param[in] next_inactive_sodium (list): New sodium inactivation conductances.
    @param[in] next_active_potassium (list): New potassium activation conductances.

    """
    self._membrane_potential = next_membrane[:]
    self._active_cond_sodium = next_active_sodium[:]
    self._inactive_cond_sodium = next_inactive_sodium[:]
    self._active_cond_potassium = next_active_potassium[:]

    for i in range(self._num_osc):
        # Spike bookkeeping: a spike starts when V crosses 0 from below.
        if self._pulse_generation[i] is False:
            if self._membrane_potential[i] >= 0.0:
                self._pulse_generation[i] = True
                self._pulse_generation_time[i].append(t)
        elif self._membrane_potential[i] < 0.0:
            self._pulse_generation[i] = False

        # CN2 -> PN link: switched on after sustained supra-threshold activity,
        # then kept alive for `deltah` ms before being reset.
        if self._link_weight3[i] == 0.0:
            if self._membrane_potential[i] > self._params.threshold:
                self._link_pulse_counter[i] += step
                if self._link_pulse_counter[i] >= 1 / self._params.eps:
                    self._link_weight3[i] = self._params.w3
                    self._link_activation_time[i] = t
        elif not (self._link_activation_time[i] < t < self._link_activation_time[i] + self._params.deltah):
            self._link_weight3[i] = 0.0
            self._link_pulse_counter[i] = 0.0
def __update_central_neurons(self, t, next_cn_membrane, next_cn_active_sodium, next_cn_inactive_sodium, next_cn_active_potassium):
    """!
    @brief Commits new channel states of both central neurons and tracks spikes.

    @param[in] t (double): Current simulation time.
    @param[in] next_cn_membrane (list): New membrane potentials of CN1/CN2.
    @param[in] next_cn_active_sodium (list): New sodium activation conductances.
    @param[in] next_cn_inactive_sodium (list): New sodium inactivation conductances.
    @param[in] next_cn_active_potassium (list): New potassium activation conductances.

    """
    for i, element in enumerate(self._central_element):
        element.membrane_potential = next_cn_membrane[i]
        element.active_cond_sodium = next_cn_active_sodium[i]
        element.inactive_cond_sodium = next_cn_inactive_sodium[i]
        element.active_cond_potassium = next_cn_active_potassium[i]

        # Spike bookkeeping, same rule as for peripheral neurons.
        if element.pulse_generation is False:
            if element.membrane_potential >= 0.0:
                element.pulse_generation = True
                element.pulse_generation_time.append(t)
        elif element.membrane_potential < 0.0:
            element.pulse_generation = False
def hnn_state(self, inputs, t, argv):
    """!
    @brief Right-hand side of the Hodgkin-Huxley equations for one neuron.

    @param[in] inputs (list): Current state [v, m, h, n] of the neuron.
    @param[in] t (double): Current simulation time.
    @param[in] argv (tuple): Index of the neuron; indexes >= num_osc address the
                central element (0 -> CN1, 1 -> CN2).

    @return (list) Derivatives [dv, dm, dh, dn] where v is the membrane potential,
             m/h the sodium activation/inactivation gates and n the potassium
             activation gate.

    """
    index = argv

    v = inputs[0]  # membrane potential
    m = inputs[1]  # sodium activation gate
    h = inputs[2]  # sodium inactivation gate
    n = inputs[3]  # potassium activation gate

    # Total ionic current: gNa*m^3*h*(v-vNa) + gK*n^4*(v-vK) + gL*(v-vL).
    sodium_current = self._params.gNa * (m ** 3) * h * (v - self._params.vNa)
    potassium_current = self._params.gK * (n ** 4) * (v - self._params.vK)
    leakage_current = self._params.gL * (v - self._params.vL)
    Iion = sodium_current + potassium_current + leakage_current

    Iext = 0.0
    Isyn = 0.0

    if index < self._num_osc:
        # Peripheral neuron: noisy external stimulus plus inhibition from CN1/CN2.
        Iext = self._stimulus[index] * self._noise[index]

        impact_cn1 = 0.0
        for spike_time in self._central_element[0].pulse_generation_time:
            impact_cn1 += self.__alfa_function(t - spike_time, self._params.alfa_inhibitory, self._params.betta_inhibitory)

        impact_cn2 = 0.0
        for spike_time in self._central_element[1].pulse_generation_time:
            impact_cn2 += self.__alfa_function(t - spike_time, self._params.alfa_inhibitory, self._params.betta_inhibitory)

        Isyn = self._params.w2 * (v - self._params.Vsyninh) * impact_cn1 + \
               self._link_weight3[index] * (v - self._params.Vsyninh) * impact_cn2
    else:
        central_index = index - self._num_osc
        if central_index == 0:
            # CN1: excited by every spike of every peripheral neuron.
            Iext = self._params.Icn1
            impact = 0.0
            for oscillator in range(self._num_osc):
                for spike_time in self._pulse_generation_time[oscillator]:
                    impact += self.__alfa_function(t - spike_time, self._params.alfa_excitatory, self._params.betta_excitatory)
            Isyn = self._params.w1 * (v - self._params.Vsynexc) * impact
        elif central_index == 1:
            # CN2: driven only by its external current.
            Iext = self._params.Icn2
            Isyn = 0.0
        else:
            assert 0  # the central element contains exactly two neurons

    # Membrane potential derivative.
    dv = -Iion + Iext - Isyn

    # Gate rate constants (alpha/beta) relative to the resting potential.
    potential = v - self._params.vRest
    am = (2.5 - 0.1 * potential) / (math.exp(2.5 - 0.1 * potential) - 1.0)
    ah = 0.07 * math.exp(-potential / 20.0)
    an = (0.1 - 0.01 * potential) / (math.exp(1.0 - 0.1 * potential) - 1.0)

    bm = 4.0 * math.exp(-potential / 18.0)
    bh = 1.0 / (math.exp(3.0 - 0.1 * potential) + 1.0)
    bn = 0.125 * math.exp(-potential / 80.0)

    dm = am * (1.0 - m) - bm * m
    dh = ah * (1.0 - h) - bh * h
    dn = an * (1.0 - n) - bn * n

    return [dv, dm, dh, dn]
def allocate_sync_ensembles(self, tolerance=0.1):
    """!
    @brief Allocates clusters of synchronous oscillators; each synchronous
            ensemble corresponds to exactly one cluster.

    @param[in] tolerance (double): Maximum error for allocation of a synchronous ensemble.

    @return (list) Groups (lists) of indexes of synchronous oscillators,
             e.g. [ [0, 2], [1], [3, 4] ].

    """
    # The call resolves to the module-level helper imported from pyclustering.utils.
    return allocate_sync_ensembles(self._membrane_dynamic_pointer, tolerance, 20.0, None)
def __alfa_function(self, time, alfa, betta):
    """!
    @brief Evaluates the alfa-function `alfa * t * exp(-betta * t)`.

    @param[in] time (double): Time elapsed since the spike was generated.
    @param[in] alfa (double): Amplitude coefficient of the alfa-function.
    @param[in] betta (double): Decay coefficient of the alfa-function.

    @return (double) Value of the alfa-function.

    """
    decay = math.exp(-betta * time)
    return alfa * time * decay
| 47.521739 | 231 | 0.617986 |
from scipy.integrate import odeint
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.hhn_wrapper as wrapper
from pyclustering.nnet import *
from pyclustering.utils import allocate_sync_ensembles
import numpy
import random
class hhn_parameters:
def __init__(self):
random.random() * 2.0 - 1.0
f.nu)
)
e = []
def __repr__(self):
return "%s, %s" % (self.membrane_potential, self.pulse_generation_time)
class hhn_network(network):
    """Oscillatory network based on the Hodgkin-Huxley neuron model:
    peripheral neurons driven by an external stimulus plus two central
    elements providing inhibitory feedback.

    NOTE(review): this extract references a `central_element` class and a
    `math` import that are garbled/missing in the visible chunk — confirm
    against the upstream pyclustering sources.
    """
    def __init__(self, num_osc, stimulus = None, parameters = None, type_conn = None, type_conn_represent = conn_represent.MATRIX, ccore = True):
        """Create the network.

        num_osc    -- number of peripheral oscillators.
        stimulus   -- per-oscillator external stimulus (defaults to zeros).
        parameters -- hhn_parameters instance (default-constructed if None).
        ccore      -- when True and the C library is workable, delegate to it.
        """
        super().__init__(num_osc, conn_type.NONE, type_conn_represent)
        if stimulus is None:
            self._stimulus = [0.0] * num_osc
        else:
            self._stimulus = stimulus
        if parameters is not None:
            self._params = parameters
        else:
            self._params = hhn_parameters()
        self.__ccore_hhn_pointer = None
        self.__ccore_hhn_dynamic_pointer = None
        if (ccore is True) and ccore_library.workable():
            self.__ccore_hhn_pointer = wrapper.hhn_create(num_osc, self._params)
        else:
            # Pure-Python state: membrane potential and channel conductances
            # per peripheral neuron, plus pulse bookkeeping, noise, and the
            # two central elements.
            self._membrane_dynamic_pointer = None
            self._membrane_potential = [0.0] * self._num_osc
            self._active_cond_sodium = [0.0] * self._num_osc
            self._inactive_cond_sodium = [0.0] * self._num_osc
            self._active_cond_potassium = [0.0] * self._num_osc
            self._link_activation_time = [0.0] * self._num_osc
            self._link_pulse_counter = [0.0] * self._num_osc
            self._link_weight3 = [0.0] * self._num_osc
            self._pulse_generation_time = [[] for i in range(self._num_osc)]
            self._pulse_generation = [False] * self._num_osc
            self._noise = [random.random() * 2.0 - 1.0 for i in range(self._num_osc)]
            self._central_element = [central_element(), central_element()]
    def __del__(self):
        # Release the C-core network if one was created.
        if self.__ccore_hhn_pointer:
            wrapper.hhn_destroy(self.__ccore_hhn_pointer)
    def simulate(self, steps, time, solution = solve_type.RK4):
        """Simulate for `time` over `steps` iterations (static simulation)."""
        return self.simulate_static(steps, time, solution)
    def simulate_static(self, steps, time, solution = solve_type.RK4):
        """Run the simulation and return (time, peripheral, central) dynamics.

        Raises NameError for unsupported solvers (FAST always; RKF45 in the
        pure-Python path).
        """
        if solution == solve_type.FAST:
            raise NameError("Solver FAST is not support due to low accuracy that leads to huge error.")
        self._membrane_dynamic_pointer = None
        if self.__ccore_hhn_pointer is not None:
            # Delegate the whole simulation to the C core and collect results.
            self.__ccore_hhn_dynamic_pointer = wrapper.hhn_dynamic_create(True, False, False, False)
            wrapper.hhn_simulate(self.__ccore_hhn_pointer, steps, time, solution, self._stimulus, self.__ccore_hhn_dynamic_pointer)
            peripheral_membrane_potential = wrapper.hhn_dynamic_get_peripheral_evolution(self.__ccore_hhn_dynamic_pointer, 0)
            central_membrane_potential = wrapper.hhn_dynamic_get_central_evolution(self.__ccore_hhn_dynamic_pointer, 0)
            dynamic_time = wrapper.hhn_dynamic_get_time(self.__ccore_hhn_dynamic_pointer)
            self._membrane_dynamic_pointer = peripheral_membrane_potential
            wrapper.hhn_dynamic_destroy(self.__ccore_hhn_dynamic_pointer)
            return dynamic_time, peripheral_membrane_potential, central_membrane_potential
        if solution == solve_type.RKF45:
            raise NameError("Solver RKF45 is not support in python version.")
        dyn_peripheral = [self._membrane_potential[:]]
        dyn_central = [[0.0, 0.0]]
        dyn_time = [0.0]
        step = time / steps
        int_step = step / 10.0
        for t in numpy.arange(step, time + step, step):
            (memb_peripheral, memb_central) = self._calculate_states(solution, t, step, int_step)
            dyn_peripheral.append(memb_peripheral)
            dyn_central.append(memb_central)
            dyn_time.append(t)
        self._membrane_dynamic_pointer = dyn_peripheral
        return dyn_time, dyn_peripheral, dyn_central
    def _calculate_states(self, solution, t, step, int_step):
        """Integrate one outer step for every neuron and update the network.

        Returns (peripheral membrane potentials, central membrane potentials).
        """
        next_membrane = [0.0] * self._num_osc
        next_active_sodium = [0.0] * self._num_osc
        next_inactive_sodium = [0.0] * self._num_osc
        next_active_potassium = [0.0] * self._num_osc
        # Integrate each peripheral neuron independently over [t - step, t).
        for index in range(0, self._num_osc, 1):
            result = odeint(self.hnn_state,
                            [self._membrane_potential[index], self._active_cond_sodium[index], self._inactive_cond_sodium[index], self._active_cond_potassium[index]],
                            numpy.arange(t - step, t, int_step),
                            (index, ))
            [ next_membrane[index], next_active_sodium[index], next_inactive_sodium[index], next_active_potassium[index] ] = result[len(result) - 1][0:4]
        next_cn_membrane = [0.0, 0.0]
        next_cn_active_sodium = [0.0, 0.0]
        next_cn_inactive_sodium = [0.0, 0.0]
        next_cn_active_potassium = [0.0, 0.0]
        # Central elements are addressed as indices num_osc and num_osc + 1.
        for index in range(0, len(self._central_element)):
            result = odeint(self.hnn_state,
                            [self._central_element[index].membrane_potential, self._central_element[index].active_cond_sodium, self._central_element[index].inactive_cond_sodium, self._central_element[index].active_cond_potassium],
                            numpy.arange(t - step, t, int_step),
                            (self._num_osc + index, ))
            [ next_cn_membrane[index], next_cn_active_sodium[index], next_cn_inactive_sodium[index], next_cn_active_potassium[index] ] = result[len(result) - 1][0:4]
        # Refresh multiplicative stimulus noise for the next step.
        self._noise = [ 1.0 + 0.01 * (random.random() * 2.0 - 1.0) for i in range(self._num_osc)]
        self.__update_peripheral_neurons(t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium)
        self.__update_central_neurons(t, next_cn_membrane, next_cn_active_sodium, next_cn_inactive_sodium, next_cn_active_potassium)
        return (next_membrane, next_cn_membrane)
    def __update_peripheral_neurons(self, t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium):
        """Commit integrated state, record pulses, and update w3 link weights."""
        self._membrane_potential = next_membrane[:]
        self._active_cond_sodium = next_active_sodium[:]
        self._inactive_cond_sodium = next_inactive_sodium[:]
        self._active_cond_potassium = next_active_potassium[:]
        for index in range(0, self._num_osc):
            # A pulse is registered on the upward zero crossing only.
            if self._pulse_generation[index] is False:
                if self._membrane_potential[index] >= 0.0:
                    self._pulse_generation[index] = True
                    self._pulse_generation_time[index].append(t)
            elif self._membrane_potential[index] < 0.0:
                self._pulse_generation[index] = False
            if self._link_weight3[index] == 0.0:
                # Activate the w3 link once the potential stays above the
                # threshold long enough (>= 1 / eps accumulated time).
                if self._membrane_potential[index] > self._params.threshold:
                    self._link_pulse_counter[index] += step
                    if self._link_pulse_counter[index] >= 1 / self._params.eps:
                        self._link_weight3[index] = self._params.w3
                        self._link_activation_time[index] = t
            elif not ((self._link_activation_time[index] < t) and (t < self._link_activation_time[index] + self._params.deltah)):
                # Deactivate the link once its deltah window has elapsed.
                self._link_weight3[index] = 0.0
                self._link_pulse_counter[index] = 0.0
    def __update_central_neurons(self, t, next_cn_membrane, next_cn_active_sodium, next_cn_inactive_sodium, next_cn_active_potassium):
        """Commit integrated state of the central elements and record pulses."""
        for index in range(0, len(self._central_element)):
            self._central_element[index].membrane_potential = next_cn_membrane[index]
            self._central_element[index].active_cond_sodium = next_cn_active_sodium[index]
            self._central_element[index].inactive_cond_sodium = next_cn_inactive_sodium[index]
            self._central_element[index].active_cond_potassium = next_cn_active_potassium[index]
            if self._central_element[index].pulse_generation is False:
                if self._central_element[index].membrane_potential >= 0.0:
                    self._central_element[index].pulse_generation = True
                    self._central_element[index].pulse_generation_time.append(t)
            elif self._central_element[index].membrane_potential < 0.0:
                self._central_element[index].pulse_generation = False
    def hnn_state(self, inputs, t, argv):
        """Right-hand side of the Hodgkin-Huxley ODE for neuron `argv`.

        inputs -- [v, m, h, n]: membrane potential and gating variables.
        t      -- current time.
        argv   -- neuron index; indices >= num_osc select a central element.
        Returns [dv, dm, dh, dn].
        """
        index = argv
        v = inputs[0]
        m = inputs[1]
        h = inputs[2]
        n = inputs[3]
        # Ionic currents (sodium, potassium, leak).
        active_sodium_part = self._params.gNa * (m ** 3) * h * (v - self._params.vNa)
        inactive_sodium_part = self._params.gK * (n ** 4) * (v - self._params.vK)
        active_potassium_part = self._params.gL * (v - self._params.vL)
        Iion = active_sodium_part + inactive_sodium_part + active_potassium_part
        Iext = 0.0
        Isyn = 0.0
        if index < self._num_osc:
            # Peripheral neuron: noisy stimulus plus inhibition driven by the
            # two central elements' pulse histories (alfa-function kernels).
            Iext = self._stimulus[index] * self._noise[index]
            memory_impact1 = 0.0
            for i in range(0, len(self._central_element[0].pulse_generation_time)):
                memory_impact1 += self.__alfa_function(t - self._central_element[0].pulse_generation_time[i], self._params.alfa_inhibitory, self._params.betta_inhibitory);
            memory_impact2 = 0.0
            for i in range(0, len(self._central_element[1].pulse_generation_time)):
                memory_impact2 += self.__alfa_function(t - self._central_element[1].pulse_generation_time[i], self._params.alfa_inhibitory, self._params.betta_inhibitory);
            Isyn = self._params.w2 * (v - self._params.Vsyninh) * memory_impact1 + self._link_weight3[index] * (v - self._params.Vsyninh) * memory_impact2;
        else:
            central_index = index - self._num_osc
            if central_index == 0:
                # First central element: excited by all peripheral pulses.
                Iext = self._params.Icn1
                memory_impact = 0.0
                for index_oscillator in range(0, self._num_osc):
                    for index_generation in range(0, len(self._pulse_generation_time[index_oscillator])):
                        memory_impact += self.__alfa_function(t - self._pulse_generation_time[index_oscillator][index_generation], self._params.alfa_excitatory, self._params.betta_excitatory);
                Isyn = self._params.w1 * (v - self._params.Vsynexc) * memory_impact
            elif central_index == 1:
                Iext = self._params.Icn2
                Isyn = 0.0
            else:
                assert 0;
        dv = -Iion + Iext - Isyn
        # Standard Hodgkin-Huxley gating-rate equations relative to vRest.
        potential = v - self._params.vRest
        am = (2.5 - 0.1 * potential) / (math.exp(2.5 - 0.1 * potential) - 1.0)
        ah = 0.07 * math.exp(-potential / 20.0)
        an = (0.1 - 0.01 * potential) / (math.exp(1.0 - 0.1 * potential) - 1.0)
        bm = 4.0 * math.exp(-potential / 18.0)
        bh = 1.0 / (math.exp(3.0 - 0.1 * potential) + 1.0)
        bn = 0.125 * math.exp(-potential / 80.0)
        dm = am * (1.0 - m) - bm * m
        dh = ah * (1.0 - h) - bh * h
        dn = an * (1.0 - n) - bn * n
        return [dv, dm, dh, dn]
    def allocate_sync_ensembles(self, tolerance = 0.1):
        """Group oscillators into synchronous ensembles from the last run."""
        return allocate_sync_ensembles(self._membrane_dynamic_pointer, tolerance, 20.0, None)
    def __alfa_function(self, time, alfa, betta):
        # Alfa-function kernel: alfa * t * exp(-betta * t).
        return alfa * time * math.exp(-betta * time)
| true | true |
1c34ff459644f5dd3812472989b58fef5b5d706d | 4,744 | py | Python | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/aio/operations/_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 1 | 2020-03-05T18:10:35.000Z | 2020-03-05T18:10:35.000Z | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/aio/operations/_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/aio/operations/_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """Operations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.consumption.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.OperationListResult"]:
        """Lists all of the available consumption REST API operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.consumption.models.OperationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.OperationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page hits the operation URL with api-version; subsequent
            # pages reuse the service-provided next_link as-is.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize('OperationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.Consumption/operations'}  # type: ignore
| 44.754717 | 133 | 0.660835 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """Async operations for the Microsoft.Consumption ``operations`` group.

    Not meant to be instantiated directly: the generated service client
    creates it and attaches it as an attribute.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client plus the model (de)serializers supplied by the
        # generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.OperationListResult"]:
        """List all available consumption REST API operations.

        Returns an AsyncItemPaged over OperationListResult pages; raises
        HttpResponseError (via map_error) on non-200 responses.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the operation URL +
            # api-version; follow-up pages reuse the service-provided next_link.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize('OperationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.Consumption/operations'}
| true | true |
1c34ff63a8b9e3f11d2b2c93327b77046e395fa0 | 2,579 | py | Python | chaosoci/core/networking/filters.py | LaudateCorpus1/chaostoolkit-oci | 36da01a47dd1b0881ec21cb70775fde5011b38ed | [
"Apache-2.0"
] | 15 | 2018-11-20T15:36:52.000Z | 2021-12-16T21:46:56.000Z | chaosoci/core/networking/filters.py | LaudateCorpus1/chaostoolkit-oci | 36da01a47dd1b0881ec21cb70775fde5011b38ed | [
"Apache-2.0"
] | 21 | 2018-11-26T19:11:52.000Z | 2021-12-15T19:38:37.000Z | chaosoci/core/networking/filters.py | LaudateCorpus1/chaostoolkit-oci | 36da01a47dd1b0881ec21cb70775fde5011b38ed | [
"Apache-2.0"
] | 8 | 2018-11-20T15:37:09.000Z | 2021-07-28T20:27:19.000Z | # coding: utf-8
# Copyright 2020, Oracle Corporation and/or its affiliates.
__all__ = ["filter_route_tables", "filter_nat_gateway", "filter_service_gateway", "filter_internet_gateway"]
from typing import Any, Dict, List
from chaoslib.exceptions import ActivityFailed
from chaosoci.util.constants import FILTER_ERR
from logzero import logger
from oci.core import VirtualNetworkClient
from oci.core.models import (RouteTable, NatGateway, InternetGateway, ServiceGateway)
def filter_route_tables(route_tables: List[RouteTable] = None,
                        filters: Dict[str, Any] = None) -> List[RouteTable]:
    """Select the route tables whose attributes satisfy every filter."""
    selection = filter_networks("Route Tables", route_tables, filters)
    return selection
def filter_nat_gateway(nat_gateways: List[NatGateway] = None,
                       filters: Dict[str, Any] = None) -> List[NatGateway]:
    """Select the NAT gateways whose attributes satisfy every filter."""
    selection = filter_networks("Nat Gateway", nat_gateways, filters)
    return selection
def filter_internet_gateway(internet_gateways: List[InternetGateway] = None,
                            filters: Dict[str, Any] = None) -> List[InternetGateway]:
    """Select the internet gateways whose attributes satisfy every filter."""
    selection = filter_networks("Internet Gateway", internet_gateways, filters)
    return selection
def filter_service_gateway(service_gateways: List[ServiceGateway] = None,
                           filters: Dict[str, Any] = None) -> List[ServiceGateway]:
    """Select the service gateways whose attributes satisfy every filter."""
    selection = filter_networks("Service Gateway", service_gateways, filters)
    return selection
def filter_networks(gateway_type, gateways, filters):
    """Return the gateways whose attributes match all key/value pairs in filters.

    Parameters:
        gateway_type (str): human-readable resource name used in error messages.
        gateways (list): resources to filter; each exposes ``attribute_map``.
        filters (dict): attribute-name -> required-value pairs.

    Raises:
        ActivityFailed: when no gateways are given, or when a filter key is
        not a valid attribute (partial matching could return unwanted
        resources, so unknown keys are rejected outright).
    """
    gateways = gateways or None
    if gateways is None:
        # Bug fix: ActivityFailed does not interpolate extra positional args,
        # so the message must be formatted explicitly.
        raise ActivityFailed('No {} were found.'.format(gateway_type))
    filters_set = {x for x in filters}
    available_filters_set = {x for x in gateways[0].attribute_map}
    # Partial filtering may return gateways we do not want. We avoid it.
    if not filters_set.issubset(available_filters_set):
        raise ActivityFailed(FILTER_ERR)
    # Keep only the gateways that match every provided filter.
    filtered = []
    for gateway in gateways:
        if all(val == getattr(gateway, attr, None) for attr, val in filters.items()):
            filtered.append(gateway)
    return filtered
| 33.493506 | 108 | 0.691741 |
__all__ = ["filter_route_tables", "filter_nat_gateway", "filter_service_gateway", "filter_internet_gateway"]
from typing import Any, Dict, List
from chaoslib.exceptions import ActivityFailed
from chaosoci.util.constants import FILTER_ERR
from logzero import logger
from oci.core import VirtualNetworkClient
from oci.core.models import (RouteTable, NatGateway, InternetGateway, ServiceGateway)
def filter_route_tables(route_tables: List[RouteTable] = None,
                        filters: Dict[str, Any] = None) -> List[RouteTable]:
    """Return only those route tables that match the filters provided."""
    return filter_networks("Route Tables", route_tables, filters)
def filter_nat_gateway(nat_gateways: List[NatGateway] = None,
                       filters: Dict[str, Any] = None) -> List[NatGateway]:
    """Return only those NAT gateways that match the filters provided."""
    return filter_networks("Nat Gateway", nat_gateways, filters)
def filter_internet_gateway(internet_gateways: List[InternetGateway] = None,
                            filters: Dict[str, Any] = None) -> List[InternetGateway]:
    """Return only those internet gateways that match the filters provided."""
    return filter_networks("Internet Gateway", internet_gateways, filters)
def filter_service_gateway(service_gateways: List[ServiceGateway] = None,
                           filters: Dict[str, Any] = None) -> List[ServiceGateway]:
    """Return only those service gateways that match the filters provided."""
    return filter_networks("Service Gateway", service_gateways, filters)
def filter_networks(gateway_type, gateways, filters):
    """Return the gateways whose attributes match all key/value pairs in filters.

    Parameters:
        gateway_type (str): human-readable resource name used in error messages.
        gateways (list): resources to filter; each exposes ``attribute_map``.
        filters (dict): attribute-name -> required-value pairs.

    Raises:
        ActivityFailed: when no gateways are given, or when a filter key is
        not a valid attribute (partial matching could return unwanted
        resources, so unknown keys are rejected outright).
    """
    gateways = gateways or None
    if gateways is None:
        # Bug fix: ActivityFailed does not interpolate extra positional args,
        # so the message must be formatted explicitly.
        raise ActivityFailed('No {} were found.'.format(gateway_type))
    filters_set = {x for x in filters}
    available_filters_set = {x for x in gateways[0].attribute_map}
    # Reject unknown filter keys: partial matching could return resources we
    # do not want.
    if not filters_set.issubset(available_filters_set):
        raise ActivityFailed(FILTER_ERR)
    # Keep only the gateways that match every provided filter.
    filtered = []
    for gateway in gateways:
        if all(val == getattr(gateway, attr, None) for attr, val in filters.items()):
            filtered.append(gateway)
    return filtered
| true | true |
1c34ffd069311c30480e2a2287d5535272434e89 | 259 | py | Python | vr/server/templatetags/formfield.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | null | null | null | vr/server/templatetags/formfield.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | 3 | 2016-12-15T21:55:02.000Z | 2019-02-13T11:43:29.000Z | vr/server/templatetags/formfield.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | 2 | 2017-01-16T09:31:03.000Z | 2022-03-26T09:21:36.000Z | from django import template
register = template.Library()
# Allow rendering formfields with our custom template include by saying
# {% formfield form.somefield %}
@register.inclusion_tag('_formfield.html')
def formfield(field):
    """Render *field* through the shared _formfield.html include.

    Template usage: {% formfield form.somefield %}
    """
    return {'field': field}
| 23.545455 | 71 | 0.760618 | from django import template
register = template.Library()
@register.inclusion_tag('_formfield.html')
def formfield(field):
    """Render *field* through the shared _formfield.html include.

    Template usage: {% formfield form.somefield %}
    """
    return {'field': field}
| true | true |
1c34ffde69a6a33cef6377790d2deabc7d07eadf | 6,945 | py | Python | research/maskgan/models/rnn_zaremba.py | afish880/TensorTest | a41f00ac171cf53539b4e2de47f2e15ccb848c90 | [
"Apache-2.0"
] | 1 | 2019-02-04T02:44:37.000Z | 2019-02-04T02:44:37.000Z | research/maskgan/models/rnn_zaremba.py | afish880/TensorTest | a41f00ac171cf53539b4e2de47f2e15ccb848c90 | [
"Apache-2.0"
] | null | null | null | research/maskgan/models/rnn_zaremba.py | afish880/TensorTest | a41f00ac171cf53539b4e2de47f2e15ccb848c90 | [
"Apache-2.0"
] | 1 | 2021-05-08T11:27:53.000Z | 2021-05-08T11:27:53.000Z | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple RNN model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def generator(hparams,
              inputs,
              targets,
              targets_present,
              is_training,
              is_validating,
              reuse=None):
  """Define the Generator graph.
  G will now impute tokens that have been masked from the input seqeunce.

  Args:
    hparams: hyperparameters (gen_rnn_size and gen_num_layers are used here).
    inputs: token ids fed to the embedding; indexed as inputs[:, t] per step
      (presumably [batch_size, sequence_length] -- TODO confirm with caller).
    targets: real token ids, indexed as targets[:, t].
    targets_present: boolean mask, True where the real token is kept.
    is_training: Python bool; enables dropout and scheduled sampling.
    is_validating: Python bool; forces teacher forcing.
    reuse: variable-scope reuse flag.

  Returns:
    (sequence, logits, log_probs, initial_state, final_state), each of the
    first three stacked over time on axis 1.
  """
  tf.logging.warning(
      'Undirectional generative model is not a useful model for this MaskGAN '
      'because future context is needed. Use only for debugging purposes.')
  init_scale = 0.05
  initializer = tf.random_uniform_initializer(-init_scale, init_scale)
  with tf.variable_scope('gen', reuse=reuse, initializer=initializer):
    def lstm_cell():
      return tf.contrib.rnn.BasicLSTMCell(hparams.gen_rnn_size,
                                          forget_bias=0.0,
                                          state_is_tuple=True,
                                          reuse=reuse)
    attn_cell = lstm_cell
    if is_training and FLAGS.keep_prob < 1:
      # Wrap each cell with output dropout during training.
      def attn_cell():
        return tf.contrib.rnn.DropoutWrapper(
            lstm_cell(), output_keep_prob=FLAGS.keep_prob)
    cell_gen = tf.contrib.rnn.MultiRNNCell(
        [attn_cell() for _ in range(hparams.gen_num_layers)],
        state_is_tuple=True)
    initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32)
    with tf.variable_scope('rnn'):
      sequence, logits, log_probs = [], [], []
      embedding = tf.get_variable('embedding',
                                  [FLAGS.vocab_size, hparams.gen_rnn_size])
      softmax_w = tf.get_variable('softmax_w',
                                  [hparams.gen_rnn_size, FLAGS.vocab_size])
      softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
      rnn_inputs = tf.nn.embedding_lookup(embedding, inputs)
      if is_training and FLAGS.keep_prob < 1:
        rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob)
      fake = None
      for t in xrange(FLAGS.sequence_length):
        if t > 0:
          tf.get_variable_scope().reuse_variables()
        # Input to the model is the first token to provide context. The
        # model will then predict token t > 0.
        if t == 0:
          # Always provide the real input at t = 0.
          state_gen = initial_state
          rnn_inp = rnn_inputs[:, t]
        # If the input is present, read in the input at t.
        # If the input is not present, read in the previously generated.
        else:
          real_rnn_inp = rnn_inputs[:, t]
          fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake)
          # While validating, the decoder should be operating in teacher
          # forcing regime. Also, if we're just training with cross_entropy
          # use teacher forcing.
          if is_validating or (is_training and
                               FLAGS.gen_training_strategy == 'cross_entropy'):
            rnn_inp = real_rnn_inp
          else:
            rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp,
                               fake_rnn_inp)
        # RNN.
        rnn_out, state_gen = cell_gen(rnn_inp, state_gen)
        logit = tf.matmul(rnn_out, softmax_w) + softmax_b
        # Real sample.
        real = targets[:, t]
        categorical = tf.contrib.distributions.Categorical(logits=logit)
        fake = categorical.sample()
        log_prob = categorical.log_prob(fake)
        # Output for Generator will either be generated or the input.
        #
        # If present: Return real.
        # If not present: Return fake.
        output = tf.where(targets_present[:, t], real, fake)
        # Add to lists.
        sequence.append(output)
        log_probs.append(log_prob)
        logits.append(logit)
      # Produce the RNN state had the model operated only
      # over real data.
      real_state_gen = initial_state
      for t in xrange(FLAGS.sequence_length):
        tf.get_variable_scope().reuse_variables()
        rnn_inp = rnn_inputs[:, t]
        # RNN.
        rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen)
      final_state = real_state_gen
  return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack(
      log_probs, axis=1), initial_state, final_state)
def discriminator(hparams, sequence, is_training, reuse=None):
  """Define the Discriminator graph.

  Args:
    hparams: hyperparameters (dis_rnn_size and dis_num_layers are used here).
    sequence: token ids to score; cast to int32 and indexed per time step
      (presumably [batch_size, sequence_length] -- TODO confirm with caller).
    is_training: Python bool; enables dropout.
    reuse: variable-scope reuse flag.

  Returns:
    Per-token linear scores stacked over time, squeezed to
    [batch_size, sequence_length].
  """
  tf.logging.warning(
      'Undirectional Discriminative model is not a useful model for this '
      'MaskGAN because future context is needed. Use only for debugging '
      'purposes.')
  sequence = tf.cast(sequence, tf.int32)
  with tf.variable_scope('dis', reuse=reuse):
    def lstm_cell():
      return tf.contrib.rnn.BasicLSTMCell(hparams.dis_rnn_size,
                                          forget_bias=0.0,
                                          state_is_tuple=True,
                                          reuse=reuse)
    attn_cell = lstm_cell
    if is_training and FLAGS.keep_prob < 1:
      # Wrap each cell with output dropout during training.
      def attn_cell():
        return tf.contrib.rnn.DropoutWrapper(
            lstm_cell(), output_keep_prob=FLAGS.keep_prob)
    cell_dis = tf.contrib.rnn.MultiRNNCell(
        [attn_cell() for _ in range(hparams.dis_num_layers)],
        state_is_tuple=True)
    state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32)
    with tf.variable_scope('rnn') as vs:
      predictions = []
      embedding = tf.get_variable('embedding',
                                  [FLAGS.vocab_size, hparams.dis_rnn_size])
      rnn_inputs = tf.nn.embedding_lookup(embedding, sequence)
      if is_training and FLAGS.keep_prob < 1:
        rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob)
      for t in xrange(FLAGS.sequence_length):
        if t > 0:
          tf.get_variable_scope().reuse_variables()
        rnn_in = rnn_inputs[:, t]
        rnn_out, state_dis = cell_dis(rnn_in, state_dis)
        # Prediction is linear output for Discriminator.
        pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs)
        predictions.append(pred)
  predictions = tf.stack(predictions, axis=1)
  return tf.squeeze(predictions, axis=2)
| 35.433673 | 80 | 0.624478 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def generator(hparams,
              inputs,
              targets,
              targets_present,
              is_training,
              is_validating,
              reuse=None):
  """Define the unidirectional MaskGAN Generator graph.

  Imputes tokens that were masked out of the input sequence: at each step the
  real token is fed where present, otherwise the previously generated sample.

  Returns (sequence, logits, log_probs, initial_state, final_state).
  """
  tf.logging.warning(
      'Undirectional generative model is not a useful model for this MaskGAN '
      'because future context is needed. Use only for debugging purposes.')
  init_scale = 0.05
  initializer = tf.random_uniform_initializer(-init_scale, init_scale)
  with tf.variable_scope('gen', reuse=reuse, initializer=initializer):
    def lstm_cell():
      return tf.contrib.rnn.BasicLSTMCell(hparams.gen_rnn_size,
                                          forget_bias=0.0,
                                          state_is_tuple=True,
                                          reuse=reuse)
    attn_cell = lstm_cell
    if is_training and FLAGS.keep_prob < 1:
      # Wrap each cell with output dropout during training.
      def attn_cell():
        return tf.contrib.rnn.DropoutWrapper(
            lstm_cell(), output_keep_prob=FLAGS.keep_prob)
    cell_gen = tf.contrib.rnn.MultiRNNCell(
        [attn_cell() for _ in range(hparams.gen_num_layers)],
        state_is_tuple=True)
    initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32)
    with tf.variable_scope('rnn'):
      sequence, logits, log_probs = [], [], []
      embedding = tf.get_variable('embedding',
                                  [FLAGS.vocab_size, hparams.gen_rnn_size])
      softmax_w = tf.get_variable('softmax_w',
                                  [hparams.gen_rnn_size, FLAGS.vocab_size])
      softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
      rnn_inputs = tf.nn.embedding_lookup(embedding, inputs)
      if is_training and FLAGS.keep_prob < 1:
        rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob)
      fake = None
      for t in xrange(FLAGS.sequence_length):
        if t > 0:
          tf.get_variable_scope().reuse_variables()
        if t == 0:
          # Always provide the real input at t = 0 for context.
          state_gen = initial_state
          rnn_inp = rnn_inputs[:, t]
        else:
          # Feed the real token where present, otherwise the prior sample.
          real_rnn_inp = rnn_inputs[:, t]
          fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake)
          # While validating, or when training with cross_entropy only,
          # use teacher forcing.
          if is_validating or (is_training and
                               FLAGS.gen_training_strategy == 'cross_entropy'):
            rnn_inp = real_rnn_inp
          else:
            rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp,
                               fake_rnn_inp)
        # RNN.
        rnn_out, state_gen = cell_gen(rnn_inp, state_gen)
        logit = tf.matmul(rnn_out, softmax_w) + softmax_b
        # Real sample.
        real = targets[:, t]
        categorical = tf.contrib.distributions.Categorical(logits=logit)
        fake = categorical.sample()
        log_prob = categorical.log_prob(fake)
        # Output for Generator will either be generated or the input.
        #
        # If present: Return real.
        # If not present: Return fake.
        output = tf.where(targets_present[:, t], real, fake)
        # Add to lists.
        sequence.append(output)
        log_probs.append(log_prob)
        logits.append(logit)
      # Produce the RNN state had the model operated only
      # over real data.
      real_state_gen = initial_state
      for t in xrange(FLAGS.sequence_length):
        tf.get_variable_scope().reuse_variables()
        rnn_inp = rnn_inputs[:, t]
        # RNN.
        rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen)
      final_state = real_state_gen
  return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack(
      log_probs, axis=1), initial_state, final_state)
def discriminator(hparams, sequence, is_training, reuse=None):
  """Define the unidirectional MaskGAN Discriminator graph.

  Scores each token of `sequence` with a linear head on top of a multi-layer
  LSTM; returns the scores stacked over time and squeezed to 2-D.
  """
  tf.logging.warning(
      'Undirectional Discriminative model is not a useful model for this '
      'MaskGAN because future context is needed. Use only for debugging '
      'purposes.')
  sequence = tf.cast(sequence, tf.int32)
  with tf.variable_scope('dis', reuse=reuse):
    def lstm_cell():
      return tf.contrib.rnn.BasicLSTMCell(hparams.dis_rnn_size,
                                          forget_bias=0.0,
                                          state_is_tuple=True,
                                          reuse=reuse)
    attn_cell = lstm_cell
    if is_training and FLAGS.keep_prob < 1:
      # Wrap each cell with output dropout during training.
      def attn_cell():
        return tf.contrib.rnn.DropoutWrapper(
            lstm_cell(), output_keep_prob=FLAGS.keep_prob)
    cell_dis = tf.contrib.rnn.MultiRNNCell(
        [attn_cell() for _ in range(hparams.dis_num_layers)],
        state_is_tuple=True)
    state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32)
    with tf.variable_scope('rnn') as vs:
      predictions = []
      embedding = tf.get_variable('embedding',
                                  [FLAGS.vocab_size, hparams.dis_rnn_size])
      rnn_inputs = tf.nn.embedding_lookup(embedding, sequence)
      if is_training and FLAGS.keep_prob < 1:
        rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob)
      for t in xrange(FLAGS.sequence_length):
        if t > 0:
          tf.get_variable_scope().reuse_variables()
        rnn_in = rnn_inputs[:, t]
        rnn_out, state_dis = cell_dis(rnn_in, state_dis)
        # Prediction is linear output for Discriminator.
        pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs)
        predictions.append(pred)
  predictions = tf.stack(predictions, axis=1)
  return tf.squeeze(predictions, axis=2)
| true | true |
1c350009e0a7420971ced657135ace27ba273c39 | 7,212 | py | Python | lesson13/sunzhaohui/reboot/users/group/__init__.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson13/sunzhaohui/reboot/users/group/__init__.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson13/sunzhaohui/reboot/users/group/__init__.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | # _*_ encoding:utf-8 _*_
__author__ = 'sunzhaohui'
__date__ = '2019-08-05 17:20'
from django.shortcuts import render
from django.http import HttpResponse,QueryDict,HttpResponseRedirect,JsonResponse,Http404
from django.urls import reverse
from django.conf import settings
from users.models import UserProfile
from django.contrib.auth.models import Group
from django.db.models import Q
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from users.forms import RoleProfileForm
from django.contrib.auth.hashers import make_password
from django.views.generic import View,DetailView,ListView
from django.contrib.auth import authenticate, login, logout
# Create your views here.
# 用户认证及权限管理模块导入
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from pure_pagination.mixins import PaginationMixin
class RoleListView(LoginRequiredMixin, PermissionRequiredMixin, PaginationMixin, ListView):
    """List, create and delete roles (django.contrib.auth Groups).

    GET renders a paginated role list, optionally filtered by the
    ``?keyword=`` query parameter; POST creates a new role from a
    ``RoleProfileForm``; DELETE removes the role whose id is carried in the
    request body.  POST/DELETE answer with a JSON status payload.
    """
    model = Group
    template_name = "users/rolelist.html"
    context_object_name = "rolelist"
    # Redirect target for unauthenticated users (defaults to settings.LOGIN_URL).
    login_url = '/login/'
    redirect_field_name = 'redirect_to'
    permission_required = ('users.view_group', 'users.delete_group',
                           'users.add_group', 'users.change_group')
    paginate_by = 2
    keyword = ''  # last search keyword, echoed back to the template

    def get_queryset(self):
        """Filter the role queryset by the optional ``?keyword=`` parameter."""
        queryset = super(RoleListView, self).get_queryset()
        self.keyword = self.request.GET.get('keyword', '').strip()
        if self.keyword:
            queryset = queryset.filter(Q(name__icontains=self.keyword))
        return queryset

    def get_context_data(self, **kwargs):
        """Expose the current search keyword to the template."""
        context = super(RoleListView, self).get_context_data(**kwargs)
        context['keyword'] = self.keyword
        return context

    def post(self, request):
        """Create a new role from the submitted form; respond with JSON."""
        _roleForm = RoleProfileForm(request.POST)
        if _roleForm.is_valid():
            try:
                data = _roleForm.cleaned_data
                self.model.objects.create(**data)
                res = {'code': 0, 'result': '添加角色成功'}
            except Exception:
                # Was a bare ``except:`` — narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                res = {'code': 1, 'errmsg': '添加角色失败'}
        else:
            # Form validation failed: return the serialized field errors.
            res = {'code': 1, 'errmsg': _roleForm.errors.as_json()}
        return JsonResponse(res, safe=True)

    def delete(self, request, **kwargs):
        """Delete the role whose id is carried in the request body."""
        data = QueryDict(request.body).dict()
        role_id = data['id']  # renamed from ``id`` to avoid shadowing the builtin
        try:
            self.model.objects.get(id=role_id).delete()
            res = {'code': 0, 'result': '删除角色成功'}
        except Exception:
            # e.g. Group.DoesNotExist for an unknown id.
            res = {'code': 1, 'errmsg': '删除角色失败'}
        return JsonResponse(res, safe=True)
class RolePowerView(LoginRequiredMixin, PermissionRequiredMixin, DetailView):
    """Show and update the users and permissions attached to one role (Group).

    GET renders the role together with the users/permissions it already has
    and the ones it does not have yet; POST replaces both sets from the
    submitted multi-select form fields and redirects via the jump page.
    """
    # Redirect target for unauthenticated users (defaults to settings.LOGIN_URL).
    login_url = '/login/'
    redirect_field_name = 'redirect_to'
    permission_required = ('users.view_group', 'users.delete_group',
                           'users.add_group', 'users.change_group')
    template_name = 'users/role_power.html'
    model = Group
    context_object_name = 'role'

    def get_context_data(self, **kwargs):
        """Add the role's current and missing users/permissions to the context."""
        context = super(RolePowerView, self).get_context_data(**kwargs)
        context['role_has_users'], context['role_has_permissions'] = self._get_role_power()
        context['role_not_users'], context['role_not_permissions'] = self._get_role_not_power()
        return context

    def _get_role_power(self):
        """Return ``(users, permissions)`` currently attached to the role.

        Raises Http404 when the role does not exist.
        """
        pk = self.kwargs.get(self.pk_url_kwarg)
        try:
            role = self.model.objects.get(pk=pk)
            return role.user_set.all(), role.permissions.all()
        except self.model.DoesNotExist:
            raise Http404

    def _get_role_not_power(self):
        """Return ``(users, permissions)`` the role does NOT have yet."""
        pk = self.kwargs.get(self.pk_url_kwarg)
        try:
            role = self.model.objects.get(pk=pk)
            # Evaluate the role's current sets once instead of re-evaluating
            # the queryset for every candidate (was one query per candidate).
            has_users = set(role.user_set.all())
            has_perms = set(role.permissions.all())
            users = [user for user in UserProfile.objects.all() if user not in has_users]
            perms = [perm for perm in Permission.objects.all() if perm not in has_perms]
            return users, perms
        except Exception:
            # Bug fix: the original returned a JsonResponse here, which the
            # caller then tried to unpack into two context entries and would
            # crash.  Return an empty pair instead so the page still renders.
            return [], []

    def post(self, request, **kwargs):
        """Replace the role's user and permission sets from the form data."""
        user_id_list = request.POST.getlist('users_selected', [])
        permission_id_list = request.POST.getlist('perms_selected', [])
        pk = kwargs.get("pk")
        try:
            role = self.model.objects.get(pk=pk)
            role.user_set.set(user_id_list)
            role.permissions.set(permission_id_list)
            res = {'code': 0, 'next_url': reverse("users:role_list"), 'result': '角色权限更新成功'}
        except Exception:
            res = {'code': 1, 'next_url': reverse("users:role_list"), 'errmsg': '角色权限更新失败'}
        return render(request, settings.JUMP_PAGE, res)
__author__ = 'sunzhaohui'
__date__ = '2019-08-05 17:20'
from django.shortcuts import render
from django.http import HttpResponse,QueryDict,HttpResponseRedirect,JsonResponse,Http404
from django.urls import reverse
from django.conf import settings
from users.models import UserProfile
from django.contrib.auth.models import Group
from django.db.models import Q
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from users.forms import RoleProfileForm
from django.contrib.auth.hashers import make_password
from django.views.generic import View,DetailView,ListView
from django.contrib.auth import authenticate, login, logout
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from pure_pagination.mixins import PaginationMixin
class RoleListView(LoginRequiredMixin,PermissionRequiredMixin,PaginationMixin,ListView):
model = Group
template_name = "users/rolelist.html"
context_object_name = "rolelist"
login_url = '/login/'
redirect_field_name = 'redirect_to'
permission_required = ('users.view_group','users.delete_group','users.add_group','users.change_group')
paginate_by = 2
keyword = ''
def get_queryset(self):
queryset = super(RoleListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword','').strip()
print(self.keyword)
if self.keyword:
queryset = queryset.filter(Q(name__icontains=self.keyword) )
return queryset
oleListView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
_roleForm = RoleProfileForm(request.POST)
if _roleForm.is_valid():
try:
data = _roleForm.cleaned_data
print(data)
self.model.objects.create(**data)
res = {'code': 0, 'result': '添加角色成功'}
except:
res = {'code': 1, 'errmsg': '添加角色失败'}
else:
print(_roleForm.errors)
print(_roleForm.errors.as_json())
print(_roleForm.errors['name'][0])
res = {'code': 1, 'errmsg': _roleForm.errors.as_json()}
return JsonResponse(res, safe=True)
def delete(self,request,**kwargs):
print(kwargs)
data = QueryDict(request.body).dict()
id = data['id']
print(id)
try:
self.model.objects.get(id=id).delete()
res = {'code': 0, 'result': '删除角色成功'}
except:
res = {'code': 1, 'errmsg': '删除角色失败'}
return JsonResponse(res, safe=True)
class RolePowerView(LoginRequiredMixin,PermissionRequiredMixin, DetailView):
login_url = '/login/'
redirect_field_name = 'redirect_to'
permission_required = ('users.view_group','users.delete_group','users.add_group','users.change_group')
template_name = 'users/role_power.html'
model = Group
context_object_name = 'role'
def get_context_data(self, **kwargs):
context = super(RolePowerView, self).get_context_data(**kwargs)
context['role_has_users'],context['role_has_permissions'] = self._get_role_power()
context['role_not_users'],context['role_not_permissions'] = self._get_role_not_power()
return context
def _get_role_power(self):
pk = self.kwargs.get(self.pk_url_kwarg)
try:
role = self.model.objects.get(pk=pk)
users = role.user_set.all()
return users,role.permissions.all()
except self.model.DoesNotExist:
raise Http404
def _get_role_not_power(self):
pk = self.kwargs.get(self.pk_url_kwarg)
try:
role = self.model.objects.get(pk=pk)
all_user = UserProfile.objects.all()
users = [user for user in all_user if user not in role.user_set.all()]
all_perms = Permission.objects.all()
perms = [perm for perm in all_perms if perm not in role.permissions.all()]
return users,perms
except:
return JsonResponse([], safe=False)
def post(self, request, **kwargs):
print(request.POST)
print(request.POST.getlist('users', []))
user_id_list = request.POST.getlist('users_selected', [])
permission_id_list = request.POST.getlist('perms_selected', [])
pk = kwargs.get("pk")
try:
role = self.model.objects.get(pk=pk)
print(user_id_list)
role.user_set.set(user_id_list)
role.permissions.set(permission_id_list)
res = {'code': 0, 'next_url': reverse("users:role_list"), 'result': '角色权限更新成功'}
except:
res = {'code': 1, 'next_url': reverse("users:role_list"), 'errmsg': '角色权限更新失败'}
return render(request, settings.JUMP_PAGE, res) | true | true |
1c350203e6d8985a8b2b61ffaaafac8504e26b3e | 1,208 | py | Python | nipype/interfaces/tests/test_auto_SSHDataGrabber.py | mfalkiewicz/nipype | 775e21b78fb1ffa2ff9cb12e6f052868bd44d052 | [
"Apache-2.0"
] | 1 | 2015-01-19T13:12:27.000Z | 2015-01-19T13:12:27.000Z | nipype/interfaces/tests/test_auto_SSHDataGrabber.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/tests/test_auto_SSHDataGrabber.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..io import SSHDataGrabber
def test_SSHDataGrabber_inputs():
    """Check that every input trait of SSHDataGrabber carries the expected metadata."""
    expected = {
        'base_directory': {'mandatory': True},
        'download_files': {'usedefault': True},
        'hostname': {'mandatory': True},
        'ignore_exception': {'deprecated': '1.0.0', 'nohash': True, 'usedefault': True},
        'password': {},
        'raise_on_empty': {'usedefault': True},
        'sort_filelist': {'mandatory': True},
        'ssh_log_to_file': {'usedefault': True},
        'template': {'mandatory': True},
        'template_args': {},
        'template_expression': {'usedefault': True},
        'username': {},
    }
    inputs = SSHDataGrabber.input_spec()
    for name, metadata in expected.items():
        trait = inputs.traits()[name]
        for metakey, value in metadata.items():
            assert getattr(trait, metakey) == value
def test_SSHDataGrabber_outputs():
    """SSHDataGrabber declares no static output-trait metadata to verify."""
    outputs = SSHDataGrabber.output_spec()
    for name, metadata in {}.items():
        trait = outputs.traits()[name]
        for metakey, value in metadata.items():
            assert getattr(trait, metakey) == value
| 26.844444 | 67 | 0.672185 |
from __future__ import unicode_literals
from ..io import SSHDataGrabber
def test_SSHDataGrabber_inputs():
input_map = dict(base_directory=dict(mandatory=True,
),
download_files=dict(usedefault=True,
),
hostname=dict(mandatory=True,
),
ignore_exception=dict(deprecated='1.0.0',
nohash=True,
usedefault=True,
),
password=dict(),
raise_on_empty=dict(usedefault=True,
),
sort_filelist=dict(mandatory=True,
),
ssh_log_to_file=dict(usedefault=True,
),
template=dict(mandatory=True,
),
template_args=dict(),
template_expression=dict(usedefault=True,
),
username=dict(),
)
inputs = SSHDataGrabber.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_SSHDataGrabber_outputs():
output_map = dict()
outputs = SSHDataGrabber.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| true | true |
1c3502398c0fb7fe2dc6975eb1f77e0bbc5b81ed | 4,624 | py | Python | timesmoothing.py | Numlet/pgw-python | 1731fccdd0d3a3a199246fdc6dc04058273237ab | [
"MIT"
] | 2 | 2020-11-13T09:22:06.000Z | 2021-11-13T14:50:50.000Z | timesmoothing.py | Numlet/pgw-python | 1731fccdd0d3a3a199246fdc6dc04058273237ab | [
"MIT"
] | null | null | null | timesmoothing.py | Numlet/pgw-python | 1731fccdd0d3a3a199246fdc6dc04058273237ab | [
"MIT"
] | 2 | 2020-12-07T09:34:07.000Z | 2021-06-23T08:39:38.000Z | # -*- coding: utf-8 -*-
#from settings import annualcycleraw, variablename_to_smooth, outputpath
import xarray as xr
import numpy as np
import sys
import math
from pathlib import Path
def filterdata(annualcycleraw, variablename_to_smooth, outputpath):
    """
    Temporally smooth an annual time series (typically daily resolution)
    with the spectral filter of Bosshard et al. (2011).

    :param annualcycleraw: path to a netCDF file holding the annual cycle to
        smooth — normally the change in one variable between two simulations
        (e.g. warming).  May be 3-D (time, y, x) or 4-D (time, level, y, x).
    :param variablename_to_smooth: name of the variable inside that file.
    :param outputpath: directory where the smoothed cycle is written as
        ``<variablename>_filteredcycle.nc``.
    """
    Diff = xr.open_dataset(annualcycleraw)[variablename_to_smooth].squeeze()
    coords = Diff.coords
    print('Dimension that is assumed to be time dimension is called: ', Diff.dims[0])
    print('shape of data: ', Diff.shape)

    # Work on the raw array; each grid point's time series is smoothed in place.
    Diff = Diff.data
    if Diff.ndim == 4:
        _, levels, ygrids, xgrids = Diff.shape
        for lev in range(levels):
            for yy in range(ygrids):
                for xx in range(xgrids):
                    Diff[:, lev, yy, xx] = harmonic_ac_analysis(Diff[:, lev, yy, xx])
    elif Diff.ndim == 3:
        _, ygrids, xgrids = Diff.shape
        for yy in range(ygrids):
            for xx in range(xgrids):
                Diff[:, yy, xx] = harmonic_ac_analysis(Diff[:, yy, xx])
    else:
        # Fixed typo in the original message ('Wrog dimensions ...').
        sys.exit('Wrong dimensions of input file; should be 3 or 4-D')

    print('Done with smoothing')

    # Re-wrap with the original coordinates and write the result out.
    Diff = xr.DataArray(Diff, coords=coords, name=variablename_to_smooth)
    Path(outputpath).mkdir(parents=True, exist_ok=True)
    outfile = outputpath + '/' + variablename_to_smooth + '_filteredcycle.nc'
    Diff.to_netcdf(outfile, mode='w')
    print('saved file ' + outfile)
def harmonic_ac_analysis(ts):
    """
    Reconstruct a smoothed time series from the first three harmonics of *ts*.

    The harmonics are estimated following formulas 12.19 - 12.23, p. 264 in
    von Storch & Zwiers; only the part needed for surrogate smoothing is
    implemented.

    :param ts: 1-D numpy array holding one annual time series.
    :return: numpy array of the same length containing the series mean plus
        the first three harmonics (the original docstring claimed a
        ``(hcts, mean)`` pair, but only the reconstruction is returned).
        If *ts* contains NaNs, an all-NaN array is returned instead.
    """
    if np.any(np.isnan(ts)):
        # Missing data: propagate NaNs instead of fitting garbage.
        return np.full_like(ts, np.nan)

    mean = ts.mean()   # restored at the end of the reconstruction
    lt = len(ts)
    period = lt        # one full cycle spans the whole series
    timevector = np.arange(1, lt + 1)

    # The estimation is only valid while the mode number stays below
    # floor(period / 2); for a yearly series and 3 modes this always holds.
    q = math.floor(period / 2.)

    n_modes = 3  # increase for less smoothing (was hard-coded via range(1, 4))
    smooths = np.zeros(lt) + mean
    for mode in range(1, n_modes + 1):
        if mode >= q:
            sys.exit('Whooops that should not be the case for a yearly timeseries! i (reconstruction grade) is larger than the number of timeseries elements / 2.')
        bracket = 2. * math.pi * mode / period * timevector
        # Fourier coefficients via dot products (scalar outputs).
        a = 2. / lt * ts.dot(np.cos(bracket))
        b = 2. / lt * ts.dot(np.sin(bracket))
        smooths = smooths + a * np.cos(bracket) + b * np.sin(bracket)
    return smooths
if __name__ == "__main__":
    # CLI entry point: <annual_cycle.nc> <variable_name> <output_dir>
    filterdata(str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]))
| 35.030303 | 157 | 0.721453 |
import xarray as xr
import numpy as np
import sys
import math
from pathlib import Path
def filterdata(annualcycleraw, variablename_to_smooth, outputpath):
Diff = xr.open_dataset(annualcycleraw)[variablename_to_smooth].squeeze()
coords = Diff.coords
print('Dimension that is assumed to be time dimension is called: ', Diff.dims[0])
print('shape of data: ', Diff.shape)
Diff = Diff.data
if len(Diff.shape) == 4:
times = Diff.shape[0]
levels = Diff.shape[1]
ygrids = Diff.shape[2]
xgrids = Diff.shape[3]
elif len(Diff.shape) == 3:
times = Diff.shape[0]
ygrids = Diff.shape[1]
xgrids = Diff.shape[2]
levels = 0
else:
sys.exit('Wrog dimensions of input file should be 3 or 4-D')
if len(Diff.shape) == 4:
for i in range(levels):
for yy in range(ygrids):
for xx in range(xgrids):
Diff[:,i,yy,xx] = harmonic_ac_analysis(Diff[:,i,yy,xx])
if len(Diff.shape) == 3:
for yy in range(ygrids):
for xx in range(xgrids):
Diff[:,yy,xx] = harmonic_ac_analysis(Diff[:,yy,xx])
print('Done with smoothing')
Diff = xr.DataArray(Diff, coords=coords, name=variablename_to_smooth)
Path(outputpath).mkdir(parents=True, exist_ok=True)
Diff.to_netcdf(outputpath+'/'+variablename_to_smooth+'_filteredcycle.nc', mode='w')
print('saved file '+outputpath+'/'+variablename_to_smooth+'_filteredcycle.nc')
def harmonic_ac_analysis(ts):
if np.any(np.isnan(ts) == True):
smooths = np.full_like(ts, np.nan)
return smooths
else:
mean = ts.mean()
lt = len(ts)
P = lt
hcts = np.zeros((4,lt))
timevector=np.arange(1,lt+1,1)
q = math.floor(P/2.)
for i in range(1,4):
if i < q:
bracket = 2.*math.pi*i/P*timevector
a = 2./lt*(ts.dot(np.cos(bracket)))
b = 2./lt*(ts.dot(np.sin(bracket)))
hcts[i-1,:] = a * np.cos(bracket) + b * np.sin(bracket)
else:
sys.exit('Whooops that should not be the case for a yearly timeseries! i (reconstruction grade) is larger than the number of timeseries elements / 2.')
smooths = sum(hcts[0:3,:]) + mean
return smooths
if __name__ == "__main__":
annualcycleraw = str(sys.argv[1])
variablename_to_smooth = str(sys.argv[2])
outputpath = str(sys.argv[3])
filterdata(annualcycleraw, variablename_to_smooth, outputpath)
| true | true |
1c35024bbad0f44318da90414d0b5f6b0469348a | 5,416 | py | Python | tests/wallet/did_wallet/test_did_rpc.py | ethgreen/ethgreen-blockchain | 8f1a450897ab7a82326aea7e57e18ac2c03a9e83 | [
"Apache-2.0"
] | 11 | 2021-11-10T19:30:12.000Z | 2022-02-09T04:30:29.000Z | tests/wallet/did_wallet/test_did_rpc.py | ethgreen/ethgreen-blockchain | 8f1a450897ab7a82326aea7e57e18ac2c03a9e83 | [
"Apache-2.0"
] | 6 | 2021-11-16T17:11:03.000Z | 2021-12-28T17:11:20.000Z | tests/wallet/did_wallet/test_did_rpc.py | ethgreen/ethgreen-blockchain | 8f1a450897ab7a82326aea7e57e18ac2c03a9e83 | [
"Apache-2.0"
] | 3 | 2021-11-21T02:27:10.000Z | 2022-03-15T08:34:47.000Z | import asyncio
import logging
import pytest
from ethgreen.rpc.rpc_server import start_rpc_server
from ethgreen.rpc.wallet_rpc_api import WalletRpcApi
from ethgreen.rpc.wallet_rpc_client import WalletRpcClient
from ethgreen.simulator.simulator_protocol import FarmNewBlockProtocol
from ethgreen.types.peer_info import PeerInfo
from ethgreen.util.ints import uint16, uint64
from ethgreen.wallet.util.wallet_types import WalletType
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets, bt
from tests.time_out_assert import time_out_assert
from ethgreen.wallet.did_wallet.did_wallet import DIDWallet
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def event_loop():
    """Provide one shared asyncio event loop for the whole test module."""
    yield asyncio.get_event_loop()
class TestDIDWallet:
    """End-to-end test of DID (distributed identity) wallet creation, backup
    and recovery through the wallet RPC interface against simulator nodes."""
    @pytest.fixture(scope="function")
    async def three_wallet_nodes(self):
        # One simulated full node plus three wallet nodes per test.
        async for _ in setup_simulators_and_wallets(1, 3, {}):
            yield _
    @pytest.mark.asyncio
    async def test_create_did(self, three_wallet_nodes):
        num_blocks = 4
        full_nodes, wallets = three_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, wallet_server_0 = wallets[0]
        wallet_node_1, wallet_server_1 = wallets[1]  # NOTE(review): wallet 1 is never used below
        wallet_node_2, wallet_server_2 = wallets[2]
        MAX_WAIT_SECS = 30
        wallet = wallet_node_0.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        # Connect all three wallet servers to the single full node.
        await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        await wallet_server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        # Farm one block to wallet 0's puzzle hash to fund it, then several
        # blocks to a zero hash so the reward is processed.
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        for i in range(0, num_blocks + 1):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        log.info("Waiting for initial money in Wallet 0 ...")
        # Start a wallet RPC server for wallet node 0 and a client against it.
        api_one = WalletRpcApi(wallet_node_0)
        config = bt.config
        daemon_port = config["daemon_port"]
        test_rpc_port = uint16(21529)
        await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        client = await WalletRpcClient.create(self_hostname, test_rpc_port, bt.root_path, bt.config)
        rpc_server_cleanup = await start_rpc_server(
            api_one,
            self_hostname,
            daemon_port,
            test_rpc_port,
            lambda x: None,
            bt.root_path,
            config,
            connect_to_daemon=False,
        )
        # Poll until wallet 0's confirmed balance is positive.
        async def got_initial_money():
            balances = await client.get_wallet_balance("1")
            return balances["confirmed_wallet_balance"] > 0
        await time_out_assert(timeout=MAX_WAIT_SECS, function=got_initial_money)
        # Create the first DID wallet (amount 201) through the RPC client.
        # NOTE(review): "success" is only asserted when the key is present, so
        # a response without it silently skips that check.
        val = await client.create_new_did_wallet(201)
        assert isinstance(val, dict)
        if "success" in val:
            assert val["success"]
        assert val["type"] == WalletType.DISTRIBUTED_ID.value
        assert val["wallet_id"] > 1
        assert len(val["my_did"]) == 64  # DID rendered as 32-byte hex string
        assert bytes.fromhex(val["my_did"])
        # Fund wallet 2, then create a second DID wallet directly (not via
        # RPC) whose recovery list contains the first wallet's DID.
        main_wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
        ph2 = await main_wallet_2.get_new_puzzlehash()
        for i in range(0, num_blocks + 1):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
        recovery_list = [bytes.fromhex(val["my_did"])]
        async with wallet_node_2.wallet_state_manager.lock:
            did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
                wallet_node_2.wallet_state_manager, main_wallet_2, uint64(101), recovery_list
            )
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        # Back up the second DID wallet and recreate it from the backup file
        # through the RPC interface (the "recovery" wallet, id did_wallet_id_3).
        filename = "test.backup"
        did_wallet_2.create_backup(filename)
        val = await client.create_new_did_wallet_from_recovery(filename)
        if "success" in val:
            assert val["success"]
        assert val["type"] == WalletType.DISTRIBUTED_ID.value
        assert val["wallet_id"] > 1
        did_wallet_id_3 = val["wallet_id"]
        assert len(val["my_did"]) == 64
        assert bytes.fromhex(val["my_did"]) == did_wallet_2.did_info.origin_coin.name()
        assert bytes.fromhex(val["coin_name"])
        assert bytes.fromhex(val["newpuzhash"])
        assert bytes.fromhex(val["pubkey"])
        # Have wallet 2 attest to the recovery, then perform the recovery
        # spend on the recreated wallet using that attestation file.
        filename = "test.attest"
        val = await client.did_create_attest(
            did_wallet_2.wallet_id, val["coin_name"], val["pubkey"], val["newpuzhash"], filename
        )
        if "success" in val:
            assert val["success"]
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        val = await client.did_recovery_spend(did_wallet_id_3, [filename])
        if "success" in val:
            assert val["success"]
        for i in range(0, num_blocks * 2):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        # After recovery the recreated wallet must hold the original 101 units.
        val = await client.get_wallet_balance(did_wallet_id_3)
        assert val["confirmed_wallet_balance"] == 101
        await rpc_server_cleanup()
| 40.41791 | 105 | 0.6887 | import asyncio
import logging
import pytest
from ethgreen.rpc.rpc_server import start_rpc_server
from ethgreen.rpc.wallet_rpc_api import WalletRpcApi
from ethgreen.rpc.wallet_rpc_client import WalletRpcClient
from ethgreen.simulator.simulator_protocol import FarmNewBlockProtocol
from ethgreen.types.peer_info import PeerInfo
from ethgreen.util.ints import uint16, uint64
from ethgreen.wallet.util.wallet_types import WalletType
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets, bt
from tests.time_out_assert import time_out_assert
from ethgreen.wallet.did_wallet.did_wallet import DIDWallet
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestDIDWallet:
@pytest.fixture(scope="function")
async def three_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 3, {}):
yield _
@pytest.mark.asyncio
async def test_create_did(self, three_wallet_nodes):
num_blocks = 4
full_nodes, wallets = three_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node_0, wallet_server_0 = wallets[0]
wallet_node_1, wallet_server_1 = wallets[1]
wallet_node_2, wallet_server_2 = wallets[2]
MAX_WAIT_SECS = 30
wallet = wallet_node_0.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await wallet_server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
for i in range(0, num_blocks + 1):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
log.info("Waiting for initial money in Wallet 0 ...")
api_one = WalletRpcApi(wallet_node_0)
config = bt.config
daemon_port = config["daemon_port"]
test_rpc_port = uint16(21529)
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
client = await WalletRpcClient.create(self_hostname, test_rpc_port, bt.root_path, bt.config)
rpc_server_cleanup = await start_rpc_server(
api_one,
self_hostname,
daemon_port,
test_rpc_port,
lambda x: None,
bt.root_path,
config,
connect_to_daemon=False,
)
async def got_initial_money():
balances = await client.get_wallet_balance("1")
return balances["confirmed_wallet_balance"] > 0
await time_out_assert(timeout=MAX_WAIT_SECS, function=got_initial_money)
val = await client.create_new_did_wallet(201)
assert isinstance(val, dict)
if "success" in val:
assert val["success"]
assert val["type"] == WalletType.DISTRIBUTED_ID.value
assert val["wallet_id"] > 1
assert len(val["my_did"]) == 64
assert bytes.fromhex(val["my_did"])
main_wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph2 = await main_wallet_2.get_new_puzzlehash()
for i in range(0, num_blocks + 1):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
recovery_list = [bytes.fromhex(val["my_did"])]
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, main_wallet_2, uint64(101), recovery_list
)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
filename = "test.backup"
did_wallet_2.create_backup(filename)
val = await client.create_new_did_wallet_from_recovery(filename)
if "success" in val:
assert val["success"]
assert val["type"] == WalletType.DISTRIBUTED_ID.value
assert val["wallet_id"] > 1
did_wallet_id_3 = val["wallet_id"]
assert len(val["my_did"]) == 64
assert bytes.fromhex(val["my_did"]) == did_wallet_2.did_info.origin_coin.name()
assert bytes.fromhex(val["coin_name"])
assert bytes.fromhex(val["newpuzhash"])
assert bytes.fromhex(val["pubkey"])
filename = "test.attest"
val = await client.did_create_attest(
did_wallet_2.wallet_id, val["coin_name"], val["pubkey"], val["newpuzhash"], filename
)
if "success" in val:
assert val["success"]
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
val = await client.did_recovery_spend(did_wallet_id_3, [filename])
if "success" in val:
assert val["success"]
for i in range(0, num_blocks * 2):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
val = await client.get_wallet_balance(did_wallet_id_3)
assert val["confirmed_wallet_balance"] == 101
await rpc_server_cleanup()
| true | true |
1c3502b00816b1391aa44c72e9fff9864542562e | 1,579 | py | Python | dfd/api.py | legnaleurc/dfd | 50f580c4cf2d6528a6df8310093aef2b0b7f2c08 | [
"MIT"
] | null | null | null | dfd/api.py | legnaleurc/dfd | 50f580c4cf2d6528a6df8310093aef2b0b7f2c08 | [
"MIT"
] | null | null | null | dfd/api.py | legnaleurc/dfd | 50f580c4cf2d6528a6df8310093aef2b0b7f2c08 | [
"MIT"
] | null | null | null | import json
from aiohttp import web as aw
from .database import InvalidFilterError
# NOTE we dont expect filters will be large text
class FiltersHandler(aw.View):
    """CRUD endpoint for the filter list stored in ``request.app['filters']``.

    Filters are expected to be small pieces of text, so request bodies are
    read fully into memory.
    """

    async def post(self):
        """Create a filter from the request body; return its new id as JSON.

        Responds 400 when the filter text is rejected as invalid.
        """
        filters = self.request.app['filters']
        new_filter = await self.request.text()
        try:
            new_id = await filters.add(new_filter)
        except InvalidFilterError:
            return aw.Response(status=400)
        return aw.Response(text=str(new_id), content_type='application/json')

    async def get(self):
        """Return every stored filter as a JSON list (newline terminated)."""
        filters = self.request.app['filters']
        rv = json.dumps(await filters.get()) + '\n'
        return aw.Response(text=rv, content_type='application/json')

    async def put(self):
        """Replace the filter identified by the ``id`` path parameter.

        400 for a missing/invalid id or invalid filter text; 500 when the
        update fails.
        """
        id_ = self._parse_id()
        if id_ is None:
            return aw.Response(status=400)
        filters = self.request.app['filters']
        new_filter = await self.request.text()
        try:
            ok = await filters.update(id_, new_filter)
        except InvalidFilterError:
            return aw.Response(status=400)
        if not ok:
            return aw.Response(status=500)
        return aw.Response()

    async def delete(self):
        """Remove the filter identified by the ``id`` path parameter."""
        id_ = self._parse_id()
        if id_ is None:
            return aw.Response(status=400)
        filters = self.request.app['filters']
        ok = await filters.remove(id_)
        if not ok:
            return aw.Response(status=500)
        return aw.Response()

    def _parse_id(self):
        """Return the ``id`` path parameter as an int, or None when unusable.

        Factored out of put/delete (they duplicated this logic); also turns a
        non-numeric id into a clean 400 instead of an unhandled ValueError.
        """
        id_ = self.request.match_info['id']
        if id_ is None:
            return None
        try:
            return int(id_)
        except ValueError:
            return None
| 28.709091 | 68 | 0.59658 | import json
from aiohttp import web as aw
from .database import InvalidFilterError
class FiltersHandler(aw.View):
async def post(self):
filters = self.request.app['filters']
new_filter = await self.request.text()
try:
new_id = await filters.add(new_filter)
except InvalidFilterError:
return aw.Response(status=400)
rv = str(new_id)
return aw.Response(text=rv, content_type='application/json')
async def get(self):
filters = self.request.app['filters']
rv = await filters.get()
rv = json.dumps(rv)
rv = rv + '\n'
return aw.Response(text=rv, content_type='application/json')
async def put(self):
id_ = self.request.match_info['id']
if id_ is None:
return aw.Response(status=400)
id_ = int(id_)
filters = self.request.app['filters']
new_filter = await self.request.text()
try:
ok = await filters.update(id_, new_filter)
except InvalidFilterError:
return aw.Response(status=400)
if not ok:
return aw.Response(status=500)
return aw.Response()
async def delete(self):
id_ = self.request.match_info['id']
if id_ is None:
return aw.Response(status=400)
id_ = int(id_)
filters = self.request.app['filters']
ok = await filters.remove(id_)
if not ok:
return aw.Response(status=500)
return aw.Response()
| true | true |
1c3503919c84d443d05169a5103bf543a789cea3 | 22,676 | py | Python | scripts/obtain_data.py | quimaguirre/NetworkAnalysis | c7a4da3ba5696800738b4767065ce29fa0020d79 | [
"MIT"
] | 1 | 2017-07-10T17:33:31.000Z | 2017-07-10T17:33:31.000Z | scripts/obtain_data.py | quimaguirre/NetworkAnalysis | c7a4da3ba5696800738b4767065ce29fa0020d79 | [
"MIT"
] | null | null | null | scripts/obtain_data.py | quimaguirre/NetworkAnalysis | c7a4da3ba5696800738b4767065ce29fa0020d79 | [
"MIT"
] | null | null | null | import argparse
import ConfigParser
import cPickle
import mysql.connector
import networkx as nx
import sys, os, re
from context import NetworkAnalysis
import NetworkAnalysis.drug as NA_drug
"""
NetworkAnalysis
2017 Joaquim Aguirre-Plans
Structural Bioinformatics Laboratory
Universitat Pompeu Fabra
"""
def main():
options = parse_user_arguments()
create_tissue_specific_network(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Obtain tissue-specificity data from BIANA and save it in Pickle files",
epilog = "@oliva's lab 2017")
parser.add_argument('-p','--pickles_path',dest='pickles_path',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'NetworkAnalysis/pickles'),
help = """Define the directory where the data will be stored. """)
options=parser.parse_args()
return options
#################
#################
# MAIN FUNCTION #
#################
#################
def create_tissue_specific_network(options):
"""
Generates the profiles of the input drug
"""
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Get the program path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = ConfigParser.ConfigParser()
config.read(config_file)
cnx = mysql.connector.connect( user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database') )
#----------------------#
# OBTAIN BTO FILES #
#----------------------#
BTOterm_file = os.path.join(options.pickles_path, 'BTOterm2uE.pcl')
BTOname_file = os.path.join(options.pickles_path, 'BTOname2uE.pcl')
if not fileExist(BTOterm_file) or not fileExist(BTOname_file):
BTOterm2uE, BTOname2uE = obtain_uEs_from_Tissues(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(BTOterm2uE, open(BTOterm_file, 'w'))
cPickle.dump(BTOname2uE, open(BTOname_file, 'w'))
#----------------------------------#
# OBTAIN HPA AND TISSUES FILES #
#----------------------------------#
HPA_tissue_file = os.path.join(options.pickles_path, 'tissue2uEs.pcl')
HPA_complete_file = os.path.join(options.pickles_path, 'tissue2cell2uE.pcl')
if not fileExist(HPA_tissue_file) or not fileExist(HPA_complete_file):
tissue2uEs, tissue2cell2uE = obtain_uEs_from_HPA(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(tissue2uEs, open(HPA_tissue_file, 'w'))
cPickle.dump(tissue2cell2uE, open(HPA_complete_file, 'w'))
prot2tissues_file = os.path.join(options.pickles_path, 'UEprot2UETissues.pcl')
if not fileExist(prot2tissues_file):
UEprot2UETissues = obtain_uE_prot_2_uE_Tissues(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(UEprot2UETissues, open(prot2tissues_file, 'w'))
prot2HPAmic_file = os.path.join(options.pickles_path, 'UEprot2UEHPAmic.pcl')
if not fileExist(prot2HPAmic_file):
UEprot2UEHPAmic = obtain_uE_prot_2_uE_HPAmic(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(UEprot2UEHPAmic, open(prot2HPAmic_file, 'w'))
prot2HPARNAseq_file = os.path.join(options.pickles_path, 'UEprot2UEHPARNAseq.pcl')
if not fileExist(prot2HPARNAseq_file):
UEprot2UEHPARNAseq = obtain_uE_prot_2_uE_HPARNAseq(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(UEprot2UEHPARNAseq, open(prot2HPARNAseq_file, 'w'))
#-----------------------------#
# OBTAIN CODES OF METHODS #
#-----------------------------#
psimi2method_file = os.path.join(options.pickles_path, 'psimi2method.pcl')
if not fileExist(psimi2method_file):
key_attribute_table = NA_drug.return_key_attribute_table(cnx, ontology_name='psimiobo')
psimi2method = obtain_psimi_to_method(cnx, key_attribute_table)
cPickle.dump(psimi2method, open(psimi2method_file, 'w'))
#-----------------------------------#
# PARSE WANG LIVER INTERACTIONS #
#-----------------------------------#
wang_liver_file = os.path.join(options.pickles_path, 'wang_liver_network.pcl')
if not fileExist(wang_liver_file):
wang_liver_network = obtain_wang_liver_interactions(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(wang_liver_network, open(wang_liver_file, 'w'))
#-------------------------#
# PARSE HIPPIE SCORES #
#-------------------------#
hippie_scores_file = os.path.join(main_path, 'NetworkAnalysis/hippie_scores/experimental_scores.tsv')
psimi2score_file = os.path.join(options.pickles_path, 'psimi2score.pcl')
if not fileExist(psimi2score_file):
psimi2score = parse_hippie_scores(hippie_scores_file)
cPickle.dump(psimi2score, open(psimi2score_file, 'w'))
#------------------------------#
# OBTAIN HOUSEKEEPING DATA #
#------------------------------#
# Obtain housekeeping genes data
HPA_housekeeping_file = os.path.join(main_path, 'NetworkAnalysis/housekeeping/tissue_specificity_rna_any_expressed.tab')
elieis_housekeeping_file = os.path.join(main_path, '/home/quim/project/tissue_specificity/housekeeping/HK_genes.txt')
hpa_geneid_dump = os.path.join(options.pickles_path, 'hpa_hk_geneIDs.pcl')
hpa_uE_dump = os.path.join(options.pickles_path, 'hpa_hk_uEs.pcl')
elieis_geneid_dump = os.path.join(options.pickles_path, 'eisenberg_hk_geneIDs.pcl')
elieis_uE_dump = os.path.join(options.pickles_path, 'eisenberg_hk_uEs.pcl')
try:
hpa_hk_geneIDs = cPickle.load(open(hpa_geneid_dump))
hpa_hk_uEs = cPickle.load(open(hpa_uE_dump))
eisenberg_hk_geneIDs = cPickle.load(open(elieis_geneid_dump))
eisenberg_hk_uEs = cPickle.load(open(elieis_uE_dump))
except:
hpa_hk_geneIDs, hpa_hk_uEs, eisenberg_hk_geneIDs, eisenberg_hk_uEs = obtain_housekeeping_genes(cnx, config.get('BIANA', 'unification_protocol'), HPA_housekeeping_file, elieis_housekeeping_file, hpa_geneid_dump, hpa_uE_dump, elieis_geneid_dump, elieis_uE_dump)
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def obtain_uEs_from_Tissues(cnx, unification_protocol):
"""
Obtain dictionary uE : {'BTO_term' : ... , 'BTO_name' : ...}
"""
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
# Get the user entity of the TISSUE, not the BTOelement!!
# Because BTO and TISSUES are not properly unified, as they have different entities (tissue and BTOelement).
# So, using this command, I am able to get the user entity of the tissue, and not the BTOelement.
query = (''' SELECT U.userEntityID, B1.value, N.value
FROM externalEntityBTO_name N, externalEntityBTO B1, externalEntityBTO B2, {} U
WHERE N.externalEntityID = B1.externalEntityID AND B1.value = B2.value AND B1.externalEntityID != B2.externalEntityID AND B2.externalEntityID = U.externalEntityID
'''.format(up_table))
cursor.execute(query)
BTOterm2uE = {}
BTOname2uE = {}
for items in cursor:
uE, BTO_term, BTO_name = items
BTO_name = BTO_name.lower()
if BTO_term not in BTOterm2uE:
BTOterm2uE[BTO_term] = uE
else:
if uE != BTOterm2uE[BTO_term]:
print('BTO_term {} has multiple uEs'.format(BTO_term))
sys.exit(10)
# There can be more than one BTO term with the same BTO name
# This is why we add the user entities in a set
if BTO_name not in BTOname2uE:
BTOname2uE.setdefault(BTO_name, set())
BTOname2uE[BTO_name].add(uE)
cursor.close()
return BTOterm2uE, BTOname2uE
def obtain_uEs_from_HPA(cnx, unification_protocol):
"""
Obtain dictionary uE : {'tissue_name' : ... , 'cell_type' : ...}
"""
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT uE.userEntityID, HPAT.value, HPAC.value
FROM {} uE, externalEntityHumanProteinAtlas_tissue HPAT, externalEntityHumanProteinAtlas_celltype HPAC
WHERE uE.externalEntityID = HPAT.externalEntityID AND uE.externalEntityID = HPAC.externalEntityID
'''.format(up_table))
cursor.execute(query)
tissue2uEs = {}
tissue2cell2uE = {}
for items in cursor:
uE, tissue_name, cell_type = items
tissue_name = tissue_name.lower()
cell_type = cell_type.lower()
tissue2uEs.setdefault(tissue_name, set())
tissue2uEs[tissue_name].add(uE)
tissue2cell2uE.setdefault(tissue_name, {})
if cell_type not in tissue2cell2uE[tissue_name]:
tissue2cell2uE[tissue_name][cell_type] = uE
else:
print('Tissue {} and cell_type {} have multiple uEs'.format(tissue_name, cell_type))
sys.exit(10)
cursor.close()
return tissue2uEs, tissue2cell2uE
def obtain_uE_prot_2_uE_Tissues(cnx, unification_protocol):
"""
Obtain dictionary uE_prot : {'uE_tissue' : {'confidence' : ..., 'source' : ..., 'evidence' : ...} }
"""
print('\n.....Obtaining dictionary of user entity proteins to user entity TISSUES.....\n')
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT UP.userEntityID, UT.userEntityID, TC.value, TS.value, TE.value
FROM {} UP, externalEntityRelationParticipant RP, externalEntityRelationParticipant RT, externalEntityTissuesConfidence TC, externalEntityTissuesSource TS, externalEntityTissuesEvidence TE, {} UT, externalEntity ET
WHERE UP.externalEntityID = RP.externalEntityID AND RP.externalEntityID != RT.externalEntityID AND RP.externalEntityRelationID = RT.externalEntityRelationID AND RT.externalEntityRelationID = TC.externalEntityID AND RT.externalEntityRelationID = TS.externalEntityID AND RT.externalEntityRelationID = TE.externalEntityID AND RT.externalEntityID = UT.externalEntityID AND RT.externalEntityID = ET.externalEntityID AND ET.type = 'tissue'
'''.format(up_table, up_table))
cursor.execute(query)
UEprot2UETissues = {}
for items in cursor:
uEprot, uEtissues, confidence, source, evidence = items
source = source.lower()
UEprot2UETissues.setdefault(uEprot, {})
UEprot2UETissues[uEprot].setdefault(uEtissues, {})
UEprot2UETissues[uEprot][uEtissues]['confidence'] = confidence
UEprot2UETissues[uEprot][uEtissues]['source'] = source
UEprot2UETissues[uEprot][uEtissues]['evidence'] = source
cursor.close()
print('\nProtein 2 TISSUES dictionary obtained!\n')
return UEprot2UETissues
def obtain_uE_prot_2_uE_HPAmic(cnx, unification_protocol):
"""
Obtain dictionary uE_prot : {'uE_tissue' : {'level' : ..., 'reliability' : ...} }
"""
print('\n.....Obtaining dictionary of user entity proteins to user entity HUMAN PROTEIN ATLAS (microarray).....\n')
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT UP.userEntityID, UT.userEntityID, TL.value, TR.value
FROM {} UP, externalEntityRelationParticipant RP, externalEntityRelationParticipant RT, externalEntityHumanProteinAtlas_level TL, externalEntityHumanProteinAtlas_reliability TR, {} UT, externalEntity ET
WHERE UP.externalEntityID = RP.externalEntityID AND RP.externalEntityID != RT.externalEntityID AND RP.externalEntityRelationID = RT.externalEntityRelationID AND RT.externalEntityRelationID = TL.externalEntityID AND RT.externalEntityRelationID = TR.externalEntityID AND RT.externalEntityID = UT.externalEntityID AND RT.externalEntityID = ET.externalEntityID AND ET.type = 'tissue'
'''.format(up_table, up_table))
cursor.execute(query)
UEprot2UEHPA = {}
for items in cursor:
uEprot, uEtissues, level, reliability = items
level = level.lower()
reliability = reliability.lower()
UEprot2UEHPA.setdefault(uEprot, {})
UEprot2UEHPA[uEprot].setdefault(uEtissues, {})
UEprot2UEHPA[uEprot][uEtissues]['level'] = level
UEprot2UEHPA[uEprot][uEtissues]['reliability'] = reliability
cursor.close()
print('\nProtein 2 HUMAN PROTEIN ATLAS (microarray) dictionary obtained!\n')
return UEprot2UEHPA
def obtain_uE_prot_2_uE_HPARNAseq(cnx, unification_protocol):
"""
Obtain dictionary uE_prot : {'uE_tissue' : {'value' : ..., 'unit' : ...} }
"""
print('\n.....Obtaining dictionary of user entity proteins to user entity HUMAN PROTEIN ATLAS (RNAseq).....\n')
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT UP.userEntityID, UT.userEntityID, RNA.value, RNA.unit
FROM {} UP, externalEntityRelationParticipant RP, externalEntityRelationParticipant RT, externalEntityHumanProteinAtlas_RNAseq_value RNA, {} UT, externalEntity ET
WHERE UP.externalEntityID = RP.externalEntityID AND RP.externalEntityID != RT.externalEntityID AND RP.externalEntityRelationID = RT.externalEntityRelationID AND RT.externalEntityRelationID = RNA.externalEntityID AND RT.externalEntityID = UT.externalEntityID AND RT.externalEntityID = ET.externalEntityID AND ET.type = 'tissue'
'''.format(up_table, up_table))
cursor.execute(query)
UEprot2UEHPARNAseq = {}
for items in cursor:
uEprot, uEtissues, value, unit = items
value = float(value)
unit = unit.lower()
if unit != 'tpm':
print('Incorrect RNAseq unit for uE protein {} and uE tissue {}: {}'.format(uEprot, uEtissues, unit))
sys.exit(10)
UEprot2UEHPARNAseq.setdefault(uEprot, {})
UEprot2UEHPARNAseq[uEprot].setdefault(uEtissues, {})
UEprot2UEHPARNAseq[uEprot][uEtissues]['value'] = value
UEprot2UEHPARNAseq[uEprot][uEtissues]['unit'] = unit
cursor.close()
print('\nProtein 2 HUMAN PROTEIN ATLAS (RNAseq) dictionary obtained!\n')
return UEprot2UEHPARNAseq
def obtain_psimi_to_method(cnx, key_attribute_table):
"""
Obtain dictionary uE_prot : {'psi-mi code' : 'method_name' }
"""
print('\n.....Obtaining dictionary of PSI-MI codes to Method names.....\n')
cursor = cnx.cursor()
query = (''' SELECT K.value, P.value FROM externalEntitypsimi_name P, {} K where P.externalEntityID = K.externalEntityID
'''.format(key_attribute_table))
cursor.execute(query)
psimi2method = {}
for items in cursor:
psimi = items[0]
method = items[1]
psimi2method[psimi] = method
cursor.close()
print('\nPSI-MI 2 METHOD dictionary obtained!\n')
return psimi2method
def obtain_wang_liver_interactions(cnx, unification_protocol):
"""
Obtain the liver-specific interactions in Wang et al.
"""
print('\n.....Obtaining liver interactions from Wang.....\n')
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT G1.value, G2.value
FROM {} U1, {} U2, externalEntityRelationParticipant R1, externalEntityRelationParticipant R2, externalEntityPubmed P, externalEntityGeneID G1, externalEntityGeneID G2
WHERE U1.userEntityID != U2.userEntityID AND U1.externalEntityID = R1.externalEntityID AND U2.externalEntityID = R2.externalEntityID AND R1.externalEntityID != R2.externalEntityID AND R1.externalEntityRelationID = R2.externalEntityRelationID AND R1.externalEntityRelationID = P.externalEntityID AND P.value = 21988832 AND U1.externalEntityID = G1.externalEntityID AND U2.externalEntityID = G2.externalEntityID
'''.format(up_table, up_table))
cursor.execute(query)
G=nx.Graph()
for items in cursor:
node1 = items[0]
node2 = items[1]
G.add_edge(node1,node2)
cursor.close()
print('\nWang liver interactions obtained!\n')
return G
def parse_hippie_scores(hippie_scores_file):
"""
Obtain dictionary uE_prot : {'psi-mi code' : 'method_name' }
"""
print('\n.....Parsing HIPPIE scores.....\n')
psimi2score = {}
mi_regex = re.compile('MI:([0-9]{4})')
hippie_scores_fd = open(hippie_scores_file, 'r')
for line in hippie_scores_fd:
method_name, psimi, score = line.strip().split('\t')
m = mi_regex.search(psimi)
if m:
psimi = int(m.group(1))
psimi2score[psimi]=float(score)
hippie_scores_fd.close()
print('\n.....Parsing of HIPPIE scores done!.....\n')
return psimi2score
def obtain_housekeeping_genes(cnx, unification_protocol, HPA_housekeeping_file, elieis_housekeeping_file, HPA_geneid_output, HPA_uE_output, eisenberg_geneid_output, eisenberg_uE_output):
"""
Parses the housekeeping files and obtains a dictionary with them.
"""
print('\n.....The housekeeping set of genes has not been parsed. Parsing the files.....\n')
# Obtain Human Protein Atlas HOUSKEEPING geneIDs --> http://www.proteinatlas.org/humanproteome/housekeeping
hpa_hk_geneIDs = set()
hpa_hk_uEs = set()
hpa_housekeeping_fd = open(HPA_housekeeping_file, 'r')
first_line = hpa_housekeeping_fd.readline()
fields_dict = obtain_header_fields(first_line)
# Gene Gene synonym Ensembl Gene description Chromosome Position Protein class Evidence HPA evidence UniProt evidence MS evidence Antibody Reliability (IH) Reliability (Mouse Brain) Reliability (IF) Subcellular location RNA tissue category RNA TS RNA TS TPM TPM max in non-specific
for line in hpa_housekeeping_fd:
fields = line.strip().split("\t")
gene = fields[ fields_dict['gene'] ]
ensembl = fields[ fields_dict['ensembl'] ]
reliability = fields[ fields_dict['reliability (ih)'] ].lower()
if reliability != '' and reliability != '-' and reliability != 'uncertain':
print(reliability)
pass
else:
print('Skipping {} for low reliability'.format(gene))
continue
if ensembl != '' and ensembl != '-':
uEs, geneids = obtain_uE_and_geneid_from_ensembl(cnx, unification_protocol, ensembl)
for geneid in geneids:
hpa_hk_geneIDs.add(int(geneid))
for uE in uEs:
hpa_hk_uEs.add(int(uE))
else:
print('Missing ensembl in HPA housekeeping for gene: {}'.format(gene))
sys.exit(10)
hpa_housekeeping_fd.close()
# Obtain Eisenberg HOUSKEEPING geneIDs --> https://www.tau.ac.il/~elieis/HKG/
eisenberg_hk_geneIDs = set()
eisenberg_hk_uEs = set()
elieis_housekeeping_fd = open(elieis_housekeeping_file, 'r')
for line in elieis_housekeeping_fd:
fields = line.strip().split("\t")
gene = fields[0]
refseq = fields[1]
if refseq != '' or refseq != '-':
uEs, geneids = obtain_uE_and_geneid_from_refseq(cnx, unification_protocol, refseq)
for geneid in geneids:
eisenberg_hk_geneIDs.add(int(geneid))
for uE in uEs:
eisenberg_hk_uEs.add(int(uE))
else:
print('Missing RefSeq in HPA housekeeping for gene: {}'.format(gene))
sys.exit(10)
elieis_housekeeping_fd.close()
cPickle.dump(hpa_hk_geneIDs, open(HPA_geneid_output, 'w'))
cPickle.dump(hpa_hk_uEs, open(HPA_uE_output, 'w'))
cPickle.dump(eisenberg_hk_geneIDs, open(eisenberg_geneid_output, 'w'))
cPickle.dump(eisenberg_hk_uEs, open(eisenberg_uE_output, 'w'))
return hpa_hk_geneIDs, hpa_hk_uEs, eisenberg_hk_geneIDs, eisenberg_hk_uEs
def obtain_uE_and_geneid_from_ensembl(cnx, unification_protocol, ensembl):
"""
Obtain geneIDs and user entities sets from their corresponding Ensembl.
"""
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT U1.userEntityID, G.value
FROM externalEntityEnsembl EN, {} U1, {} U2, externalEntityGeneID G
WHERE EN.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = G.externalEntityID AND EN.value = %s
'''.format(up_table, up_table))
cursor.execute(query, (ensembl,))
uEs = set()
geneids = set()
for items in cursor:
uE, geneid = items
uEs.add(uE)
geneids.add(geneid)
cursor.close()
return uEs, geneids
def obtain_uE_and_geneid_from_refseq(cnx, unification_protocol, refseq):
"""
Obtain geneIDs and user entities sets from their corresponding RefSeq
"""
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT U1.userEntityID, G.value
FROM externalEntityRefSeq R, {} U1, {} U2, externalEntityGeneID G
WHERE R.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = G.externalEntityID AND R.value = %s
'''.format(up_table, up_table))
cursor.execute(query, (refseq,))
uEs = set()
geneids = set()
for items in cursor:
uE, geneid = items
uEs.add(uE)
geneids.add(geneid)
cursor.close()
return uEs, geneids
def obtain_header_fields(first_line, sep='\t'):
"""
Obtain a dictionary: "field_name" => "position"
"""
fields_dict = {}
header_fields = first_line.strip().split(sep)
for x in xrange(0, len(header_fields)):
fields_dict[header_fields[x].lower()] = x
return fields_dict
if __name__ == "__main__":
main() | 37.480992 | 450 | 0.66233 | import argparse
import ConfigParser
import cPickle
import mysql.connector
import networkx as nx
import sys, os, re
from context import NetworkAnalysis
import NetworkAnalysis.drug as NA_drug
def main():
options = parse_user_arguments()
create_tissue_specific_network(options)
def parse_user_arguments(*args, **kwds):
parser = argparse.ArgumentParser(
description = "Obtain tissue-specificity data from BIANA and save it in Pickle files",
epilog = "@oliva's lab 2017")
parser.add_argument('-p','--pickles_path',dest='pickles_path',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'NetworkAnalysis/pickles'),
help = """Define the directory where the data will be stored. """)
options=parser.parse_args()
return options
#################
#################
# MAIN FUNCTION #
#################
#################
def create_tissue_specific_network(options):
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Get the program path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = ConfigParser.ConfigParser()
config.read(config_file)
cnx = mysql.connector.connect( user=config.get('BIANA', 'user'),
password=config.get('BIANA', 'password'),
host=config.get('BIANA', 'host'),
database=config.get('BIANA', 'database') )
#----------------------#
# OBTAIN BTO FILES #
#----------------------#
BTOterm_file = os.path.join(options.pickles_path, 'BTOterm2uE.pcl')
BTOname_file = os.path.join(options.pickles_path, 'BTOname2uE.pcl')
if not fileExist(BTOterm_file) or not fileExist(BTOname_file):
BTOterm2uE, BTOname2uE = obtain_uEs_from_Tissues(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(BTOterm2uE, open(BTOterm_file, 'w'))
cPickle.dump(BTOname2uE, open(BTOname_file, 'w'))
#----------------------------------#
# OBTAIN HPA AND TISSUES FILES #
#----------------------------------#
HPA_tissue_file = os.path.join(options.pickles_path, 'tissue2uEs.pcl')
HPA_complete_file = os.path.join(options.pickles_path, 'tissue2cell2uE.pcl')
if not fileExist(HPA_tissue_file) or not fileExist(HPA_complete_file):
tissue2uEs, tissue2cell2uE = obtain_uEs_from_HPA(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(tissue2uEs, open(HPA_tissue_file, 'w'))
cPickle.dump(tissue2cell2uE, open(HPA_complete_file, 'w'))
prot2tissues_file = os.path.join(options.pickles_path, 'UEprot2UETissues.pcl')
if not fileExist(prot2tissues_file):
UEprot2UETissues = obtain_uE_prot_2_uE_Tissues(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(UEprot2UETissues, open(prot2tissues_file, 'w'))
prot2HPAmic_file = os.path.join(options.pickles_path, 'UEprot2UEHPAmic.pcl')
if not fileExist(prot2HPAmic_file):
UEprot2UEHPAmic = obtain_uE_prot_2_uE_HPAmic(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(UEprot2UEHPAmic, open(prot2HPAmic_file, 'w'))
prot2HPARNAseq_file = os.path.join(options.pickles_path, 'UEprot2UEHPARNAseq.pcl')
if not fileExist(prot2HPARNAseq_file):
UEprot2UEHPARNAseq = obtain_uE_prot_2_uE_HPARNAseq(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(UEprot2UEHPARNAseq, open(prot2HPARNAseq_file, 'w'))
#-----------------------------#
# OBTAIN CODES OF METHODS #
#-----------------------------#
psimi2method_file = os.path.join(options.pickles_path, 'psimi2method.pcl')
if not fileExist(psimi2method_file):
key_attribute_table = NA_drug.return_key_attribute_table(cnx, ontology_name='psimiobo')
psimi2method = obtain_psimi_to_method(cnx, key_attribute_table)
cPickle.dump(psimi2method, open(psimi2method_file, 'w'))
#-----------------------------------#
# PARSE WANG LIVER INTERACTIONS #
#-----------------------------------#
wang_liver_file = os.path.join(options.pickles_path, 'wang_liver_network.pcl')
if not fileExist(wang_liver_file):
wang_liver_network = obtain_wang_liver_interactions(cnx, config.get('BIANA', 'unification_protocol'))
cPickle.dump(wang_liver_network, open(wang_liver_file, 'w'))
#-------------------------#
# PARSE HIPPIE SCORES #
#-------------------------#
hippie_scores_file = os.path.join(main_path, 'NetworkAnalysis/hippie_scores/experimental_scores.tsv')
psimi2score_file = os.path.join(options.pickles_path, 'psimi2score.pcl')
if not fileExist(psimi2score_file):
psimi2score = parse_hippie_scores(hippie_scores_file)
cPickle.dump(psimi2score, open(psimi2score_file, 'w'))
#------------------------------#
# OBTAIN HOUSEKEEPING DATA #
#------------------------------#
# Obtain housekeeping genes data
HPA_housekeeping_file = os.path.join(main_path, 'NetworkAnalysis/housekeeping/tissue_specificity_rna_any_expressed.tab')
elieis_housekeeping_file = os.path.join(main_path, '/home/quim/project/tissue_specificity/housekeeping/HK_genes.txt')
hpa_geneid_dump = os.path.join(options.pickles_path, 'hpa_hk_geneIDs.pcl')
hpa_uE_dump = os.path.join(options.pickles_path, 'hpa_hk_uEs.pcl')
elieis_geneid_dump = os.path.join(options.pickles_path, 'eisenberg_hk_geneIDs.pcl')
elieis_uE_dump = os.path.join(options.pickles_path, 'eisenberg_hk_uEs.pcl')
try:
hpa_hk_geneIDs = cPickle.load(open(hpa_geneid_dump))
hpa_hk_uEs = cPickle.load(open(hpa_uE_dump))
eisenberg_hk_geneIDs = cPickle.load(open(elieis_geneid_dump))
eisenberg_hk_uEs = cPickle.load(open(elieis_uE_dump))
except:
hpa_hk_geneIDs, hpa_hk_uEs, eisenberg_hk_geneIDs, eisenberg_hk_uEs = obtain_housekeeping_genes(cnx, config.get('BIANA', 'unification_protocol'), HPA_housekeeping_file, elieis_housekeeping_file, hpa_geneid_dump, hpa_uE_dump, elieis_geneid_dump, elieis_uE_dump)
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
return os.path.exists(file) and os.path.isfile(file)
def obtain_uEs_from_Tissues(cnx, unification_protocol):
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
# Get the user entity of the TISSUE, not the BTOelement!!
# Because BTO and TISSUES are not properly unified, as they have different entities (tissue and BTOelement).
# So, using this command, I am able to get the user entity of the tissue, and not the BTOelement.
query = (''' SELECT U.userEntityID, B1.value, N.value
FROM externalEntityBTO_name N, externalEntityBTO B1, externalEntityBTO B2, {} U
WHERE N.externalEntityID = B1.externalEntityID AND B1.value = B2.value AND B1.externalEntityID != B2.externalEntityID AND B2.externalEntityID = U.externalEntityID
'''.format(up_table))
cursor.execute(query)
BTOterm2uE = {}
BTOname2uE = {}
for items in cursor:
uE, BTO_term, BTO_name = items
BTO_name = BTO_name.lower()
if BTO_term not in BTOterm2uE:
BTOterm2uE[BTO_term] = uE
else:
if uE != BTOterm2uE[BTO_term]:
print('BTO_term {} has multiple uEs'.format(BTO_term))
sys.exit(10)
# There can be more than one BTO term with the same BTO name
# This is why we add the user entities in a set
if BTO_name not in BTOname2uE:
BTOname2uE.setdefault(BTO_name, set())
BTOname2uE[BTO_name].add(uE)
cursor.close()
return BTOterm2uE, BTOname2uE
def obtain_uEs_from_HPA(cnx, unification_protocol):
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT uE.userEntityID, HPAT.value, HPAC.value
FROM {} uE, externalEntityHumanProteinAtlas_tissue HPAT, externalEntityHumanProteinAtlas_celltype HPAC
WHERE uE.externalEntityID = HPAT.externalEntityID AND uE.externalEntityID = HPAC.externalEntityID
'''.format(up_table))
cursor.execute(query)
tissue2uEs = {}
tissue2cell2uE = {}
for items in cursor:
uE, tissue_name, cell_type = items
tissue_name = tissue_name.lower()
cell_type = cell_type.lower()
tissue2uEs.setdefault(tissue_name, set())
tissue2uEs[tissue_name].add(uE)
tissue2cell2uE.setdefault(tissue_name, {})
if cell_type not in tissue2cell2uE[tissue_name]:
tissue2cell2uE[tissue_name][cell_type] = uE
else:
print('Tissue {} and cell_type {} have multiple uEs'.format(tissue_name, cell_type))
sys.exit(10)
cursor.close()
return tissue2uEs, tissue2cell2uE
def obtain_uE_prot_2_uE_Tissues(cnx, unification_protocol):
print('\n.....Obtaining dictionary of user entity proteins to user entity TISSUES.....\n')
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT UP.userEntityID, UT.userEntityID, TC.value, TS.value, TE.value
FROM {} UP, externalEntityRelationParticipant RP, externalEntityRelationParticipant RT, externalEntityTissuesConfidence TC, externalEntityTissuesSource TS, externalEntityTissuesEvidence TE, {} UT, externalEntity ET
WHERE UP.externalEntityID = RP.externalEntityID AND RP.externalEntityID != RT.externalEntityID AND RP.externalEntityRelationID = RT.externalEntityRelationID AND RT.externalEntityRelationID = TC.externalEntityID AND RT.externalEntityRelationID = TS.externalEntityID AND RT.externalEntityRelationID = TE.externalEntityID AND RT.externalEntityID = UT.externalEntityID AND RT.externalEntityID = ET.externalEntityID AND ET.type = 'tissue'
'''.format(up_table, up_table))
cursor.execute(query)
UEprot2UETissues = {}
for items in cursor:
uEprot, uEtissues, confidence, source, evidence = items
source = source.lower()
UEprot2UETissues.setdefault(uEprot, {})
UEprot2UETissues[uEprot].setdefault(uEtissues, {})
UEprot2UETissues[uEprot][uEtissues]['confidence'] = confidence
UEprot2UETissues[uEprot][uEtissues]['source'] = source
UEprot2UETissues[uEprot][uEtissues]['evidence'] = source
cursor.close()
print('\nProtein 2 TISSUES dictionary obtained!\n')
return UEprot2UETissues
def obtain_uE_prot_2_uE_HPAmic(cnx, unification_protocol):
print('\n.....Obtaining dictionary of user entity proteins to user entity HUMAN PROTEIN ATLAS (microarray).....\n')
up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
cursor = cnx.cursor()
query = (''' SELECT UP.userEntityID, UT.userEntityID, TL.value, TR.value
FROM {} UP, externalEntityRelationParticipant RP, externalEntityRelationParticipant RT, externalEntityHumanProteinAtlas_level TL, externalEntityHumanProteinAtlas_reliability TR, {} UT, externalEntity ET
WHERE UP.externalEntityID = RP.externalEntityID AND RP.externalEntityID != RT.externalEntityID AND RP.externalEntityRelationID = RT.externalEntityRelationID AND RT.externalEntityRelationID = TL.externalEntityID AND RT.externalEntityRelationID = TR.externalEntityID AND RT.externalEntityID = UT.externalEntityID AND RT.externalEntityID = ET.externalEntityID AND ET.type = 'tissue'
'''.format(up_table, up_table))
cursor.execute(query)
UEprot2UEHPA = {}
for items in cursor:
uEprot, uEtissues, level, reliability = items
level = level.lower()
reliability = reliability.lower()
UEprot2UEHPA.setdefault(uEprot, {})
UEprot2UEHPA[uEprot].setdefault(uEtissues, {})
UEprot2UEHPA[uEprot][uEtissues]['level'] = level
UEprot2UEHPA[uEprot][uEtissues]['reliability'] = reliability
cursor.close()
print('\nProtein 2 HUMAN PROTEIN ATLAS (microarray) dictionary obtained!\n')
return UEprot2UEHPA
def obtain_uE_prot_2_uE_HPARNAseq(cnx, unification_protocol):
    """Map user-entity proteins to user-entity tissues with Human Protein
    Atlas RNAseq expression values.

    Returns ``{protein_userEntityID: {tissue_userEntityID: {'value': float,
    'unit': str}}}``.  Aborts the whole program (exit code 10) if any row
    carries a unit other than TPM.
    """
    print('\n.....Obtaining dictionary of user entity proteins to user entity HUMAN PROTEIN ATLAS (RNAseq).....\n')
    up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
    cursor = cnx.cursor()
    # Same relation-join shape as the microarray variant, but pulling the
    # RNAseq value/unit attribute from the relation.
    query = (''' SELECT UP.userEntityID, UT.userEntityID, RNA.value, RNA.unit
                 FROM {} UP, externalEntityRelationParticipant RP, externalEntityRelationParticipant RT, externalEntityHumanProteinAtlas_RNAseq_value RNA, {} UT, externalEntity ET
                 WHERE UP.externalEntityID = RP.externalEntityID AND RP.externalEntityID != RT.externalEntityID AND RP.externalEntityRelationID = RT.externalEntityRelationID AND RT.externalEntityRelationID = RNA.externalEntityID AND RT.externalEntityID = UT.externalEntityID AND RT.externalEntityID = ET.externalEntityID AND ET.type = 'tissue'
             '''.format(up_table, up_table))
    cursor.execute(query)
    UEprot2UEHPARNAseq = {}
    for items in cursor:
        uEprot, uEtissues, value, unit = items
        value = float(value)
        unit = unit.lower()
        # Defensive sanity check: all HPA RNAseq values are expected in TPM.
        if unit != 'tpm':
            print('Incorrect RNAseq unit for uE protein {} and uE tissue {}: {}'.format(uEprot, uEtissues, unit))
            sys.exit(10)
        UEprot2UEHPARNAseq.setdefault(uEprot, {})
        UEprot2UEHPARNAseq[uEprot].setdefault(uEtissues, {})
        UEprot2UEHPARNAseq[uEprot][uEtissues]['value'] = value
        UEprot2UEHPARNAseq[uEprot][uEtissues]['unit'] = unit
    cursor.close()
    print('\nProtein 2 HUMAN PROTEIN ATLAS (RNAseq) dictionary obtained!\n')
    return UEprot2UEHPARNAseq
def obtain_psimi_to_method(cnx, key_attribute_table):
    """Build a lookup from PSI-MI code to detection-method name.

    Joins the psimi_name attribute table against the supplied key-attribute
    table on externalEntityID and returns ``{psimi_code: method_name}``.
    """
    print('\n.....Obtaining dictionary of PSI-MI codes to Method names.....\n')
    cur = cnx.cursor()
    query = (''' SELECT K.value, P.value FROM externalEntitypsimi_name P, {} K where P.externalEntityID = K.externalEntityID
             '''.format(key_attribute_table))
    cur.execute(query)
    # For duplicate codes the last row wins, exactly like the plain
    # per-row assignment this comprehension replaces.
    psimi2method = {code: name for code, name in cur}
    cur.close()
    print('\nPSI-MI 2 METHOD dictionary obtained!\n')
    return psimi2method
def obtain_wang_liver_interactions(cnx, unification_protocol):
    """Fetch the liver protein-protein interactions reported in the
    publication with PubMed ID 21988832 ("Wang") as a networkx Graph.

    Nodes are GeneID values; an edge joins every pair of distinct user
    entities participating in the same relation annotated with that PubMed
    identifier.
    """
    print('\n.....Obtaining liver interactions from Wang.....\n')
    up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
    cursor = cnx.cursor()
    # Pair up two distinct participants (U1/R1 vs U2/R2) of the same
    # relation, keep only relations tagged with PubMed 21988832, and
    # translate both endpoints to their GeneID values.
    query = (''' SELECT G1.value, G2.value
                 FROM {} U1, {} U2, externalEntityRelationParticipant R1, externalEntityRelationParticipant R2, externalEntityPubmed P, externalEntityGeneID G1, externalEntityGeneID G2
                 WHERE U1.userEntityID != U2.userEntityID AND U1.externalEntityID = R1.externalEntityID AND U2.externalEntityID = R2.externalEntityID AND R1.externalEntityID != R2.externalEntityID AND R1.externalEntityRelationID = R2.externalEntityRelationID AND R1.externalEntityRelationID = P.externalEntityID AND P.value = 21988832 AND U1.externalEntityID = G1.externalEntityID AND U2.externalEntityID = G2.externalEntityID
             '''.format(up_table, up_table))
    cursor.execute(query)
    G=nx.Graph()
    for items in cursor:
        node1 = items[0]
        node2 = items[1]
        # Graph.add_edge deduplicates, so the symmetric SQL rows are fine.
        G.add_edge(node1,node2)
    cursor.close()
    print('\nWang liver interactions obtained!\n')
    return G
def parse_hippie_scores(hippie_scores_file):
    """Parse the HIPPIE method-confidence table.

    Each line is tab-separated: method name, PSI-MI identifier field,
    score.  Lines whose identifier field does not contain an ``MI:NNNN``
    code are skipped.  Returns ``{psimi_code_as_int: score_as_float}``.
    """
    print('\n.....Parsing HIPPIE scores.....\n')
    pattern = re.compile('MI:([0-9]{4})')
    scores = {}
    with open(hippie_scores_file, 'r') as handle:
        for raw_line in handle:
            method_name, psimi_field, score_field = raw_line.strip().split('\t')
            match = pattern.search(psimi_field)
            if match:
                # int() drops leading zeros, e.g. 'MI:0018' -> 18.
                scores[int(match.group(1))] = float(score_field)
    print('\n.....Parsing of HIPPIE scores done!.....\n')
    return scores
def obtain_housekeeping_genes(cnx, unification_protocol, HPA_housekeeping_file, elieis_housekeeping_file, HPA_geneid_output, HPA_uE_output, eisenberg_geneid_output, eisenberg_uE_output):
    """Parse the two housekeeping-gene catalogues and map them to database IDs.

    Reads the Human Protein Atlas housekeeping table and the Eisenberg &
    Levanon ("elieis") list, resolves each entry to user-entity IDs and
    GeneIDs through the database, pickles the four resulting sets to the
    given output paths and returns them as
    ``(hpa_hk_geneIDs, hpa_hk_uEs, eisenberg_hk_geneIDs, eisenberg_hk_uEs)``.
    Exits with status 10 when a row lacks its identifier.
    """
    print('\n.....The housekeeping set of genes has not been parsed. Parsing the files.....\n')
    # Obtain Human Protein Atlas HOUSKEEPING geneIDs --> http://www.proteinatlas.org/humanproteome/housekeeping
    hpa_hk_geneIDs = set()
    hpa_hk_uEs = set()
    hpa_housekeeping_fd = open(HPA_housekeeping_file, 'r')
    first_line = hpa_housekeeping_fd.readline()
    fields_dict = obtain_header_fields(first_line)
    # Header columns include: Gene, Gene synonym, Ensembl, ..., Reliability (IH), ...
    for line in hpa_housekeeping_fd:
        fields = line.strip().split("\t")
        gene = fields[ fields_dict['gene'] ]
        ensembl = fields[ fields_dict['ensembl'] ]
        reliability = fields[ fields_dict['reliability (ih)'] ].lower()
        # Keep only entries with an informative immunohistochemistry call.
        if reliability != '' and reliability != '-' and reliability != 'uncertain':
            print(reliability)
        else:
            print('Skipping {} for low reliability'.format(gene))
            continue
        if ensembl != '' and ensembl != '-':
            uEs, geneids = obtain_uE_and_geneid_from_ensembl(cnx, unification_protocol, ensembl)
            for geneid in geneids:
                hpa_hk_geneIDs.add(int(geneid))
            for uE in uEs:
                hpa_hk_uEs.add(int(uE))
        else:
            print('Missing ensembl in HPA housekeeping for gene: {}'.format(gene))
            sys.exit(10)
    hpa_housekeeping_fd.close()
    # Obtain Eisenberg HOUSKEEPING geneIDs --> https://www.tau.ac.il/~elieis/HKG/
    eisenberg_hk_geneIDs = set()
    eisenberg_hk_uEs = set()
    elieis_housekeeping_fd = open(elieis_housekeeping_file, 'r')
    for line in elieis_housekeeping_fd:
        fields = line.strip().split("\t")
        gene = fields[0]
        refseq = fields[1]
        # BUGFIX: this test previously used `or` (`refseq != '' or
        # refseq != '-'`), which is always true, so the missing-RefSeq error
        # branch below was unreachable.  Use `and`, mirroring the Ensembl
        # check above.
        if refseq != '' and refseq != '-':
            uEs, geneids = obtain_uE_and_geneid_from_refseq(cnx, unification_protocol, refseq)
            for geneid in geneids:
                eisenberg_hk_geneIDs.add(int(geneid))
            for uE in uEs:
                eisenberg_hk_uEs.add(int(uE))
        else:
            print('Missing RefSeq in HPA housekeeping for gene: {}'.format(gene))
            sys.exit(10)
    elieis_housekeeping_fd.close()
    # Cache the parsed sets so later runs can load the pickles instead of
    # re-querying the database.
    cPickle.dump(hpa_hk_geneIDs, open(HPA_geneid_output, 'w'))
    cPickle.dump(hpa_hk_uEs, open(HPA_uE_output, 'w'))
    cPickle.dump(eisenberg_hk_geneIDs, open(eisenberg_geneid_output, 'w'))
    cPickle.dump(eisenberg_hk_uEs, open(eisenberg_uE_output, 'w'))
    return hpa_hk_geneIDs, hpa_hk_uEs, eisenberg_hk_geneIDs, eisenberg_hk_uEs
def obtain_uE_and_geneid_from_ensembl(cnx, unification_protocol, ensembl):
    """Resolve an Ensembl identifier to user-entity IDs and GeneIDs.

    Returns two sets ``(userEntityIDs, GeneID values)`` covering every user
    entity unified with the given Ensembl accession.
    """
    up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
    cursor = cnx.cursor()
    # Self-join the user-entity table so an Ensembl-annotated external
    # entity (U1) and a GeneID-annotated one (U2) of the same user entity
    # can be matched; the Ensembl value is passed as a bound parameter.
    query = (''' SELECT U1.userEntityID, G.value
                 FROM externalEntityEnsembl EN, {} U1, {} U2, externalEntityGeneID G
                 WHERE EN.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = G.externalEntityID AND EN.value = %s
             '''.format(up_table, up_table))
    cursor.execute(query, (ensembl,))
    uEs = set()
    geneids = set()
    for items in cursor:
        uE, geneid = items
        uEs.add(uE)
        geneids.add(geneid)
    cursor.close()
    return uEs, geneids
def obtain_uE_and_geneid_from_refseq(cnx, unification_protocol, refseq):
    """Resolve a RefSeq accession to user-entity IDs and GeneIDs.

    Returns two sets ``(userEntityIDs, GeneID values)`` for every user
    entity unified with the given RefSeq accession.
    """
    up_table = NA_drug.return_unification_protocol_table(cnx, unification_protocol)
    cursor = cnx.cursor()
    # Self-join of the user-entity table: a RefSeq-annotated external entity
    # and a GeneID-annotated one belonging to the same user entity.
    query = (''' SELECT U1.userEntityID, G.value
                 FROM externalEntityRefSeq R, {} U1, {} U2, externalEntityGeneID G
                 WHERE R.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = G.externalEntityID AND R.value = %s
             '''.format(up_table, up_table))
    cursor.execute(query, (refseq,))
    user_entities = set()
    gene_ids = set()
    for user_entity, gene_id in cursor:
        user_entities.add(user_entity)
        gene_ids.add(gene_id)
    cursor.close()
    return user_entities, gene_ids
def obtain_header_fields(first_line, sep='\t'):
    """Map each lower-cased column name of a header line to its index.

    Parameters
    ----------
    first_line : header row of a delimited file (trailing newline allowed).
    sep : field separator, tab by default.

    Returns
    -------
    dict mapping ``column_name.lower()`` to its zero-based position; for
    duplicate names the right-most occurrence wins (same as the original
    assignment loop).
    """
    header_fields = first_line.strip().split(sep)
    # BUGFIX/PORT: the original looped with Python-2-only xrange(), which
    # raises NameError under Python 3; enumerate() is the portable idiom.
    return {name.lower(): index for index, name in enumerate(header_fields)}
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
1c3504f81075d5cdf675fc34b13c33b452655f06 | 1,919 | py | Python | test_scripts/imutest.py | sofwerx/dataglove | e49d72bef23fcba840e67fabc2fb81ce9f91b775 | [
"MIT"
] | 5 | 2019-05-07T17:28:20.000Z | 2020-06-18T15:08:04.000Z | test_scripts/imutest.py | sofwerx/dataglove | e49d72bef23fcba840e67fabc2fb81ce9f91b775 | [
"MIT"
] | 1 | 2019-08-29T22:54:07.000Z | 2019-08-29T23:03:57.000Z | test_scripts/imutest.py | sofwerx/dataglove | e49d72bef23fcba840e67fabc2fb81ce9f91b775 | [
"MIT"
] | 2 | 2019-05-28T13:11:09.000Z | 2019-06-05T17:47:28.000Z | import serial
import time
import sys
import threading
import numpy as np
# © 2019 BeBop Sensors, Inc.
data = []
class GloveSerialListener(threading.Thread):
    """Background thread reading BeBop data-glove frames from a serial port.

    Frames are SysEx-style: byte 240 (0xF0) starts a frame and 247 (0xF7)
    ends it.  Completed frames whose first payload byte is 2 are published
    to the module-level ``data`` list.
    """

    FRAME_START = 240  # 0xF0
    FRAME_END = 247    # 0xF7

    def __init__(self, port):
        threading.Thread.__init__(self)
        self.glove = serial.Serial()
        self.glove.baudrate = 460800
        # BUGFIX: honour the caller-supplied port; it was hard-coded to
        # '/dev/rfcomm0' and the `port` argument was silently ignored.
        self.glove.port = port
        self.glove.timeout = 1
        self.glove.open()
        self.data = []          # frame currently being assembled
        self.data_shared = []   # unused; kept for interface compatibility

    def parse(self, byte_to_parse):
        """Feed one raw byte (as a length-1 bytes object) into the assembler."""
        global data
        # BUGFIX: a timed-out serial read returns b''; int.from_bytes(b'')
        # is 0, which previously polluted the frame with bogus zero bytes.
        if not byte_to_parse:
            return
        b = int.from_bytes(byte_to_parse, byteorder='big')
        if b == self.FRAME_START:
            self.data = []
        elif b == self.FRAME_END:
            self.data.append(b)
            # Publish only type-2 frames.  NOTE(review): assignment is
            # atomic in CPython, but there is no further synchronization
            # with readers of `data` — confirm this is acceptable.
            if self.data and self.data[0] == 2:
                data = self.data
        else:
            self.data.append(b)

    def close(self):
        """Close the serial port.  (BUGFIX: run() and main() call close(),
        but no such method existed, raising AttributeError.)"""
        if self.glove.is_open:
            self.glove.close()

    def run(self):
        global data
        if self.glove.is_open:
            # data on
            self.glove.write(bytearray([176, 115, 1]))
            # bluetooth mode
            self.glove.write(bytearray([176, 118, 2]))
            while True:
                self.parse(self.glove.read())
        else:
            self.close()
def main():
    """Connect to the glove over Bluetooth RFCOMM and print type-2 frames.

    Starts the listener thread, busy-waits until the module-level ``data``
    list receives a first frame, then polls it once per second.  The loop
    never exits normally, so the trailing close() is unreachable.
    NOTE(review): GloveSerialListener defines no close() method in this
    file — confirm intended shutdown path.
    """
    data_glove_thread = GloveSerialListener('/dev/rfcomm0')
    data_glove_thread.start()
    # Wait for the listener to publish its first frame.
    if not data:
        while True:
            print("Waiting for data...")
            time.sleep(2)
            if data:
                break
    while True:
        time.sleep(1)
        # Frame header 2/12 is treated as accelerometer data per the
        # original comment — TODO confirm against the glove protocol.
        if (data[0] == 2 and data[1] == 12):
            # Accelerometer Data
            print(data)
    # Unreachable: the polling loop above never terminates normally.
    data_glove_thread.close()
# Main loop: keep reconnecting to the glove until the user interrupts.
while True:
    try:
        main()
    except(OSError):
        # Serial/Bluetooth connection failed; back off briefly and retry.
        print("Failed to connect to glove. Retrying...")
        time.sleep(1)
    except(KeyboardInterrupt):
        # Ctrl-C: leave the retry loop and terminate the script.
        exit()
import time
import sys
import threading
import numpy as np
data = []
class GloveSerialListener(threading.Thread):
def __init__(self, port):
threading.Thread.__init__(self)
self.glove = serial.Serial()
self.glove.baudrate = 460800
self.glove.port = '/dev/rfcomm0'
self.glove.timeout = 1
self.glove.open()
self.data = []
self.data_shared = []
def parse(self, byte_to_parse):
global data
b = int.from_bytes(byte_to_parse, byteorder='big')
if b == 240:
self.data = []
elif b == 247:
self.data.append(b)
if (self.data[0] == 2):
data = self.data
else:
self.data.append(b)
def run(self):
global data
if self.glove.is_open:
self.glove.write(bytearray([176, 115, 1]))
self.glove.write(bytearray([176, 118, 2]))
while True:
self.parse(self.glove.read())
else:
self.close()
def main():
data_glove_thread = GloveSerialListener('/dev/rfcomm0')
data_glove_thread.start()
if not data:
while True:
print("Waiting for data...")
time.sleep(2)
if data:
break
while True:
time.sleep(1)
if (data[0] == 2 and data[1] == 12):
print(data)
data_glove_thread.close()
while True:
try:
main()
except(OSError):
print("Failed to connect to glove. Retrying...")
time.sleep(1)
except(KeyboardInterrupt):
exit() | true | true |
1c35065379e7fecc69342e3601f2049d6fbe96f9 | 514 | py | Python | other/abnormal_sample_detection/scripts/calculate_sampleAB.py | fabiodepa/ForestQC | aba6d0f2f6925c62229bd01ace7370be314f5886 | [
"MIT"
] | 21 | 2018-10-18T08:56:04.000Z | 2022-01-15T10:18:52.000Z | other/abnormal_sample_detection/scripts/calculate_sampleAB.py | fabiodepa/ForestQC | aba6d0f2f6925c62229bd01ace7370be314f5886 | [
"MIT"
] | 7 | 2018-10-25T23:50:12.000Z | 2022-01-26T17:44:11.000Z | other/abnormal_sample_detection/scripts/calculate_sampleAB.py | fabiodepa/ForestQC | aba6d0f2f6925c62229bd01ace7370be314f5886 | [
"MIT"
] | 7 | 2018-11-21T10:32:56.000Z | 2021-09-16T05:26:08.000Z | from sample_level_vcf_stat import *
import os
import sys
import pandas as pd
# sample = os.listdir('/u/home/k/k8688933/Jaehoon/data')
# good_variants_rsid_file = '/u/scratch2/k/k8688933/stat_output/vqsr_qc4/good.all.clfB.rsid'
vcf_file = sys.argv[1]
outfile = sys.argv[2]
# list_ = []
# with open(good_variants_rsid_file, 'r') as f:
# for line in f:
# if not line.startswith('RSID'):
# list_.append(line.strip())
sample_ab = sampleLevelAB([vcf_file])
ab = pd.DataFrame(sample_ab)
ab.to_csv(outfile)
| 25.7 | 92 | 0.719844 | from sample_level_vcf_stat import *
import os
import sys
import pandas as pd
vcf_file = sys.argv[1]
outfile = sys.argv[2]
sample_ab = sampleLevelAB([vcf_file])
ab = pd.DataFrame(sample_ab)
ab.to_csv(outfile)
| true | true |
1c35067aafd2936571b8659c35155dc178d8e7d5 | 815 | py | Python | app/models/loginUserSchema.py | SE-4398/Slither | 2d3a196329250cdd1f09e472b5b6de05de6c24cb | [
"Unlicense",
"MIT"
] | 1 | 2020-05-25T20:47:48.000Z | 2020-05-25T20:47:48.000Z | app/models/loginUserSchema.py | SE-4398/Slither | 2d3a196329250cdd1f09e472b5b6de05de6c24cb | [
"Unlicense",
"MIT"
] | 8 | 2020-04-16T01:50:47.000Z | 2020-10-22T14:51:32.000Z | app/models/loginUserSchema.py | SE-4398/Slither | 2d3a196329250cdd1f09e472b5b6de05de6c24cb | [
"Unlicense",
"MIT"
] | 1 | 2020-05-21T05:54:21.000Z | 2020-05-21T05:54:21.000Z | import datetime
from click import DateTime
from flask import Flask, render_template, request
# from flask_mysqldb import MySQL
# import pymysql
# import yaml
from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField, IntegerField, DateField, DecimalField, SubmitField, PasswordField, BooleanField, \
DateTimeField
from datetime import datetime
from wtforms.validators import DataRequired, Length, Email, ValidationError, InputRequired
# class required to represent form. inherits from FlaskForm
class LoginForm(FlaskForm):
username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])
password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
recaptcha = RecaptchaField()
remember = BooleanField('remember me')
| 37.045455 | 115 | 0.788957 | import datetime
from click import DateTime
from flask import Flask, render_template, request
from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField, IntegerField, DateField, DecimalField, SubmitField, PasswordField, BooleanField, \
DateTimeField
from datetime import datetime
from wtforms.validators import DataRequired, Length, Email, ValidationError, InputRequired
class LoginForm(FlaskForm):
username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])
password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
recaptcha = RecaptchaField()
remember = BooleanField('remember me')
| true | true |
1c3506c1d1436bd754bfb43ddce7129a383c968f | 928 | py | Python | src/ploomber/tasks/__init__.py | MarcoJHB/ploomber | 4849ef6915572f7934392443b4faf138172b9596 | [
"Apache-2.0"
] | 2,141 | 2020-02-14T02:34:34.000Z | 2022-03-31T22:43:20.000Z | src/ploomber/tasks/__init__.py | MarcoJHB/ploomber | 4849ef6915572f7934392443b4faf138172b9596 | [
"Apache-2.0"
] | 660 | 2020-02-06T16:15:57.000Z | 2022-03-31T22:55:01.000Z | src/ploomber/tasks/__init__.py | MarcoJHB/ploomber | 4849ef6915572f7934392443b4faf138172b9596 | [
"Apache-2.0"
] | 122 | 2020-02-14T18:53:05.000Z | 2022-03-27T22:33:24.000Z | from ploomber.tasks.tasks import (PythonCallable, ShellScript, DownloadFromURL,
Link, Input, task_factory)
from ploomber.tasks.taskfactory import TaskFactory
from ploomber.tasks.sql import (SQLScript, SQLDump, SQLTransfer, SQLUpload,
PostgresCopyFrom)
from ploomber.tasks.notebook import NotebookRunner
from ploomber.tasks.aws import UploadToS3
from ploomber.tasks.param_forward import input_data_passer, in_memory_callable
from ploomber.tasks.taskgroup import TaskGroup
from ploomber.tasks.abc import Task
# Names re-exported as the public API of ploomber.tasks.
__all__ = [
    'Task',
    'PythonCallable',
    'ShellScript',
    'TaskFactory',
    'SQLScript',
    'SQLDump',
    'SQLTransfer',
    'SQLUpload',
    'PostgresCopyFrom',
    'NotebookRunner',
    'DownloadFromURL',
    'Link',
    'Input',
    'UploadToS3',
    'TaskGroup',
    'input_data_passer',
    'in_memory_callable',
    'task_factory',
]
Link, Input, task_factory)
from ploomber.tasks.taskfactory import TaskFactory
from ploomber.tasks.sql import (SQLScript, SQLDump, SQLTransfer, SQLUpload,
PostgresCopyFrom)
from ploomber.tasks.notebook import NotebookRunner
from ploomber.tasks.aws import UploadToS3
from ploomber.tasks.param_forward import input_data_passer, in_memory_callable
from ploomber.tasks.taskgroup import TaskGroup
from ploomber.tasks.abc import Task
__all__ = [
'Task',
'PythonCallable',
'ShellScript',
'TaskFactory',
'SQLScript',
'SQLDump',
'SQLTransfer',
'SQLUpload',
'PostgresCopyFrom',
'NotebookRunner',
'DownloadFromURL',
'Link',
'Input',
'UploadToS3',
'TaskGroup',
'input_data_passer',
'in_memory_callable',
'task_factory',
]
| true | true |
1c3507a628143c1371019fd50632146e96f30fbd | 54,445 | py | Python | numpy/lib/tests/test_arraypad.py | jcw780/numpy | 1912db21e0f5e61739168864f6b1f37dff3b4006 | [
"BSD-3-Clause"
] | null | null | null | numpy/lib/tests/test_arraypad.py | jcw780/numpy | 1912db21e0f5e61739168864f6b1f37dff3b4006 | [
"BSD-3-Clause"
] | null | null | null | numpy/lib/tests/test_arraypad.py | jcw780/numpy | 1912db21e0f5e61739168864f6b1f37dff3b4006 | [
"BSD-3-Clause"
] | null | null | null | """Tests for the array padding functions.
"""
from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from numpy.lib.arraypad import _as_pairs
_numeric_dtypes = (
np.sctypes["uint"]
+ np.sctypes["int"]
+ np.sctypes["float"]
+ np.sctypes["complex"]
)
_all_modes = {
'constant': {'constant_values': 0},
'edge': {},
'linear_ramp': {'end_values': 0},
'maximum': {'stat_length': None},
'mean': {'stat_length': None},
'median': {'stat_length': None},
'minimum': {'stat_length': None},
'reflect': {'reflect_type': 'even'},
'symmetric': {'reflect_type': 'even'},
'wrap': {},
'empty': {}
}
class TestAsPairs(object):
    """Tests for the private broadcasting helper numpy.lib.arraypad._as_pairs."""

    def test_single_value(self):
        """Test casting for a single value."""
        expected = np.array([[3, 3]] * 10)
        for x in (3, [3], [[3]]):
            result = _as_pairs(x, 10)
            assert_equal(result, expected)
        # Test with dtype=object
        obj = object()
        assert_equal(
            _as_pairs(obj, 10),
            np.array([[obj, obj]] * 10)
        )

    def test_two_values(self):
        """Test proper casting for two different values."""
        # Broadcasting in the first dimension with numbers
        expected = np.array([[3, 4]] * 10)
        for x in ([3, 4], [[3, 4]]):
            result = _as_pairs(x, 10)
            assert_equal(result, expected)
        # and with dtype=object
        obj = object()
        assert_equal(
            _as_pairs(["a", obj], 10),
            np.array([["a", obj]] * 10)
        )

        # Broadcasting in the second / last dimension with numbers
        assert_equal(
            _as_pairs([[3], [4]], 2),
            np.array([[3, 3], [4, 4]])
        )
        # and with dtype=object
        assert_equal(
            _as_pairs([["a"], [obj]], 2),
            np.array([["a", "a"], [obj, obj]])
        )

    def test_with_none(self):
        """None must pass through untouched regardless of as_index."""
        expected = ((None, None), (None, None), (None, None))
        assert_equal(
            _as_pairs(None, 3, as_index=False),
            expected
        )
        assert_equal(
            _as_pairs(None, 3, as_index=True),
            expected
        )

    def test_pass_through(self):
        """Test if `x` already matching desired output are passed through."""
        expected = np.arange(12).reshape((6, 2))
        assert_equal(
            _as_pairs(expected, 6),
            expected
        )

    def test_as_index(self):
        """Test results if `as_index=True`."""
        # Values are rounded to the nearest integer index.
        assert_equal(
            _as_pairs([2.6, 3.3], 10, as_index=True),
            np.array([[3, 3]] * 10, dtype=np.intp)
        )
        assert_equal(
            _as_pairs([2.6, 4.49], 10, as_index=True),
            np.array([[3, 4]] * 10, dtype=np.intp)
        )
        # Any negative value anywhere in the input must be rejected.
        for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
                  [[1, 2]] * 9 + [[1, -2]]):
            with pytest.raises(ValueError, match="negative values"):
                _as_pairs(x, 10, as_index=True)

    def test_exceptions(self):
        """Ensure faulty usage is discovered."""
        with pytest.raises(ValueError, match="more dimensions than allowed"):
            _as_pairs([[[3]]], 10)
        with pytest.raises(ValueError, match="could not be broadcast"):
            _as_pairs([[1, 2], [3, 4]], 3)
        with pytest.raises(ValueError, match="could not be broadcast"):
            _as_pairs(np.ones((2, 3)), 3)
class TestConditionalShortcuts(object):
    """Checks for np.pad shortcut paths that bypass the per-mode machinery."""

    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_zero_padding_shortcuts(self, mode):
        # Padding by (0, 0) on every axis must return the input unchanged
        # for every supported mode.
        test = np.arange(120).reshape(4, 5, 6)
        pad_amt = [(0, 0) for _ in test.shape]
        assert_array_equal(test, np.pad(test, pad_amt, mode=mode))

    @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
    def test_shallow_statistic_range(self, mode):
        # With stat_length=1 every statistic mode degenerates to 'edge'.
        test = np.arange(120).reshape(4, 5, 6)
        pad_amt = [(1, 1) for _ in test.shape]
        assert_array_equal(np.pad(test, pad_amt, mode='edge'),
                           np.pad(test, pad_amt, mode=mode, stat_length=1))

    @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
    def test_clip_statistic_range(self, mode):
        # A stat_length larger than the axis must be clipped to the axis
        # length, giving the same result as the default (whole axis).
        test = np.arange(30).reshape(5, 6)
        pad_amt = [(3, 3) for _ in test.shape]
        assert_array_equal(np.pad(test, pad_amt, mode=mode),
                           np.pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(object):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
])
assert_array_equal(a, b)
def test_check_maximum_1(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
)
assert_array_equal(a, b)
def test_check_maximum_2(self):
a = np.arange(100) + 1
a = np.pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
a = np.pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_minimum_1(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_minimum_2(self):
a = np.arange(100) + 2
a = np.pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
a = np.pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
)
assert_array_equal(a, b)
def test_check_median(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'median')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
def test_check_median_01(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = np.pad(a, 1, 'median')
b = np.array(
[[4, 4, 5, 4, 4],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[4, 4, 5, 4, 4]]
)
assert_array_equal(a, b)
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = np.pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_median_stat_length(self):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2.,
0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
)
assert_array_equal(a, b)
def test_check_mean_shape_one(self):
a = [[4, 5, 6]]
a = np.pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
[[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
)
assert_array_equal(a, b)
def test_check_mean_2(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'mean')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
@pytest.mark.parametrize("mode", [
"mean",
"median",
"minimum",
"maximum"
])
def test_same_prepend_append(self, mode):
""" Test that appended and prepended values are equal """
# This test is constructed to trigger floating point rounding errors in
# a way that caused gh-11216 for mode=='mean'
a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
a = np.pad(a, (1, 1), mode)
assert_equal(a[0], a[-1])
@pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"])
@pytest.mark.parametrize(
"stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]
)
def test_check_negative_stat_length(self, mode, stat_length):
arr = np.arange(30).reshape((6, 5))
match = "index can't contain negative values"
with pytest.raises(ValueError, match=match):
np.pad(arr, 2, mode, stat_length=stat_length)
def test_simple_stat_length(self):
a = np.arange(30)
a = np.reshape(a, (6, 5))
a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
b = np.array(
[[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
[16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
)
assert_array_equal(a, b)
@pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
@pytest.mark.filterwarnings(
"ignore:invalid value encountered in (true_divide|double_scalars):"
"RuntimeWarning"
)
@pytest.mark.parametrize("mode", ["mean", "median"])
def test_zero_stat_length_valid(self, mode):
arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
expected = np.array([np.nan, 1., 2., np.nan, np.nan])
assert_equal(arr, expected)
@pytest.mark.parametrize("mode", ["minimum", "maximum"])
def test_zero_stat_length_invalid(self, mode):
match = "stat_length of 0 yields no value for padding"
with pytest.raises(ValueError, match=match):
np.pad([1., 2.], 0, mode, stat_length=0)
with pytest.raises(ValueError, match=match):
np.pad([1., 2.], 0, mode, stat_length=(1, 0))
with pytest.raises(ValueError, match=match):
np.pad([1., 2.], 1, mode, stat_length=0)
with pytest.raises(ValueError, match=match):
np.pad([1., 2.], 1, mode, stat_length=(1, 0))
class TestConstant(object):
    """Tests for np.pad with mode='constant'."""
    def test_check_constant(self):
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
             20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
        )
        assert_array_equal(a, b)
    def test_check_constant_zeros(self):
        # Default constant_values is 0.
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'constant')
        b = np.array(
            [ 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
              0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
              0,  0,  0,  0,  0,
              0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
              0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
              0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
        )
        assert_array_equal(a, b)
    def test_check_constant_float(self):
        # If input array is int, but constant_values are float, the dtype of
        # the array to be padded is kept
        arr = np.arange(30).reshape(5, 6)
        test = np.pad(arr, (1, 2), mode='constant',
                      constant_values=1.1)
        expected = np.array(
            [[ 1,  1,  1,  1,  1,  1,  1,  1,  1],
             [ 1,  0,  1,  2,  3,  4,  5,  1,  1],
             [ 1,  6,  7,  8,  9, 10, 11,  1,  1],
             [ 1, 12, 13, 14, 15, 16, 17,  1,  1],
             [ 1, 18, 19, 20, 21, 22, 23,  1,  1],
             [ 1, 24, 25, 26, 27, 28, 29,  1,  1],
             [ 1,  1,  1,  1,  1,  1,  1,  1,  1],
             [ 1,  1,  1,  1,  1,  1,  1,  1,  1]]
        )
        assert_allclose(test, expected)
    def test_check_constant_float2(self):
        # If input array is float, and constant_values are float, the dtype of
        # the array to be padded is kept - here retaining the float constants
        arr = np.arange(30).reshape(5, 6)
        arr_float = arr.astype(np.float64)
        test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',
                      constant_values=1.1)
        expected = np.array(
            [[ 1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1],
             [ 1.1,  0. ,  1. ,  2. ,  3. ,  4. ,  5. ,  1.1,  1.1],
             [ 1.1,  6. ,  7. ,  8. ,  9. , 10. , 11. ,  1.1,  1.1],
             [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. ,  1.1,  1.1],
             [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. ,  1.1,  1.1],
             [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. ,  1.1,  1.1],
             [ 1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1],
             [ 1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1,  1.1]]
        )
        assert_allclose(test, expected)
    def test_check_constant_float3(self):
        a = np.arange(100, dtype=float)
        a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
        b = np.array(
            [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
        )
        assert_allclose(a, b)
    def test_check_constant_odd_pad_amount(self):
        # A one-element pad spec ((n,),) applies n before and after each axis.
        arr = np.arange(30).reshape(5, 6)
        test = np.pad(arr, ((1,), (2,)), mode='constant',
                      constant_values=3)
        expected = np.array(
            [[ 3,  3,  3,  3,  3,  3,  3,  3,  3,  3],
             [ 3,  3,  0,  1,  2,  3,  4,  5,  3,  3],
             [ 3,  3,  6,  7,  8,  9, 10, 11,  3,  3],
             [ 3,  3, 12, 13, 14, 15, 16, 17,  3,  3],
             [ 3,  3, 18, 19, 20, 21, 22, 23,  3,  3],
             [ 3,  3, 24, 25, 26, 27, 28, 29,  3,  3],
             [ 3,  3,  3,  3,  3,  3,  3,  3,  3,  3]]
        )
        assert_allclose(test, expected)
    def test_check_constant_pad_2d(self):
        # Per-axis (before, after) constant values.
        arr = np.arange(4).reshape(2, 2)
        test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant',
                          constant_values=((1, 2), (3, 4)))
        expected = np.array(
            [[3, 1, 1, 4, 4, 4],
             [3, 0, 1, 4, 4, 4],
             [3, 2, 3, 4, 4, 4],
             [3, 2, 2, 4, 4, 4],
             [3, 2, 2, 4, 4, 4]]
        )
        assert_allclose(test, expected)
    def test_check_large_integers(self):
        # Values near the int64/uint64 limits must survive padding unchanged.
        uint64_max = 2 ** 64 - 1
        arr = np.full(5, uint64_max, dtype=np.uint64)
        test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
        expected = np.full(7, uint64_max, dtype=np.uint64)
        assert_array_equal(test, expected)
        int64_max = 2 ** 63 - 1
        arr = np.full(5, int64_max, dtype=np.int64)
        test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
        expected = np.full(7, int64_max, dtype=np.int64)
        assert_array_equal(test, expected)
    def test_check_object_array(self):
        # Object identity of the pad values must be preserved.
        arr = np.empty(1, dtype=object)
        obj_a = object()
        arr[0] = obj_a
        obj_b = object()
        obj_c = object()
        arr = np.pad(arr, pad_width=1, mode='constant',
                     constant_values=(obj_b, obj_c))
        expected = np.empty((3,), dtype=object)
        expected[0] = obj_b
        expected[1] = obj_a
        expected[2] = obj_c
        assert_array_equal(arr, expected)
    def test_pad_empty_dimension(self):
        # 'constant' may extend a zero-length axis.
        arr = np.zeros((3, 0, 2))
        result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
        assert result.shape == (3, 4, 4)
class TestLinearRamp(object):
    """Tests for np.pad with mode='linear_ramp'."""
    def test_check_simple(self):
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
        b = np.array(
            [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
             2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
             0.80, 0.64, 0.48, 0.32, 0.16,
             0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
             10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
             20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
             30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
             40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
             50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
             60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
             70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
             80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
             90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
             94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
             47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
        )
        assert_allclose(a, b, rtol=1e-5, atol=1e-5)
    def test_check_2d(self):
        arr = np.arange(20).reshape(4, 5).astype(np.float64)
        test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
        expected = np.array(
            [[0.,   0.,   0.,   0.,   0.,   0.,   0.,    0.,   0.],
             [0.,   0.,   0.,   0.5,  1.,   1.5,  2.,    1.,   0.],
             [0.,   0.,   0.,   1.,   2.,   3.,   4.,    2.,   0.],
             [0.,   2.5,  5.,   6.,   7.,   8.,   9.,    4.5,  0.],
             [0.,   5.,  10.,  11.,  12.,  13.,  14.,    7.,   0.],
             [0.,   7.5, 15.,  16.,  17.,  18.,  19.,    9.5,  0.],
             [0.,   3.75, 7.5,  8.,   8.5,  9.,   9.5,   4.75, 0.],
             [0.,   0.,   0.,   0.,   0.,   0.,   0.,    0.,   0.]])
        assert_allclose(test, expected)
    @pytest.mark.xfail(exceptions=(AssertionError,))
    def test_object_array(self):
        from fractions import Fraction
        arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
        actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)
        # deliberately chosen to have a non-power-of-2 denominator such that
        # rounding to floats causes a failure.
        expected = np.array([
            Fraction( 0, 12),
            Fraction( 3, 12),
            Fraction( 6, 12),
            Fraction(-6, 12),
            Fraction(-4, 12),
            Fraction(-2, 12),
            Fraction(-0, 12),
        ])
        assert_equal(actual, expected)
    def test_end_values(self):
        """Ensure that end values are exact."""
        a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
        assert_equal(a[:, 0], 0.)
        assert_equal(a[:, -1], 0.)
        assert_equal(a[0, :], 0.)
        assert_equal(a[-1, :], 0.)
    @pytest.mark.parametrize("dtype", _numeric_dtypes)
    def test_negative_difference(self, dtype):
        """
        Check correct behavior of unsigned dtypes if there is a negative
        difference between the edge to pad and `end_values`. Check both cases
        to be independent of implementation. Test behavior for all other dtypes
        in case dtype casting interferes with complex dtypes. See gh-14191.
        """
        x = np.array([3], dtype=dtype)
        result = np.pad(x, 3, mode="linear_ramp", end_values=0)
        expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
        assert_equal(result, expected)
        x = np.array([0], dtype=dtype)
        result = np.pad(x, 3, mode="linear_ramp", end_values=3)
        expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
        assert_equal(result, expected)
class TestReflect(object):
    """Tests for np.pad with mode='reflect' (mirror without repeating edge)."""
    def test_check_simple(self):
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'reflect')
        b = np.array(
            [25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
             15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
             5, 4, 3, 2, 1,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
             88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
        )
        assert_array_equal(a, b)
    def test_check_odd_method(self):
        # reflect_type='odd' reflects through the edge value (point symmetry).
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')
        b = np.array(
            [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
             -15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
             -5, -4, -3, -2, -1,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
             110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
        )
        assert_array_equal(a, b)
    def test_check_large_pad(self):
        # Pad wider than the array: reflection must repeat back and forth.
        a = [[4, 5, 6], [6, 7, 8]]
        a = np.pad(a, (5, 7), 'reflect')
        b = np.array(
            [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
        )
        assert_array_equal(a, b)
    def test_check_shape(self):
        # Reflecting a single row: the row is constant along the padded axis.
        a = [[4, 5, 6]]
        a = np.pad(a, (5, 7), 'reflect')
        b = np.array(
            [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
        )
        assert_array_equal(a, b)
    def test_check_01(self):
        a = np.pad([1, 2, 3], 2, 'reflect')
        b = np.array([3, 2, 1, 2, 3, 2, 1])
        assert_array_equal(a, b)
    def test_check_02(self):
        a = np.pad([1, 2, 3], 3, 'reflect')
        b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
        assert_array_equal(a, b)
    def test_check_03(self):
        a = np.pad([1, 2, 3], 4, 'reflect')
        b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
        assert_array_equal(a, b)
class TestEmptyArray(object):
    """Check how padding behaves on arrays with an empty dimension."""
    @pytest.mark.parametrize(
        # Keep parametrization ordered, otherwise pytest-xdist might believe
        # that different tests were collected during parallelization
        "mode", sorted(_all_modes.keys() - {"constant", "empty"})
    )
    def test_pad_empty_dimension(self, mode):
        # Only 'constant' and 'empty' can invent values for a 0-length axis.
        match = ("can't extend empty axis 0 using modes other than 'constant' "
                 "or 'empty'")
        with pytest.raises(ValueError, match=match):
            np.pad([], 4, mode=mode)
        with pytest.raises(ValueError, match=match):
            np.pad(np.ndarray(0), 4, mode=mode)
        with pytest.raises(ValueError, match=match):
            np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_pad_non_empty_dimension(self, mode):
        # Padding the non-empty axes of an array with an empty axis is fine.
        result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
        assert result.shape == (8, 0, 4)
class TestSymmetric(object):
    """Tests for np.pad with mode='symmetric' (mirror including the edge)."""
    def test_check_simple(self):
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'symmetric')
        b = np.array(
            [24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
             14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
             4, 3, 2, 1, 0,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
             89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
        )
        assert_array_equal(a, b)
    def test_check_odd_method(self):
        # reflect_type='odd' mirrors through the edge value (point symmetry).
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
        b = np.array(
            [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
             -14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
             -4, -3, -2, -1, 0,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
             109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
        )
        assert_array_equal(a, b)
    def test_check_large_pad(self):
        # Pad wider than the array: symmetric tiling repeats back and forth.
        a = [[4, 5, 6], [6, 7, 8]]
        a = np.pad(a, (5, 7), 'symmetric')
        b = np.array(
            [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
        )
        assert_array_equal(a, b)
    def test_check_large_pad_odd(self):
        a = [[4, 5, 6], [6, 7, 8]]
        a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
        b = np.array(
            [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
             [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
             [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
             [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
             [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
             [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
             [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
             [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
             [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
             [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
             [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
             [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
             [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
             [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
        )
        assert_array_equal(a, b)
    def test_check_shape(self):
        # A single row padded symmetrically is constant along the padded axis.
        a = [[4, 5, 6]]
        a = np.pad(a, (5, 7), 'symmetric')
        b = np.array(
            [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
        )
        assert_array_equal(a, b)
    def test_check_01(self):
        a = np.pad([1, 2, 3], 2, 'symmetric')
        b = np.array([2, 1, 1, 2, 3, 3, 2])
        assert_array_equal(a, b)
    def test_check_02(self):
        a = np.pad([1, 2, 3], 3, 'symmetric')
        b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
        assert_array_equal(a, b)
    def test_check_03(self):
        a = np.pad([1, 2, 3], 6, 'symmetric')
        b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
        assert_array_equal(a, b)
class TestWrap(object):
    """Tests for np.pad with mode='wrap' (periodic tiling of the array)."""
    def test_check_simple(self):
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'wrap')
        b = np.array(
            [75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
             85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
             95, 96, 97, 98, 99,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
        )
        assert_array_equal(a, b)
    def test_check_large_pad(self):
        # Pad much wider than the array: wrapping must tile it repeatedly.
        a = np.arange(12)
        a = np.reshape(a, (3, 4))
        a = np.pad(a, (10, 12), 'wrap')
        b = np.array(
            [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11]]
        )
        assert_array_equal(a, b)
    def test_check_01(self):
        a = np.pad([1, 2, 3], 3, 'wrap')
        b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
        assert_array_equal(a, b)
    def test_check_02(self):
        a = np.pad([1, 2, 3], 4, 'wrap')
        b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
        assert_array_equal(a, b)
    def test_pad_with_zero(self):
        # pad_width containing a 0 must be accepted.
        a = np.ones((3, 5))
        b = np.pad(a, (0, 5), mode="wrap")
        assert_array_equal(a, b[:-5, :-5])
    def test_repeated_wrapping(self):
        """
        Check wrapping on each side individually if the wrapped area is longer
        than the original array.
        """
        a = np.arange(5)
        b = np.pad(a, (12, 0), mode="wrap")
        assert_array_equal(np.r_[a, a, a, a][3:], b)
        a = np.arange(5)
        b = np.pad(a, (0, 12), mode="wrap")
        assert_array_equal(np.r_[a, a, a, a][:-3], b)
class TestEdge(object):
    """Tests for np.pad with mode='edge' (replicate the border values)."""
    def test_check_simple(self):
        a = np.arange(12)
        a = np.reshape(a, (4, 3))
        a = np.pad(a, ((2, 3), (3, 2)), 'edge')
        b = np.array(
            [[0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [3, 3, 3, 3, 4, 5, 5, 5],
             [6, 6, 6, 6, 7, 8, 8, 8],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11]]
        )
        assert_array_equal(a, b)
    def test_check_width_shape_1_2(self):
        # Check a pad_width of the form ((1, 2),).
        # Regression test for issue gh-7808.
        a = np.array([1, 2, 3])
        padded = np.pad(a, ((1, 2),), 'edge')
        expected = np.array([1, 1, 2, 3, 3, 3])
        assert_array_equal(padded, expected)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        padded = np.pad(a, ((1, 2),), 'edge')
        expected = np.pad(a, ((1, 2), (1, 2)), 'edge')
        assert_array_equal(padded, expected)
        a = np.arange(24).reshape(2, 3, 4)
        padded = np.pad(a, ((1, 2),), 'edge')
        expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
        assert_array_equal(padded, expected)
class TestEmpty(object):
    """Tests for np.pad with mode='empty' (padded values are uninitialized)."""
    def test_simple(self):
        # Only the shape and the untouched interior can be asserted; the
        # padded values themselves are undefined.
        arr = np.arange(24).reshape(4, 6)
        result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
        assert result.shape == (9, 10)
        assert_equal(arr, result[2:-3, 3:-1])
    def test_pad_empty_dimension(self):
        # 'empty' may extend a zero-length axis.
        arr = np.zeros((3, 0, 2))
        result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")
        assert result.shape == (3, 4, 4)
def test_legacy_vector_functionality():
    """Padding via a user-supplied function (legacy "vector" API) works."""
    def _fill_ends_with_tens(vector, pad_width, iaxis, kwargs):
        # Overwrite both padded ends of each 1-D slice with the constant 10.
        before, after = pad_width
        vector[:before] = 10
        vector[-after:] = 10

    padded = np.pad(np.arange(6).reshape(2, 3), 2, _fill_ends_with_tens)
    expected = np.array(
        [[10, 10, 10, 10, 10, 10, 10],
         [10, 10, 10, 10, 10, 10, 10],
         [10, 10, 0, 1, 2, 10, 10],
         [10, 10, 3, 4, 5, 10, 10],
         [10, 10, 10, 10, 10, 10, 10],
         [10, 10, 10, 10, 10, 10, 10]]
    )
    assert_array_equal(padded, expected)
def test_unicode_mode():
    """A unicode mode string is accepted the same as a plain str mode."""
    padded = np.pad([1], 2, mode=u'constant')
    assert_array_equal(padded, np.array([0, 0, 1, 0, 0]))
@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
def test_object_input(mode):
    # Regression test for issue gh-11395.
    # Object arrays (here filled with None) must pad without trying to
    # do arithmetic on the elements.
    a = np.full((4, 3), fill_value=None)
    pad_amt = ((2, 3), (3, 2))
    b = np.full((9, 8), fill_value=None)
    assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
class TestPadWidth(object):
    """Validation of the `pad_width` argument itself."""
    @pytest.mark.parametrize("pad_width", [
        (4, 5, 6, 7),
        ((1,), (2,), (3,)),
        ((1, 2), (3, 4), (5, 6)),
        ((3, 4, 5), (0, 1, 2)),
    ])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_misshaped_pad_width(self, pad_width, mode):
        # pad_width that can't broadcast to (ndim, 2) must raise.
        arr = np.arange(30).reshape((6, 5))
        match = "operands could not be broadcast together"
        with pytest.raises(ValueError, match=match):
            np.pad(arr, pad_width, mode)
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_misshaped_pad_width_2(self, mode):
        arr = np.arange(30).reshape((6, 5))
        match = ("input operand has more dimensions than allowed by the axis "
                 "remapping")
        with pytest.raises(ValueError, match=match):
            np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode)
    @pytest.mark.parametrize(
        "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_negative_pad_width(self, pad_width, mode):
        arr = np.arange(30).reshape((6, 5))
        match = "index can't contain negative values"
        with pytest.raises(ValueError, match=match):
            np.pad(arr, pad_width, mode)
    @pytest.mark.parametrize("pad_width, dtype", [
        ("3", None),
        ("word", None),
        (None, None),
        (object(), None),
        (3.4, None),
        (((2, 3, 4), (3, 2)), object),
        (complex(1, -1), None),
        (((-2.1, 3), (3, 2)), None),
    ])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_bad_type(self, pad_width, dtype, mode):
        # Non-integral pad_width values must raise TypeError.
        arr = np.arange(30).reshape((6, 5))
        match = "`pad_width` must be of integral type."
        if dtype is not None:
            # avoid DeprecationWarning when not specifying dtype
            with pytest.raises(TypeError, match=match):
                np.pad(arr, np.array(pad_width, dtype=dtype), mode)
        else:
            with pytest.raises(TypeError, match=match):
                np.pad(arr, pad_width, mode)
            with pytest.raises(TypeError, match=match):
                np.pad(arr, np.array(pad_width), mode)
    def test_pad_width_as_ndarray(self):
        # pad_width may be given as an ndarray instead of nested sequences.
        a = np.arange(12)
        a = np.reshape(a, (4, 3))
        a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')
        b = np.array(
            [[0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [3, 3, 3, 3, 4, 5, 5, 5],
             [6, 6, 6, 6, 7, 8, 8, 8],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11]]
        )
        assert_array_equal(a, b)
    @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_zero_pad_width(self, pad_width, mode):
        # Zero-width padding is a no-op for every mode.
        arr = np.arange(30).reshape(6, 5)
        assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_kwargs(mode):
    """Test behavior of pad's kwargs for the given mode."""
    allowed = _all_modes[mode]
    # Collect every keyword that belongs to some *other* mode.
    not_allowed = {}
    for kwargs in _all_modes.values():
        if kwargs != allowed:
            not_allowed.update(kwargs)
    # Test if allowed keyword arguments pass
    np.pad([1, 2, 3], 1, mode, **allowed)
    # Test if prohibited keyword arguments of other modes raise an error
    for key, value in not_allowed.items():
        match = "unsupported keyword arguments for mode '{}'".format(mode)
        with pytest.raises(ValueError, match=match):
            np.pad([1, 2, 3], 1, mode, **{key: value})
def test_constant_zero_default():
    """Omitting ``mode`` defaults to constant padding with zeros."""
    result = np.pad(np.array([1, 1]), 2)
    assert_array_equal(result, [0, 0, 1, 1, 0, 0])
@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
def test_unsupported_mode(mode):
    # Unknown mode names and non-string mode objects must raise ValueError.
    match= "mode '{}' is not supported".format(mode)
    with pytest.raises(ValueError, match=match):
        np.pad([1, 2, 3], 4, mode=mode)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_non_contiguous_array(mode):
    # A strided (non-contiguous) view must pad the same as a contiguous copy.
    arr = np.arange(24).reshape(4, 6)[::2, ::2]
    result = np.pad(arr, (2, 3), mode)
    assert result.shape == (7, 8)
    assert_equal(result[2:-3, 2:-3], arr)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_memory_layout_persistence(mode):
    """Test if C and F order is preserved for all pad modes."""
    x = np.ones((5, 10), order='C')
    assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
    x = np.ones((5, 10), order='F')
    assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
@pytest.mark.parametrize("dtype", _numeric_dtypes)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_dtype_persistence(dtype, mode):
    # The output dtype must match the input dtype for every mode.
    arr = np.zeros((3, 2, 1), dtype=dtype)
    result = np.pad(arr, 1, mode=mode)
    assert result.dtype == dtype
| 39.828091 | 79 | 0.424502 | from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from numpy.lib.arraypad import _as_pairs
# All numeric dtypes known to numpy, used to parametrize dtype-related tests.
_numeric_dtypes = (
    np.sctypes["uint"]
    + np.sctypes["int"]
    + np.sctypes["float"]
    + np.sctypes["complex"]
)
# Maps every supported pad mode to its mode-specific keyword arguments
# with their default values; used to parametrize mode/kwargs tests.
_all_modes = {
    'constant': {'constant_values': 0},
    'edge': {},
    'linear_ramp': {'end_values': 0},
    'maximum': {'stat_length': None},
    'mean': {'stat_length': None},
    'median': {'stat_length': None},
    'minimum': {'stat_length': None},
    'reflect': {'reflect_type': 'even'},
    'symmetric': {'reflect_type': 'even'},
    'wrap': {},
    'empty': {}
}
class TestAsPairs(object):
    """Tests for the private helper numpy.lib.arraypad._as_pairs."""
    def test_single_value(self):
        """Test casting for a single value."""
        expected = np.array([[3, 3]] * 10)
        for x in (3, [3], [[3]]):
            result = _as_pairs(x, 10)
            assert_equal(result, expected)
        # Test with dtype=object
        obj = object()
        assert_equal(
            _as_pairs(obj, 10),
            np.array([[obj, obj]] * 10)
        )
    def test_two_values(self):
        """Test proper casting for two different values."""
        # Broadcasting in the first dimension with numbers
        expected = np.array([[3, 4]] * 10)
        for x in ([3, 4], [[3, 4]]):
            result = _as_pairs(x, 10)
            assert_equal(result, expected)
        # and with dtype=object
        obj = object()
        assert_equal(
            _as_pairs(["a", obj], 10),
            np.array([["a", obj]] * 10)
        )
        # Broadcasting in the second / last dimension with numbers
        assert_equal(
            _as_pairs([[3], [4]], 2),
            np.array([[3, 3], [4, 4]])
        )
        # and with dtype=object
        assert_equal(
            _as_pairs([["a"], [obj]], 2),
            np.array([["a", "a"], [obj, obj]])
        )
    def test_with_none(self):
        # None passes through unchanged regardless of as_index.
        expected = ((None, None), (None, None), (None, None))
        assert_equal(
            _as_pairs(None, 3, as_index=False),
            expected
        )
        assert_equal(
            _as_pairs(None, 3, as_index=True),
            expected
        )
    def test_pass_through(self):
        """Test if `x` already matching desired output are passed through."""
        expected = np.arange(12).reshape((6, 2))
        assert_equal(
            _as_pairs(expected, 6),
            expected
        )
    def test_as_index(self):
        """Test results if `as_index=True`."""
        assert_equal(
            _as_pairs([2.6, 3.3], 10, as_index=True),
            np.array([[3, 3]] * 10, dtype=np.intp)
        )
        assert_equal(
            _as_pairs([2.6, 4.49], 10, as_index=True),
            np.array([[3, 4]] * 10, dtype=np.intp)
        )
        for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
                  [[1, 2]] * 9 + [[1, -2]]):
            with pytest.raises(ValueError, match="negative values"):
                _as_pairs(x, 10, as_index=True)
    def test_exceptions(self):
        """Ensure faulty usage is discovered."""
        with pytest.raises(ValueError, match="more dimensions than allowed"):
            _as_pairs([[[3]]], 10)
        with pytest.raises(ValueError, match="could not be broadcast"):
            _as_pairs([[1, 2], [3, 4]], 3)
        with pytest.raises(ValueError, match="could not be broadcast"):
            _as_pairs(np.ones((2, 3)), 3)
class TestConditionalShortcuts(object):
    """Fast paths in np.pad must agree with the general implementation."""
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_zero_padding_shortcuts(self, mode):
        # All-zero pad widths must return the input unchanged for every mode.
        test = np.arange(120).reshape(4, 5, 6)
        pad_amt = [(0, 0) for _ in test.shape]
        assert_array_equal(test, np.pad(test, pad_amt, mode=mode))
    @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
    def test_shallow_statistic_range(self, mode):
        # With stat_length=1 every statistic reduces to the edge value.
        test = np.arange(120).reshape(4, 5, 6)
        pad_amt = [(1, 1) for _ in test.shape]
        assert_array_equal(np.pad(test, pad_amt, mode='edge'),
                           np.pad(test, pad_amt, mode=mode, stat_length=1))
    @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
    def test_clip_statistic_range(self, mode):
        # stat_length larger than the axis must clip to the full axis.
        test = np.arange(30).reshape(5, 6)
        pad_amt = [(3, 3) for _ in test.shape]
        assert_array_equal(np.pad(test, pad_amt, mode=mode),
                           np.pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(object):
    """Tests for np.pad statistic modes: 'mean', 'maximum', 'minimum',
    'median', including the stat_length keyword and its edge cases."""

    def test_check_mean_stat_length(self):
        """Pad with the mean of only the first 2 / last 3 samples."""
        a = np.arange(100).astype('f')
        a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
        b = np.array(
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
            0.5, 0.5, 0.5, 0.5, 0.5,

            0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
            10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
            20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
            30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
            40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
            50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
            60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
            70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
            80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
            90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,

            98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
            98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
            ])
        assert_array_equal(a, b)

    def test_check_maximum_1(self):
        """'maximum' pads both sides with the global max (99)."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'maximum')
        b = np.array(
            [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
            )
        assert_array_equal(a, b)

    def test_check_maximum_2(self):
        """Same as above with the data shifted by 1 (max becomes 100)."""
        a = np.arange(100) + 1
        a = np.pad(a, (25, 20), 'maximum')
        b = np.array(
            [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100,

             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,

             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)

    def test_check_maximum_stat_length(self):
        """With stat_length=10 the left pad uses max of the first 10 (10)."""
        a = np.arange(100) + 1
        a = np.pad(a, (25, 20), 'maximum', stat_length=10)
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,

             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,

             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)

    def test_check_minimum_1(self):
        """'minimum' pads both sides with the global min (0)."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'minimum')
        b = np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)

    def test_check_minimum_2(self):
        """Same with the data shifted by 2 (min becomes 2)."""
        a = np.arange(100) + 2
        a = np.pad(a, (25, 20), 'minimum')
        b = np.array(
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2,

             2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
             12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
             22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
             32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
             42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
             52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
             62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
             72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
             82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
             92, 93, 94, 95, 96, 97, 98, 99, 100, 101,

             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
            )
        assert_array_equal(a, b)

    def test_check_minimum_stat_length(self):
        """stat_length=10: right pad uses min of last 10 samples (91)."""
        a = np.arange(100) + 1
        a = np.pad(a, (25, 20), 'minimum', stat_length=10)
        b = np.array(
            [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,

              1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,

             91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
             91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
            )
        assert_array_equal(a, b)

    def test_check_median(self):
        """'median' pads with the global median (49.5 for 0..99)."""
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'median')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,

             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,

             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)

    def test_check_median_01(self):
        """2-D 'median': each row/column pad uses that axis's median."""
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = np.pad(a, 1, 'median')
        b = np.array(
            [[4, 4, 5, 4, 4],

             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],

             [4, 4, 5, 4, 4]]
            )
        assert_array_equal(a, b)

    def test_check_median_02(self):
        """Padding the transpose then transposing back swaps axis order."""
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = np.pad(a.T, 1, 'median').T
        b = np.array(
            [[5, 4, 5, 4, 5],

             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],

             [5, 4, 5, 4, 5]]
            )
        assert_array_equal(a, b)

    def test_check_median_stat_length(self):
        """Outliers at the ends show up in the stat_length-limited median."""
        a = np.arange(100).astype('f')
        a[1] = 2.
        a[97] = 96.
        a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
        b = np.array(
            [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
              2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
              2., 2., 2., 2., 2.,

              0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,

             96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
             96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
            )
        assert_array_equal(a, b)

    def test_check_mean_shape_one(self):
        """'mean' on a (1, 3) input with scalar pad widths on both axes."""
        a = [[4, 5, 6]]
        a = np.pad(a, (5, 7), 'mean', stat_length=2)
        b = np.array(
            [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
            )
        assert_array_equal(a, b)

    def test_check_mean_2(self):
        """'mean' pads with the global mean (49.5) on both sides."""
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'mean')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,

             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,

             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)

    @pytest.mark.parametrize("mode", [
        "mean",
        "median",
        "minimum",
        "maximum"
    ])
    def test_same_prepend_append(self, mode):
        """Both pad ends must receive the identical statistic value, even
        when floating-point rounding could make two computations differ."""
        a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
        a = np.pad(a, (1, 1), mode)
        assert_equal(a[0], a[-1])

    @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"])
    @pytest.mark.parametrize(
        "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]
    )
    def test_check_negative_stat_length(self, mode, stat_length):
        """Any negative stat_length entry must raise ValueError."""
        arr = np.arange(30).reshape((6, 5))
        match = "index can't contain negative values"
        with pytest.raises(ValueError, match=match):
            np.pad(arr, 2, mode, stat_length=stat_length)

    def test_simple_stat_length(self):
        """A single-element stat_length tuple broadcasts to all axes."""
        a = np.arange(30)
        a = np.reshape(a, (6, 5))
        a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
        b = np.array(
            [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],

             [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
             [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],

             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
            )
        assert_array_equal(a, b)

    @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
    @pytest.mark.filterwarnings(
        "ignore:invalid value encountered in (true_divide|double_scalars):"
        "RuntimeWarning"
    )
    def test_zero_stat_length_valid(self, mode):
        """mean/median of zero samples yields NaN padding (with warnings
        suppressed) rather than an error."""
        arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
        expected = np.array([np.nan, 1., 2., np.nan, np.nan])
        assert_equal(arr, expected)

    @pytest.mark.parametrize("mode", ["minimum", "maximum"])
    def test_zero_stat_length_invalid(self, mode):
        """min/max of zero samples is undefined and must raise, whether the
        zero appears as a scalar or inside a per-side tuple."""
        match = "stat_length of 0 yields no value for padding"
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 0, mode, stat_length=0)
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 0, mode, stat_length=(1, 0))
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 1, mode, stat_length=0)
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 1, mode, stat_length=(1, 0))
class TestConstant(object):
    """Tests for np.pad with mode='constant' (fixed fill values)."""

    def test_check_constant(self):
        """Distinct (before, after) constant values pad each side."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
             20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
            )
        assert_array_equal(a, b)

    def test_check_constant_zeros(self):
        """Default constant_values is 0."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'constant')
        b = np.array(
            [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0,

              0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

              0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)

    def test_check_constant_float(self):
        # If input array is int, but constant_values are float, the dtype of
        # the array to be padded is kept (1.1 is truncated to 1)
        arr = np.arange(30).reshape(5, 6)
        test = np.pad(arr, (1, 2), mode='constant',
                      constant_values=1.1)
        expected = np.array(
            [[ 1, 1, 1, 1, 1, 1, 1, 1, 1],

             [ 1, 0, 1, 2, 3, 4, 5, 1, 1],
             [ 1, 6, 7, 8, 9, 10, 11, 1, 1],
             [ 1, 12, 13, 14, 15, 16, 17, 1, 1],
             [ 1, 18, 19, 20, 21, 22, 23, 1, 1],
             [ 1, 24, 25, 26, 27, 28, 29, 1, 1],

             [ 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
            )
        assert_allclose(test, expected)

    def test_check_constant_float2(self):
        # If input array is float, and constant_values are float, the dtype of
        # the array to be padded is kept - here retaining the float constants
        arr = np.arange(30).reshape(5, 6)
        arr_float = arr.astype(np.float64)
        test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',
                      constant_values=1.1)
        expected = np.array(
            [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],

             [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
             [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
             [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
             [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
             [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],

             [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
             [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
            )
        assert_allclose(test, expected)

    def test_check_constant_float3(self):
        """Float array with distinct negative float constants per side."""
        a = np.arange(100, dtype=float)
        a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
        b = np.array(
            [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
            )
        assert_allclose(a, b)

    def test_check_constant_odd_pad_amount(self):
        """A single-element ((n,),) pad width applies n on both sides."""
        arr = np.arange(30).reshape(5, 6)
        test = np.pad(arr, ((1,), (2,)), mode='constant',
                      constant_values=3)
        expected = np.array(
            [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],

             [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
             [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
             [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
             [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
             [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],

             [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
            )
        assert_allclose(test, expected)

    def test_check_constant_pad_2d(self):
        """Per-axis, per-side constant values in 2-D."""
        arr = np.arange(4).reshape(2, 2)
        # Use np.pad directly; np.lib.pad is merely a legacy alias.
        test = np.pad(arr, ((1, 2), (1, 3)), mode='constant',
                      constant_values=((1, 2), (3, 4)))
        expected = np.array(
            [[3, 1, 1, 4, 4, 4],
             [3, 0, 1, 4, 4, 4],
             [3, 2, 3, 4, 4, 4],
             [3, 2, 2, 4, 4, 4],
             [3, 2, 2, 4, 4, 4]]
        )
        assert_allclose(test, expected)

    def test_check_large_integers(self):
        """Values near the int64/uint64 limits must survive padding
        without overflow or precision loss."""
        uint64_max = 2 ** 64 - 1
        arr = np.full(5, uint64_max, dtype=np.uint64)
        test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
        expected = np.full(7, uint64_max, dtype=np.uint64)
        assert_array_equal(test, expected)

        int64_max = 2 ** 63 - 1
        arr = np.full(5, int64_max, dtype=np.int64)
        test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
        expected = np.full(7, int64_max, dtype=np.int64)
        assert_array_equal(test, expected)

    def test_check_object_array(self):
        """Object arrays pad with the given objects by identity."""
        arr = np.empty(1, dtype=object)
        obj_a = object()
        arr[0] = obj_a
        obj_b = object()
        obj_c = object()
        arr = np.pad(arr, pad_width=1, mode='constant',
                     constant_values=(obj_b, obj_c))

        expected = np.empty((3,), dtype=object)
        expected[0] = obj_b
        expected[1] = obj_a
        expected[2] = obj_c

        assert_array_equal(arr, expected)

    def test_pad_empty_dimension(self):
        """'constant' may extend an axis of length zero."""
        arr = np.zeros((3, 0, 2))
        result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
        assert result.shape == (3, 4, 4)
class TestLinearRamp(object):
    """Tests for np.pad with mode='linear_ramp' (ramp from edge value to
    end_values across the pad region)."""

    def test_check_simple(self):
        """1-D ramp from end_values (4, 5) to the array's edge values."""
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
        b = np.array(
            [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
             2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
             0.80, 0.64, 0.48, 0.32, 0.16,

             0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
             10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
             20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
             30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
             40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
             50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
             60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
             70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
             80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
             90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,

             94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
             47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
            )
        assert_allclose(a, b, rtol=1e-5, atol=1e-5)

    def test_check_2d(self):
        """2-D ramp: corners ramp along both axes down to end value 0."""
        arr = np.arange(20).reshape(4, 5).astype(np.float64)
        test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
        expected = np.array(
            [[0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
             [0., 0., 0., 1., 2., 3., 4., 2., 0.],
             [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
             [0., 5., 10., 11., 12., 13., 14., 7., 0.],
             [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
             [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        assert_allclose(test, expected)

    @pytest.mark.xfail(exceptions=(AssertionError,))
    def test_object_array(self):
        """Exact-rational object arrays; expected to fail while the ramp
        is computed in floats (hence the xfail)."""
        from fractions import Fraction
        arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
        actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)

        # deliberately chosen to have a non-power-of-2 denominator such that
        # rounding to floats causes a failure.
        expected = np.array([
            Fraction( 0, 12),
            Fraction( 3, 12),
            Fraction( 6, 12),
            Fraction(-6, 12),
            Fraction(-4, 12),
            Fraction(-2, 12),
            Fraction(-0, 12),
        ])
        assert_equal(actual, expected)

    def test_end_values(self):
        """The outermost padded border must equal end_values (default 0)."""
        a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
        assert_equal(a[:, 0], 0.)
        assert_equal(a[:, -1], 0.)
        assert_equal(a[0, :], 0.)
        assert_equal(a[-1, :], 0.)

    @pytest.mark.parametrize("dtype", _numeric_dtypes)
    def test_negative_difference(self, dtype):
        """Ramps must be correct in both directions for every numeric
        dtype, including unsigned ints where edge - end could wrap."""
        x = np.array([3], dtype=dtype)
        result = np.pad(x, 3, mode="linear_ramp", end_values=0)
        expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
        assert_equal(result, expected)

        x = np.array([0], dtype=dtype)
        result = np.pad(x, 3, mode="linear_ramp", end_values=3)
        expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
        assert_equal(result, expected)
class TestReflect(object):
    """Tests for np.pad with mode='reflect' (mirror about the edge sample,
    which itself is not repeated)."""

    def test_check_simple(self):
        """1-D even reflection on both sides."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'reflect')
        b = np.array(
            [25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
             15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
             5, 4, 3, 2, 1,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
             88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
            )
        assert_array_equal(a, b)

    def test_check_odd_method(self):
        """reflect_type='odd' mirrors values through the edge value, so
        the ramp continues (negative on the left, >99 on the right)."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')
        b = np.array(
            [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
             -15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
             -5, -4, -3, -2, -1,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
             110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
            )
        assert_array_equal(a, b)

    def test_check_large_pad(self):
        """Pad widths larger than the array cause repeated reflections."""
        a = [[4, 5, 6], [6, 7, 8]]
        a = np.pad(a, (5, 7), 'reflect')
        b = np.array(
            [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],

             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],

             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
            )
        assert_array_equal(a, b)

    def test_check_shape(self):
        """An axis of length 1 reflects to a constant along that axis."""
        a = [[4, 5, 6]]
        a = np.pad(a, (5, 7), 'reflect')
        b = np.array(
            [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],

             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],

             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
            )
        assert_array_equal(a, b)

    def test_check_01(self):
        """Pad width smaller than the array: single reflection."""
        a = np.pad([1, 2, 3], 2, 'reflect')
        b = np.array([3, 2, 1, 2, 3, 2, 1])
        assert_array_equal(a, b)

    def test_check_02(self):
        """Pad width equal to the array length."""
        a = np.pad([1, 2, 3], 3, 'reflect')
        b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
        assert_array_equal(a, b)

    def test_check_03(self):
        """Pad width larger than the array length: repeated reflection."""
        a = np.pad([1, 2, 3], 4, 'reflect')
        b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
        assert_array_equal(a, b)
class TestEmptyArray(object):
    """Check how padding behaves on arrays with an empty dimension."""

    @pytest.mark.parametrize(
        # Keep parametrization ordered, otherwise pytest-xdist might believe
        # that different tests were collected during parallelization
        "mode", sorted(_all_modes.keys() - {"constant", "empty"})
    )
    def test_pad_empty_dimension(self, mode):
        """Extending a zero-length axis is only defined for 'constant'
        and 'empty'; every other mode must raise ValueError."""
        match = ("can't extend empty axis 0 using modes other than 'constant' "
                 "or 'empty'")
        with pytest.raises(ValueError, match=match):
            np.pad([], 4, mode=mode)
        with pytest.raises(ValueError, match=match):
            np.pad(np.ndarray(0), 4, mode=mode)
        with pytest.raises(ValueError, match=match):
            np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)

    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_pad_non_empty_dimension(self, mode):
        """Padding the non-empty axes of an array that merely contains an
        empty axis (not extended) must work in every mode."""
        result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
        assert result.shape == (8, 0, 4)
class TestSymmetric(object):
    """Tests for np.pad with mode='symmetric' (mirror including the edge
    sample, unlike 'reflect')."""

    def test_check_simple(self):
        """1-D even symmetric padding on both sides."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'symmetric')
        b = np.array(
            [24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
             14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
             4, 3, 2, 1, 0,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
             89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
            )
        assert_array_equal(a, b)

    def test_check_odd_method(self):
        """reflect_type='odd' continues the ramp through the repeated
        edge sample."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
        b = np.array(
            [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
             -14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
             -4, -3, -2, -1, 0,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
             109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
            )
        assert_array_equal(a, b)

    def test_check_large_pad(self):
        """Pad widths larger than the array repeat the mirrored pattern."""
        a = [[4, 5, 6], [6, 7, 8]]
        a = np.pad(a, (5, 7), 'symmetric')
        b = np.array(
            [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],

             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],

             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
            )
        assert_array_equal(a, b)

    def test_check_large_pad_odd(self):
        """Large pad with reflect_type='odd': each repetition keeps
        extending the trend instead of bouncing back."""
        a = [[4, 5, 6], [6, 7, 8]]
        a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
        b = np.array(
            [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
             [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
             [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
             [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
             [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],

             [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
             [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],

             [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
             [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
             [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
             [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
             [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
             [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
             [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
            )
        assert_array_equal(a, b)

    def test_check_shape(self):
        """An axis of length 1 pads to a constant along that axis."""
        a = [[4, 5, 6]]
        a = np.pad(a, (5, 7), 'symmetric')
        b = np.array(
            [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],

             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],

             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
            )
        assert_array_equal(a, b)

    def test_check_01(self):
        """Pad width smaller than the array: single mirror."""
        a = np.pad([1, 2, 3], 2, 'symmetric')
        b = np.array([2, 1, 1, 2, 3, 3, 2])
        assert_array_equal(a, b)

    def test_check_02(self):
        """Pad width equal to the array length: one full mirror copy."""
        a = np.pad([1, 2, 3], 3, 'symmetric')
        b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
        assert_array_equal(a, b)

    def test_check_03(self):
        """Pad width twice the array length: repeated mirroring."""
        a = np.pad([1, 2, 3], 6, 'symmetric')
        b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
        assert_array_equal(a, b)
class TestWrap(object):
    """Tests for np.pad with mode='wrap' (periodic / circular padding)."""

    def test_check_simple(self):
        """1-D wrap: left pad takes the tail, right pad takes the head."""
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'wrap')
        b = np.array(
            [75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
             85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
             95, 96, 97, 98, 99,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
            )
        assert_array_equal(a, b)

    def test_check_large_pad(self):
        """Pad widths several times the array size tile it periodically."""
        a = np.arange(12)
        a = np.reshape(a, (3, 4))
        a = np.pad(a, (10, 12), 'wrap')
        b = np.array(
            [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],

             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],

             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11]]
            )
        assert_array_equal(a, b)

    def test_check_01(self):
        """Pad width equal to the array length: exact whole-period copies."""
        a = np.pad([1, 2, 3], 3, 'wrap')
        b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
        assert_array_equal(a, b)

    def test_check_02(self):
        """Pad width larger than the period: partial copy at the ends."""
        a = np.pad([1, 2, 3], 4, 'wrap')
        b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
        assert_array_equal(a, b)

    def test_pad_with_zero(self):
        """A (0, n) pad leaves the original region untouched."""
        a = np.ones((3, 5))
        b = np.pad(a, (0, 5), mode="wrap")
        assert_array_equal(a, b[:-5, :-5])

    def test_repeated_wrapping(self):
        """Multiple whole wraps on one side must reproduce concatenated
        copies of the array (gh regression for repeated wrapping)."""
        a = np.arange(5)
        b = np.pad(a, (12, 0), mode="wrap")
        assert_array_equal(np.r_[a, a, a, a][3:], b)

        a = np.arange(5)
        b = np.pad(a, (0, 12), mode="wrap")
        assert_array_equal(np.r_[a, a, a, a][:-3], b)
class TestEdge(object):
    """Tests for ``np.pad`` with mode='edge' (replicate border values)."""

    def test_check_simple(self):
        """2-D pad with asymmetric widths replicates the nearest edge."""
        padded = np.pad(np.arange(12).reshape(4, 3), ((2, 3), (3, 2)), 'edge')
        expected = np.array(
            [[0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],

             [0, 0, 0, 0, 1, 2, 2, 2],
             [3, 3, 3, 3, 4, 5, 5, 5],
             [6, 6, 6, 6, 7, 8, 8, 8],
             [9, 9, 9, 9, 10, 11, 11, 11],

             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11]]
            )
        assert_array_equal(padded, expected)

    def test_check_width_shape_1_2(self):
        """A ((before, after),) pad_width must broadcast to every axis."""
        # 1-D: explicit expected result.
        vec = np.array([1, 2, 3])
        assert_array_equal(np.pad(vec, ((1, 2),), 'edge'),
                           np.array([1, 1, 2, 3, 3, 3]))

        # 2-D: broadcast form must match the fully spelled-out form.
        mat = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(np.pad(mat, ((1, 2),), 'edge'),
                           np.pad(mat, ((1, 2), (1, 2)), 'edge'))

        # 3-D: same equivalence one dimension higher.
        cube = np.arange(24).reshape(2, 3, 4)
        assert_array_equal(np.pad(cube, ((1, 2),), 'edge'),
                           np.pad(cube, ((1, 2), (1, 2), (1, 2)), 'edge'))
class TestEmpty(object):
    """Tests for ``np.pad`` with mode='empty' (uninitialized padding)."""

    def test_simple(self):
        """Padding grows the shape; the interior keeps the original data.

        The pad values themselves are uninitialized and cannot be checked.
        """
        original = np.arange(24).reshape(4, 6)
        padded = np.pad(original, [(2, 3), (3, 1)], mode="empty")
        assert padded.shape == (9, 10)
        assert_equal(original, padded[2:-3, 3:-1])

    def test_pad_empty_dimension(self):
        """A zero-length axis may still be extended in 'empty' mode."""
        padded = np.pad(np.zeros((3, 0, 2)), [(0,), (2,), (1,)], mode="empty")
        assert padded.shape == (3, 4, 4)
def test_legacy_vector_functionality():
    """The legacy callable-mode API still works: np.pad accepts a function
    that fills each padded 1-D vector in place."""
    def _fill_pad_with_tens(vector, pad_width, iaxis, kwargs):
        # Overwrite both padded ends of the vector with the constant 10.
        vector[:pad_width[0]] = 10
        vector[-pad_width[1]:] = 10

    padded = np.pad(np.arange(6).reshape(2, 3), 2, _fill_pad_with_tens)
    expected = np.array(
        [[10, 10, 10, 10, 10, 10, 10],
         [10, 10, 10, 10, 10, 10, 10],
         [10, 10, 0, 1, 2, 10, 10],
         [10, 10, 3, 4, 5, 10, 10],
         [10, 10, 10, 10, 10, 10, 10],
         [10, 10, 10, 10, 10, 10, 10]]
        )
    assert_array_equal(padded, expected)
def test_unicode_mode():
    """A unicode-typed mode string is accepted just like a plain str."""
    result = np.pad([1], 2, mode=u'constant')
    assert_array_equal(result, np.array([0, 0, 1, 0, 0]))
@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
def test_object_input(mode):
    """Extension modes must handle object arrays (here filled with None),
    padding by replicating the existing objects."""
    # boolean, integer, float or complex 1D input
    a = np.full((4, 3), fill_value=None)
    pad_amt = ((2, 3), (3, 2))
    b = np.full((9, 8), fill_value=None)
    assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
class TestPadWidth(object):
    """Validation of the ``pad_width`` argument itself."""

    @pytest.mark.parametrize("pad_width", [
        (4, 5, 6, 7),
        ((1,), (2,), (3,)),
        ((1, 2), (3, 4), (5, 6)),
        ((3, 4, 5), (0, 1, 2)),
    ])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_misshaped_pad_width(self, pad_width, mode):
        """A pad_width that cannot broadcast against the array must raise."""
        a = np.arange(30).reshape((6, 5))
        match = "operands could not be broadcast together"
        with pytest.raises(ValueError, match=match):
            np.pad(a, pad_width, mode)

    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_misshaped_pad_width_2(self, mode):
        """Too many dimensions in pad_width must raise."""
        a = np.arange(30).reshape((6, 5))
        match = ("input operand has more dimensions than allowed by the axis "
                 "remapping")
        with pytest.raises(ValueError, match=match):
            np.pad(a, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode)

    @pytest.mark.parametrize(
        "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_negative_pad_width(self, pad_width, mode):
        """Negative pad widths are rejected for every mode."""
        a = np.arange(30).reshape((6, 5))
        match = "index can't contain negative values"
        with pytest.raises(ValueError, match=match):
            np.pad(a, pad_width, mode)

    @pytest.mark.parametrize("pad_width, dtype", [
        ("3", None),
        ("word", None),
        (None, None),
        (object(), None),
        (3.4, None),
        (((2, 3, 4), (3, 2)), object),
        (complex(1, -1), None),
        (((-2.1, 3), (3, 2)), None),
    ])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_bad_type(self, pad_width, dtype, mode):
        """Non-integral pad widths raise a TypeError."""
        a = np.arange(30).reshape((6, 5))
        match = "`pad_width` must be of integral type."
        if dtype is not None:
            # A dtype is passed explicitly to avoid a DeprecationWarning
            # that would otherwise be emitted when constructing the array.
            with pytest.raises(TypeError, match=match):
                np.pad(a, np.array(pad_width, dtype=dtype), mode)
        else:
            with pytest.raises(TypeError, match=match):
                np.pad(a, pad_width, mode)
            with pytest.raises(TypeError, match=match):
                np.pad(a, np.array(pad_width), mode)

    def test_pad_width_as_ndarray(self):
        """pad_width given as an ndarray behaves like the equivalent tuple."""
        a = np.pad(np.arange(12).reshape(4, 3),
                   np.array(((2, 3), (3, 2))), 'edge')
        expected = np.array(
            [[0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [3, 3, 3, 3, 4, 5, 5, 5],
             [6, 6, 6, 6, 7, 8, 8, 8],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11]]
        )
        assert_array_equal(a, expected)

    @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_zero_pad_width(self, pad_width, mode):
        """A zero pad width is a no-op for every mode."""
        a = np.arange(30).reshape(6, 5)
        assert_array_equal(a, np.pad(a, pad_width, mode=mode))
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_kwargs(mode):
allowed = _all_modes[mode]
not_allowed = {}
for kwargs in _all_modes.values():
if kwargs != allowed:
not_allowed.update(kwargs)
# Test if allowed keyword arguments pass
np.pad([1, 2, 3], 1, mode, **allowed)
# Test if prohibited keyword arguments of other modes raise an error
for key, value in not_allowed.items():
match = "unsupported keyword arguments for mode '{}'".format(mode)
with pytest.raises(ValueError, match=match):
np.pad([1, 2, 3], 1, mode, **{key: value})
def test_constant_zero_default():
    """With no mode given, np.pad defaults to constant padding with zeros."""
    assert_array_equal(np.pad(np.array([1, 1]), 2), [0, 0, 1, 1, 0, 0])
@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
def test_unsupported_mode(mode):
match= "mode '{}' is not supported".format(mode)
with pytest.raises(ValueError, match=match):
np.pad([1, 2, 3], 4, mode=mode)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_non_contiguous_array(mode):
arr = np.arange(24).reshape(4, 6)[::2, ::2]
result = np.pad(arr, (2, 3), mode)
assert result.shape == (7, 8)
assert_equal(result[2:-3, 2:-3], arr)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_memory_layout_persistence(mode):
x = np.ones((5, 10), order='C')
assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
x = np.ones((5, 10), order='F')
assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
@pytest.mark.parametrize("dtype", _numeric_dtypes)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_dtype_persistence(dtype, mode):
arr = np.zeros((3, 2, 1), dtype=dtype)
result = np.pad(arr, 1, mode=mode)
assert result.dtype == dtype
| true | true |
1c3508df5c374c4813d7bd926e4b8697e3ef238f | 998 | py | Python | layers/shortcuts.py | ultraglorious/cyclegan-learning | ed141e155d60cdfb1e2c14bdf64fc96fee0b5200 | [
"MIT"
] | null | null | null | layers/shortcuts.py | ultraglorious/cyclegan-learning | ed141e155d60cdfb1e2c14bdf64fc96fee0b5200 | [
"MIT"
] | null | null | null | layers/shortcuts.py | ultraglorious/cyclegan-learning | ed141e155d60cdfb1e2c14bdf64fc96fee0b5200 | [
"MIT"
] | null | null | null | import layers
def c7s1k(k: int, activation: str = "relu") -> layers.ConvolutionBlock:
    """c7s1-k block: a 7x7 convolution with stride 1 and k filters."""
    return layers.ConvolutionBlock(7, 1, k, activation=activation)


def dk(k: int) -> layers.ConvolutionBlock:
    """dk downsampling block: 3x3 convolution, stride 2, k filters.

    Reflection padding appears to have been applied only on this layer.
    """
    return layers.ConvolutionBlock(3, 2, k, reflect_padding=True)


def uk(k: int) -> layers.ConvolutionBlock:
    """uk upsampling block: 3x3 transposed convolution, stride 2, k filters."""
    return layers.ConvolutionBlock(3, 2, k, transpose=True)


def ck(k: int, normalize: bool = True) -> layers.ConvolutionBlock:
    """ck block: 4x4 convolution, stride 2, k filters, leaky-ReLU slope 0.2."""
    return layers.ConvolutionBlock(4, 2, k, normalize=normalize, leaky_slope=0.2)


def rk(k: int, filters_changed: bool = False) -> layers.ResidualBlock:
    """rk residual block with k filters.

    The paper does not state the residual blocks' stride, so 1 is assumed.
    """
    return layers.ResidualBlock(n_filters=k, stride=1, change_n_channels=filters_changed)
| 36.962963 | 107 | 0.713427 | import layers
# Naming follows the CycleGAN paper's layer shorthand (c7s1-k, dk, uk, ck, rk).

def c7s1k(k: int, activation: str = "relu") -> layers.ConvolutionBlock:
    """Build a c7s1-k block (7x7 convolution, stride 1, k filters)."""
    return layers.ConvolutionBlock(7, 1, k, activation=activation)


def dk(k: int) -> layers.ConvolutionBlock:
    """Build a dk block (3x3 convolution, stride 2, k filters, reflection padding)."""
    return layers.ConvolutionBlock(3, 2, k, reflect_padding=True)


def uk(k: int) -> layers.ConvolutionBlock:
    """Build a uk block (3x3 transposed convolution, stride 2, k filters)."""
    return layers.ConvolutionBlock(3, 2, k, transpose=True)


def ck(k: int, normalize: bool = True) -> layers.ConvolutionBlock:
    """Build a ck block (4x4 convolution, stride 2, leaky ReLU with slope 0.2)."""
    return layers.ConvolutionBlock(4, 2, k, normalize=normalize, leaky_slope=0.2)


def rk(k: int, filters_changed: bool = False) -> layers.ResidualBlock:
    """Build an rk residual block with k filters (stride assumed to be 1)."""
    return layers.ResidualBlock(n_filters=k, stride=1, change_n_channels=filters_changed)
| true | true |
1c350afe4ddd366b54a79a06bb6777bfe5eab20f | 4,532 | py | Python | dalle_pytorch/vae.py | haskie-lambda/DALLE-pytorch | 3c59dc9864cc900cefd656f73772e151af4fb97f | [
"MIT"
] | 2 | 2021-06-24T19:36:02.000Z | 2021-06-24T20:32:32.000Z | dalle_pytorch/vae.py | haskie-lambda/DALLE-pytorch | 3c59dc9864cc900cefd656f73772e151af4fb97f | [
"MIT"
] | null | null | null | dalle_pytorch/vae.py | haskie-lambda/DALLE-pytorch | 3c59dc9864cc900cefd656f73772e151af4fb97f | [
"MIT"
] | null | null | null | import io
import sys
import os, sys
import requests
import PIL
import warnings
import os
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# constants
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
# helpers methods
def exists(val):
    """Return True when *val* is not None."""
    return val is not None


def default(val, d):
    """Return *val* if it is set, otherwise the fallback *d*."""
    return d if val is None else val


def load_model(path):
    """Deserialize a torch checkpoint from *path* onto the CPU."""
    with open(path, 'rb') as f:
        return torch.load(f, map_location=torch.device('cpu'))


def map_pixels(x, eps=0.1):
    """Affinely squeeze pixel values from [0, 1] into [eps, 1 - eps]."""
    return x * (1 - 2 * eps) + eps


def unmap_pixels(x, eps=0.1):
    """Invert ``map_pixels`` and clamp the result back to [0, 1]."""
    return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)
def download(url, filename = None, root = CACHE_PATH):
    """Download *url* into *root* and return the local file path.

    The file is first written to a temporary sibling and renamed once
    complete, so an interrupted download never leaves a partial file under
    the final name. Already-downloaded files are returned immediately.
    """
    os.makedirs(root, exist_ok = True)
    filename = default(filename, os.path.basename(url))

    download_target = os.path.join(root, filename)
    # Bug fix: the temp name was the mangled constant 'tmp.(unknown)', so
    # every download shared one temp file; derive it from the target name.
    download_target_tmp = os.path.join(root, f'tmp.{filename}')

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        return download_target

    with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    os.rename(download_target_tmp, download_target)
    return download_target
# pretrained Discrete VAE from OpenAI
class OpenAIDiscreteVAE(nn.Module):
    """Pretrained discrete VAE from OpenAI's DALL-E release.

    Downloads the published encoder/decoder checkpoints on construction and
    exposes the codebook-index interface expected by the DALL-E trainer.
    """

    def __init__(self):
        super().__init__()

        self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
        self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))

        self.num_layers = 3     # 2**3 = 8x spatial downsampling
        self.image_size = 256
        self.num_tokens = 8192  # codebook size of the released model

    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Encode a batch of images into flattened codebook indices."""
        img = map_pixels(img)
        z_logits = self.enc(img)
        z = torch.argmax(z_logits, dim = 1)
        return rearrange(z, 'b h w -> b (h w)')

    def decode(self, img_seq):
        """Decode flattened codebook indices back into images in [0, 1]."""
        b, n = img_seq.shape
        img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))

        z = F.one_hot(img_seq, num_classes = self.num_tokens)
        z = rearrange(z, 'b h w c -> b c h w').float()
        x_stats = self.dec(z).float()
        x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
        return x_rec

    def forward(self, img):
        # Bug fix: `raise NotImplemented` raised a TypeError, because
        # NotImplemented is a sentinel value and not an exception class.
        raise NotImplementedError
# VQGAN from Taming Transformers paper
# https://arxiv.org/abs/2012.09841
class VQGanVAE1024(nn.Module):
    """Pretrained VQGAN (1024-entry codebook) from the Taming Transformers
    paper: https://arxiv.org/abs/2012.09841
    """

    def __init__(self):
        super().__init__()

        model_filename = 'vqgan.1024.model.ckpt'
        config_filename = 'vqgan.1024.config.yml'
        download(VQGAN_VAE_CONFIG_PATH, config_filename)
        download(VQGAN_VAE_PATH, model_filename)

        config = OmegaConf.load(str(Path(CACHE_PATH) / config_filename))
        model = VQModel(**config.model.params)

        state = torch.load(str(Path(CACHE_PATH) / model_filename), map_location = 'cpu')['state_dict']
        model.load_state_dict(state, strict = False)

        self.model = model

        self.num_layers = 4     # 2**4 = 16x spatial downsampling
        self.image_size = 256
        self.num_tokens = 1024

    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Encode images (in [0, 1]) into flattened codebook indices."""
        b = img.shape[0]
        img = (2 * img) - 1  # VQGAN expects inputs in [-1, 1]
        _, _, [_, _, indices] = self.model.encode(img)
        return rearrange(indices, '(b n) () -> b n', b = b)

    def decode(self, img_seq):
        """Decode flattened codebook indices back into images in [0, 1]."""
        b, n = img_seq.shape
        one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
        z = (one_hot_indices @ self.model.quantize.embedding.weight)

        z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
        img = self.model.decode(z)

        img = (img.clamp(-1., 1.) + 1) * 0.5
        return img

    def forward(self, img):
        # Bug fix: raise the NotImplementedError class instead of the
        # NotImplemented sentinel (which produced a confusing TypeError).
        raise NotImplementedError
| 29.23871 | 102 | 0.647176 | import io
import sys
import os, sys
import requests
import PIL
import warnings
import os
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
def exists(val):
    """True iff *val* is set (i.e. not None)."""
    return val is not None


def default(val, d):
    """Fall back to *d* when *val* is None."""
    return val if val is not None else d


def load_model(path):
    """Load a torch checkpoint from disk, mapping all tensors to the CPU."""
    with open(path, 'rb') as checkpoint:
        return torch.load(checkpoint, map_location=torch.device('cpu'))


def map_pixels(x, eps=0.1):
    """Shrink pixel values from [0, 1] into the interval [eps, 1 - eps]."""
    return (1 - 2 * eps) * x + eps


def unmap_pixels(x, eps=0.1):
    """Inverse of ``map_pixels``, clamped to the valid [0, 1] range."""
    return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)
def download(url, filename = None, root = CACHE_PATH):
    """Fetch *url* to ``root/filename`` and return the resulting path.

    Data is streamed into a temporary file that is renamed into place only
    after the transfer completes; an existing regular file is reused as-is.
    """
    os.makedirs(root, exist_ok = True)
    filename = default(filename, os.path.basename(url))

    download_target = os.path.join(root, filename)
    # FIX: previously the fixed temp name 'tmp.(unknown)' was shared by
    # every download; derive the temp name from the target file instead.
    download_target_tmp = os.path.join(root, f'tmp.{filename}')

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        return download_target

    with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    os.rename(download_target_tmp, download_target)
    return download_target
class OpenAIDiscreteVAE(nn.Module):
    """OpenAI's released discrete VAE (DALL-E), wrapped for this codebase.

    The encoder/decoder checkpoints are downloaded lazily on construction.
    """

    def __init__(self):
        super().__init__()

        self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
        self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))

        self.num_layers = 3     # implies 8x spatial downsampling
        self.image_size = 256
        self.num_tokens = 8192  # size of the discrete codebook

    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Map a batch of images to flattened codebook indices."""
        img = map_pixels(img)
        z_logits = self.enc(img)
        z = torch.argmax(z_logits, dim = 1)
        return rearrange(z, 'b h w -> b (h w)')

    def decode(self, img_seq):
        """Reconstruct images in [0, 1] from flattened codebook indices."""
        b, n = img_seq.shape
        img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))

        z = F.one_hot(img_seq, num_classes = self.num_tokens)
        z = rearrange(z, 'b h w c -> b c h w').float()
        x_stats = self.dec(z).float()
        x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
        return x_rec

    def forward(self, img):
        # Bug fix: was `raise NotImplemented`, which is a TypeError at
        # runtime; the correct exception class is NotImplementedError.
        raise NotImplementedError
class VQGanVAE1024(nn.Module):
    """VQGAN with a 1024-entry codebook (Taming Transformers,
    https://arxiv.org/abs/2012.09841), loaded from pretrained weights.
    """

    def __init__(self):
        super().__init__()

        model_filename = 'vqgan.1024.model.ckpt'
        config_filename = 'vqgan.1024.config.yml'
        download(VQGAN_VAE_CONFIG_PATH, config_filename)
        download(VQGAN_VAE_PATH, model_filename)

        config = OmegaConf.load(str(Path(CACHE_PATH) / config_filename))
        model = VQModel(**config.model.params)

        state = torch.load(str(Path(CACHE_PATH) / model_filename), map_location = 'cpu')['state_dict']
        model.load_state_dict(state, strict = False)

        self.model = model

        self.num_layers = 4     # implies 16x spatial downsampling
        self.image_size = 256
        self.num_tokens = 1024

    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Map images in [0, 1] to flattened codebook indices."""
        b = img.shape[0]
        img = (2 * img) - 1  # rescale to the [-1, 1] range VQGAN expects
        _, _, [_, _, indices] = self.model.encode(img)
        return rearrange(indices, '(b n) () -> b n', b = b)

    def decode(self, img_seq):
        """Reconstruct images in [0, 1] from flattened codebook indices."""
        b, n = img_seq.shape
        one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
        z = (one_hot_indices @ self.model.quantize.embedding.weight)

        z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
        img = self.model.decode(z)

        img = (img.clamp(-1., 1.) + 1) * 0.5
        return img

    def forward(self, img):
        # Bug fix: was `raise NotImplemented` (a sentinel, not an
        # exception); raise NotImplementedError instead.
        raise NotImplementedError
| true | true |
1c350b4b10fd65ce70ca77ba9f9e4419bd36a485 | 1,215 | py | Python | app/users/models.py | onosendi/flask-boilerplate | 4e4734e2ac416c5ef6a82b2b36b2458de0463091 | [
"Unlicense"
] | 5 | 2020-05-25T02:06:50.000Z | 2021-05-03T22:37:12.000Z | app/users/models.py | onosendi/flask-boilerplate | 4e4734e2ac416c5ef6a82b2b36b2458de0463091 | [
"Unlicense"
] | null | null | null | app/users/models.py | onosendi/flask-boilerplate | 4e4734e2ac416c5ef6a82b2b36b2458de0463091 | [
"Unlicense"
] | 2 | 2020-07-18T13:01:29.000Z | 2020-11-26T16:43:56.000Z | from flask_login import UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
from app.common.extensions import db, login
from app.common.models import BaseMixin, SoftDeleteMixin, TimestampMixin
class User(
    UserMixin,
    BaseMixin,
    SoftDeleteMixin,
    TimestampMixin,
    db.Model,
):
    """Application user account.

    Combines Flask-Login integration (``UserMixin``) with the project's
    base model behaviour (soft delete and created/updated timestamps).
    """

    username = db.Column(db.String(35), nullable=False, unique=True)
    email = db.Column(db.String(255), nullable=False, unique=True)
    # Stores the password hash produced by set_password(), never plain text.
    password = db.Column(db.String(128), nullable=False)

    posts = db.relationship('Post', backref='author', lazy='dynamic',
                            order_by='desc(Post.created)')

    def __init__(self, *args, **kwargs):
        # Normalise the email address to lowercase so lookups and the
        # unique constraint are case-insensitive. Bug fix: guard against a
        # missing email so we fail at the database layer (NOT NULL) rather
        # than with an AttributeError on None here.
        email = kwargs.get('email')
        if email is not None:
            kwargs['email'] = email.lower()
        super().__init__(*args, **kwargs)

    def __repr__(self) -> str:
        return f'<User {self.username}>'

    def set_password(self, password: str) -> None:
        """Hash *password* and store the digest."""
        self.password = generate_password_hash(password)

    def check_password(self, password: str) -> bool:
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password, password)
@login.user_loader
def load_user(id):
    """Flask-Login callback: fetch a user by primary key for the session."""
    return User.query.get(int(id))
| 31.153846 | 73 | 0.683951 | from flask_login import UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
from app.common.extensions import db, login
from app.common.models import BaseMixin, SoftDeleteMixin, TimestampMixin
class User(
    UserMixin,
    BaseMixin,
    SoftDeleteMixin,
    TimestampMixin,
    db.Model,
):
    """User account model with soft-delete and timestamp behaviour."""

    username = db.Column(db.String(35), nullable=False, unique=True)
    email = db.Column(db.String(255), nullable=False, unique=True)
    # Holds the hashed password; see set_password()/check_password().
    password = db.Column(db.String(128), nullable=False)

    posts = db.relationship('Post', backref='author', lazy='dynamic',
                            order_by='desc(Post.created)')

    def __init__(self, *args, **kwargs):
        # Lowercase the email for case-insensitive uniqueness. Bug fix:
        # a missing email previously raised AttributeError on None; skip
        # the normalisation instead and let the NOT NULL constraint report it.
        email = kwargs.get('email')
        if email is not None:
            kwargs['email'] = email.lower()
        super().__init__(*args, **kwargs)

    def __repr__(self) -> str:
        return f'<User {self.username}>'

    def set_password(self, password: str) -> None:
        """Store the hash of *password*."""
        self.password = generate_password_hash(password)

    def check_password(self, password: str) -> bool:
        """Check *password* against the stored hash."""
        return check_password_hash(self.password, password)
@login.user_loader
def load_user(id):
    """Resolve the session's user id to a User row for Flask-Login."""
    return User.query.get(int(id))
| true | true |
1c350bc3b76d57a7c99c9fd181d2b1de2a842cb7 | 5,185 | py | Python | groupdocs_signature_cloud/models/time_stamp.py | groupdocs-signature-cloud/groupdocs-signature-cloud-python | 2b7f03b3d70f191dc1292f6221ed9301811681cf | [
"MIT"
] | null | null | null | groupdocs_signature_cloud/models/time_stamp.py | groupdocs-signature-cloud/groupdocs-signature-cloud-python | 2b7f03b3d70f191dc1292f6221ed9301811681cf | [
"MIT"
] | null | null | null | groupdocs_signature_cloud/models/time_stamp.py | groupdocs-signature-cloud/groupdocs-signature-cloud-python | 2b7f03b3d70f191dc1292f6221ed9301811681cf | [
"MIT"
] | 1 | 2021-02-03T00:18:17.000Z | 2021-02-03T00:18:17.000Z | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="TimeStamp.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class TimeStamp(object):
    """Represents data to get a time stamp from a third-party site.

    Auto-generated swagger-style model with Url/User/Password fields.
    """

    # Maps attribute name -> attribute type.
    swagger_types = {
        'url': 'str',
        'user': 'str',
        'password': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'url': 'Url',
        'user': 'User',
        'password': 'Password'
    }

    def __init__(self, url=None, user=None, password=None, **kwargs):  # noqa: E501
        """Initializes new instance of TimeStamp."""  # noqa: E501
        self._url = None
        self._user = None
        self._password = None

        if url is not None:
            self.url = url
        if user is not None:
            self.user = user
        if password is not None:
            self.password = password

    @property
    def url(self):
        """Url of third-party site."""
        return self._url

    @url.setter
    def url(self, url):
        """Sets the url."""
        self._url = url

    @property
    def user(self):
        """User."""
        return self._user

    @user.setter
    def user(self, user):
        """Sets the user."""
        self._user = user

    @property
    def password(self):
        """Password."""
        return self._password

    @password.setter
    def password(self, password):
        """Sets the password."""
        self._password = password

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}

        # Python 2 shim removed: iterate the dict directly instead of going
        # through six.iteritems (behaviour is identical on Python 3).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, TimeStamp):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 28.027027 | 85 | 0.543105 |
import pprint
import re
import six
class TimeStamp(object):
    """Swagger model carrying credentials for a third-party time-stamp site."""

    # attribute name -> attribute type
    swagger_types = {
        'url': 'str',
        'user': 'str',
        'password': 'str'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'url': 'Url',
        'user': 'User',
        'password': 'Password'
    }

    def __init__(self, url=None, user=None, password=None, **kwargs):
        """Create a TimeStamp, setting only the fields that were supplied."""
        self._url = None
        self._user = None
        self._password = None
        if url is not None:
            self.url = url
        if user is not None:
            self.user = user
        if password is not None:
            self.password = password

    @property
    def url(self):
        """Url of the third-party site."""
        return self._url

    @url.setter
    def url(self, url):
        self._url = url

    @property
    def user(self):
        """User name."""
        return self._user

    @user.setter
    def user(self, user):
        self._user = user

    @property
    def password(self):
        """Password."""
        return self._password

    @password.setter
    def password(self, password):
        self._password = password

    def to_dict(self):
        """Return the model properties as a plain dict."""
        result = {}
        # Replaced six.iteritems with direct iteration (same on Python 3).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two TimeStamps are equal when all their fields match."""
        if not isinstance(other, TimeStamp):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
1c350cff265b3d8016a43c446d64999c2d32b3e3 | 6,479 | py | Python | tests/test_packages/test_skills/test_generic_seller/test_dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 28 | 2021-10-31T18:54:14.000Z | 2022-03-17T13:10:43.000Z | tests/test_packages/test_skills/test_generic_seller/test_dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 66 | 2021-10-31T11:55:48.000Z | 2022-03-31T06:26:23.000Z | tests/test_packages/test_skills/test_generic_seller/test_dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the dialogue classes of the generic seller skill."""
from pathlib import Path
from typing import cast
import pytest
from aea.exceptions import AEAEnforceError
from aea.helpers.transaction.base import Terms
from aea.protocols.dialogue.base import DialogueLabel
from aea.test_tools.test_skill import BaseSkillTestCase, COUNTERPARTY_AGENT_ADDRESS
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.fipa.message import FipaMessage
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
DefaultDialogue,
DefaultDialogues,
FipaDialogue,
FipaDialogues,
LedgerApiDialogue,
LedgerApiDialogues,
OefSearchDialogue,
OefSearchDialogues,
)
from tests.conftest import ROOT_DIR
class TestDialogues(BaseSkillTestCase):
    """Test dialogue classes of generic seller."""

    path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "generic_seller")

    @classmethod
    def setup(cls):
        """Set up the test class: expose each dialogues registry as an attribute."""
        super().setup()
        cls.default_dialogues = cast(
            DefaultDialogues, cls._skill.skill_context.default_dialogues
        )
        cls.fipa_dialogues = cast(
            FipaDialogues, cls._skill.skill_context.fipa_dialogues
        )
        cls.ledger_api_dialogues = cast(
            LedgerApiDialogues, cls._skill.skill_context.ledger_api_dialogues
        )
        cls.oef_search_dialogues = cast(
            OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues
        )

    def test_default_dialogues(self):
        """A default dialogue takes the AGENT role and the agent's address."""
        _, dialogue = self.default_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=DefaultMessage.Performative.BYTES,
            content=b"some_content",
        )
        assert dialogue.role == DefaultDialogue.Role.AGENT
        assert dialogue.self_address == self.skill.skill_context.agent_address

    def test_fipa_dialogue(self):
        """The FipaDialogue terms property may be set exactly once."""
        dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=DefaultDialogue.Role.AGENT,
        )

        # Reading the terms before they are set must fail ...
        with pytest.raises(AEAEnforceError, match="Terms not set!"):
            assert dialogue.terms

        terms = Terms(
            "some_ledger_id",
            self.skill.skill_context.agent_address,
            "counterprty",
            {"currency_id": 50},
            {"good_id": -10},
            "some_nonce",
        )
        dialogue.terms = terms
        # ... and setting them a second time must fail as well.
        with pytest.raises(AEAEnforceError, match="Terms already set!"):
            dialogue.terms = terms
        assert dialogue.terms == terms

    def test_fipa_dialogues(self):
        """A FIPA dialogue created by this skill takes the SELLER role."""
        _, dialogue = self.fipa_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=FipaMessage.Performative.CFP,
            query="some_query",
        )
        assert dialogue.role == FipaDialogue.Role.SELLER
        assert dialogue.self_address == self.skill.skill_context.agent_address

    def test_ledger_api_dialogue(self):
        """The associated FIPA dialogue may be set exactly once."""
        ledger_api_dialogue = LedgerApiDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=LedgerApiDialogue.Role.AGENT,
        )

        # Reading the association before it is set must fail ...
        with pytest.raises(AEAEnforceError, match="FipaDialogue not set!"):
            assert ledger_api_dialogue.associated_fipa_dialogue

        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=FipaDialogue.Role.BUYER,
        )
        ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        # ... and re-setting it must fail as well.
        with pytest.raises(AEAEnforceError, match="FipaDialogue already set!"):
            ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        assert ledger_api_dialogue.associated_fipa_dialogue == fipa_dialogue

    def test_ledger_api_dialogues(self):
        """Ledger-API dialogues use the skill id as their self-address."""
        _, dialogue = self.ledger_api_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=LedgerApiMessage.Performative.GET_BALANCE,
            ledger_id="some_ledger_id",
            address="some_address",
        )
        assert dialogue.role == LedgerApiDialogue.Role.AGENT
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)

    def test_oef_search_dialogues(self):
        """OEF search dialogues use the skill id as their self-address."""
        _, dialogue = self.oef_search_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=OefSearchMessage.Performative.SEARCH_SERVICES,
            query="some_query",
        )
        assert dialogue.role == OefSearchDialogue.Role.AGENT
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)
| 38.337278 | 89 | 0.656428 |
from pathlib import Path
from typing import cast
import pytest
from aea.exceptions import AEAEnforceError
from aea.helpers.transaction.base import Terms
from aea.protocols.dialogue.base import DialogueLabel
from aea.test_tools.test_skill import BaseSkillTestCase, COUNTERPARTY_AGENT_ADDRESS
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.fipa.message import FipaMessage
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
DefaultDialogue,
DefaultDialogues,
FipaDialogue,
FipaDialogues,
LedgerApiDialogue,
LedgerApiDialogues,
OefSearchDialogue,
OefSearchDialogues,
)
from tests.conftest import ROOT_DIR
class TestDialogues(BaseSkillTestCase):
    """Tests for the dialogue classes of the generic seller skill."""

    path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "generic_seller")

    @classmethod
    def setup(cls):
        """Cache each dialogues registry from the skill context."""
        super().setup()
        cls.default_dialogues = cast(
            DefaultDialogues, cls._skill.skill_context.default_dialogues
        )
        cls.fipa_dialogues = cast(
            FipaDialogues, cls._skill.skill_context.fipa_dialogues
        )
        cls.ledger_api_dialogues = cast(
            LedgerApiDialogues, cls._skill.skill_context.ledger_api_dialogues
        )
        cls.oef_search_dialogues = cast(
            OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues
        )

    def test_default_dialogues(self):
        """DefaultDialogues assigns the AGENT role and agent address."""
        _, dialogue = self.default_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=DefaultMessage.Performative.BYTES,
            content=b"some_content",
        )
        assert dialogue.role == DefaultDialogue.Role.AGENT
        assert dialogue.self_address == self.skill.skill_context.agent_address

    def test_fipa_dialogue(self):
        """FipaDialogue.terms is write-once: unset access and re-set both raise."""
        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=DefaultDialogue.Role.AGENT,
        )

        with pytest.raises(AEAEnforceError, match="Terms not set!"):
            assert fipa_dialogue.terms

        terms = Terms(
            "some_ledger_id",
            self.skill.skill_context.agent_address,
            "counterprty",
            {"currency_id": 50},
            {"good_id": -10},
            "some_nonce",
        )
        fipa_dialogue.terms = terms
        with pytest.raises(AEAEnforceError, match="Terms already set!"):
            fipa_dialogue.terms = terms
        assert fipa_dialogue.terms == terms

    def test_fipa_dialogues(self):
        """FipaDialogues assigns the SELLER role to dialogues this skill starts."""
        _, dialogue = self.fipa_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=FipaMessage.Performative.CFP,
            query="some_query",
        )
        assert dialogue.role == FipaDialogue.Role.SELLER
        assert dialogue.self_address == self.skill.skill_context.agent_address

    def test_ledger_api_dialogue(self):
        """associated_fipa_dialogue is write-once on LedgerApiDialogue."""
        ledger_api_dialogue = LedgerApiDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=LedgerApiDialogue.Role.AGENT,
        )

        with pytest.raises(AEAEnforceError, match="FipaDialogue not set!"):
            assert ledger_api_dialogue.associated_fipa_dialogue

        fipa_dialogue = FipaDialogue(
            DialogueLabel(
                ("", ""),
                COUNTERPARTY_AGENT_ADDRESS,
                self.skill.skill_context.agent_address,
            ),
            self.skill.skill_context.agent_address,
            role=FipaDialogue.Role.BUYER,
        )
        ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        with pytest.raises(AEAEnforceError, match="FipaDialogue already set!"):
            ledger_api_dialogue.associated_fipa_dialogue = fipa_dialogue
        assert ledger_api_dialogue.associated_fipa_dialogue == fipa_dialogue

    def test_ledger_api_dialogues(self):
        """LedgerApiDialogues uses the skill id as the self address."""
        _, dialogue = self.ledger_api_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=LedgerApiMessage.Performative.GET_BALANCE,
            ledger_id="some_ledger_id",
            address="some_address",
        )
        assert dialogue.role == LedgerApiDialogue.Role.AGENT
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)

    def test_oef_search_dialogues(self):
        """OefSearchDialogues uses the skill id as the self address."""
        _, dialogue = self.oef_search_dialogues.create(
            counterparty=COUNTERPARTY_AGENT_ADDRESS,
            performative=OefSearchMessage.Performative.SEARCH_SERVICES,
            query="some_query",
        )
        assert dialogue.role == OefSearchDialogue.Role.AGENT
        assert dialogue.self_address == str(self.skill.skill_context.skill_id)
| true | true |
1c350d442c7d186deb105cfcbcba108ae69e92e0 | 2,660 | py | Python | Turla Group/Kopiluwak/kopiluwakUAShodanSearch.py | CharityW4CTI/Research | 75ef5dada737148bc105b2b0cc2f276cf35266d7 | [
"MIT"
] | null | null | null | Turla Group/Kopiluwak/kopiluwakUAShodanSearch.py | CharityW4CTI/Research | 75ef5dada737148bc105b2b0cc2f276cf35266d7 | [
"MIT"
] | null | null | null | Turla Group/Kopiluwak/kopiluwakUAShodanSearch.py | CharityW4CTI/Research | 75ef5dada737148bc105b2b0cc2f276cf35266d7 | [
"MIT"
] | null | null | null | import shodan
import re
import argparse
import textwrap
# Kopiluwak implant marker: 16 digits followed by 16 alphanumerics.
# Compiled once at module level instead of on every call.
_KOPILUWAK_UA_RE = re.compile(r"([0-9]{16}[a-zA-Z0-9]{16})")


def kopiluwak_match(ua):
    """Return True if ``ua`` ends with the 32-character Kopiluwak marker.

    Kopiluwak appends a 32-character token (16 digits followed by 16
    alphanumerics) to the end of the User-Agent string, so only the last
    32 characters of ``ua`` are inspected.

    Args:
        ua: The User-Agent header value to test.

    Returns:
        True if the marker is present, False otherwise.
    """
    # The pattern is exactly 32 characters, so a search over the last 32
    # characters can only match when the marker terminates the string.
    return _KOPILUWAK_UA_RE.search(ua[-32:]) is not None
def uaShodanCheck(ua, SHODAN_API_KEY):
    """Search Shodan for ``ua`` and test each result for the Kopiluwak marker.

    Args:
        ua: The search query, e.g. a full "User-Agent: ..." header string.
        SHODAN_API_KEY: A valid Shodan API key.

    Returns:
        A tuple ``(total, scannedUA)`` where ``total`` is the number of
        matches Shodan reported (0 if the search failed) and ``scannedUA``
        maps each observed User-Agent value to ``[ip_str, matched]``.
        NOTE: results sharing the same User-Agent value overwrite each
        other, because the UA string is the dict key.
    """
    api = shodan.Shodan(SHODAN_API_KEY)
    scannedUA = {}
    # Initialize before the try-block: the original code raised
    # UnboundLocalError at the return below whenever the API call failed.
    total = 0
    try:
        results = api.search(ua)
        total = results["total"]
        # Shodan returns the first page of matches; extract each banner's
        # User-Agent header and test it against the Kopiluwak regex.
        for result in results["matches"]:
            for header in result["data"].splitlines():
                if "User-Agent" in header:
                    # Split on the first colon only; index 1 keeps the
                    # header value (including its leading space).
                    header_value = header.split(":", 1)[1]
                    found = kopiluwak_match(header_value)
                    scannedUA[header_value] = [result["ip_str"], found]
    except shodan.APIError as e:
        print("Error: {}".format(e))
    return total, scannedUA
def main():
    """Parse the command line and run the Kopiluwak User-Agent search.

    Requires a Shodan API token via -t/--token; prints each scanned
    User-Agent together with its source IP and whether it matched.
    """
    logo = """
 ╦┌┐┌┌─┐┬┬┌─┌┬┐ ╔═╗┬─┐┌─┐┬ ┬┌─┐
 ║│││└─┐│├┴┐ │ ║ ╦├┬┘│ ││ │├─┘
 ╩┘└┘└─┘┴┴ ┴ ┴ ╚═╝┴└─└─┘└─┘┴
    """
    banner = """
    %s
    Turla Kopiluwak User-Agent Shodan Search
    ----------------------------------------------------------------
    This tool will perform a regex search over user-agents in Shodan looking for the unique Kopiluwak string appended to the end.
    To use, just include your Shodan API token as a parameter.
    Examples:
    \t python kopiluwakUAShodanSearch.py -t Shodan API Token
    """ % (
        logo
    )
    # -t/--token carries the Shodan API key; the dedented banner becomes
    # the --help description (RawDescriptionHelpFormatter keeps its layout).
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(banner),
    )
    parser.add_argument("-t", "--token", help="Shodan API Token")
    args = parser.parse_args()
    if args.token:
        print("%s\nTurla Kopiluwak User-Agent Shodan Search\n" % (logo))
        total, scannedUA = uaShodanCheck(
            "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64)", args.token
        )
        print("Scanned %s User-Agents, results are below: \n" % (total))
        # info is [ip_str, matched]. The original code also assigned
        # ip = info[1] — the match flag, not the IP — and never used it;
        # that dead, mislabeled assignment has been dropped.
        for ua, info in scannedUA.items():
            print("Scanned:%s\n\tIP: %s\n\tResult:%s\n" % (ua, info[0], info[1]))
    else:
        print(
            'Error: Please Provide Shodan API Token as a parameter, "python kopiluwakUAShodanSearch.py -t Shodan API Token"'
        )
# Run the search only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 29.88764 | 125 | 0.579323 | import shodan
import re
import argparse
import textwrap
def kopiluwak_match(ua):
found = False
ua_stripped = ua[-32:]
matchObj = re.search("([0-9]{16}[a-zA-Z0-9]{16})", ua_stripped)
if matchObj:
found = True
return found
def uaShodanCheck(ua, SHODAN_API_KEY):
api = shodan.Shodan(SHODAN_API_KEY)
scannedUA = {}
try:
results = api.search(ua)
total = results["total"]
for result in results["matches"]:
headers = result["data"].splitlines()
for header in headers:
if "User-Agent" in header:
ua = header.split(":", 1)
found = kopiluwak_match(ua[1])
scannedUA[ua[1]] = [result["ip_str"], found]
except shodan.APIError as e:
print("Error: {}".format(e))
return total, scannedUA
def main():
logo = """
╦┌┐┌┌─┐┬┬┌─┌┬┐ ╔═╗┬─┐┌─┐┬ ┬┌─┐
║│││└─┐│├┴┐ │ ║ ╦├┬┘│ ││ │├─┘
╩┘└┘└─┘┴┴ ┴ ┴ ╚═╝┴└─└─┘└─┘┴
"""
banner = """
%s
Turla Kopiluwak User-Agent Shodan Search
----------------------------------------------------------------
This tool will perform a regex search over user-agents in Shodan looking for the unique Kopiluwak string appended to the end.
To use, just include your Shodan API token as a parameter.
Examples:
\t python kopiluwakUAShodanSearch.py -t Shodan API Token
""" % (
logo
)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(banner),
)
parser.add_argument("-t", "--token", help="Shodan API Token")
args = parser.parse_args()
if args.token:
print("%s\nTurla Kopiluwak User-Agent Shodan Search\n" % (logo))
total, scannedUA = uaShodanCheck(
"User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64)", args.token
)
print("Scanned %s User-Agents, results are below: \n" % (total))
for ua, info in scannedUA.items():
ip = info[1]
print("Scanned:%s\n\tIP: %s\n\tResult:%s\n" % (ua, info[0], info[1]))
else:
print(
'Error: Please Provide Shodan API Token as a parameter, "python kopiluwakUAShodanSearch.py -t Shodan API Token"'
)
if __name__ == "__main__":
main()
| true | true |
1c350edcd8d3589e728d7f3781f5b74c4ada5167 | 82,249 | py | Python | PythonVirtEnv/Lib/site-packages/plotly/graph_objs/_pie.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 2 | 2021-07-18T11:39:56.000Z | 2021-11-06T17:13:05.000Z | venv/Lib/site-packages/plotly/graph_objs/_pie.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/plotly/graph_objs/_pie.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Pie(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "pie"
_valid_props = {
"automargin",
"customdata",
"customdatasrc",
"direction",
"dlabel",
"domain",
"hole",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextfont",
"insidetextorientation",
"label0",
"labels",
"labelssrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"marker",
"meta",
"metasrc",
"name",
"opacity",
"outsidetextfont",
"pull",
"pullsrc",
"rotation",
"scalegroup",
"showlegend",
"sort",
"stream",
"text",
"textfont",
"textinfo",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"title",
"titlefont",
"titleposition",
"type",
"uid",
"uirevision",
"values",
"valuessrc",
"visible",
}
# automargin
# ----------
@property
def automargin(self):
"""
Determines whether outside text labels can push the margins.
The 'automargin' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["automargin"]
@automargin.setter
def automargin(self, val):
self["automargin"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# direction
# ---------
@property
def direction(self):
"""
Specifies the direction at which succeeding sectors follow one
another.
The 'direction' property is an enumeration that may be specified as:
- One of the following enumeration values:
['clockwise', 'counterclockwise']
Returns
-------
Any
"""
return self["direction"]
@direction.setter
def direction(self, val):
self["direction"] = val
# dlabel
# ------
@property
def dlabel(self):
"""
Sets the label step. See `label0` for more info.
The 'dlabel' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dlabel"]
@dlabel.setter
def dlabel(self, val):
self["dlabel"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this pie trace .
row
If there is a layout grid, use the domain for
this row in the grid for this pie trace .
x
Sets the horizontal domain of this pie trace
(in plot fraction).
y
Sets the vertical domain of this pie trace (in
plot fraction).
Returns
-------
plotly.graph_objs.pie.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# hole
# ----
@property
def hole(self):
"""
Sets the fraction of the radius to cut out of the pie. Use this
to make a donut chart.
The 'hole' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["hole"]
@hole.setter
def hole(self, val):
self["hole"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'percent', 'name'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.pie.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. variables
`label`, `color`, `value`, `percent` and `text`. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each sector. If a
single string, the same string appears for all data points. If
an array of string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo` must contain a
"text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# insidetextfont
# --------------
@property
def insidetextfont(self):
"""
Sets the font used for `textinfo` lying inside the sector.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.pie.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
# insidetextorientation
# ---------------------
@property
def insidetextorientation(self):
"""
Controls the orientation of the text inside chart sectors. When
set to "auto", text may be oriented in any direction in order
to be as big as possible in the middle of a sector. The
"horizontal" option orients text to be parallel with the bottom
of the chart, and may make text smaller in order to achieve
that goal. The "radial" option orients text along the radius of
the sector. The "tangential" option orients text perpendicular
to the radius of the sector.
The 'insidetextorientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['horizontal', 'radial', 'tangential', 'auto']
Returns
-------
Any
"""
return self["insidetextorientation"]
@insidetextorientation.setter
def insidetextorientation(self, val):
self["insidetextorientation"] = val
# label0
# ------
@property
def label0(self):
"""
Alternate to `labels`. Builds a numeric set of labels. Use with
`dlabel` where `label0` is the starting label and `dlabel` the
step.
The 'label0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["label0"]
@label0.setter
def label0(self, val):
self["label0"] = val
# labels
# ------
@property
def labels(self):
"""
Sets the sector labels. If `labels` entries are duplicated, we
sum associated `values` or simply count occurrences if `values`
is not provided. For other array attributes (including color)
we use the first non-empty entry among all occurrences of the
label.
The 'labels' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["labels"]
@labels.setter
def labels(self, val):
self["labels"] = val
# labelssrc
# ---------
@property
def labelssrc(self):
"""
Sets the source reference on Chart Studio Cloud for labels .
The 'labelssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelssrc"]
@labelssrc.setter
def labelssrc(self, val):
self["labelssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.pie.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
`*reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
colors
Sets the color of each sector. If not
specified, the default trace color set is used
to pick the sector colors.
colorssrc
Sets the source reference on Chart Studio Cloud
for colors .
line
:class:`plotly.graph_objects.pie.marker.Line`
instance or dict with compatible properties
Returns
-------
plotly.graph_objs.pie.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# outsidetextfont
# ---------------
@property
def outsidetextfont(self):
"""
Sets the font used for `textinfo` lying outside the sector.
The 'outsidetextfont' property is an instance of Outsidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Outsidetextfont`
- A dict of string/value properties that will be passed
to the Outsidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.pie.Outsidetextfont
"""
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
# pull
# ----
@property
def pull(self):
"""
Sets the fraction of larger radius to pull the sectors out from
the center. This can be a constant to pull all slices apart
from each other equally or an array to highlight one or more
slices.
The 'pull' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["pull"]
@pull.setter
def pull(self, val):
self["pull"] = val
# pullsrc
# -------
@property
def pullsrc(self):
"""
Sets the source reference on Chart Studio Cloud for pull .
The 'pullsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["pullsrc"]
@pullsrc.setter
def pullsrc(self, val):
self["pullsrc"] = val
# rotation
# --------
@property
def rotation(self):
"""
Instead of the first slice starting at 12 o'clock, rotate to
some other angle.
The 'rotation' property is a number and may be specified as:
- An int or float in the interval [-360, 360]
Returns
-------
int|float
"""
return self["rotation"]
@rotation.setter
def rotation(self, val):
self["rotation"] = val
# scalegroup
# ----------
@property
def scalegroup(self):
"""
If there are multiple pie charts that should be sized according
to their totals, link them by providing a non-empty group id
here shared by every trace in the same group.
The 'scalegroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["scalegroup"]
@scalegroup.setter
def scalegroup(self, val):
self["scalegroup"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# sort
# ----
@property
def sort(self):
"""
Determines whether or not the sectors are reordered from
largest to smallest.
The 'sort' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["sort"]
@sort.setter
def sort(self, val):
self["sort"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.pie.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each sector. If trace
`textinfo` contains a "text" flag, these elements will be seen
on the chart. If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in the
hover labels.
The 'text' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font used for `textinfo`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.pie.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textinfo
# --------
@property
def textinfo(self):
"""
Determines which trace information appear on the graph.
The 'textinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'percent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["textinfo"]
@textinfo.setter
def textinfo(self, val):
self["textinfo"] = val
# textposition
# ------------
@property
def textposition(self):
    """
    Specifies where the `textinfo` labels are placed.

    The 'textposition' property is an enumeration that may be given as:
      - One of the values 'inside', 'outside', 'auto', 'none'
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["textposition"]

@textposition.setter
def textposition(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["textposition"] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for textposition.

    The 'textpositionsrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["textpositionsrc"]

@textpositionsrc.setter
def textpositionsrc(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["textpositionsrc"] = val
# textsrc
# -------
@property
def textsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for text.

    The 'textsrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["textsrc"]

@textsrc.setter
def textsrc(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
    """
    Template string used to render the per-point information text;
    when set, this overrides `textinfo`.

    Variables are inserted with %{variable}, for example "y: %{y}".
    Numbers are formatted with d3-format syntax
    %{variable:d3-format}, for example "Price: %{y:$.2f}"; see
    https://github.com/d3/d3-3.x-api-
    reference/blob/master/Formatting.md#d3_format for the formatting
    syntax. Dates are formatted with d3-time-format syntax
    %{variable|d3-time-format}, for example "Day: %{2019-01-01|%A}";
    see https://github.com/d3/d3-time-format#locale_format for the
    date formatting syntax. Every attribute that can be specified
    per-point (the `arrayOk: true` ones) is available, plus the
    variables `label`, `color`, `value`, `percent` and `text`.

    The 'texttemplate' property is a string and must be given as:
      - A string
      - A number that will be converted to a string
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["texttemplate"]

@texttemplate.setter
def texttemplate(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["texttemplate"] = val
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
    """
    Sets the source reference on Chart Studio Cloud for texttemplate.

    The 'texttemplatesrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["texttemplatesrc"]

@texttemplatesrc.setter
def texttemplatesrc(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["texttemplatesrc"] = val
# title
# -----
@property
def title(self):
    """
    The 'title' property is an instance of Title that may be given as:
      - An instance of :class:`plotly.graph_objs.pie.Title`
      - A dict of string/value properties that will be passed to the
        Title constructor

        Supported dict properties:

            font
                Sets the font used for `title`. (The title's font
                used to be set by the now-deprecated `titlefont`
                attribute.)
            position
                Specifies the location of the `title`. (The title's
                position used to be set by the now-deprecated
                `titleposition` attribute.)
            text
                Sets the title of the chart; if empty, no title is
                displayed. (Before `title.text` existed, the title's
                contents were defined as the `title` attribute
                itself; that behavior has been deprecated.)

    Returns
    -------
    plotly.graph_objs.pie.Title
    """
    return self["title"]

@title.setter
def title(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
    """
    Deprecated: please use pie.title.font instead. Sets the font used
    for `title`. (The title's font used to be set by the
    now-deprecated `titlefont` attribute.)

    The 'font' property is an instance of Font that may be given as:
      - An instance of :class:`plotly.graph_objs.pie.title.Font`
      - A dict of string/value properties that will be passed to the
        Font constructor

        Supported dict properties:

            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color.
            family
                HTML font family - the typeface applied by the web
                browser. A browser can only apply a font available
                on the system it runs on; provide multiple font
                families separated by commas to express preference
                order when some are unavailable. The Chart Studio
                Cloud (at https://chart-studio.plotly.com or
                on-premise) generates images on a server where only
                a select set of fonts is installed and supported:
                "Arial", "Balto", "Courier New", "Droid Sans",
                "Droid Serif", "Droid Sans Mono", "Gravitas One",
                "Old Standard TT", "Open Sans", "Overpass",
                "PT Sans Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for family.
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for size.

    Returns
    -------
    plotly.graph_objs.pie.title.Font
    """
    return self["titlefont"]

@titlefont.setter
def titlefont(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["titlefont"] = val
# titleposition
# -------------
@property
def titleposition(self):
    """
    Deprecated: please use pie.title.position instead. Specifies the
    location of the `title`. (The title's position used to be set by
    the now-deprecated `titleposition` attribute.)

    The 'position' property is an enumeration that may be given as
    one of: 'top left', 'top center', 'top right', 'middle center',
    'bottom left', 'bottom center', 'bottom right'.

    Returns
    -------
    Any
    """
    return self["titleposition"]

@titleposition.setter
def titleposition(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["titleposition"] = val
# uid
# ---
@property
def uid(self):
    """
    Assign an id to this trace. Use this to provide object constancy
    between traces during animations and transitions.

    The 'uid' property is a string and must be given as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["uid"]

@uid.setter
def uid(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
    """
    Controls persistence of some user-driven changes to the trace:
    `constraintrange` in `parcoords` traces, as well as some
    `editable: true` modifications such as `name` and
    `colorbar.title`. Defaults to `layout.uirevision`.

    Other user-driven trace attribute changes are controlled by
    `layout` attributes: `trace.visible` by
    `layout.legend.uirevision`, `selectedpoints` by
    `layout.selectionrevision`, and `colorbar.(x|y)` (accessible with
    `config: {editable: true}`) by `layout.editrevision`. Trace
    changes are tracked by `uid`, which only falls back on trace
    index if no `uid` is provided; so if your app can add/remove
    traces before the end of the `data` array (such that the same
    trace ends up at a different index), you can still preserve
    user-driven changes by giving each trace a `uid` that stays with
    it as it moves.

    The 'uirevision' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["uirevision"] = val
# values
# ------
@property
def values(self):
    """
    Sets the values of the sectors. If omitted, occurrences of each
    label are counted instead.

    The 'values' property is an array that may be given as a tuple,
    list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["values"]

@values.setter
def values(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["values"] = val
# valuessrc
# ---------
@property
def valuessrc(self):
    """
    Sets the source reference on Chart Studio Cloud for values.

    The 'valuessrc' property must be given as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["valuessrc"]

@valuessrc.setter
def valuessrc(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["valuessrc"] = val
# visible
# -------
@property
def visible(self):
    """
    Determines whether or not this trace is visible. With
    "legendonly" the trace is not drawn, but can still appear as a
    legend item (provided that the legend itself is visible).

    The 'visible' property is an enumeration that may be given as one
    of: True, False, 'legendonly'.

    Returns
    -------
    Any
    """
    return self["visible"]

@visible.setter
def visible(self, val):
    # Delegate storage to the trace's item-assignment interface.
    self["visible"] = val
# type
# ----
@property
def type(self):
    """
    The trace type identifier (read-only) — taken from the trace's
    underlying property dict rather than the item interface, so it
    cannot be reassigned through this property (no setter is defined).
    """
    return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # NOTE: the triple-quoted block below is a *returned runtime
    # string* (not a docstring), so its text is kept byte-for-byte
    # exactly as generated — do not reflow or "fix" it by hand.
    return """\
automargin
Determines whether outside text labels can push the
margins.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
direction
Specifies the direction at which succeeding sectors
follow one another.
dlabel
Sets the label step. See `label0` for more info.
domain
:class:`plotly.graph_objects.pie.Domain` instance or
dict with compatible properties
hole
Sets the fraction of the radius to cut out of the pie.
Use this to make a donut chart.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.pie.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `label`, `color`, `value`,
`percent` and `text`. Anything contained in tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
insidetextorientation
Controls the orientation of the text inside chart
sectors. When set to "auto", text may be oriented in
any direction in order to be as big as possible in the
middle of a sector. The "horizontal" option orients
text to be parallel with the bottom of the chart, and
may make text smaller in order to achieve that goal.
The "radial" option orients text along the radius of
the sector. The "tangential" option orients text
perpendicular to the radius of the sector.
label0
Alternate to `labels`. Builds a numeric set of labels.
Use with `dlabel` where `label0` is the starting label
and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or simply count
occurrences if `values` is not provided. For other
array attributes (including color) we use the first
non-empty entry among all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.pie.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.pie.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector.
pull
Sets the fraction of larger radius to pull the sectors
out from the center. This can be a constant to pull all
slices apart from each other equally or an array to
highlight one or more slices.
pullsrc
Sets the source reference on Chart Studio Cloud for
pull .
rotation
Instead of the first slice starting at 12 o'clock,
rotate to some other angle.
scalegroup
If there are multiple pie charts that should be sized
according to their totals, link them by providing a
non-empty group id here shared by every trace in the
same group.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.pie.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attributes
that can be specified per-point (the ones that are
`arrayOk: true`) are available. variables `label`,
`color`, `value`, `percent` and `text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
title
:class:`plotly.graph_objects.pie.Title` instance or
dict with compatible properties
titlefont
Deprecated: Please use pie.title.font instead. Sets the
font used for `title`. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleposition
Deprecated: Please use pie.title.position instead.
Specifies the location of the `title`. Note that the
title's position used to be set by the now deprecated
`titleposition` attribute.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we count
occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
# Maps each deprecated flat property name to the path of its nested
# replacement (e.g. pie.titlefont -> pie.title.font).
_mapped_properties = {
    "titlefont": ("title", "font"),
    "titleposition": ("title", "position"),
}
def __init__(
self,
arg=None,
automargin=None,
customdata=None,
customdatasrc=None,
direction=None,
dlabel=None,
domain=None,
hole=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
insidetextorientation=None,
label0=None,
labels=None,
labelssrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
outsidetextfont=None,
pull=None,
pullsrc=None,
rotation=None,
scalegroup=None,
showlegend=None,
sort=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
title=None,
titlefont=None,
titleposition=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs
):
"""
Construct a new Pie object
A data visualized by the sectors of the pie is set in `values`.
The sector labels are set in `labels`. The sector colors are
set in `marker.colors`
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Pie`
automargin
Determines whether outside text labels can push the
margins.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
direction
Specifies the direction at which succeeding sectors
follow one another.
dlabel
Sets the label step. See `label0` for more info.
domain
:class:`plotly.graph_objects.pie.Domain` instance or
dict with compatible properties
hole
Sets the fraction of the radius to cut out of the pie.
Use this to make a donut chart.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.pie.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `label`, `color`, `value`,
`percent` and `text`. Anything contained in tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
insidetextorientation
Controls the orientation of the text inside chart
sectors. When set to "auto", text may be oriented in
any direction in order to be as big as possible in the
middle of a sector. The "horizontal" option orients
text to be parallel with the bottom of the chart, and
may make text smaller in order to achieve that goal.
The "radial" option orients text along the radius of
the sector. The "tangential" option orients text
perpendicular to the radius of the sector.
label0
Alternate to `labels`. Builds a numeric set of labels.
Use with `dlabel` where `label0` is the starting label
and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or simply count
occurrences if `values` is not provided. For other
array attributes (including color) we use the first
non-empty entry among all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.pie.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.pie.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector.
pull
Sets the fraction of larger radius to pull the sectors
out from the center. This can be a constant to pull all
slices apart from each other equally or an array to
highlight one or more slices.
pullsrc
Sets the source reference on Chart Studio Cloud for
pull .
rotation
Instead of the first slice starting at 12 o'clock,
rotate to some other angle.
scalegroup
If there are multiple pie charts that should be sized
according to their totals, link them by providing a
non-empty group id here shared by every trace in the
same group.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.pie.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attributes
that can be specified per-point (the ones that are
`arrayOk: true`) are available. variables `label`,
`color`, `value`, `percent` and `text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
title
:class:`plotly.graph_objects.pie.Title` instance or
dict with compatible properties
titlefont
Deprecated: Please use pie.title.font instead. Sets the
font used for `title`. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleposition
Deprecated: Please use pie.title.position instead.
Specifies the location of the `title`. Note that the
title's position used to be set by the now deprecated
`titleposition` attribute.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we count
occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Pie
"""
super(Pie, self).__init__("pie")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Pie
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Pie`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("automargin", None)
_v = automargin if automargin is not None else _v
if _v is not None:
self["automargin"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("direction", None)
_v = direction if direction is not None else _v
if _v is not None:
self["direction"] = _v
_v = arg.pop("dlabel", None)
_v = dlabel if dlabel is not None else _v
if _v is not None:
self["dlabel"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("hole", None)
_v = hole if hole is not None else _v
if _v is not None:
self["hole"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("insidetextorientation", None)
_v = insidetextorientation if insidetextorientation is not None else _v
if _v is not None:
self["insidetextorientation"] = _v
_v = arg.pop("label0", None)
_v = label0 if label0 is not None else _v
if _v is not None:
self["label0"] = _v
_v = arg.pop("labels", None)
_v = labels if labels is not None else _v
if _v is not None:
self["labels"] = _v
_v = arg.pop("labelssrc", None)
_v = labelssrc if labelssrc is not None else _v
if _v is not None:
self["labelssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("outsidetextfont", None)
_v = outsidetextfont if outsidetextfont is not None else _v
if _v is not None:
self["outsidetextfont"] = _v
_v = arg.pop("pull", None)
_v = pull if pull is not None else _v
if _v is not None:
self["pull"] = _v
_v = arg.pop("pullsrc", None)
_v = pullsrc if pullsrc is not None else _v
if _v is not None:
self["pullsrc"] = _v
_v = arg.pop("rotation", None)
_v = rotation if rotation is not None else _v
if _v is not None:
self["rotation"] = _v
_v = arg.pop("scalegroup", None)
_v = scalegroup if scalegroup is not None else _v
if _v is not None:
self["scalegroup"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("sort", None)
_v = sort if sort is not None else _v
if _v is not None:
self["sort"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleposition", None)
_v = titleposition if titleposition is not None else _v
if _v is not None:
self["titleposition"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "pie"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 34.500419 | 103 | 0.560566 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Pie(_BaseTraceType):
_parent_path_str = ""
_path_str = "pie"
_valid_props = {
"automargin",
"customdata",
"customdatasrc",
"direction",
"dlabel",
"domain",
"hole",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextfont",
"insidetextorientation",
"label0",
"labels",
"labelssrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"marker",
"meta",
"metasrc",
"name",
"opacity",
"outsidetextfont",
"pull",
"pullsrc",
"rotation",
"scalegroup",
"showlegend",
"sort",
"stream",
"text",
"textfont",
"textinfo",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"title",
"titlefont",
"titleposition",
"type",
"uid",
"uirevision",
"values",
"valuessrc",
"visible",
}
@property
def automargin(self):
return self["automargin"]
@automargin.setter
def automargin(self, val):
self["automargin"] = val
@property
def customdata(self):
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def direction(self):
return self["direction"]
@direction.setter
def direction(self, val):
self["direction"] = val
@property
def dlabel(self):
return self["dlabel"]
@dlabel.setter
def dlabel(self, val):
self["dlabel"] = val
@property
def domain(self):
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
@property
def hole(self):
return self["hole"]
@hole.setter
def hole(self, val):
self["hole"] = val
@property
def hoverinfo(self):
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatesrc(self):
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def insidetextfont(self):
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
@property
def insidetextorientation(self):
return self["insidetextorientation"]
@insidetextorientation.setter
def insidetextorientation(self, val):
self["insidetextorientation"] = val
@property
def label0(self):
return self["label0"]
@label0.setter
def label0(self, val):
self["label0"] = val
@property
def labels(self):
return self["labels"]
@labels.setter
def labels(self, val):
self["labels"] = val
@property
def labelssrc(self):
return self["labelssrc"]
@labelssrc.setter
def labelssrc(self, val):
self["labelssrc"] = val
@property
def legendgroup(self):
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def marker(self):
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def meta(self):
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def outsidetextfont(self):
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
@property
def pull(self):
return self["pull"]
@pull.setter
def pull(self, val):
self["pull"] = val
@property
def pullsrc(self):
return self["pullsrc"]
@pullsrc.setter
def pullsrc(self, val):
self["pullsrc"] = val
@property
def rotation(self):
return self["rotation"]
@rotation.setter
def rotation(self, val):
self["rotation"] = val
@property
def scalegroup(self):
return self["scalegroup"]
@scalegroup.setter
def scalegroup(self, val):
self["scalegroup"] = val
@property
def showlegend(self):
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def sort(self):
return self["sort"]
@sort.setter
def sort(self, val):
self["sort"] = val
@property
def stream(self):
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textfont(self):
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def textinfo(self):
return self["textinfo"]
@textinfo.setter
def textinfo(self, val):
self["textinfo"] = val
@property
def textposition(self):
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
@property
def textpositionsrc(self):
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
@property
def textsrc(self):
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def texttemplate(self):
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
@property
def texttemplatesrc(self):
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
@property
def title(self):
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def titlefont(self):
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
@property
def titleposition(self):
return self["titleposition"]
@titleposition.setter
def titleposition(self, val):
self["titleposition"] = val
@property
def uid(self):
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def values(self):
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
@property
def valuessrc(self):
return self["valuessrc"]
@valuessrc.setter
def valuessrc(self, val):
self["valuessrc"] = val
@property
def visible(self):
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
automargin
Determines whether outside text labels can push the
margins.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
direction
Specifies the direction at which succeeding sectors
follow one another.
dlabel
Sets the label step. See `label0` for more info.
domain
:class:`plotly.graph_objects.pie.Domain` instance or
dict with compatible properties
hole
Sets the fraction of the radius to cut out of the pie.
Use this to make a donut chart.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.pie.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `label`, `color`, `value`,
`percent` and `text`. Anything contained in tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
insidetextorientation
Controls the orientation of the text inside chart
sectors. When set to "auto", text may be oriented in
any direction in order to be as big as possible in the
middle of a sector. The "horizontal" option orients
text to be parallel with the bottom of the chart, and
may make text smaller in order to achieve that goal.
The "radial" option orients text along the radius of
the sector. The "tangential" option orients text
perpendicular to the radius of the sector.
label0
Alternate to `labels`. Builds a numeric set of labels.
Use with `dlabel` where `label0` is the starting label
and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or simply count
occurrences if `values` is not provided. For other
array attributes (including color) we use the first
non-empty entry among all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.pie.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.pie.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector.
pull
Sets the fraction of larger radius to pull the sectors
out from the center. This can be a constant to pull all
slices apart from each other equally or an array to
highlight one or more slices.
pullsrc
Sets the source reference on Chart Studio Cloud for
pull .
rotation
Instead of the first slice starting at 12 o'clock,
rotate to some other angle.
scalegroup
If there are multiple pie charts that should be sized
according to their totals, link them by providing a
non-empty group id here shared by every trace in the
same group.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.pie.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attributes
that can be specified per-point (the ones that are
`arrayOk: true`) are available. variables `label`,
`color`, `value`, `percent` and `text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
title
:class:`plotly.graph_objects.pie.Title` instance or
dict with compatible properties
titlefont
Deprecated: Please use pie.title.font instead. Sets the
font used for `title`. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleposition
Deprecated: Please use pie.title.position instead.
Specifies the location of the `title`. Note that the
title's position used to be set by the now deprecated
`titleposition` attribute.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we count
occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleposition": ("title", "position"),
}
def __init__(
self,
arg=None,
automargin=None,
customdata=None,
customdatasrc=None,
direction=None,
dlabel=None,
domain=None,
hole=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
insidetextorientation=None,
label0=None,
labels=None,
labelssrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
outsidetextfont=None,
pull=None,
pullsrc=None,
rotation=None,
scalegroup=None,
showlegend=None,
sort=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
title=None,
titlefont=None,
titleposition=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs
):
super(Pie, self).__init__("pie")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Pie
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Pie`"""
)
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
_v = arg.pop("automargin", None)
_v = automargin if automargin is not None else _v
if _v is not None:
self["automargin"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("direction", None)
_v = direction if direction is not None else _v
if _v is not None:
self["direction"] = _v
_v = arg.pop("dlabel", None)
_v = dlabel if dlabel is not None else _v
if _v is not None:
self["dlabel"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("hole", None)
_v = hole if hole is not None else _v
if _v is not None:
self["hole"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("insidetextorientation", None)
_v = insidetextorientation if insidetextorientation is not None else _v
if _v is not None:
self["insidetextorientation"] = _v
_v = arg.pop("label0", None)
_v = label0 if label0 is not None else _v
if _v is not None:
self["label0"] = _v
_v = arg.pop("labels", None)
_v = labels if labels is not None else _v
if _v is not None:
self["labels"] = _v
_v = arg.pop("labelssrc", None)
_v = labelssrc if labelssrc is not None else _v
if _v is not None:
self["labelssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("outsidetextfont", None)
_v = outsidetextfont if outsidetextfont is not None else _v
if _v is not None:
self["outsidetextfont"] = _v
_v = arg.pop("pull", None)
_v = pull if pull is not None else _v
if _v is not None:
self["pull"] = _v
_v = arg.pop("pullsrc", None)
_v = pullsrc if pullsrc is not None else _v
if _v is not None:
self["pullsrc"] = _v
_v = arg.pop("rotation", None)
_v = rotation if rotation is not None else _v
if _v is not None:
self["rotation"] = _v
_v = arg.pop("scalegroup", None)
_v = scalegroup if scalegroup is not None else _v
if _v is not None:
self["scalegroup"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("sort", None)
_v = sort if sort is not None else _v
if _v is not None:
self["sort"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleposition", None)
_v = titleposition if titleposition is not None else _v
if _v is not None:
self["titleposition"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
self._props["type"] = "pie"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| true | true |
1c351047d4d3a9a39b90f064887092fa28a80ef7 | 68,641 | py | Python | python/ccxt/kucoinfutures.py | mattepozz/ccxt | f60278e707af6f6baa55ee027a907bd72d852201 | [
"MIT"
] | null | null | null | python/ccxt/kucoinfutures.py | mattepozz/ccxt | f60278e707af6f6baa55ee027a907bd72d852201 | [
"MIT"
] | null | null | null | python/ccxt/kucoinfutures.py | mattepozz/ccxt | f60278e707af6f6baa55ee027a907bd72d852201 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.kucoin import kucoin
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class kucoinfutures(kucoin):
    def describe(self):
        """Return the exchange metadata dictionary for KuCoin Futures.

        Extends the base kucoin description with futures-specific URLs,
        per-endpoint rate-limit weights, tiered fee schedules, error-code
        mappings, supported timeframes and default options.  Consumed by
        the ccxt base Exchange machinery at construction time.
        """
        return self.deep_extend(super(kucoinfutures, self).describe(), {
            'id': 'kucoinfutures',
            'name': 'KuCoin Futures',
            'countries': ['SC'],
            'rateLimit': 75,
            'version': 'v1',
            'certified': False,
            'pro': False,
            'comment': 'Platform 2.0',
            'quoteJsonNumbers': False,
            # unified-method capability flags
            'has': {
                'CORS': None,
                'spot': False,
                'margin': False,
                'swap': True,
                'future': True,
                'option': False,
                'addMargin': True,
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchBorrowRatesPerSymbol': False,
                'fetchClosedOrders': True,
                'fetchCurrencies': False,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchFundingFee': True,
                'fetchFundingHistory': True,
                'fetchFundingRate': True,
                'fetchFundingRateHistory': False,
                'fetchIndexOHLCV': False,
                'fetchL3OrderBook': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchPositions': True,
                'fetchPremiumIndexOHLCV': False,
                'fetchStatus': True,
                'fetchTicker': True,
                'fetchTickers': False,
                'fetchTime': True,
                'fetchTrades': True,
                'fetchWithdrawals': True,
                'setMarginMode': False,
                'transfer': True,
                'withdraw': None,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/147508995-9e35030a-d046-43a1-a006-6fabd981b554.jpg',
                'doc': [
                    'https://docs.kucoin.com/futures',
                    'https://docs.kucoin.com',
                ],
                'www': 'https://futures.kucoin.com/',
                'referral': 'https://futures.kucoin.com/?rcode=E5wkqe',
                'api': {
                    'public': 'https://openapi-v2.kucoin.com',
                    'private': 'https://openapi-v2.kucoin.com',
                    'futuresPrivate': 'https://api-futures.kucoin.com',
                    'futuresPublic': 'https://api-futures.kucoin.com',
                },
                'test': {
                    'public': 'https://openapi-sandbox.kucoin.com',
                    'private': 'https://openapi-sandbox.kucoin.com',
                    'futuresPrivate': 'https://api-sandbox-futures.kucoin.com',
                    'futuresPublic': 'https://api-sandbox-futures.kucoin.com',
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'password': True,
            },
            # endpoint path -> rate-limit weight
            'api': {
                'futuresPublic': {
                    'get': {
                        'contracts/active': 1,
                        'contracts/{symbol}': 1,
                        'ticker': 1,
                        'level2/snapshot': 1.33,
                        'level2/depth{limit}': 1,
                        'level2/message/query': 1,
                        'level3/message/query': 1,  # deprecated,level3/snapshot is suggested
                        'level3/snapshot': 1,  # v2
                        'trade/history': 1,
                        'interest/query': 1,
                        'index/query': 1,
                        'mark-price/{symbol}/current': 1,
                        'premium/query': 1,
                        'funding-rate/{symbol}/current': 1,
                        'timestamp': 1,
                        'status': 1,
                        'kline/query': 1,
                    },
                    'post': {
                        'bullet-public': 1,
                    },
                },
                'futuresPrivate': {
                    'get': {
                        'account-overview': 1.33,
                        'transaction-history': 4.44,
                        'deposit-address': 1,
                        'deposit-list': 1,
                        'withdrawals/quotas': 1,
                        'withdrawal-list': 1,
                        'transfer-list': 1,
                        'orders': 1.33,
                        'stopOrders': 1,
                        'recentDoneOrders': 1,
                        'orders/{orderId}': 1,  # ?clientOid={client-order-id} # get order by orderId
                        'orders/byClientOid': 1,  # ?clientOid=eresc138b21023a909e5ad59 # get order by clientOid
                        'fills': 4.44,
                        'recentFills': 4.44,
                        'openOrderStatistics': 1,
                        'position': 1,
                        'positions': 4.44,
                        'funding-history': 4.44,
                    },
                    'post': {
                        'withdrawals': 1,
                        'transfer-out': 1,  # v2
                        'orders': 1.33,
                        'position/margin/auto-deposit-status': 1,
                        'position/margin/deposit-margin': 1,
                        'bullet-private': 1,
                    },
                    'delete': {
                        'withdrawals/{withdrawalId}': 1,
                        'cancel/transfer-out': 1,
                        'orders/{orderId}': 1,
                        'orders': 4.44,
                        'stopOrders': 1,
                    },
                },
            },
            'precisionMode': TICK_SIZE,
            # HTTP / business error code -> ccxt exception class
            'exceptions': {
                'exact': {
                    '400': BadRequest,  # Bad Request -- Invalid request format
                    '401': AuthenticationError,  # Unauthorized -- Invalid API Key
                    '403': NotSupported,  # Forbidden -- The request is forbidden
                    '404': NotSupported,  # Not Found -- The specified resource could not be found
                    '405': NotSupported,  # Method Not Allowed -- You tried to access the resource with an invalid method.
                    '415': BadRequest,  # Content-Type -- application/json
                    '429': RateLimitExceeded,  # Too Many Requests -- Access limit breached
                    '500': ExchangeNotAvailable,  # Internal Server Error -- We had a problem with our server. Try again later.
                    '503': ExchangeNotAvailable,  # Service Unavailable -- We're temporarily offline for maintenance. Please try again later.
                    '100001': InvalidOrder,  # {"code":"100001","msg":"Unavailable to enable both \"postOnly\" and \"hidden\""}
                    '100004': BadRequest,  # {"code":"100004","msg":"Order is in not cancelable state"}
                    '101030': PermissionDenied,  # {"code":"101030","msg":"You haven't yet enabled the margin trading"}
                    '200004': InsufficientFunds,
                    '230003': InsufficientFunds,  # {"code":"230003","msg":"Balance insufficient!"}
                    '260100': InsufficientFunds,  # {"code":"260100","msg":"account.noBalance"}
                    '300003': InsufficientFunds,
                    '300012': InvalidOrder,
                    '400001': AuthenticationError,  # Any of KC-API-KEY, KC-API-SIGN, KC-API-TIMESTAMP, KC-API-PASSPHRASE is missing in your request header.
                    '400002': InvalidNonce,  # KC-API-TIMESTAMP Invalid -- Time differs from server time by more than 5 seconds
                    '400003': AuthenticationError,  # KC-API-KEY not exists
                    '400004': AuthenticationError,  # KC-API-PASSPHRASE error
                    '400005': AuthenticationError,  # Signature error -- Please check your signature
                    '400006': AuthenticationError,  # The IP address is not in the API whitelist
                    '400007': AuthenticationError,  # Access Denied -- Your API key does not have sufficient permissions to access the URI
                    '404000': NotSupported,  # URL Not Found -- The requested resource could not be found
                    '400100': BadRequest,  # Parameter Error -- You tried to access the resource with invalid parameters
                    '411100': AccountSuspended,  # User is frozen -- Please contact us via support center
                    '500000': ExchangeNotAvailable,  # Internal Server Error -- We had a problem with our server. Try again later.
                },
            },
            # fee tiers keyed by 30-day trading volume
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'taker': self.parse_number('0.0006'),
                    'maker': self.parse_number('0.0002'),
                    'tiers': {
                        'taker': [
                            [self.parse_number('0'), self.parse_number('0.0006')],
                            [self.parse_number('50'), self.parse_number('0.0006')],
                            [self.parse_number('200'), self.parse_number('0.0006')],
                            [self.parse_number('500'), self.parse_number('0.0005')],
                            [self.parse_number('1000'), self.parse_number('0.0004')],
                            [self.parse_number('2000'), self.parse_number('0.0004')],
                            [self.parse_number('4000'), self.parse_number('0.00038')],
                            [self.parse_number('8000'), self.parse_number('0.00035')],
                            [self.parse_number('15000'), self.parse_number('0.00032')],
                            [self.parse_number('25000'), self.parse_number('0.0003')],
                            [self.parse_number('40000'), self.parse_number('0.0003')],
                            [self.parse_number('60000'), self.parse_number('0.0003')],
                            [self.parse_number('80000'), self.parse_number('0.0003')],
                        ],
                        'maker': [
                            [self.parse_number('0'), self.parse_number('0.02')],
                            [self.parse_number('50'), self.parse_number('0.015')],
                            [self.parse_number('200'), self.parse_number('0.01')],
                            [self.parse_number('500'), self.parse_number('0.01')],
                            [self.parse_number('1000'), self.parse_number('0.01')],
                            [self.parse_number('2000'), self.parse_number('0')],
                            [self.parse_number('4000'), self.parse_number('0')],
                            [self.parse_number('8000'), self.parse_number('0')],
                            [self.parse_number('15000'), self.parse_number('-0.003')],
                            [self.parse_number('25000'), self.parse_number('-0.006')],
                            [self.parse_number('40000'), self.parse_number('-0.009')],
                            [self.parse_number('60000'), self.parse_number('-0.012')],
                            [self.parse_number('80000'), self.parse_number('-0.015')],
                        ],
                    },
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {},
                    'deposit': {},
                },
            },
            # exchange-specific currency code aliases
            'commonCurrencies': {
                'HOT': 'HOTNOW',
                'EDGE': 'DADI',  # https://github.com/ccxt/ccxt/issues/5756
                'WAX': 'WAXP',
                'TRY': 'Trias',
                'VAI': 'VAIOT',
                'XBT': 'BTC',
            },
            # unified timeframe -> API "granularity" in minutes(None = unsupported)
            'timeframes': {
                '1m': 1,
                '3m': None,
                '5m': 5,
                '15m': 15,
                '30m': 30,
                '1h': 60,
                '2h': 120,
                '4h': 240,
                '6h': None,
                '8h': 480,
                '12h': 720,
                '1d': 1440,
                '1w': 10080,
            },
            'options': {
                'version': 'v1',
                'symbolSeparator': '-',
                'defaultType': 'swap',
                'marginTypes': {},
                # endpoint versions
                'versions': {
                    'futuresPrivate': {
                        'POST': {
                            'transfer-out': 'v2',
                        },
                    },
                    'futuresPublic': {
                        'GET': {
                            'level3/snapshot': 'v2',
                        },
                    },
                },
                'networks': {
                    'OMNI': 'omni',
                    'ERC20': 'eth',
                    'TRC20': 'trx',
                },
            },
        })
def fetch_accounts(self, params={}):
raise BadRequest(self.id + ' has no method fetchAccounts')
def fetch_status(self, params={}):
response = self.futuresPublicGetStatus(params)
#
# {
# "code":"200000",
# "data":{
# "msg":"",
# "status":"open"
# }
# }
#
data = self.safe_value(response, 'data', {})
status = self.safe_value(data, 'status')
if status is not None:
status = 'ok' if (status == 'open') else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
    def fetch_markets(self, params={}):
        """Fetch all active futures contracts and parse them into unified markets.

        :param dict params: extra parameters passed through to the endpoint
        :returns list[dict]: unified market structures(swap and dated future)
        """
        response = self.futuresPublicGetContractsActive(params)
        #
        # {
        #     "code": "200000",
        #     "data": {
        #         "symbol": "ETHUSDTM",
        #         "rootSymbol": "USDT",
        #         "type": "FFWCSX",
        #         "firstOpenDate": 1591086000000,
        #         "expireDate": null,
        #         "settleDate": null,
        #         "baseCurrency": "ETH",
        #         "quoteCurrency": "USDT",
        #         "settleCurrency": "USDT",
        #         "maxOrderQty": 1000000,
        #         "maxPrice": 1000000.0000000000,
        #         "lotSize": 1,
        #         "tickSize": 0.05,
        #         "indexPriceTickSize": 0.01,
        #         "multiplier": 0.01,
        #         "initialMargin": 0.01,
        #         "maintainMargin": 0.005,
        #         "maxRiskLimit": 1000000,
        #         "minRiskLimit": 1000000,
        #         "riskStep": 500000,
        #         "makerFeeRate": 0.00020,
        #         "takerFeeRate": 0.00060,
        #         "takerFixFee": 0.0000000000,
        #         "makerFixFee": 0.0000000000,
        #         "settlementFee": null,
        #         "isDeleverage": True,
        #         "isQuanto": True,
        #         "isInverse": False,
        #         "markMethod": "FairPrice",
        #         "fairMethod": "FundingRate",
        #         "fundingBaseSymbol": ".ETHINT8H",
        #         "fundingQuoteSymbol": ".USDTINT8H",
        #         "fundingRateSymbol": ".ETHUSDTMFPI8H",
        #         "indexSymbol": ".KETHUSDT",
        #         "settlementSymbol": "",
        #         "status": "Open",
        #         "fundingFeeRate": 0.000535,
        #         "predictedFundingFeeRate": 0.002197,
        #         "openInterest": "8724443",
        #         "turnoverOf24h": 341156641.03354263,
        #         "volumeOf24h": 74833.54000000,
        #         "markPrice": 4534.07,
        #         "indexPrice":4531.92,
        #         "lastTradePrice": 4545.4500000000,
        #         "nextFundingRateTime": 25481884,
        #         "maxLeverage": 100,
        #         "sourceExchanges": [
        #             "huobi",
        #             "Okex",
        #             "Binance",
        #             "Kucoin",
        #             "Poloniex",
        #             "Hitbtc"
        #         ],
        #         "premiumsSymbol1M": ".ETHUSDTMPI",
        #         "premiumsSymbol8H": ".ETHUSDTMPI8H",
        #         "fundingBaseSymbol1M": ".ETHINT",
        #         "fundingQuoteSymbol1M": ".USDTINT",
        #         "lowPrice": 4456.90,
        #         "highPrice": 4674.25,
        #         "priceChgPct": 0.0046,
        #         "priceChg": 21.15
        #     }
        # }
        #
        result = []
        data = self.safe_value(response, 'data')
        for i in range(0, len(data)):
            market = data[i]
            id = self.safe_string(market, 'symbol')
            # a non-null, non-zero expireDate marks a dated future; otherwise a perpetual swap
            expiry = self.safe_integer(market, 'expireDate')
            future = True if expiry else False
            swap = not future
            baseId = self.safe_string(market, 'baseCurrency')
            quoteId = self.safe_string(market, 'quoteCurrency')
            settleId = self.safe_string(market, 'settleCurrency')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            settle = self.safe_currency_code(settleId)
            symbol = base + '/' + quote + ':' + settle
            type = 'swap'
            if future:
                # dated futures get a YYMMDD suffix, e.g. BTC/USD:BTC-220930
                symbol = symbol + '-' + self.yymmdd(expiry, '')
                type = 'future'
            # NOTE(review): the base/quote size keys below do not appear in the
            # documented contract payload above, so these limits likely resolve
            # to None - confirm against the live API response
            baseMaxSize = self.safe_number(market, 'baseMaxSize')
            baseMinSizeString = self.safe_string(market, 'baseMinSize')
            quoteMaxSizeString = self.safe_string(market, 'quoteMaxSize')
            baseMinSize = self.parse_number(baseMinSizeString)
            quoteMaxSize = self.parse_number(quoteMaxSizeString)
            quoteMinSize = self.safe_number(market, 'quoteMinSize')
            inverse = self.safe_value(market, 'isInverse')
            status = self.safe_string(market, 'status')
            multiplier = self.safe_string(market, 'multiplier')
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'settle': settle,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': settleId,
                'type': type,
                'spot': False,
                'margin': False,
                'swap': swap,
                'future': future,
                'option': False,
                'active': (status == 'Open'),
                'contract': True,
                'linear': not inverse,
                'inverse': inverse,
                'taker': self.safe_number(market, 'takerFeeRate'),
                'maker': self.safe_number(market, 'makerFeeRate'),
                # multiplier may be negative for some inverse contracts
                'contractSize': self.parse_number(Precise.string_abs(multiplier)),
                'expiry': expiry,
                'expiryDatetime': self.iso8601(expiry),
                'strike': None,
                'optionType': None,
                'precision': {
                    'price': self.safe_number(market, 'tickSize'),
                    'amount': self.safe_number(market, 'lotSize'),
                },
                'limits': {
                    'leverage': {
                        'min': self.parse_number('1'),
                        'max': self.safe_number(market, 'maxLeverage'),
                    },
                    'amount': {
                        'min': baseMinSize,
                        'max': baseMaxSize,
                    },
                    'price': {
                        'min': None,
                        'max': self.parse_number(Precise.string_div(quoteMaxSizeString, baseMinSizeString)),
                    },
                    'cost': {
                        'min': quoteMinSize,
                        'max': quoteMaxSize,
                    },
                },
                'info': market,
            })
        return result
def fetch_time(self, params={}):
response = self.futuresPublicGetTimestamp(params)
#
# {
# code: "200000",
# data: 1637385119302,
# }
#
return self.safe_number(response, 'data')
def fetch_ohlcv(self, symbol, timeframe='15m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
marketId = market['id']
request = {
'symbol': marketId,
'granularity': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe) * 1000
endAt = self.milliseconds()
if since is not None:
request['from'] = since
if limit is None:
limit = self.safe_integer(self.options, 'fetchOHLCVLimit', 200)
endAt = self.sum(since, limit * duration)
elif limit is not None:
since = endAt - limit * duration
request['from'] = since
request['to'] = endAt
response = self.futuresPublicGetKlineQuery(self.extend(request, params))
#
# {
# "code": "200000",
# "data": [
# [1636459200000, 4779.3, 4792.1, 4768.7, 4770.3, 78051],
# [1636460100000, 4770.25, 4778.55, 4757.55, 4777.25, 80164],
# [1636461000000, 4777.25, 4791.45, 4774.5, 4791.3, 51555]
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# "1545904980000", # Start time of the candle cycle
# "0.058", # opening price
# "0.049", # closing price
# "0.058", # highest price
# "0.049", # lowest price
# "0.018", # base volume
# "0.000945", # quote volume
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def create_deposit_address(self, code, params={}):
raise BadRequest(self.id + ' has no method createDepositAddress')
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
currencyId = currency['id']
request = {
'currency': currencyId, # Currency,including XBT,USDT
}
response = self.futuresPrivateGetDepositAddress(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",//Deposit address
# "memo": null//Address tag. If the returned value is null, it means that the requested token has no memo. If you are to transfer funds from another platform to KuCoin Futures and if the token to be #transferred has memo(tag), you need to fill in the memo to ensure the transferred funds will be sent #to the address you specified.
# }
# }
#
data = self.safe_value(response, 'data', {})
address = self.safe_string(data, 'address')
if currencyId != 'NIM':
# contains spaces
self.check_address(address)
return {
'info': response,
'currency': currencyId,
'address': address,
'tag': self.safe_string(data, 'memo'),
'network': self.safe_string(data, 'chain'),
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
level = self.safe_number(params, 'level')
if level != 2 and level is not None:
raise BadRequest(self.id + ' fetchOrderBook can only return level 2')
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
if (limit == 20) or (limit == 100):
request['limit'] = limit
else:
raise BadRequest(self.id + ' fetchOrderBook limit argument must be 20 or 100')
else:
request['limit'] = 20
response = self.futuresPublicGetLevel2DepthLimit(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "symbol": "XBTUSDM", #Symbol
# "sequence": 100, #Ticker sequence number
# "asks": [
# ["5000.0", 1000], #Price, quantity
# ["6000.0", 1983] #Price, quantity
# ],
# "bids": [
# ["3200.0", 800], #Price, quantity
# ["3100.0", 100] #Price, quantity
# ],
# "ts": 1604643655040584408 # timestamp
# }
# }
#
data = self.safe_value(response, 'data', {})
timestamp = int(self.safe_integer(data, 'ts') / 1000000)
orderbook = self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 0, 1)
orderbook['nonce'] = self.safe_integer(data, 'sequence')
return orderbook
def fetch_l3_order_book(self, symbol, limit=None, params={}):
raise BadRequest(self.id + ' only can only fetch the L2 order book')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.futuresPublicGetTicker(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "sequence": 1638444978558,
# "symbol": "ETHUSDTM",
# "side": "sell",
# "size": 4,
# "price": "4229.35",
# "bestBidSize": 2160,
# "bestBidPrice": "4229.0",
# "bestAskPrice": "4229.05",
# "tradeId": "61aaa8b777a0c43055fe4851",
# "ts": 1638574296209786785,
# "bestAskSize": 36,
# }
# }
#
return self.parse_ticker(response['data'], market)
def parse_ticker(self, ticker, market=None):
#
# {
# "code": "200000",
# "data": {
# "sequence": 1629930362547,
# "symbol": "ETHUSDTM",
# "side": "buy",
# "size": 130,
# "price": "4724.7",
# "bestBidSize": 5,
# "bestBidPrice": "4724.6",
# "bestAskPrice": "4724.65",
# "tradeId": "618d2a5a77a0c4431d2335f4",
# "ts": 1636641371963227600,
# "bestAskSize": 1789
# }
# }
#
last = self.safe_string(ticker, 'price')
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market, '-')
timestamp = Precise.string_div(self.safe_string(ticker, 'ts'), '1000000')
return self.safe_ticker({
'symbol': market['symbol'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_string(ticker, 'bestBidPrice'),
'bidVolume': self.safe_string(ticker, 'bestBidSize'),
'ask': self.safe_string(ticker, 'bestAskPrice'),
'askVolume': self.safe_string(ticker, 'bestAskSize'),
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': ticker,
}, market, False)
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
#
# Private
# @param symbol(string): The pair for which the contract was traded
# @param since(number): The unix start time of the first funding payment requested
# @param limit(number): The number of results to return
# @param params(dict): Additional parameters to send to the API
# @param return: Data for the history of the accounts funding payments for futures contracts
#
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startAt'] = since
if limit is not None:
# * Since is ignored if limit is defined
request['maxCount'] = limit
response = self.futuresPrivateGetFundingHistory(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "dataList": [
# {
# "id": 239471298749817,
# "symbol": "ETHUSDTM",
# "timePoint": 1638532800000,
# "fundingRate": 0.000100,
# "markPrice": 4612.8300000000,
# "positionQty": 12,
# "positionCost": 553.5396000000,
# "funding": -0.0553539600,
# "settleCurrency": "USDT"
# },
# ...
# ],
# "hasMore": True
# }
# }
#
data = self.safe_value(response, 'data')
dataList = self.safe_value(data, 'dataList')
fees = []
for i in range(0, len(dataList)):
listItem = dataList[i]
timestamp = self.safe_integer(listItem, 'timePoint')
fees.append({
'info': listItem,
'symbol': symbol,
'code': self.safe_currency_code(self.safe_string(listItem, 'settleCurrency')),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': self.safe_number(listItem, 'id'),
'amount': self.safe_number(listItem, 'funding'),
'fundingRate': self.safe_number(listItem, 'fundingRate'),
'markPrice': self.safe_number(listItem, 'markPrice'),
'positionQty': self.safe_number(listItem, 'positionQty'),
'positionCost': self.safe_number(listItem, 'positionCost'),
})
return fees
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.futuresPrivateGetPositions(params)
#
# {
# "code": "200000",
# "data": [
# {
# "id": "615ba79f83a3410001cde321",
# "symbol": "ETHUSDTM",
# "autoDeposit": False,
# "maintMarginReq": 0.005,
# "riskLimit": 1000000,
# "realLeverage": 18.61,
# "crossMode": False,
# "delevPercentage": 0.86,
# "openingTimestamp": 1638563515618,
# "currentTimestamp": 1638576872774,
# "currentQty": 2,
# "currentCost": 83.64200000,
# "currentComm": 0.05018520,
# "unrealisedCost": 83.64200000,
# "realisedGrossCost": 0.00000000,
# "realisedCost": 0.05018520,
# "isOpen": True,
# "markPrice": 4225.01,
# "markValue": 84.50020000,
# "posCost": 83.64200000,
# "posCross": 0.0000000000,
# "posInit": 3.63660870,
# "posComm": 0.05236717,
# "posLoss": 0.00000000,
# "posMargin": 3.68897586,
# "posMaint": 0.50637594,
# "maintMargin": 4.54717586,
# "realisedGrossPnl": 0.00000000,
# "realisedPnl": -0.05018520,
# "unrealisedPnl": 0.85820000,
# "unrealisedPnlPcnt": 0.0103,
# "unrealisedRoePcnt": 0.2360,
# "avgEntryPrice": 4182.10,
# "liquidationPrice": 4023.00,
# "bankruptPrice": 4000.25,
# "settleCurrency": "USDT",
# "isInverse": False
# }
# ]
# }
#
return self.parse_positions(self.safe_value(response, 'data'))
def parse_positions(self, positions):
result = []
for i in range(0, len(positions)):
result.append(self.parse_position(positions[i]))
return result
def parse_position(self, position, market=None):
#
# {
# "code": "200000",
# "data": [
# {
# "id": "615ba79f83a3410001cde321", # Position ID
# "symbol": "ETHUSDTM", # Symbol
# "autoDeposit": False, # Auto deposit margin or not
# "maintMarginReq": 0.005, # Maintenance margin requirement
# "riskLimit": 1000000, # Risk limit
# "realLeverage": 25.92, # Leverage of the order
# "crossMode": False, # Cross mode or not
# "delevPercentage": 0.76, # ADL ranking percentile
# "openingTimestamp": 1638578546031, # Open time
# "currentTimestamp": 1638578563580, # Current timestamp
# "currentQty": 2, # Current postion quantity
# "currentCost": 83.787, # Current postion value
# "currentComm": 0.0167574, # Current commission
# "unrealisedCost": 83.787, # Unrealised value
# "realisedGrossCost": 0.0, # Accumulated realised gross profit value
# "realisedCost": 0.0167574, # Current realised position value
# "isOpen": True, # Opened position or not
# "markPrice": 4183.38, # Mark price
# "markValue": 83.6676, # Mark value
# "posCost": 83.787, # Position value
# "posCross": 0.0, # added margin
# "posInit": 3.35148, # Leverage margin
# "posComm": 0.05228309, # Bankruptcy cost
# "posLoss": 0.0, # Funding fees paid out
# "posMargin": 3.40376309, # Position margin
# "posMaint": 0.50707892, # Maintenance margin
# "maintMargin": 3.28436309, # Position margin
# "realisedGrossPnl": 0.0, # Accumulated realised gross profit value
# "realisedPnl": -0.0167574, # Realised profit and loss
# "unrealisedPnl": -0.1194, # Unrealised profit and loss
# "unrealisedPnlPcnt": -0.0014, # Profit-loss ratio of the position
# "unrealisedRoePcnt": -0.0356, # Rate of return on investment
# "avgEntryPrice": 4189.35, # Average entry price
# "liquidationPrice": 4044.55, # Liquidation price
# "bankruptPrice": 4021.75, # Bankruptcy price
# "settleCurrency": "USDT", # Currency used to clear and settle the trades
# "isInverse": False
# }
# ]
# }
#
symbol = self.safe_string(position, 'symbol')
market = self.safe_market(symbol, market)
timestamp = self.safe_number(position, 'currentTimestamp')
size = self.safe_string(position, 'currentQty')
side = None
if Precise.string_gt(size, '0'):
side = 'long'
elif Precise.string_lt(size, '0'):
side = 'short'
notional = Precise.string_abs(self.safe_string(position, 'posCost'))
initialMargin = self.safe_string(position, 'posInit')
initialMarginPercentage = Precise.string_div(initialMargin, notional)
# marginRatio = Precise.string_div(maintenanceRate, collateral)
unrealisedPnl = self.safe_string(position, 'unrealisedPnl')
crossMode = self.safe_value(position, 'crossMode')
# currently crossMode is always set to False and only isolated positions are supported
marginType = 'cross' if crossMode else 'isolated'
return {
'info': position,
'symbol': self.safe_string(market, 'symbol'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'initialMargin': self.parse_number(initialMargin),
'initialMarginPercentage': self.parse_number(initialMarginPercentage),
'maintenanceMargin': self.safe_number(position, 'posMaint'),
'maintenanceMarginPercentage': self.safe_number(position, 'maintMarginReq'),
'entryPrice': self.safe_number(position, 'avgEntryPrice'),
'notional': self.parse_number(notional),
'leverage': self.safe_number(position, 'realLeverage'),
'unrealizedPnl': self.parse_number(unrealisedPnl),
'contracts': self.parse_number(Precise.string_abs(size)),
'contractSize': self.safe_value(market, 'contractSize'),
# realisedPnl: position['realised_pnl'],
'marginRatio': None,
'liquidationPrice': self.safe_number(position, 'liquidationPrice'),
'markPrice': self.safe_number(position, 'markPrice'),
'collateral': self.safe_number(position, 'maintMargin'),
'marginType': marginType,
'side': side,
'percentage': self.parse_number(Precise.string_div(unrealisedPnl, initialMargin)),
}
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create a futures order.

        :param str symbol: unified market symbol
        :param str type: 'limit' or 'market'
        :param str side: 'buy' or 'sell'
        :param float amount: order size in contracts(must be >= 1)
        :param float|None price: limit price, required for limit orders
        :param dict params: extra parameters(stopPrice, stopPriceType,
            timeInForce, postOnly, hidden, iceberg, visibleSize, leverage, ...)
        :returns dict: a minimal unified order structure(only 'id' is filled)
        :raises InvalidOrder: if amount < 1
        :raises ArgumentsRequired: for limit orders without a price, trigger
            orders without stopPriceType, or iceberg orders without visibleSize
        """
        self.load_markets()
        market = self.market(symbol)
        # required param, cannot be used twice
        clientOrderId = self.safe_string_2(params, 'clientOid', 'clientOrderId', self.uuid())
        params = self.omit(params, ['clientOid', 'clientOrderId'])
        if amount < 1:
            raise InvalidOrder(self.id + ' createOrder() minimum contract order amount is 1')
        preciseAmount = int(self.amount_to_precision(symbol, amount))
        request = {
            'clientOid': clientOrderId,
            'side': side,
            'symbol': market['id'],
            'type': type, # limit or market
            'size': preciseAmount,
            'leverage': 1,
            # 'remark': '', # optional remark for the order, length cannot exceed 100 utf8 characters
            # 'tradeType': 'TRADE', # TRADE, MARGIN_TRADE # not used with margin orders
            # limit orders ---------------------------------------------------
            # 'timeInForce': 'GTC', # GTC, GTT, IOC, or FOK(default is GTC), limit orders only
            # 'cancelAfter': long, # cancel after n seconds, requires timeInForce to be GTT
            # 'postOnly': False, # Post only flag, invalid when timeInForce is IOC or FOK
            # 'hidden': False, # Order will not be displayed in the order book
            # 'iceberg': False, # Only a portion of the order is displayed in the order book
            # 'visibleSize': self.amount_to_precision(symbol, visibleSize), # The maximum visible size of an iceberg order
            # market orders --------------------------------------------------
            # 'funds': self.cost_to_precision(symbol, cost), # Amount of quote currency to use
            # stop orders ----------------------------------------------------
            # 'stop': 'loss', # loss or entry, the default is loss, requires stopPrice
            # 'stopPrice': self.price_to_precision(symbol, amount), # need to be defined if stop is specified
            # 'stopPriceType' # Either TP, IP or MP, Need to be defined if stop is specified.
            # margin orders --------------------------------------------------
            # 'marginMode': 'cross', # cross(cross mode) and isolated(isolated mode), set to cross by default, the isolated mode will be released soon, stay tuned
            # 'autoBorrow': False, # The system will first borrow you funds at the optimal interest rate and then place an order for you
            # futures orders -------------------------------------------------
            # reduceOnly #(boolean) A mark to reduce the position size only. Set to False by default. Need to set the position size when reduceOnly is True.
            # closeOrder #(boolean) A mark to close the position. Set to False by default. It will close all the positions when closeOrder is True.
            # forceHold #(boolean) A mark to forcely hold the funds for an order, even though it's an order to reduce the position size. This helps the order stay on the order book and not get canceled when the position size changes. Set to False by default.
        }
        # NOTE(review): stopPrice is never copied into "request" here - it
        # reaches the API only because it stays inside "params" and is merged
        # by extend() below; confirm this pass-through is intended
        stopPrice = self.safe_number(params, 'stopPrice')
        if stopPrice:
            request['stop'] = 'down' if (side == 'buy') else 'up'
            stopPriceType = self.safe_string(params, 'stopPriceType')
            if not stopPriceType:
                raise ArgumentsRequired(self.id + ' createOrder() trigger orders require a stopPriceType parameter to be set to TP, IP or MP(Trade Price, Index Price or Mark Price)')
        uppercaseType = type.upper()
        timeInForce = self.safe_string(params, 'timeInForce')
        if uppercaseType == 'LIMIT':
            if price is None:
                raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for limit orders')
            else:
                request['price'] = self.price_to_precision(symbol, price)
            if timeInForce is not None:
                timeInForce = timeInForce.upper()
                request['timeInForce'] = timeInForce
        postOnly = self.safe_value(params, 'postOnly', False)
        hidden = self.safe_value(params, 'hidden')
        if postOnly and hidden is not None:
            raise BadRequest(self.id + ' createOrder() does not support the postOnly parameter together with a hidden parameter')
        iceberg = self.safe_value(params, 'iceberg')
        if iceberg:
            visibleSize = self.safe_value(params, 'visibleSize')
            if visibleSize is None:
                raise ArgumentsRequired(self.id + ' createOrder() requires a visibleSize parameter for iceberg orders')
        params = self.omit(params, 'timeInForce') # Time in force only valid for limit orders, exchange error when gtc for market orders
        response = self.futuresPrivatePostOrders(self.extend(request, params))
        #
        # {
        #     code: "200000",
        #     data: {
        #         orderId: "619717484f1d010001510cde",
        #     },
        # }
        #
        data = self.safe_value(response, 'data', {})
        # the endpoint returns only the order id, so every other unified
        # field is left as None for the caller to fetch separately
        return {
            'id': self.safe_string(data, 'orderId'),
            'clientOrderId': None,
            'timestamp': None,
            'datetime': None,
            'lastTradeTimestamp': None,
            'symbol': None,
            'type': None,
            'side': None,
            'price': None,
            'amount': None,
            'cost': None,
            'average': None,
            'filled': None,
            'remaining': None,
            'status': None,
            'fee': None,
            'trades': None,
            'timeInForce': None,
            'postOnly': None,
            'stopPrice': None,
            'info': response,
        }
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
response = self.futuresPrivateDeleteOrdersOrderId(self.extend(request, params))
#
# {
# code: "200000",
# data: {
# cancelledOrderIds: [
# "619714b8b6353000014c505a",
# ],
# },
# }
#
return self.safe_value(response, 'data')
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
if symbol is not None:
request['symbol'] = self.market_id(symbol)
response = self.futuresPrivateDeleteOrders(self.extend(request, params))
# ? futuresPrivateDeleteStopOrders
# {
# code: "200000",
# data: {
# cancelledOrderIds: [
# "619714b8b6353000014c505a",
# ],
# },
# }
#
return self.safe_value(response, 'data')
def add_margin(self, symbol, amount, params={}):
self.load_markets()
market = self.market(symbol)
uuid = self.uuid()
request = {
'symbol': market['id'],
'margin': amount,
'bizNo': uuid,
}
return self.futuresPrivatePostPositionMarginDepositMargin(self.extend(request, params))
def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'status': status,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetOrders(self.extend(request, params))
responseData = self.safe_value(response, 'data', {})
orders = self.safe_value(responseData, 'items', [])
return self.parse_orders(orders, market, since, limit)
def fetch_order(self, id=None, symbol=None, params={}):
self.load_markets()
request = {}
method = 'futuresPrivateGetOrdersOrderId'
if id is None:
clientOrderId = self.safe_string_2(params, 'clientOid', 'clientOrderId')
if clientOrderId is None:
raise InvalidOrder(self.id + ' fetchOrder() requires parameter id or params.clientOid')
request['clientOid'] = clientOrderId
method = 'futuresPrivateGetOrdersByClientOid'
params = self.omit(params, ['clientOid', 'clientOrderId'])
else:
request['orderId'] = id
response = getattr(self, method)(self.extend(request, params))
market = self.market(symbol) if (symbol is not None) else None
responseData = self.safe_value(response, 'data')
return self.parse_order(responseData, market)
    def parse_order(self, order, market=None):
        """Convert a raw exchange order into a unified ccxt order structure.

        All numeric fields are kept as strings(Precise arithmetic) until
        safe_order resolves them. 'dealFunds'/'filledValue' is the leveraged
        fill value, so cost is divided back by the leverage.
        """
        marketId = self.safe_string(order, 'symbol')
        market = self.safe_market(marketId, market)
        symbol = market['symbol']
        orderId = self.safe_string(order, 'id')
        type = self.safe_string(order, 'type')
        timestamp = self.safe_integer(order, 'createdAt')
        datetime = self.iso8601(timestamp)
        price = self.safe_string(order, 'price')
        # price is zero for market order
        # omitZero is called in safeOrder2
        side = self.safe_string(order, 'side')
        feeCurrencyId = self.safe_string(order, 'feeCurrency')
        feeCurrency = self.safe_currency_code(feeCurrencyId)
        feeCost = self.safe_number(order, 'fee')
        amount = self.safe_string(order, 'size')
        filled = self.safe_string(order, 'dealSize')
        rawCost = self.safe_string_2(order, 'dealFunds', 'filledValue')
        leverage = self.safe_string(order, 'leverage')
        cost = Precise.string_div(rawCost, leverage)
        average = None
        if Precise.string_gt(filled, '0'):
            # average fill price differs between linear and inverse contracts:
            # linear value = contracts * contractSize * price,
            # inverse value = contracts * contractSize / price
            contractSize = self.safe_string(market, 'contractSize')
            if market['linear']:
                average = Precise.string_div(rawCost, Precise.string_mul(contractSize, filled))
            else:
                average = Precise.string_div(Precise.string_mul(contractSize, filled), rawCost)
        # precision reported by their api is 8 d.p.
        # average = Precise.string_div(rawCost, Precise.string_mul(filled, market['contractSize']))
        # bool
        isActive = self.safe_value(order, 'isActive', False)
        cancelExist = self.safe_value(order, 'cancelExist', False)
        # cancelExist wins over isActive: a cancelled order is never 'open'
        status = 'open' if isActive else 'closed'
        status = 'canceled' if cancelExist else status
        fee = {
            'currency': feeCurrency,
            'cost': feeCost,
        }
        clientOrderId = self.safe_string(order, 'clientOid')
        timeInForce = self.safe_string(order, 'timeInForce')
        stopPrice = self.safe_number(order, 'stopPrice')
        postOnly = self.safe_value(order, 'postOnly')
        return self.safe_order({
            'id': orderId,
            'clientOrderId': clientOrderId,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': postOnly,
            'side': side,
            'amount': amount,
            'price': price,
            'stopPrice': stopPrice,
            'cost': cost,
            'filled': filled,
            'remaining': None,
            'timestamp': timestamp,
            'datetime': datetime,
            'fee': fee,
            'status': status,
            'info': order,
            'lastTradeTimestamp': None,
            'average': average,
            'trades': None,
        }, market)
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = self.futuresPublicGetFundingRateSymbolCurrent(self.extend(request, params))
#
# {
# code: "200000",
# data: {
# symbol: ".ETHUSDTMFPI8H",
# granularity: 28800000,
# timePoint: 1637380800000,
# value: 0.0001,
# predictedValue: 0.0001,
# },
# }
#
data = self.safe_value(response, 'data')
fundingTimestamp = self.safe_number(data, 'timePoint')
return {
'info': data,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': self.safe_number(data, 'value'),
'fundingTimestamp': fundingTimestamp,
'fundingDatetime': self.iso8601(fundingTimestamp),
'nextFundingRate': self.safe_number(data, 'predictedValue'),
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
data = self.safe_value(response, 'data')
currencyId = self.safe_string(data, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(data, 'availableBalance')
account['total'] = self.safe_string(data, 'accountEquity')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
# only fetches one balance at a time
# by default it will only fetch the BTC balance of the futures account
# you can send 'currency' in params to fetch other currencies
# fetchBalance({'type': 'future', 'currency': 'USDT'})
response = self.futuresPrivateGetAccountOverview(params)
#
# {
# code: '200000',
# data: {
# accountEquity: 0.00005,
# unrealisedPNL: 0,
# marginBalance: 0.00005,
# positionMargin: 0,
# orderMargin: 0,
# frozenFunds: 0,
# availableBalance: 0.00005,
# currency: 'XBT'
# }
# }
#
return self.parse_balance(response)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
if (toAccount != 'main' and toAccount != 'funding') or (fromAccount != 'futures' and fromAccount != 'future' and fromAccount != 'contract'):
raise BadRequest(self.id + ' only supports transfers from contract(future) account to main(funding) account')
return self.transfer_out(code, amount, params)
def transfer_out(self, code, amount, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': self.safe_string(currency, 'id'), # Currency,including XBT,USDT
'amount': amount,
}
# transfer from usdm futures wallet to spot wallet
response = self.futuresPrivatePostTransferOut(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "applyId": "5bffb63303aa675e8bbe18f9" # Transfer-out request ID
# }
# }
#
data = self.safe_value(response, 'data')
timestamp = self.safe_string(data, 'updatedAt')
return {
'info': response,
'id': self.safe_string(data, 'applyId'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': code,
'amount': amount,
'fromAccount': 'future',
'toAccount': 'spot',
'status': self.safe_string(data, 'status'),
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# orderId(String) [optional] Fills for a specific order(other parameters can be ignored if specified)
# symbol(String) [optional] Symbol of the contract
# side(String) [optional] buy or sell
# type(String) [optional] limit, market, limit_stop or market_stop
# startAt(long) [optional] Start time(milisecond)
# endAt(long) [optional] End time(milisecond)
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetFills(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "currentPage": 1,
# "pageSize": 1,
# "totalNum": 251915,
# "totalPage": 251915,
# "items": [
# {
# "symbol": "XBTUSDM", # Ticker symbol of the contract
# "tradeId": "5ce24c1f0c19fc3c58edc47c", # Trade ID
# "orderId": "5ce24c16b210233c36ee321d", # Order ID
# "side": "sell", # Transaction side
# "liquidity": "taker", # Liquidity- taker or maker
# "price": "8302", # Filled price
# "size": 10, # Filled amount
# "value": "0.001204529", # Order value
# "feeRate": "0.0005", # Floating fees
# "fixFee": "0.00000006", # Fixed fees
# "feeCurrency": "XBT", # Charging currency
# "stop": "", # A mark to the stop order type
# "fee": "0.0000012022", # Transaction fee
# "orderType": "limit", # Order type
# "tradeType": "trade", # Trade type(trade, liquidation, ADL or settlement)
# "createdAt": 1558334496000, # Time the order created
# "settleCurrency": "XBT", # settlement currency
# "tradeTime": 1558334496000000000 # trade time in nanosecond
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
trades = self.safe_value(data, 'items', {})
return self.parse_trades(trades, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.futuresPublicGetTradeHistory(self.extend(request, params))
#
# {
# "code": "200000",
# "data": [
# {
# "sequence": 32114961,
# "side": "buy",
# "size": 39,
# "price": "4001.6500000000",
# "takerOrderId": "61c20742f172110001e0ebe4",
# "makerOrderId": "61c2073fcfc88100010fcb5d",
# "tradeId": "61c2074277a0c473e69029b8",
# "ts": 1640105794099993896 # filled time
# }
# ]
# }
#
trades = self.safe_value(response, 'data', [])
return self.parse_trades(trades, market, since, limit)
    def parse_trade(self, trade, market=None):
        """Convert a raw trade into a unified ccxt trade structure.

        Handles three payload shapes:
        * fetchTrades(public): {"sequence", "side", "size", "price",
          "takerOrderId", "makerOrderId", "tradeId", "ts"(nanoseconds)}
        * fetchMyTrades(private) v2: {"symbol", "tradeId", "orderId", "side",
          "liquidity", "price", "size", "funds", "fee", "feeRate",
          "feeCurrency", "type", "createdAt"(milliseconds), ...}
        * historical v1 trades carrying 'dealValue'/'dealPrice' with a
          'createdAt' expressed in seconds
        """
        marketId = self.safe_string(trade, 'symbol')
        symbol = self.safe_symbol(marketId, market, '-')
        id = self.safe_string_2(trade, 'tradeId', 'id')
        orderId = self.safe_string(trade, 'orderId')
        takerOrMaker = self.safe_string(trade, 'liquidity')
        # 'time' is in nanoseconds(public trades); scale down to milliseconds
        timestamp = self.safe_integer(trade, 'time')
        if timestamp is not None:
            timestamp = int(timestamp / 1000000)
        else:
            timestamp = self.safe_integer(trade, 'createdAt')
            # if it's a historical v1 trade, the exchange returns timestamp in seconds
            if ('dealValue' in trade) and (timestamp is not None):
                timestamp = timestamp * 1000
        priceString = self.safe_string_2(trade, 'price', 'dealPrice')
        amountString = self.safe_string_2(trade, 'size', 'amount')
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        side = self.safe_string(trade, 'side')
        fee = None
        feeCost = self.safe_number(trade, 'fee')
        if feeCost is not None:
            feeCurrencyId = self.safe_string(trade, 'feeCurrency')
            feeCurrency = self.safe_currency_code(feeCurrencyId)
            if feeCurrency is None:
                # fall back to the market sides when the payload omits the fee currency
                if market is not None:
                    feeCurrency = market['quote'] if (side == 'sell') else market['base']
            fee = {
                'cost': feeCost,
                'currency': feeCurrency,
                'rate': self.safe_number(trade, 'feeRate'),
            }
        type = self.safe_string_2(trade, 'type', 'orderType')
        if type == 'match':
            type = None
        cost = self.safe_number_2(trade, 'funds', 'dealValue')
        if cost is None:
            # derive cost from price * amount * contractSize when absent
            market = self.market(symbol)
            contractSize = self.safe_string(market, 'contractSize')
            contractCost = Precise.string_mul(priceString, amountString)
            if contractSize and contractCost:
                cost = self.parse_number(Precise.string_mul(contractCost, contractSize))
        return {
            'info': trade,
            'id': id,
            'order': orderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'takerOrMaker': takerOrMaker,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetDepositList(self.extend(request, params))
#
# {
# code: '200000',
# data: {
# "currentPage": 1,
# "pageSize": 5,
# "totalNum": 2,
# "totalPage": 1,
# "items": [
# {
# "address": "0x5f047b29041bcfdbf0e4478cdfa753a336ba6989",
# "memo": "5c247c8a03aa677cea2a251d",
# "amount": 1,
# "fee": 0.0001,
# "currency": "KCS",
# "isInner": False,
# "walletTxId": "5bbb57386d99522d9f954c5a@test004",
# "status": "SUCCESS",
# "createdAt": 1544178843000,
# "updatedAt": 1544178891000
# "remark":"foobar"
# },
# ...
# ]
# }
# }
#
responseData = response['data']['items']
return self.parse_transactions(responseData, currency, since, limit, {'type': 'deposit'})
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetWithdrawalList(self.extend(request, params))
#
# {
# code: '200000',
# data: {
# "currentPage": 1,
# "pageSize": 5,
# "totalNum": 2,
# "totalPage": 1,
# "items": [
# {
# "id": "5c2dc64e03aa675aa263f1ac",
# "address": "0x5bedb060b8eb8d823e2414d82acce78d38be7fe9",
# "memo": "",
# "currency": "ETH",
# "amount": 1.0000000,
# "fee": 0.0100000,
# "walletTxId": "3e2414d82acce78d38be7fe9",
# "isInner": False,
# "status": "FAILURE",
# "createdAt": 1546503758000,
# "updatedAt": 1546504603000
# },
# ...
# ]
# }
# }
#
responseData = response['data']['items']
return self.parse_transactions(responseData, currency, since, limit, {'type': 'withdrawal'})
    def fetch_funding_fee(self, code, params={}):
        # not supported on this exchange class; always raises
        raise BadRequest(self.id + ' has no method fetchFundingFee')
    def fetch_ledger(self, code=None, since=None, limit=None, params={}):
        # not supported on this exchange class; always raises
        raise BadRequest(self.id + ' has no method fetchLedger')
| 45.010492 | 352 | 0.478271 |
rt kucoin
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class kucoinfutures(kucoin):
def describe(self):
return self.deep_extend(super(kucoinfutures, self).describe(), {
'id': 'kucoinfutures',
'name': 'KuCoin Futures',
'countries': ['SC'],
'rateLimit': 75,
'version': 'v1',
'certified': False,
'pro': False,
'comment': 'Platform 2.0',
'quoteJsonNumbers': False,
'has': {
'CORS': None,
'spot': False,
'margin': False,
'swap': True,
'future': True,
'option': False,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createDepositAddress': True,
'createOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchClosedOrders': True,
'fetchCurrencies': False,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFee': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': False,
'fetchIndexOHLCV': False,
'fetchL3OrderBook': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': False,
'fetchTime': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'setMarginMode': False,
'transfer': True,
'withdraw': None,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/147508995-9e35030a-d046-43a1-a006-6fabd981b554.jpg',
'doc': [
'https://docs.kucoin.com/futures',
'https://docs.kucoin.com',
],
'www': 'https://futures.kucoin.com/',
'referral': 'https://futures.kucoin.com/?rcode=E5wkqe',
'api': {
'public': 'https://openapi-v2.kucoin.com',
'private': 'https://openapi-v2.kucoin.com',
'futuresPrivate': 'https://api-futures.kucoin.com',
'futuresPublic': 'https://api-futures.kucoin.com',
},
'test': {
'public': 'https://openapi-sandbox.kucoin.com',
'private': 'https://openapi-sandbox.kucoin.com',
'futuresPrivate': 'https://api-sandbox-futures.kucoin.com',
'futuresPublic': 'https://api-sandbox-futures.kucoin.com',
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'api': {
'futuresPublic': {
'get': {
'contracts/active': 1,
'contracts/{symbol}': 1,
'ticker': 1,
'level2/snapshot': 1.33,
'level2/depth{limit}': 1,
'level2/message/query': 1,
'level3/message/query': 1,
'level3/snapshot': 1,
'trade/history': 1,
'interest/query': 1,
'index/query': 1,
'mark-price/{symbol}/current': 1,
'premium/query': 1,
'funding-rate/{symbol}/current': 1,
'timestamp': 1,
'status': 1,
'kline/query': 1,
},
'post': {
'bullet-public': 1,
},
},
'futuresPrivate': {
'get': {
'account-overview': 1.33,
'transaction-history': 4.44,
'deposit-address': 1,
'deposit-list': 1,
'withdrawals/quotas': 1,
'withdrawal-list': 1,
'transfer-list': 1,
'orders': 1.33,
'stopOrders': 1,
'recentDoneOrders': 1,
'orders/{orderId}': 1, 'orders/byClientOid': 1, 'fills': 4.44,
'recentFills': 4.44,
'openOrderStatistics': 1,
'position': 1,
'positions': 4.44,
'funding-history': 4.44,
},
'post': {
'withdrawals': 1,
'transfer-out': 1,
'orders': 1.33,
'position/margin/auto-deposit-status': 1,
'position/margin/deposit-margin': 1,
'bullet-private': 1,
},
'delete': {
'withdrawals/{withdrawalId}': 1,
'cancel/transfer-out': 1,
'orders/{orderId}': 1,
'orders': 4.44,
'stopOrders': 1,
},
},
},
'precisionMode': TICK_SIZE,
'exceptions': {
'exact': {
'400': BadRequest,
'401': AuthenticationError,
'403': NotSupported,
'404': NotSupported,
'405': NotSupported,
'415': BadRequest,
'429': RateLimitExceeded,
'500': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'100001': InvalidOrder, # {"code":"100001","msg":"Unavailable to enable both \"postOnly\" and \"hidden\""}
'100004': BadRequest, # {"code":"100004","msg":"Order is in not cancelable state"}
'101030': PermissionDenied, # {"code":"101030","msg":"You haven't yet enabled the margin trading"}
'200004': InsufficientFunds,
'230003': InsufficientFunds,
'260100': InsufficientFunds,
'300003': InsufficientFunds,
'300012': InvalidOrder,
'400001': AuthenticationError,
'400002': InvalidNonce,
'400003': AuthenticationError,
'400004': AuthenticationError,
'400005': AuthenticationError,
'400006': AuthenticationError,
'400007': AuthenticationError,
'404000': NotSupported,
'400100': BadRequest,
'411100': AccountSuspended,
'500000': ExchangeNotAvailable,
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0006'),
'maker': self.parse_number('0.0002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0006')],
[self.parse_number('50'), self.parse_number('0.0006')],
[self.parse_number('200'), self.parse_number('0.0006')],
[self.parse_number('500'), self.parse_number('0.0005')],
[self.parse_number('1000'), self.parse_number('0.0004')],
[self.parse_number('2000'), self.parse_number('0.0004')],
[self.parse_number('4000'), self.parse_number('0.00038')],
[self.parse_number('8000'), self.parse_number('0.00035')],
[self.parse_number('15000'), self.parse_number('0.00032')],
[self.parse_number('25000'), self.parse_number('0.0003')],
[self.parse_number('40000'), self.parse_number('0.0003')],
[self.parse_number('60000'), self.parse_number('0.0003')],
[self.parse_number('80000'), self.parse_number('0.0003')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.02')],
[self.parse_number('50'), self.parse_number('0.015')],
[self.parse_number('200'), self.parse_number('0.01')],
[self.parse_number('500'), self.parse_number('0.01')],
[self.parse_number('1000'), self.parse_number('0.01')],
[self.parse_number('2000'), self.parse_number('0')],
[self.parse_number('4000'), self.parse_number('0')],
[self.parse_number('8000'), self.parse_number('0')],
[self.parse_number('15000'), self.parse_number('-0.003')],
[self.parse_number('25000'), self.parse_number('-0.006')],
[self.parse_number('40000'), self.parse_number('-0.009')],
[self.parse_number('60000'), self.parse_number('-0.012')],
[self.parse_number('80000'), self.parse_number('-0.015')],
],
},
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'commonCurrencies': {
'HOT': 'HOTNOW',
'EDGE': 'DADI',
'WAX': 'WAXP',
'TRY': 'Trias',
'VAI': 'VAIOT',
'XBT': 'BTC',
},
'timeframes': {
'1m': 1,
'3m': None,
'5m': 5,
'15m': 15,
'30m': 30,
'1h': 60,
'2h': 120,
'4h': 240,
'6h': None,
'8h': 480,
'12h': 720,
'1d': 1440,
'1w': 10080,
},
'options': {
'version': 'v1',
'symbolSeparator': '-',
'defaultType': 'swap',
'marginTypes': {},
'versions': {
'futuresPrivate': {
'POST': {
'transfer-out': 'v2',
},
},
'futuresPublic': {
'GET': {
'level3/snapshot': 'v2',
},
},
},
'networks': {
'OMNI': 'omni',
'ERC20': 'eth',
'TRC20': 'trx',
},
},
})
    def fetch_accounts(self, params={}):
        # not supported on this exchange class; always raises
        raise BadRequest(self.id + ' has no method fetchAccounts')
def fetch_status(self, params={}):
response = self.futuresPublicGetStatus(params)
data = self.safe_value(response, 'data', {})
status = self.safe_value(data, 'status')
if status is not None:
status = 'ok' if (status == 'open') else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
def fetch_markets(self, params={}):
response = self.futuresPublicGetContractsActive(params)
result = []
data = self.safe_value(response, 'data')
for i in range(0, len(data)):
market = data[i]
id = self.safe_string(market, 'symbol')
expiry = self.safe_integer(market, 'expireDate')
future = True if expiry else False
swap = not future
baseId = self.safe_string(market, 'baseCurrency')
quoteId = self.safe_string(market, 'quoteCurrency')
settleId = self.safe_string(market, 'settleCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
symbol = base + '/' + quote + ':' + settle
type = 'swap'
if future:
symbol = symbol + '-' + self.yymmdd(expiry, '')
type = 'future'
baseMaxSize = self.safe_number(market, 'baseMaxSize')
baseMinSizeString = self.safe_string(market, 'baseMinSize')
quoteMaxSizeString = self.safe_string(market, 'quoteMaxSize')
baseMinSize = self.parse_number(baseMinSizeString)
quoteMaxSize = self.parse_number(quoteMaxSizeString)
quoteMinSize = self.safe_number(market, 'quoteMinSize')
inverse = self.safe_value(market, 'isInverse')
status = self.safe_string(market, 'status')
multiplier = self.safe_string(market, 'multiplier')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': False,
'margin': False,
'swap': swap,
'future': future,
'option': False,
'active': (status == 'Open'),
'contract': True,
'linear': not inverse,
'inverse': inverse,
'taker': self.safe_number(market, 'takerFeeRate'),
'maker': self.safe_number(market, 'makerFeeRate'),
'contractSize': self.parse_number(Precise.string_abs(multiplier)),
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'price': self.safe_number(market, 'tickSize'),
'amount': self.safe_number(market, 'lotSize'),
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.safe_number(market, 'maxLeverage'),
},
'amount': {
'min': baseMinSize,
'max': baseMaxSize,
},
'price': {
'min': None,
'max': self.parse_number(Precise.string_div(quoteMaxSizeString, baseMinSizeString)),
},
'cost': {
'min': quoteMinSize,
'max': quoteMaxSize,
},
},
'info': market,
})
return result
def fetch_time(self, params={}):
response = self.futuresPublicGetTimestamp(params)
return self.safe_number(response, 'data')
def fetch_ohlcv(self, symbol, timeframe='15m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
marketId = market['id']
request = {
'symbol': marketId,
'granularity': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe) * 1000
endAt = self.milliseconds()
if since is not None:
request['from'] = since
if limit is None:
limit = self.safe_integer(self.options, 'fetchOHLCVLimit', 200)
endAt = self.sum(since, limit * duration)
elif limit is not None:
since = endAt - limit * duration
request['from'] = since
request['to'] = endAt
response = self.futuresPublicGetKlineQuery(self.extend(request, params))
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
r(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
    def create_deposit_address(self, code, params={}):
        # not supported on this exchange class; always raises
        raise BadRequest(self.id + ' has no method createDepositAddress')
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
currencyId = currency['id']
request = {
'currency': currencyId,
}
response = self.futuresPrivateGetDepositAddress(self.extend(request, params))
:
self.check_address(address)
return {
'info': response,
'currency': currencyId,
'address': address,
'tag': self.safe_string(data, 'memo'),
'network': self.safe_string(data, 'chain'),
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
level = self.safe_number(params, 'level')
if level != 2 and level is not None:
raise BadRequest(self.id + ' fetchOrderBook can only return level 2')
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
if (limit == 20) or (limit == 100):
request['limit'] = limit
else:
raise BadRequest(self.id + ' fetchOrderBook limit argument must be 20 or 100')
else:
request['limit'] = 20
response = self.futuresPublicGetLevel2DepthLimit(self.extend(request, params))
data = self.safe_value(response, 'data', {})
timestamp = int(self.safe_integer(data, 'ts') / 1000000)
orderbook = self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 0, 1)
orderbook['nonce'] = self.safe_integer(data, 'sequence')
return orderbook
    def fetch_l3_order_book(self, symbol, limit=None, params={}):
        # level 3 order books are not exposed here; only L2 is available via fetch_order_book
        raise BadRequest(self.id + ' only can only fetch the L2 order book')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.futuresPublicGetTicker(self.extend(request, params))
return self.parse_ticker(response['data'], market)
def parse_ticker(self, ticker, market=None):
last = self.safe_string(ticker, 'price')
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market, '-')
timestamp = Precise.string_div(self.safe_string(ticker, 'ts'), '1000000')
return self.safe_ticker({
'symbol': market['symbol'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_string(ticker, 'bestBidPrice'),
'bidVolume': self.safe_string(ticker, 'bestBidSize'),
'ask': self.safe_string(ticker, 'bestAskPrice'),
'askVolume': self.safe_string(ticker, 'bestAskSize'),
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': ticker,
}, market, False)
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startAt'] = since
if limit is not None:
request['maxCount'] = limit
response = self.futuresPrivateGetFundingHistory(self.extend(request, params))
data = self.safe_value(response, 'data')
dataList = self.safe_value(data, 'dataList')
fees = []
for i in range(0, len(dataList)):
listItem = dataList[i]
timestamp = self.safe_integer(listItem, 'timePoint')
fees.append({
'info': listItem,
'symbol': symbol,
'code': self.safe_currency_code(self.safe_string(listItem, 'settleCurrency')),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': self.safe_number(listItem, 'id'),
'amount': self.safe_number(listItem, 'funding'),
'fundingRate': self.safe_number(listItem, 'fundingRate'),
'markPrice': self.safe_number(listItem, 'markPrice'),
'positionQty': self.safe_number(listItem, 'positionQty'),
'positionCost': self.safe_number(listItem, 'positionCost'),
})
return fees
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.futuresPrivateGetPositions(params)
return self.parse_positions(self.safe_value(response, 'data'))
def parse_positions(self, positions):
result = []
for i in range(0, len(positions)):
result.append(self.parse_position(positions[i]))
return result
def parse_position(self, position, market=None):
ition, 'posCost'))
initialMargin = self.safe_string(position, 'posInit')
initialMarginPercentage = Precise.string_div(initialMargin, notional)
unrealisedPnl = self.safe_string(position, 'unrealisedPnl')
crossMode = self.safe_value(position, 'crossMode')
marginType = 'cross' if crossMode else 'isolated'
return {
'info': position,
'symbol': self.safe_string(market, 'symbol'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'initialMargin': self.parse_number(initialMargin),
'initialMarginPercentage': self.parse_number(initialMarginPercentage),
'maintenanceMargin': self.safe_number(position, 'posMaint'),
'maintenanceMarginPercentage': self.safe_number(position, 'maintMarginReq'),
'entryPrice': self.safe_number(position, 'avgEntryPrice'),
'notional': self.parse_number(notional),
'leverage': self.safe_number(position, 'realLeverage'),
'unrealizedPnl': self.parse_number(unrealisedPnl),
'contracts': self.parse_number(Precise.string_abs(size)),
'contractSize': self.safe_value(market, 'contractSize'),
'marginRatio': None,
'liquidationPrice': self.safe_number(position, 'liquidationPrice'),
'markPrice': self.safe_number(position, 'markPrice'),
'collateral': self.safe_number(position, 'maintMargin'),
'marginType': marginType,
'side': side,
'percentage': self.parse_number(Precise.string_div(unrealisedPnl, initialMargin)),
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
clientOrderId = self.safe_string_2(params, 'clientOid', 'clientOrderId', self.uuid())
params = self.omit(params, ['clientOid', 'clientOrderId'])
if amount < 1:
raise InvalidOrder(self.id + ' createOrder() minimum contract order amount is 1')
preciseAmount = int(self.amount_to_precision(symbol, amount))
request = {
'clientOid': clientOrderId,
'side': side,
'symbol': market['id'],
'type': type,
'size': preciseAmount,
'leverage': 1,
e:
raise BadRequest(self.id + ' createOrder() does not support the postOnly parameter together with a hidden parameter')
iceberg = self.safe_value(params, 'iceberg')
if iceberg:
visibleSize = self.safe_value(params, 'visibleSize')
if visibleSize is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a visibleSize parameter for iceberg orders')
params = self.omit(params, 'timeInForce') # Time in force only valid for limit orders, exchange error when gtc for market orders
response = self.futuresPrivatePostOrders(self.extend(request, params))
#
# {
# code: "200000",
# data: {
# orderId: "619717484f1d010001510cde",
# },
# }
#
data = self.safe_value(response, 'data', {})
return {
'id': self.safe_string(data, 'orderId'),
'clientOrderId': None,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'symbol': None,
'type': None,
'side': None,
'price': None,
'amount': None,
'cost': None,
'average': None,
'filled': None,
'remaining': None,
'status': None,
'fee': None,
'trades': None,
'timeInForce': None,
'postOnly': None,
'stopPrice': None,
'info': response,
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
response = self.futuresPrivateDeleteOrdersOrderId(self.extend(request, params))
#
# {
# code: "200000",
# data: {
# cancelledOrderIds: [
# "619714b8b6353000014c505a",
# ],
# },
# }
#
return self.safe_value(response, 'data')
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
if symbol is not None:
request['symbol'] = self.market_id(symbol)
response = self.futuresPrivateDeleteOrders(self.extend(request, params))
# ? futuresPrivateDeleteStopOrders
# {
# code: "200000",
# data: {
# cancelledOrderIds: [
# "619714b8b6353000014c505a",
# ],
# },
# }
#
return self.safe_value(response, 'data')
def add_margin(self, symbol, amount, params={}):
self.load_markets()
market = self.market(symbol)
uuid = self.uuid()
request = {
'symbol': market['id'],
'margin': amount,
'bizNo': uuid,
}
return self.futuresPrivatePostPositionMarginDepositMargin(self.extend(request, params))
def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'status': status,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetOrders(self.extend(request, params))
responseData = self.safe_value(response, 'data', {})
orders = self.safe_value(responseData, 'items', [])
return self.parse_orders(orders, market, since, limit)
def fetch_order(self, id=None, symbol=None, params={}):
self.load_markets()
request = {}
method = 'futuresPrivateGetOrdersOrderId'
if id is None:
clientOrderId = self.safe_string_2(params, 'clientOid', 'clientOrderId')
if clientOrderId is None:
raise InvalidOrder(self.id + ' fetchOrder() requires parameter id or params.clientOid')
request['clientOid'] = clientOrderId
method = 'futuresPrivateGetOrdersByClientOid'
params = self.omit(params, ['clientOid', 'clientOrderId'])
else:
request['orderId'] = id
response = getattr(self, method)(self.extend(request, params))
market = self.market(symbol) if (symbol is not None) else None
responseData = self.safe_value(response, 'data')
return self.parse_order(responseData, market)
def parse_order(self, order, market=None):
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
orderId = self.safe_string(order, 'id')
type = self.safe_string(order, 'type')
timestamp = self.safe_integer(order, 'createdAt')
datetime = self.iso8601(timestamp)
price = self.safe_string(order, 'price')
# price is zero for market order
# omitZero is called in safeOrder2
side = self.safe_string(order, 'side')
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
feeCost = self.safe_number(order, 'fee')
amount = self.safe_string(order, 'size')
filled = self.safe_string(order, 'dealSize')
rawCost = self.safe_string_2(order, 'dealFunds', 'filledValue')
leverage = self.safe_string(order, 'leverage')
cost = Precise.string_div(rawCost, leverage)
average = None
if Precise.string_gt(filled, '0'):
contractSize = self.safe_string(market, 'contractSize')
if market['linear']:
average = Precise.string_div(rawCost, Precise.string_mul(contractSize, filled))
else:
average = Precise.string_div(Precise.string_mul(contractSize, filled), rawCost)
# precision reported by their api is 8 d.p.
# average = Precise.string_div(rawCost, Precise.string_mul(filled, market['contractSize']))
# bool
isActive = self.safe_value(order, 'isActive', False)
cancelExist = self.safe_value(order, 'cancelExist', False)
status = 'open' if isActive else 'closed'
status = 'canceled' if cancelExist else status
fee = {
'currency': feeCurrency,
'cost': feeCost,
}
clientOrderId = self.safe_string(order, 'clientOid')
timeInForce = self.safe_string(order, 'timeInForce')
stopPrice = self.safe_number(order, 'stopPrice')
postOnly = self.safe_value(order, 'postOnly')
return self.safe_order({
'id': orderId,
'clientOrderId': clientOrderId,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'amount': amount,
'price': price,
'stopPrice': stopPrice,
'cost': cost,
'filled': filled,
'remaining': None,
'timestamp': timestamp,
'datetime': datetime,
'fee': fee,
'status': status,
'info': order,
'lastTradeTimestamp': None,
'average': average,
'trades': None,
}, market)
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = self.futuresPublicGetFundingRateSymbolCurrent(self.extend(request, params))
#
# {
# code: "200000",
# data: {
# symbol: ".ETHUSDTMFPI8H",
# granularity: 28800000,
# timePoint: 1637380800000,
# value: 0.0001,
# predictedValue: 0.0001,
# },
# }
#
data = self.safe_value(response, 'data')
fundingTimestamp = self.safe_number(data, 'timePoint')
return {
'info': data,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': self.safe_number(data, 'value'),
'fundingTimestamp': fundingTimestamp,
'fundingDatetime': self.iso8601(fundingTimestamp),
'nextFundingRate': self.safe_number(data, 'predictedValue'),
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
data = self.safe_value(response, 'data')
currencyId = self.safe_string(data, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(data, 'availableBalance')
account['total'] = self.safe_string(data, 'accountEquity')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
# only fetches one balance at a time
# by default it will only fetch the BTC balance of the futures account
# you can send 'currency' in params to fetch other currencies
# fetchBalance({'type': 'future', 'currency': 'USDT'})
response = self.futuresPrivateGetAccountOverview(params)
#
# {
# code: '200000',
# data: {
# accountEquity: 0.00005,
# unrealisedPNL: 0,
# marginBalance: 0.00005,
# positionMargin: 0,
# orderMargin: 0,
# frozenFunds: 0,
# availableBalance: 0.00005,
# currency: 'XBT'
# }
# }
#
return self.parse_balance(response)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
if (toAccount != 'main' and toAccount != 'funding') or (fromAccount != 'futures' and fromAccount != 'future' and fromAccount != 'contract'):
raise BadRequest(self.id + ' only supports transfers from contract(future) account to main(funding) account')
return self.transfer_out(code, amount, params)
def transfer_out(self, code, amount, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': self.safe_string(currency, 'id'), # Currency,including XBT,USDT
'amount': amount,
}
# transfer from usdm futures wallet to spot wallet
response = self.futuresPrivatePostTransferOut(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "applyId": "5bffb63303aa675e8bbe18f9" # Transfer-out request ID
# }
# }
#
data = self.safe_value(response, 'data')
timestamp = self.safe_string(data, 'updatedAt')
return {
'info': response,
'id': self.safe_string(data, 'applyId'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': code,
'amount': amount,
'fromAccount': 'future',
'toAccount': 'spot',
'status': self.safe_string(data, 'status'),
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# orderId(String) [optional] Fills for a specific order(other parameters can be ignored if specified)
# symbol(String) [optional] Symbol of the contract
# side(String) [optional] buy or sell
# type(String) [optional] limit, market, limit_stop or market_stop
# startAt(long) [optional] Start time(milisecond)
# endAt(long) [optional] End time(milisecond)
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetFills(self.extend(request, params))
#
# {
# "code": "200000",
# "data": {
# "currentPage": 1,
# "pageSize": 1,
# "totalNum": 251915,
# "totalPage": 251915,
# "items": [
# {
# "symbol": "XBTUSDM", # Ticker symbol of the contract
# "tradeId": "5ce24c1f0c19fc3c58edc47c", # Trade ID
# "orderId": "5ce24c16b210233c36ee321d", # Order ID
# "side": "sell", # Transaction side
# "liquidity": "taker", # Liquidity- taker or maker
# "price": "8302", # Filled price
# "size": 10, # Filled amount
# "value": "0.001204529", # Order value
# "feeRate": "0.0005", # Floating fees
# "fixFee": "0.00000006", # Fixed fees
# "feeCurrency": "XBT", # Charging currency
# "stop": "", # A mark to the stop order type
# "fee": "0.0000012022", # Transaction fee
# "orderType": "limit", # Order type
# "tradeType": "trade", # Trade type(trade, liquidation, ADL or settlement)
# "createdAt": 1558334496000, # Time the order created
# "settleCurrency": "XBT", # settlement currency
# "tradeTime": 1558334496000000000 # trade time in nanosecond
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
trades = self.safe_value(data, 'items', {})
return self.parse_trades(trades, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.futuresPublicGetTradeHistory(self.extend(request, params))
#
# {
# "code": "200000",
# "data": [
# {
# "sequence": 32114961,
# "side": "buy",
# "size": 39,
# "price": "4001.6500000000",
# "takerOrderId": "61c20742f172110001e0ebe4",
# "makerOrderId": "61c2073fcfc88100010fcb5d",
# "tradeId": "61c2074277a0c473e69029b8",
# "ts": 1640105794099993896 # filled time
# }
# ]
# }
#
trades = self.safe_value(response, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "sequence": 32114961,
# "side": "buy",
# "size": 39,
# "price": "4001.6500000000",
# "takerOrderId": "61c20742f172110001e0ebe4",
# "makerOrderId": "61c2073fcfc88100010fcb5d",
# "tradeId": "61c2074277a0c473e69029b8",
# "ts": 1640105794099993896 # filled time
# }
#
# fetchMyTrades(private) v2
#
# {
# "symbol":"BTC-USDT",
# "tradeId":"5c35c02709e4f67d5266954e",
# "orderId":"5c35c02703aa673ceec2a168",
# "counterOrderId":"5c1ab46003aa676e487fa8e3",
# "side":"buy",
# "liquidity":"taker",
# "forceTaker":true,
# "price":"0.083",
# "size":"0.8424304",
# "funds":"0.0699217232",
# "fee":"0",
# "feeRate":"0",
# "feeCurrency":"USDT",
# "stop":"",
# "type":"limit",
# "createdAt":1547026472000
# }
#
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market, '-')
id = self.safe_string_2(trade, 'tradeId', 'id')
orderId = self.safe_string(trade, 'orderId')
takerOrMaker = self.safe_string(trade, 'liquidity')
timestamp = self.safe_integer(trade, 'time')
if timestamp is not None:
timestamp = int(timestamp / 1000000)
else:
timestamp = self.safe_integer(trade, 'createdAt')
# if it's a historical v1 trade, the exchange returns timestamp in seconds
if ('dealValue' in trade) and (timestamp is not None):
timestamp = timestamp * 1000
priceString = self.safe_string_2(trade, 'price', 'dealPrice')
amountString = self.safe_string_2(trade, 'size', 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
side = self.safe_string(trade, 'side')
fee = None
feeCost = self.safe_number(trade, 'fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
if feeCurrency is None:
if market is not None:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': self.safe_number(trade, 'feeRate'),
}
type = self.safe_string_2(trade, 'type', 'orderType')
if type == 'match':
type = None
cost = self.safe_number_2(trade, 'funds', 'dealValue')
if cost is None:
market = self.market(symbol)
contractSize = self.safe_string(market, 'contractSize')
contractCost = Precise.string_mul(priceString, amountString)
if contractSize and contractCost:
cost = self.parse_number(Precise.string_mul(contractCost, contractSize))
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetDepositList(self.extend(request, params))
responseData = response['data']['items']
return self.parse_transactions(responseData, currency, since, limit, {'type': 'deposit'})
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startAt'] = since
response = self.futuresPrivateGetWithdrawalList(self.extend(request, params))
responseData = response['data']['items']
return self.parse_transactions(responseData, currency, since, limit, {'type': 'withdrawal'})
def fetch_funding_fee(self, code, params={}):
raise BadRequest(self.id + ' has no method fetchFundingFee')
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
raise BadRequest(self.id + ' has no method fetchLedger')
| true | true |
1c3511fecc84eb80bd643980e0e9ad5b84b0f0ed | 5,265 | py | Python | synaptic_fitting/Heatmap_3000-4000.py | danielmk/pyDentate | df8f67d4523ce463701c5e5675e74e309dd151e7 | [
"MIT"
] | 1 | 2022-02-24T20:39:46.000Z | 2022-02-24T20:39:46.000Z | synaptic_fitting/Heatmap_3000-4000.py | danielmk/pydentate | df8f67d4523ce463701c5e5675e74e309dd151e7 | [
"MIT"
] | null | null | null | synaptic_fitting/Heatmap_3000-4000.py | danielmk/pydentate | df8f67d4523ce463701c5e5675e74e309dd151e7 | [
"MIT"
] | 4 | 2021-11-02T07:47:42.000Z | 2021-11-30T09:07:35.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 15:36:56 2019
@author: barisckuru
"""
import numpy as np
import os
from tmgexp2_simulator import simulate
import matplotlib.pyplot as plt
import time
import seaborn as sns
import pandas as pd
import pickle
begin = time.time()
# PARAMETERS
freq_1 = []
freq_10 = []
freq_30 = []
freq_50 = []
load_1 = []
peaks = []
taus = []
norm = []
'LOAD THE EXPERIMENTAL DATA'
data_path = "/home/can/Downloads/gc_to_mc/"
'Open and load the data in the same directory for diff freqs'
for files in os.walk(data_path):
for file in files[2]:
if '1hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_1 = load_1['mean_arr']
if '10hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_10 = load_1['mean_arr']
if '30hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_30 = load_1['mean_arr']
if '50hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_50 = load_1['mean_arr']
loads = [freq_1, freq_10, freq_30, freq_50]
'PEAK FINDER'
for i in range(len(loads)):
data = loads[i]
'''Data is current response, values are negative.
For stimulus artifact, both positive and negative
positive threshold was used to define the indices for stimuli'''
indices = np.argwhere(data > 200)
'Indices were shifted 40 dps, response without artifact in the beginning'
indices = np.transpose(indices)[0] + 40
'Data was reverted to compare it positive conductance values from sim'
data = -data
'One more indice was appended to create an interval for the last stimulus'
indices = np.append(indices, indices[-1] + (indices[2] - indices[1]))
'NORMALIZATION of data by the local max of first signal'
# end is cut to eliminate stim artifact
first_sig = data[indices[1]:indices[3]-60]
first_peak = max(first_sig)
data = data/first_peak
data_cut = data[(indices[1]-10000):]
norm.append(data_cut)
'''Indices for peak finder, 2 idcs for one stimulus, 1 picked
Shifted and selected idcs are now starting points
Stop points were also defined by adding the length of syn response
so stim artifact at the end was eliminated'''
start = indices[1::2]
stop = start + len(first_sig)
indices = np.concatenate((start, stop))
indices = np.sort(indices)
'''Data were splitted with respect to indices
local max was found for each part'''
split_data = np.split(data, indices)
split_data = split_data[1:len(split_data):2]
split_data = np.array(split_data)
peaks_data = np.amax(split_data, axis=1)
peaks.append(peaks_data)
'LOSS func w Mean Square Error for each freq and then avarage'
def loss(x):
tau_facil, tau_rec = x
taus.append(x)
u0 = 7.78641198e-02
sampling = 0.5
output1hz = simulate(x[0], x[1], 1, u0, sampling)[0]
Hz1 = peaks[0]
output10hz = simulate(x[0], x[1], 10, u0, sampling)[0]
Hz10 = peaks[1]
output30hz = simulate(x[0], x[1], 30, u0, sampling)[0]
Hz30 = peaks[2]
output50hz = simulate(x[0], x[1], 50, u0, sampling)[0]
Hz50 = peaks[3]
mse1 = (np.square(output1hz - Hz1)).mean(axis=None)
mse10 = (np.square(output10hz - Hz10)).mean(axis=None)
mse30 = (np.square(output30hz - Hz30)).mean(axis=None)
mse50 = (np.square(output50hz - Hz50)).mean(axis=None)
mse = (mse1 + mse10 + mse30 + mse50)/4
return mse
# with 1hz, 770 hours
# w/o 1hz 189 hours
Z = []
pars = []
mat = np.zeros((501,101))
tau_facil = np.arange(3000, 4001, 2)
tau_rec = np.arange(0, 201, 2)
for i in tau_facil:
for j in tau_rec:
x1 = np.array([i,j])
pars.append(x1)
curr_loss = loss(x1)
idc_facil = int((int(i)-3000)/2)
idc_rec = int(int(j)/2)
mat[idc_facil, idc_rec] = curr_loss
np.savez('heatmap_3k-4k', loss=mat)
loss_load = np.load('heatmap_3k-4k.npz')
end = time.time()
print('time(seconds): ', end-begin)
'''
np.random.seed(6)
num = 50000 #num/2 values for each loss calc
x0 = np.random.randint(0,3000,num)
res_all = []
times = []
X = []
Y = []
for i in range(int(num/2)):
begin = time.time()
x1 = np.array([x0[2*i], x0[2*i+1]])
X.append(x1[0])
Y.append(x1[1])
Z.append(loss(x1))
end = time.time()
times.append(end-begin)
all_values_25000 = []
all_values_25000.append(X)
all_values_25000.append(Y)
all_values_25000.append(Z)
data = pd.DataFrame({'X': X, 'Y': Y, 'Z': Z})
data_pivoted = data.pivot_table( "Z", "X", "Y") #_table extension for duplicates
ax = sns.heatmap(data_pivoted)
plt.savefig('taus_25000.png')
plt.show()
with open('all_values_25000', 'wb') as f:
pickle.dump(all_values_25000, f)
start_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))
previous results are stored
np.random.seed(6)
num = 10000
x0 = np.random.randint(0,2000,num)
import pickle
with open('all_values', 'wb') as f:
pickle.dump(all_values, f)
in all_values [X,Y,Z]
import pickle
with open('all_values', 'wb') as f:
pickle.dump(all_values, f)
'''
| 25.935961 | 80 | 0.640646 |
import numpy as np
import os
from tmgexp2_simulator import simulate
import matplotlib.pyplot as plt
import time
import seaborn as sns
import pandas as pd
import pickle
begin = time.time()
freq_1 = []
freq_10 = []
freq_30 = []
freq_50 = []
load_1 = []
peaks = []
taus = []
norm = []
data_path = "/home/can/Downloads/gc_to_mc/"
for files in os.walk(data_path):
for file in files[2]:
if '1hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_1 = load_1['mean_arr']
if '10hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_10 = load_1['mean_arr']
if '30hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_30 = load_1['mean_arr']
if '50hz' in file:
curr_path = data_path + file
load_1 = np.load(curr_path)
freq_50 = load_1['mean_arr']
loads = [freq_1, freq_10, freq_30, freq_50]
for i in range(len(loads)):
data = loads[i]
indices = np.argwhere(data > 200)
indices = np.transpose(indices)[0] + 40
data = -data
indices = np.append(indices, indices[-1] + (indices[2] - indices[1]))
first_sig = data[indices[1]:indices[3]-60]
first_peak = max(first_sig)
data = data/first_peak
data_cut = data[(indices[1]-10000):]
norm.append(data_cut)
start = indices[1::2]
stop = start + len(first_sig)
indices = np.concatenate((start, stop))
indices = np.sort(indices)
split_data = np.split(data, indices)
split_data = split_data[1:len(split_data):2]
split_data = np.array(split_data)
peaks_data = np.amax(split_data, axis=1)
peaks.append(peaks_data)
def loss(x):
tau_facil, tau_rec = x
taus.append(x)
u0 = 7.78641198e-02
sampling = 0.5
output1hz = simulate(x[0], x[1], 1, u0, sampling)[0]
Hz1 = peaks[0]
output10hz = simulate(x[0], x[1], 10, u0, sampling)[0]
Hz10 = peaks[1]
output30hz = simulate(x[0], x[1], 30, u0, sampling)[0]
Hz30 = peaks[2]
output50hz = simulate(x[0], x[1], 50, u0, sampling)[0]
Hz50 = peaks[3]
mse1 = (np.square(output1hz - Hz1)).mean(axis=None)
mse10 = (np.square(output10hz - Hz10)).mean(axis=None)
mse30 = (np.square(output30hz - Hz30)).mean(axis=None)
mse50 = (np.square(output50hz - Hz50)).mean(axis=None)
mse = (mse1 + mse10 + mse30 + mse50)/4
return mse
Z = []
pars = []
mat = np.zeros((501,101))
tau_facil = np.arange(3000, 4001, 2)
tau_rec = np.arange(0, 201, 2)
for i in tau_facil:
for j in tau_rec:
x1 = np.array([i,j])
pars.append(x1)
curr_loss = loss(x1)
idc_facil = int((int(i)-3000)/2)
idc_rec = int(int(j)/2)
mat[idc_facil, idc_rec] = curr_loss
np.savez('heatmap_3k-4k', loss=mat)
loss_load = np.load('heatmap_3k-4k.npz')
end = time.time()
print('time(seconds): ', end-begin)
| true | true |
1c35124937409aabe6eca6f0ec63180114a253b1 | 1,431 | py | Python | kiss_cache/stores/django_cache.py | HiveTraum/KISSCache-Python | c6f601b3c510e0dd6f4340ea6c013267d8424643 | [
"MIT"
] | null | null | null | kiss_cache/stores/django_cache.py | HiveTraum/KISSCache-Python | c6f601b3c510e0dd6f4340ea6c013267d8424643 | [
"MIT"
] | null | null | null | kiss_cache/stores/django_cache.py | HiveTraum/KISSCache-Python | c6f601b3c510e0dd6f4340ea6c013267d8424643 | [
"MIT"
] | 1 | 2019-12-03T05:54:31.000Z | 2019-12-03T05:54:31.000Z | import logging
from typing import Callable, Any
from uuid import uuid4
logger = logging.getLogger(__name__)
def default_serialize(value: Any) -> str:
return value
def default_deserialize(value: str) -> Any:
return value
class DjangoCacheStore:
def __init__(self, cache_identifier='default',
serialize: Callable[[Any], str] = default_serialize,
deserialize: Callable[[str], Any] = default_deserialize):
try:
from django.core.cache import caches
except ImportError:
raise ImportError('Django required for this cache store')
self.prefix = str(uuid4())
self.cache = caches[cache_identifier]
self.serialize = serialize
self.deserialize = deserialize
def _key_prefix_combine(self, k: str) -> str:
return f'{self.prefix}:{k}'
def get(self, key):
key = self._key_prefix_combine(key)
try:
value = self.cache.get(key)
if value is None:
return None
return self.deserialize(value)
except Exception as e:
logger.exception(e)
return None
def set(self, key, value, expire):
key = self._key_prefix_combine(key)
try:
value = self.serialize(value)
return self.cache.set(key, value, expire)
except Exception as e:
logger.exception(e)
| 24.672414 | 74 | 0.60587 | import logging
from typing import Callable, Any
from uuid import uuid4
logger = logging.getLogger(__name__)
def default_serialize(value: Any) -> str:
return value
def default_deserialize(value: str) -> Any:
return value
class DjangoCacheStore:
def __init__(self, cache_identifier='default',
serialize: Callable[[Any], str] = default_serialize,
deserialize: Callable[[str], Any] = default_deserialize):
try:
from django.core.cache import caches
except ImportError:
raise ImportError('Django required for this cache store')
self.prefix = str(uuid4())
self.cache = caches[cache_identifier]
self.serialize = serialize
self.deserialize = deserialize
def _key_prefix_combine(self, k: str) -> str:
return f'{self.prefix}:{k}'
def get(self, key):
key = self._key_prefix_combine(key)
try:
value = self.cache.get(key)
if value is None:
return None
return self.deserialize(value)
except Exception as e:
logger.exception(e)
return None
def set(self, key, value, expire):
key = self._key_prefix_combine(key)
try:
value = self.serialize(value)
return self.cache.set(key, value, expire)
except Exception as e:
logger.exception(e)
| true | true |
1c35134960a8258bd7bd63f12ed1b98722ad5d7b | 1,130 | py | Python | cascad/agents/aritifcial_system/contracts/pm/MarketMaker.py | Will-Holden/cascadv2 | fd43d47d4be075d30e75053f9af3cd82c33b6623 | [
"Apache-2.0"
] | null | null | null | cascad/agents/aritifcial_system/contracts/pm/MarketMaker.py | Will-Holden/cascadv2 | fd43d47d4be075d30e75053f9af3cd82c33b6623 | [
"Apache-2.0"
] | null | null | null | cascad/agents/aritifcial_system/contracts/pm/MarketMaker.py | Will-Holden/cascadv2 | fd43d47d4be075d30e75053f9af3cd82c33b6623 | [
"Apache-2.0"
] | 1 | 2022-03-24T10:01:28.000Z | 2022-03-24T10:01:28.000Z | from cascad.agents.aritifcial_system.contracts.token.ERC20 import ERC20
class MarketMaker:
def __init__(self, pmSystem, collateralToken: ERC20, conditionIds, atomicOutcomeSlotCount, fee, funding, stage, whitelist, outcomeSlotCounts, collectionIds, positionIds, owner=None):
self.pmSystem = pmSystem
self.collateralToken = collateralToken
self.conditionIds = conditionIds
self.atomicOutcomeSlotCount = atomicOutcomeSlotCount
self.fee = fee
self.funding = funding
self.stage = stage
self.whitelist = whitelist
self.outcomeSlotCounts = outcomeSlotCounts
self.collectionIds = collectionIds
self.positionIds = positionIds
self.owner = owner
def changeFunding(self, fundingChange, caller):
assert fundingChange != 0
if (fundingChange > 0):
pass
def pause(self):
pass
def resume(self):
pass
def changeFee(self, fee):
pass
def trade(self, outcomeTokenAmounts, collateralLimit):
pass
def calMarketFee(self, outcomeTokenCost):
pass
| 29.736842 | 186 | 0.669027 | from cascad.agents.aritifcial_system.contracts.token.ERC20 import ERC20
class MarketMaker:
def __init__(self, pmSystem, collateralToken: ERC20, conditionIds, atomicOutcomeSlotCount, fee, funding, stage, whitelist, outcomeSlotCounts, collectionIds, positionIds, owner=None):
self.pmSystem = pmSystem
self.collateralToken = collateralToken
self.conditionIds = conditionIds
self.atomicOutcomeSlotCount = atomicOutcomeSlotCount
self.fee = fee
self.funding = funding
self.stage = stage
self.whitelist = whitelist
self.outcomeSlotCounts = outcomeSlotCounts
self.collectionIds = collectionIds
self.positionIds = positionIds
self.owner = owner
def changeFunding(self, fundingChange, caller):
assert fundingChange != 0
if (fundingChange > 0):
pass
def pause(self):
pass
def resume(self):
pass
def changeFee(self, fee):
pass
def trade(self, outcomeTokenAmounts, collateralLimit):
pass
def calMarketFee(self, outcomeTokenCost):
pass
| true | true |
1c351511f358442a292e6f21da74651a5abef80f | 9,844 | py | Python | src/schnetpack/representation/schnet.py | giadefa/schnetpack | 9dabc3b6e3b28deb2fb3743ea1857c46b055efbf | [
"MIT"
] | 2 | 2020-12-29T05:28:20.000Z | 2020-12-29T05:30:13.000Z | src/schnetpack/representation/schnet.py | giadefa/schnetpack | 9dabc3b6e3b28deb2fb3743ea1857c46b055efbf | [
"MIT"
] | null | null | null | src/schnetpack/representation/schnet.py | giadefa/schnetpack | 9dabc3b6e3b28deb2fb3743ea1857c46b055efbf | [
"MIT"
] | 1 | 2021-01-22T13:44:31.000Z | 2021-01-22T13:44:31.000Z | import torch
import torch.nn as nn
from schnetpack.nn.base import Dense
from schnetpack import Properties
from schnetpack.nn.cfconv import CFConv
from schnetpack.nn.cutoff import CosineCutoff
from schnetpack.nn.acsf import GaussianSmearing
from schnetpack.nn.neighbors import AtomDistances
from schnetpack.nn.activations import shifted_softplus
class SchNetInteraction(nn.Module):
r"""SchNet interaction block for modeling interactions of atomistic systems.
Args:
n_atom_basis (int): number of features to describe atomic environments.
n_spatial_basis (int): number of input features of filter-generating networks.
n_filters (int): number of filters used in continuous-filter convolution.
cutoff (float): cutoff radius.
cutoff_network (nn.Module, optional): cutoff layer.
normalize_filter (bool, optional): if True, divide aggregated filter by number
of neighbors over which convolution is applied.
"""
def __init__(
self,
n_atom_basis,
n_spatial_basis,
n_filters,
cutoff,
cutoff_network=CosineCutoff,
normalize_filter=False,
):
super(SchNetInteraction, self).__init__()
# filter block used in interaction block
self.filter_network = nn.Sequential(
Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
Dense(n_filters, n_filters),
)
# cutoff layer used in interaction block
self.cutoff_network = cutoff_network(cutoff)
# interaction block
self.cfconv = CFConv(
n_atom_basis,
n_filters,
n_atom_basis,
self.filter_network,
cutoff_network=self.cutoff_network,
activation=shifted_softplus,
normalize_filter=normalize_filter,
)
# dense layer
self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
"""Compute interaction output.
Args:
x (torch.Tensor): input representation/embedding of atomic environments
with (N_b, N_a, n_atom_basis) shape.
r_ij (torch.Tensor): interatomic distances of (N_b, N_a, N_nbh) shape.
neighbors (torch.Tensor): indices of neighbors of (N_b, N_a, N_nbh) shape.
neighbor_mask (torch.Tensor): mask to filter out non-existing neighbors
introduced via padding.
f_ij (torch.Tensor, optional): expanded interatomic distances in a basis.
If None, r_ij.unsqueeze(-1) is used.
Returns:
torch.Tensor: block output with (N_b, N_a, n_atom_basis) shape.
"""
# continuous-filter convolution interaction block followed by Dense layer
v = self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij)
v = self.dense(v)
return v
class SchNet(nn.Module):
    """SchNet architecture for learning representations of atomistic systems.
    Args:
        n_atom_basis (int, optional): number of features to describe atomic environments.
            This determines the size of each embedding vector; i.e. embeddings_dim.
        n_filters (int, optional): number of filters used in continuous-filter convolution
        n_interactions (int, optional): number of interaction blocks.
        cutoff (float, optional): cutoff radius.
        n_gaussians (int, optional): number of Gaussian functions used to expand
            atomic distances.
        normalize_filter (bool, optional): if True, divide aggregated filter by number
            of neighbors over which convolution is applied.
        coupled_interactions (bool, optional): if True, share the weights across
            interaction blocks and filter-generating networks.
        return_intermediate (bool, optional): if True, `forward` method also returns
            intermediate atomic representations after each interaction block is applied.
        max_z (int, optional): maximum nuclear charge allowed in database. This
            determines the size of the dictionary of embedding; i.e. num_embeddings.
        cutoff_network (nn.Module, optional): cutoff layer.
        trainable_gaussians (bool, optional): If True, widths and offset of Gaussian
            functions are adjusted during training process.
        distance_expansion (nn.Module, optional): layer for expanding interatomic
            distances in a basis.
        charged_systems (bool, optional): if True, create a learnable per-feature
            charge embedding (but see the note in `forward`: the branch that would
            use it is currently disabled).
    References:
    .. [#schnet1] Schütt, Arbabzadah, Chmiela, Müller, Tkatchenko:
       Quantum-chemical insights from deep tensor neural networks.
       Nature Communications, 8, 13890. 2017.
    .. [#schnet_transfer] Schütt, Kindermans, Sauceda, Chmiela, Tkatchenko, Müller:
       SchNet: A continuous-filter convolutional neural network for modeling quantum
       interactions.
       In Advances in Neural Information Processing Systems, pp. 992-1002. 2017.
    .. [#schnet3] Schütt, Sauceda, Kindermans, Tkatchenko, Müller:
       SchNet - a deep learning architecture for molecules and materials.
       The Journal of Chemical Physics 148 (24), 241722. 2018.
    """
    def __init__(
        self,
        n_atom_basis=128,
        n_filters=128,
        n_interactions=3,
        cutoff=5.0,
        n_gaussians=25,
        normalize_filter=False,
        coupled_interactions=False,
        return_intermediate=False,
        max_z=100,
        cutoff_network=CosineCutoff,
        trainable_gaussians=False,
        distance_expansion=None,
        charged_systems=False,
    ):
        super(SchNet, self).__init__()
        self.n_atom_basis = n_atom_basis
        # make a lookup table to store embeddings for each element (up to atomic
        # number max_z) each of which is a vector of size n_atom_basis
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
        # layer for computing interatomic distances
        self.distances = AtomDistances()
        # layer for expanding interatomic distances in a basis
        if distance_expansion is None:
            self.distance_expansion = GaussianSmearing(
                0.0, cutoff, n_gaussians, trainable=trainable_gaussians
            )
        else:
            self.distance_expansion = distance_expansion
        # block for computing interaction
        if coupled_interactions:
            # use the same SchNetInteraction instance (hence the same weights)
            # NOTE: list multiplication registers the *same* module object
            # n_interactions times -- that repetition is what ties the weights.
            self.interactions = nn.ModuleList(
                [
                    SchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                ]
                * n_interactions
            )
        else:
            # use one SchNetInteraction instance for each interaction
            self.interactions = nn.ModuleList(
                [
                    SchNetInteraction(
                        n_atom_basis=n_atom_basis,
                        n_spatial_basis=n_gaussians,
                        n_filters=n_filters,
                        cutoff_network=cutoff_network,
                        cutoff=cutoff,
                        normalize_filter=normalize_filter,
                    )
                    for _ in range(n_interactions)
                ]
            )
        # set attributes
        self.return_intermediate = return_intermediate
        self.charged_systems = charged_systems
        if charged_systems:
            # Learnable total-charge embedding, one weight per feature channel,
            # initialised ~ N(0, 1/sqrt(n_atom_basis)).
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
    def forward(self, inputs):
        """Compute atomic representations/embeddings.
        Args:
            inputs (dict of torch.Tensor): SchNetPack dictionary of input tensors.
        Returns:
            torch.Tensor: atom-wise representation.
            list of torch.Tensor: intermediate atom-wise representations, if
            return_intermediate=True was used.
        """
        # get tensors from input dictionary
        atomic_numbers = inputs[Properties.Z]
        positions = inputs[Properties.R]
        cell = inputs[Properties.cell]
        cell_offset = inputs[Properties.cell_offset]
        neighbors = inputs[Properties.neighbors]
        neighbor_mask = inputs[Properties.neighbor_mask]
        atom_mask = inputs[Properties.atom_mask]
        # get atom embeddings for the input atomic numbers
        x = self.embedding(atomic_numbers)
        # NOTE(review): the `False and` below permanently disables the charge
        # embedding even when charged_systems=True was requested (the parameter
        # is still created in __init__). Looks like a debugging leftover --
        # confirm intent before re-enabling.
        if False and self.charged_systems and Properties.charge in inputs.keys():
            n_atoms = torch.sum(atom_mask, dim=1, keepdim=True)
            charge = inputs[Properties.charge] / n_atoms  # B
            charge = charge[:, None] * self.charge  # B x F
            x = x + charge
        # compute interatomic distance of every atom to its neighbors
        r_ij = self.distances(
            positions, neighbors, cell, cell_offset, neighbor_mask=neighbor_mask
        )
        # expand interatomic distances (for example, Gaussian smearing)
        f_ij = self.distance_expansion(r_ij)
        # store intermediate representations
        if self.return_intermediate:
            xs = [x]
        # compute interaction block to update atomic embeddings
        # (each block produces a residual update v that is added onto x)
        for interaction in self.interactions:
            v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                xs.append(x)
        if self.return_intermediate:
            return x, xs
        return x
| 40.677686 | 90 | 0.636022 | import torch
import torch.nn as nn
from schnetpack.nn.base import Dense
from schnetpack import Properties
from schnetpack.nn.cfconv import CFConv
from schnetpack.nn.cutoff import CosineCutoff
from schnetpack.nn.acsf import GaussianSmearing
from schnetpack.nn.neighbors import AtomDistances
from schnetpack.nn.activations import shifted_softplus
class SchNetInteraction(nn.Module):
    """Single SchNet interaction block: continuous-filter convolution + dense layer.

    Produces a residual update for the per-atom feature vectors from the
    (basis-expanded) interatomic distances of each atom's neighborhood.
    """
    def __init__(
        self,
        n_atom_basis,
        n_spatial_basis,
        n_filters,
        cutoff,
        cutoff_network=CosineCutoff,
        normalize_filter=False,
    ):
        super(SchNetInteraction, self).__init__()
        # Filter-generating network: maps the expanded distances
        # (n_spatial_basis features) to one continuous filter per channel.
        self.filter_network = nn.Sequential(
            Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
            Dense(n_filters, n_filters),
        )
        # Smoothly damps neighbor contributions beyond the cutoff radius.
        self.cutoff_network = cutoff_network(cutoff)
        # Continuous-filter convolution over each atom's neighbor list.
        self.cfconv = CFConv(
            n_atom_basis,
            n_filters,
            n_atom_basis,
            self.filter_network,
            cutoff_network=self.cutoff_network,
            activation=shifted_softplus,
            normalize_filter=normalize_filter,
        )
        # Final linear mixing back to the atom-feature dimension (no activation).
        self.dense = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
    def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):
        """Return the residual update for atom features ``x``.

        Args:
            x: per-atom feature vectors.
            r_ij: interatomic distances of each atom to its neighbors.
            neighbors: neighbor index lists.
            neighbor_mask: mask selecting the valid neighbor entries.
            f_ij: optional pre-computed basis expansion of ``r_ij``.
        """
        v = self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij)
        v = self.dense(v)
        return v
class SchNet(nn.Module):
def __init__(
self,
n_atom_basis=128,
n_filters=128,
n_interactions=3,
cutoff=5.0,
n_gaussians=25,
normalize_filter=False,
coupled_interactions=False,
return_intermediate=False,
max_z=100,
cutoff_network=CosineCutoff,
trainable_gaussians=False,
distance_expansion=None,
charged_systems=False,
):
super(SchNet, self).__init__()
self.n_atom_basis = n_atom_basis
self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
self.distances = AtomDistances()
if distance_expansion is None:
self.distance_expansion = GaussianSmearing(
0.0, cutoff, n_gaussians, trainable=trainable_gaussians
)
else:
self.distance_expansion = distance_expansion
if coupled_interactions:
self.interactions = nn.ModuleList(
[
SchNetInteraction(
n_atom_basis=n_atom_basis,
n_spatial_basis=n_gaussians,
n_filters=n_filters,
cutoff_network=cutoff_network,
cutoff=cutoff,
normalize_filter=normalize_filter,
)
]
* n_interactions
)
else:
self.interactions = nn.ModuleList(
[
SchNetInteraction(
n_atom_basis=n_atom_basis,
n_spatial_basis=n_gaussians,
n_filters=n_filters,
cutoff_network=cutoff_network,
cutoff=cutoff,
normalize_filter=normalize_filter,
)
for _ in range(n_interactions)
]
)
self.return_intermediate = return_intermediate
self.charged_systems = charged_systems
if charged_systems:
self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
self.charge.data.normal_(0, 1.0 / n_atom_basis ** 0.5)
def forward(self, inputs):
atomic_numbers = inputs[Properties.Z]
positions = inputs[Properties.R]
cell = inputs[Properties.cell]
cell_offset = inputs[Properties.cell_offset]
neighbors = inputs[Properties.neighbors]
neighbor_mask = inputs[Properties.neighbor_mask]
atom_mask = inputs[Properties.atom_mask]
x = self.embedding(atomic_numbers)
if False and self.charged_systems and Properties.charge in inputs.keys():
n_atoms = torch.sum(atom_mask, dim=1, keepdim=True)
charge = inputs[Properties.charge] / n_atoms
charge = charge[:, None] * self.charge
x = x + charge
r_ij = self.distances(
positions, neighbors, cell, cell_offset, neighbor_mask=neighbor_mask
)
f_ij = self.distance_expansion(r_ij)
if self.return_intermediate:
xs = [x]
for interaction in self.interactions:
v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
x = x + v
if self.return_intermediate:
xs.append(x)
if self.return_intermediate:
return x, xs
return x
| true | true |
1c35157c9fe7539f712f26bd3ec3763008713658 | 2,202 | py | Python | peer/models/person.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | peer/models/person.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | peer/models/person.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Person models
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.model.base import NOCModel
from noc.core.gridvcs.manager import GridVCSField
from noc.core.rpsl import rpsl_format, rpsl_multiple
from noc.core.model.decorator import on_save
from .rir import RIR
@on_save
class Person(NOCModel):
    """RPSL "person"/"role" contact object registered with a RIR.

    The rendered RPSL text is versioned in GridVCS and refreshed after
    every save via the ``@on_save`` hook.
    """
    class Meta(object):
        verbose_name = "Person"
        verbose_name_plural = "Persons"
        db_table = "peer_person"
        app_label = "peer"
    # Unique NIC handle identifying this contact in the RIR database.
    nic_hdl = models.CharField("nic-hdl", max_length=64, unique=True)
    # Display name of the person or role account.
    person = models.CharField("person", max_length=128)
    # Whether this record describes an individual ("P") or a role ("R");
    # selects the leading RPSL attribute in get_rpsl().
    type = models.CharField(
        "type", max_length=1, default="P", choices=[("P", "Person"), ("R", "Role")]
    )
    # Multi-line contact fields; each line becomes a repeated RPSL attribute.
    address = models.TextField("address")
    phone = models.TextField("phone")
    fax_no = models.TextField("fax-no", blank=True, null=True)
    email = models.TextField("email")
    rir = models.ForeignKey(RIR, verbose_name="RIR", on_delete=models.CASCADE)
    # Free-form extra RPSL attributes appended verbatim to the rendered object.
    extra = models.TextField("extra", blank=True, null=True)
    # Versioned storage for the generated RPSL text.
    rpsl = GridVCSField("rpsl_person")
    def __str__(self):
        # NOTE(review): the leading space in the format string looks accidental,
        # but it is user-visible output -- confirm before removing it.
        return " %s (%s)" % (self.nic_hdl, self.person)
    def get_rpsl(self):
        """Render this record as an RPSL person/role object and return the text."""
        s = []
        if self.type == "R":
            s += ["role: %s" % self.person]
        else:
            s += ["person: %s" % self.person]
        s += ["nic-hdl: %s" % self.nic_hdl]
        # rpsl_multiple() expands each line of a multi-line field into its own
        # "attr: value" entry.
        s += rpsl_multiple("address", self.address)
        s += rpsl_multiple("phone", self.phone)
        s += rpsl_multiple("fax-no", self.fax_no)
        s += rpsl_multiple("email", self.email)
        if self.extra:
            s += [self.extra]
        return rpsl_format("\n".join(s))
    def touch_rpsl(self):
        """Write freshly rendered RPSL into GridVCS, but only if it changed."""
        c_rpsl = self.rpsl.read()
        n_rpsl = self.get_rpsl()
        if c_rpsl == n_rpsl:
            return  # Not changed
        self.rpsl.write(n_rpsl)
    def on_save(self):
        # Invoked by the @on_save decorator after each successful model save.
        self.touch_rpsl()
| 32.865672 | 83 | 0.556767 |
from django.db import models
from noc.core.model.base import NOCModel
from noc.core.gridvcs.manager import GridVCSField
from noc.core.rpsl import rpsl_format, rpsl_multiple
from noc.core.model.decorator import on_save
from .rir import RIR
@on_save
class Person(NOCModel):
class Meta(object):
verbose_name = "Person"
verbose_name_plural = "Persons"
db_table = "peer_person"
app_label = "peer"
nic_hdl = models.CharField("nic-hdl", max_length=64, unique=True)
person = models.CharField("person", max_length=128)
type = models.CharField(
"type", max_length=1, default="P", choices=[("P", "Person"), ("R", "Role")]
)
address = models.TextField("address")
phone = models.TextField("phone")
fax_no = models.TextField("fax-no", blank=True, null=True)
email = models.TextField("email")
rir = models.ForeignKey(RIR, verbose_name="RIR", on_delete=models.CASCADE)
extra = models.TextField("extra", blank=True, null=True)
rpsl = GridVCSField("rpsl_person")
def __str__(self):
return " %s (%s)" % (self.nic_hdl, self.person)
def get_rpsl(self):
s = []
if self.type == "R":
s += ["role: %s" % self.person]
else:
s += ["person: %s" % self.person]
s += ["nic-hdl: %s" % self.nic_hdl]
s += rpsl_multiple("address", self.address)
s += rpsl_multiple("phone", self.phone)
s += rpsl_multiple("fax-no", self.fax_no)
s += rpsl_multiple("email", self.email)
if self.extra:
s += [self.extra]
return rpsl_format("\n".join(s))
def touch_rpsl(self):
c_rpsl = self.rpsl.read()
n_rpsl = self.get_rpsl()
if c_rpsl == n_rpsl:
return
self.rpsl.write(n_rpsl)
def on_save(self):
self.touch_rpsl()
| true | true |
1c351733324a5dc157de87d80d6cc8aecc3b785f | 13,804 | py | Python | src/main.py | kppw99/UG_FedAVG | 61f6fcfedfed1136b19c12a6603231cda884e22f | [
"MIT"
] | 3 | 2021-09-23T02:10:17.000Z | 2022-01-16T03:38:34.000Z | src/main.py | kppw99/Uncert_FedAVG | 61f6fcfedfed1136b19c12a6603231cda884e22f | [
"MIT"
] | 1 | 2022-02-25T08:03:34.000Z | 2022-02-25T08:03:34.000Z | src/main.py | kppw99/Uncert_FedAVG | 61f6fcfedfed1136b19c12a6603231cda884e22f | [
"MIT"
] | 1 | 2022-02-23T11:49:25.000Z | 2022-02-23T11:49:25.000Z | from util import *
from model import *
if __name__=='__main__':
    # Parse command-line arguments: dataset name, list of models to run and
    # whether to use the non-corrupted IID / non-IID settings.
    DATASET, MODEL_LIST, IID_NON_COR, NON_IID_NON_COR = arg_parse()
    # Import the per-dataset experiment configuration (BATCH_SIZE, epoch
    # counts, corruption parameter lists, ...) into the global namespace.
    if DATASET == 'mnist':
        from mnist_config import *
    elif DATASET == 'fmnist':
        from fmnist_config import *
    elif DATASET == 'cifar10':
        from cifar10_config import *
    else:
        print('{} is wrong dataset! [mnist|fmnist|cifar10]'.format(DATASET))
        exit(1)
    # Load data. tr_X/tr_y/te_X/te_y are kept pristine below: corrupted
    # variants are always bound to new names so every experiment starts
    # from the same clean dataset.
    tr_X, tr_y, te_X, te_y, pre_X, pre_y = load_data(data=DATASET, pre_train=PRE_TRAIN)
    for MODEL in MODEL_LIST:
        # Centralized Learning
        if MODEL == 'central':
            if IID_NON_COR or NON_IID_NON_COR:
                print('\n===================================')
                print('CUDA:', torch.cuda.is_available())
                print('MODEL:', MODEL)
                print('DATASET:', DATASET)
                print('EPOCHS:', CENTRAL_EPOCHS)
                print('BATCH_SIZE:', BATCH_SIZE)
                print('===================================\n')
                log_name = 'non_corrupted_'
                do_centralize_learning(tr_X, tr_y, te_X, te_y, BATCH_SIZE, CENTRAL_EPOCHS, log_name, DATASET)
                continue
            for DIST in DIST_LIST:
                if DIST == 'iid':
                    # Grid over every IID corruption configuration.
                    for COR_LABEL_RATIO in COR_LABEL_RATIO_LIST:
                        for COR_DATA_RATIO in COR_DATA_RATIO_LIST:
                            for COR_MODE in COR_MODE_LIST:
                                print('\n===================================')
                                print('CUDA:', torch.cuda.is_available())
                                print('MODEL:', MODEL)
                                print('DIST:', DIST)
                                print('DATASET:', DATASET)
                                print('EPOCHS:', CENTRAL_EPOCHS)
                                print('BATCH_SIZE:', BATCH_SIZE)
                                print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
                                print('COR_LABEL_RATIO:', COR_LABEL_RATIO)
                                print('COR_DATA_RATIO:', COR_DATA_RATIO)
                                print('===================================\n')
                                log_name = DIST + '_'
                                log_name += str(int(COR_LOCAL_RATIO * 10)) + '_cor_local_'
                                log_name += str(int(COR_LABEL_RATIO * 100)) + '_cor_label_'
                                log_name += CORRUPTION_MODE[COR_MODE] + '_'
                                if COR_MODE == 2:  # backdoor attack
                                    tr_X_dict, tr_y_dict, te_X_dict, te_y_dict, _, _ = create_backdoor_iid_samples(
                                        tr_X, tr_y, te_X, te_y, target_label=TARGET_LABEL,
                                        cor_local_ratio=1.0,
                                        cor_label_ratio=COR_LABEL_RATIO,
                                        cor_data_ratio=COR_DATA_RATIO,
                                        num_of_sample=1,
                                        verbose=True,
                                        dataset=DATASET
                                    )
                                else:  # label-flipping / noise corruption
                                    tr_X_dict, tr_y_dict, te_X_dict, te_y_dict = create_corrupted_iid_samples(
                                        tr_X, tr_y, te_X, te_y,
                                        cor_local_ratio=1.0,
                                        cor_label_ratio=COR_LABEL_RATIO,
                                        cor_data_ratio=COR_DATA_RATIO,
                                        mode=COR_MODE,
                                        num_of_sample=1,
                                        verbose=True,
                                        dataset=DATASET
                                    )
                                # BUGFIX: bind the corrupted split to local names instead
                                # of rebinding tr_X/tr_y/te_X/te_y. The old code fed each
                                # configuration the output of the previous one (cumulative
                                # corruption) and leaked corrupted data into the federated
                                # experiments further down.
                                cor_tr_X = tr_X_dict['x_train0']
                                cor_tr_y = tr_y_dict['y_train0']
                                cor_te_X = te_X_dict['x_test0']
                                cor_te_y = te_y_dict['y_test0']
                                do_centralize_learning(cor_tr_X, cor_tr_y, cor_te_X, cor_te_y,
                                                       BATCH_SIZE, CENTRAL_EPOCHS, log_name, DATASET)
                else:
                    # Grid over every non-IID corruption configuration.
                    for COR_MINOR_LABEL_CNT in COR_MINOR_LABEL_CNT_LIST:
                        for COR_MINOR_DATA_RATIO in COR_MINOR_DATA_RATIO_LIST:
                            for COR_MODE in COR_MODE_LIST:
                                print('\n===================================')
                                print('CUDA:', torch.cuda.is_available())
                                print('MODEL:', MODEL)
                                print('DIST:', DIST)
                                print('DATASET:', DATASET)
                                print('EPOCHS:', CENTRAL_EPOCHS)
                                print('BATCH_SIZE:', BATCH_SIZE)
                                print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
                                print('PDIST:', PDIST)
                                print('COR_MAJOR_DATA_RATIO:', COR_MAJOR_DATA_RATIO)
                                print('COR_MINOR_LABEL_CNT:', COR_MINOR_LABEL_CNT)
                                print('COR_MINOR_DATA_RATIO:', COR_MINOR_DATA_RATIO)
                                print('===================================\n')
                                log_name = DIST + '_'
                                log_name += str(int(COR_MINOR_LABEL_CNT)) + '_cor_minor_label_'
                                log_name += str(int(COR_MINOR_DATA_RATIO * 100)) + '_cor_minor_data_'
                                log_name += CORRUPTION_MODE[COR_MODE] + '_'
                                if COR_MODE == 2:  # backdoor attack
                                    tr_X_dict, tr_y_dict, te_X_dict, te_y_dict, _, _ = create_backdoor_non_iid_samples(
                                        tr_X, tr_y, te_X, te_y, TARGET_LABEL,
                                        cor_local_ratio=1.0,
                                        cor_minor_label_cnt=COR_MINOR_LABEL_CNT,
                                        cor_major_data_ratio=COR_MAJOR_DATA_RATIO,
                                        cor_minor_data_ratio=COR_MINOR_DATA_RATIO,
                                        pdist=PDIST,
                                        num_of_sample=1,
                                        verbose=True,
                                        dataset=DATASET
                                    )
                                else:  # label-flipping / noise corruption
                                    tr_X_dict, tr_y_dict, te_X_dict, te_y_dict = create_corrupted_non_iid_samples(
                                        tr_X, tr_y, te_X, te_y,
                                        cor_local_ratio=1.0,
                                        cor_minor_label_cnt=COR_MINOR_LABEL_CNT,
                                        cor_major_data_ratio=COR_MAJOR_DATA_RATIO,
                                        cor_minor_data_ratio=COR_MINOR_DATA_RATIO,
                                        mode=COR_MODE,
                                        pdist=PDIST,
                                        num_of_sample=1,
                                        verbose=True,
                                        dataset=DATASET
                                    )
                                # BUGFIX: same as the IID branch above -- never clobber
                                # the clean dataset.
                                cor_tr_X = tr_X_dict['x_train0']
                                cor_tr_y = tr_y_dict['y_train0']
                                cor_te_X = te_X_dict['x_test0']
                                cor_te_y = te_y_dict['y_test0']
                                do_centralize_learning(cor_tr_X, cor_tr_y, cor_te_X, cor_te_y,
                                                       BATCH_SIZE, CENTRAL_EPOCHS, log_name, DATASET)
            continue
        # Federated Learning
        for DIST in DIST_LIST:
            cur_iid_cnt = 0
            cur_non_iid_cnt = 0
            # Totals drive the "run X of Y" progress reporting in the helpers.
            total_common_cnt = len(UNCERT_FEDAVG_LIST) * len(COR_MODE_LIST)
            total_iid_cnt = total_common_cnt * len(COR_DATA_RATIO_LIST) * len(COR_LABEL_RATIO_LIST)
            total_non_iid_cnt = total_common_cnt * len(COR_MINOR_DATA_RATIO_LIST) * len(COR_MINOR_LABEL_CNT_LIST)
            # Whether UG-FedAvg is applied (default 0 -> original FedAvg).
            for UNCERT_FEDAVG in UNCERT_FEDAVG_LIST:
                # IID distribution
                if DIST == 'iid':
                    # Non-corrupted dataset: a single run; break so it is not
                    # repeated for every UNCERT_FEDAVG value.
                    if IID_NON_COR:
                        do_non_corruption(tr_X, tr_y, te_X, te_y,
                                          BATCH_SIZE, IID_ITERATION, IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
                                          DIST, DATASET)
                        break
                    # Corrupted dataset
                    for COR_LABEL_RATIO in COR_LABEL_RATIO_LIST:
                        for COR_DATA_RATIO in COR_DATA_RATIO_LIST:
                            for COR_MODE in COR_MODE_LIST:
                                print('\n===================================')
                                print('CUDA:', torch.cuda.is_available())
                                print('UNCERT_FEDAVG:', FL_ALGO[UNCERT_FEDAVG])
                                print('MODEL:', MODEL)
                                print('DIST:', DIST)
                                print('DATASET:', DATASET)
                                print('NUM_OF_LOCAL:', NUM_OF_LOCAL)
                                print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
                                print('COR_LOCAL_RATIO:', COR_LOCAL_RATIO)
                                print('COR_LABEL_RATIO:', COR_LABEL_RATIO)
                                print('COR_DATA_RATIO:', COR_DATA_RATIO)
                                print('===================================\n')
                                cur_iid_cnt += 1
                                if COR_MODE == 2:  # backdoor attack
                                    do_iid_backdoor(total_iid_cnt, cur_iid_cnt, tr_X, tr_y, te_X, te_y,
                                                    BATCH_SIZE, IID_ITERATION, IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
                                                    COR_LOCAL_RATIO, COR_LABEL_RATIO, COR_DATA_RATIO, TARGET_LABEL,
                                                    DATASET)
                                else:
                                    do_iid_corruption(total_iid_cnt, cur_iid_cnt, tr_X, tr_y, te_X, te_y,
                                                      BATCH_SIZE, IID_ITERATION, IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
                                                      COR_LOCAL_RATIO, COR_LABEL_RATIO, COR_DATA_RATIO, COR_MODE,
                                                      DATASET)
                # Non-IID distribution
                else:
                    # Non-corrupted dataset: single run, see IID note above.
                    if NON_IID_NON_COR:
                        do_non_corruption(tr_X, tr_y, te_X, te_y,
                                          BATCH_SIZE, NON_IID_ITERATION, NON_IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
                                          DIST, DATASET)
                        break
                    # Corrupted dataset
                    for COR_MINOR_LABEL_CNT in COR_MINOR_LABEL_CNT_LIST:
                        for COR_MINOR_DATA_RATIO in COR_MINOR_DATA_RATIO_LIST:
                            for COR_MODE in COR_MODE_LIST:
                                print('\n===================================')
                                print('CUDA:', torch.cuda.is_available())
                                print('UNCERT_FEDAVG:', FL_ALGO[UNCERT_FEDAVG])
                                print('MODEL:', MODEL)
                                print('DIST:', DIST)
                                print('DATASET:', DATASET)
                                print('NUM_OF_LOCAL:', NUM_OF_LOCAL)
                                print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
                                print('PDIST:', PDIST)
                                print('COR_MAJOR_DATA_RATIO:', COR_MAJOR_DATA_RATIO)
                                print('COR_MINOR_LABEL_CNT:', COR_MINOR_LABEL_CNT)
                                print('COR_MINOR_DATA_RATIO:', COR_MINOR_DATA_RATIO)
                                print('===================================\n')
                                cur_non_iid_cnt += 1
                                if COR_MODE == 2:  # backdoor attack
                                    do_non_iid_backdoor(total_non_iid_cnt, cur_non_iid_cnt, tr_X, tr_y, te_X, te_y,
                                                        BATCH_SIZE, NON_IID_ITERATION, NON_IID_EPOCHS,
                                                        NUM_OF_LOCAL, UNCERT_FEDAVG,
                                                        COR_LOCAL_RATIO, COR_MINOR_LABEL_CNT, COR_MAJOR_DATA_RATIO,
                                                        COR_MINOR_DATA_RATIO, PDIST, TARGET_LABEL, DATASET)
                                else:
                                    do_non_iid_corruption(total_non_iid_cnt, cur_non_iid_cnt, tr_X, tr_y, te_X, te_y,
                                                          BATCH_SIZE, NON_IID_ITERATION, NON_IID_EPOCHS,
                                                          NUM_OF_LOCAL, UNCERT_FEDAVG,
                                                          COR_LOCAL_RATIO, COR_MINOR_LABEL_CNT, COR_MAJOR_DATA_RATIO,
                                                          COR_MINOR_DATA_RATIO, PDIST, COR_MODE, DATASET)
| 58.740426 | 121 | 0.407925 | from util import *
from model import *
if __name__=='__main__':
DATASET, MODEL_LIST, IID_NON_COR, NON_IID_NON_COR = arg_parse()
if DATASET == 'mnist':
from mnist_config import *
elif DATASET == 'fmnist':
from fmnist_config import *
elif DATASET == 'cifar10':
from cifar10_config import *
else:
print('{} is wrong dataset! [mnist|fmnist|cifar10]'.format(DATASET))
exit(1)
tr_X, tr_y, te_X, te_y, pre_X, pre_y = load_data(data=DATASET, pre_train=PRE_TRAIN)
    for MODEL in MODEL_LIST:
if MODEL == 'central':
if IID_NON_COR or NON_IID_NON_COR:
print('\n===================================')
print('CUDA:', torch.cuda.is_available())
print('MODEL:', MODEL)
print('DATASET:', DATASET)
print('EPOCHS:', CENTRAL_EPOCHS)
print('BATCH_SIZE:', BATCH_SIZE)
print('===================================\n')
log_name = 'non_corrupted_'
do_centralize_learning(tr_X, tr_y, te_X, te_y, BATCH_SIZE, CENTRAL_EPOCHS, log_name, DATASET)
continue
for DIST in DIST_LIST:
if DIST == 'iid':
for COR_LABEL_RATIO in COR_LABEL_RATIO_LIST:
for COR_DATA_RATIO in COR_DATA_RATIO_LIST:
for COR_MODE in COR_MODE_LIST:
print('\n===================================')
print('CUDA:', torch.cuda.is_available())
print('MODEL:', MODEL)
print('DIST:', DIST)
print('DATASET:', DATASET)
print('EPOCHS:', CENTRAL_EPOCHS)
print('BATCH_SIZE:', BATCH_SIZE)
print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
print('COR_LABEL_RATIO:', COR_LABEL_RATIO)
print('COR_DATA_RATIO:', COR_DATA_RATIO)
print('===================================\n')
log_name = DIST + '_'
log_name += str(int(COR_LOCAL_RATIO * 10)) + '_cor_local_'
log_name += str(int(COR_LABEL_RATIO * 100)) + '_cor_label_'
log_name += CORRUPTION_MODE[COR_MODE] + '_'
if COR_MODE == 2:
tr_X_dict, tr_y_dict, te_X_dict, te_y_dict, _, _ = create_backdoor_iid_samples(
tr_X, tr_y, te_X, te_y, target_label=TARGET_LABEL,
cor_local_ratio=1.0,
cor_label_ratio=COR_LABEL_RATIO,
cor_data_ratio=COR_DATA_RATIO,
num_of_sample=1,
verbose=True,
dataset=DATASET
)
else:
tr_X_dict, tr_y_dict, te_X_dict, te_y_dict = create_corrupted_iid_samples(
tr_X, tr_y, te_X, te_y,
cor_local_ratio=1.0,
cor_label_ratio=COR_LABEL_RATIO,
cor_data_ratio=COR_DATA_RATIO,
mode=COR_MODE,
num_of_sample=1,
verbose=True,
dataset=DATASET
)
tr_X = tr_X_dict['x_train0']
tr_y = tr_y_dict['y_train0']
te_X = te_X_dict['x_test0']
te_y = te_y_dict['y_test0']
do_centralize_learning(tr_X, tr_y, te_X, te_y, BATCH_SIZE, CENTRAL_EPOCHS,
log_name, DATASET)
else:
for COR_MINOR_LABEL_CNT in COR_MINOR_LABEL_CNT_LIST:
for COR_MINOR_DATA_RATIO in COR_MINOR_DATA_RATIO_LIST:
for COR_MODE in COR_MODE_LIST:
print('\n===================================')
print('CUDA:', torch.cuda.is_available())
print('MODEL:', MODEL)
print('DIST:', DIST)
print('DATASET:', DATASET)
print('EPOCHS:', CENTRAL_EPOCHS)
print('BATCH_SIZE:', BATCH_SIZE)
print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
print('PDIST:', PDIST)
print('COR_MAJOR_DATA_RATIO:', COR_MAJOR_DATA_RATIO)
print('COR_MINOR_LABEL_CNT:', COR_MINOR_LABEL_CNT)
print('COR_MINOR_DATA_RATIO:', COR_MINOR_DATA_RATIO)
print('===================================\n')
log_name = DIST + '_'
log_name += str(int(COR_MINOR_LABEL_CNT)) + '_cor_minor_label_'
log_name += str(int(COR_MINOR_DATA_RATIO * 100)) + '_cor_minor_data_'
log_name += CORRUPTION_MODE[COR_MODE] + '_'
if COR_MODE == 2:
tr_X_dict, tr_y_dict, te_X_dict, te_y_dict, _, _ = create_backdoor_non_iid_samples(
tr_X, tr_y, te_X, te_y, TARGET_LABEL,
cor_local_ratio=1.0,
cor_minor_label_cnt=COR_MINOR_LABEL_CNT,
cor_major_data_ratio=COR_MAJOR_DATA_RATIO,
cor_minor_data_ratio=COR_MINOR_DATA_RATIO,
pdist=PDIST,
num_of_sample=1,
verbose=True,
dataset=DATASET
)
else:
tr_X_dict, tr_y_dict, te_X_dict, te_y_dict = create_corrupted_non_iid_samples(
tr_X, tr_y, te_X, te_y,
cor_local_ratio=1.0,
cor_minor_label_cnt=COR_MINOR_LABEL_CNT,
cor_major_data_ratio=COR_MAJOR_DATA_RATIO,
cor_minor_data_ratio=COR_MINOR_DATA_RATIO,
mode=COR_MODE,
pdist=PDIST,
num_of_sample=1,
verbose=True,
dataset=DATASET
)
tr_X = tr_X_dict['x_train0']
tr_y = tr_y_dict['y_train0']
te_X = te_X_dict['x_test0']
te_y = te_y_dict['y_test0']
do_centralize_learning(tr_X, tr_y, te_X, te_y, BATCH_SIZE, CENTRAL_EPOCHS,
log_name, DATASET)
continue
for DIST in DIST_LIST:
cur_iid_cnt = 0
cur_non_iid_cnt = 0
total_common_cnt = len(UNCERT_FEDAVG_LIST) * len(COR_MODE_LIST)
total_iid_cnt = total_common_cnt * len(COR_DATA_RATIO_LIST) * len(COR_LABEL_RATIO_LIST)
total_non_iid_cnt = total_common_cnt * len(COR_MINOR_DATA_RATIO_LIST) * len(COR_MINOR_LABEL_CNT_LIST)
for UNCERT_FEDAVG in UNCERT_FEDAVG_LIST:
if DIST == 'iid':
if IID_NON_COR:
do_non_corruption(tr_X, tr_y, te_X, te_y,
BATCH_SIZE, IID_ITERATION, IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
DIST, DATASET)
break
for COR_LABEL_RATIO in COR_LABEL_RATIO_LIST:
for COR_DATA_RATIO in COR_DATA_RATIO_LIST:
for COR_MODE in COR_MODE_LIST:
print('\n===================================')
print('CUDA:', torch.cuda.is_available())
print('UNCERT_FEDAVG:', FL_ALGO[UNCERT_FEDAVG])
print('MODEL:', MODEL)
print('DIST:', DIST)
print('DATASET:', DATASET)
print('NUM_OF_LOCAL:', NUM_OF_LOCAL)
print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
print('COR_LOCAL_RATIO:', COR_LOCAL_RATIO)
print('COR_LABEL_RATIO:', COR_LABEL_RATIO)
print('COR_DATA_RATIO:', COR_DATA_RATIO)
print('===================================\n')
cur_iid_cnt += 1
if COR_MODE == 2:
do_iid_backdoor(total_iid_cnt, cur_iid_cnt, tr_X, tr_y, te_X, te_y,
BATCH_SIZE, IID_ITERATION, IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
COR_LOCAL_RATIO, COR_LABEL_RATIO, COR_DATA_RATIO, TARGET_LABEL,
DATASET)
else:
do_iid_corruption(total_iid_cnt, cur_iid_cnt, tr_X, tr_y, te_X, te_y,
BATCH_SIZE, IID_ITERATION, IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
COR_LOCAL_RATIO, COR_LABEL_RATIO, COR_DATA_RATIO, COR_MODE,
DATASET)
else:
if NON_IID_NON_COR:
do_non_corruption(tr_X, tr_y, te_X, te_y,
BATCH_SIZE, NON_IID_ITERATION, NON_IID_EPOCHS, NUM_OF_LOCAL, UNCERT_FEDAVG,
DIST, DATASET)
break
for COR_MINOR_LABEL_CNT in COR_MINOR_LABEL_CNT_LIST:
for COR_MINOR_DATA_RATIO in COR_MINOR_DATA_RATIO_LIST:
for COR_MODE in COR_MODE_LIST:
print('\n===================================')
print('CUDA:', torch.cuda.is_available())
print('UNCERT_FEDAVG:', FL_ALGO[UNCERT_FEDAVG])
print('MODEL:', MODEL)
print('DIST:', DIST)
print('DATASET:', DATASET)
print('NUM_OF_LOCAL:', NUM_OF_LOCAL)
print('COR_MODE:', CORRUPTION_MODE[COR_MODE])
print('PDIST:', PDIST)
print('COR_MAJOR_DATA_RATIO:', COR_MAJOR_DATA_RATIO)
print('COR_MINOR_LABEL_CNT:', COR_MINOR_LABEL_CNT)
print('COR_MINOR_DATA_RATIO:', COR_MINOR_DATA_RATIO)
print('===================================\n')
cur_non_iid_cnt += 1
if COR_MODE == 2:
do_non_iid_backdoor(total_non_iid_cnt, cur_non_iid_cnt, tr_X, tr_y, te_X, te_y,
BATCH_SIZE, NON_IID_ITERATION, NON_IID_EPOCHS,
NUM_OF_LOCAL, UNCERT_FEDAVG,
COR_LOCAL_RATIO, COR_MINOR_LABEL_CNT, COR_MAJOR_DATA_RATIO,
COR_MINOR_DATA_RATIO, PDIST, TARGET_LABEL, DATASET)
else:
do_non_iid_corruption(total_non_iid_cnt, cur_non_iid_cnt, tr_X, tr_y, te_X, te_y,
BATCH_SIZE, NON_IID_ITERATION, NON_IID_EPOCHS,
NUM_OF_LOCAL, UNCERT_FEDAVG,
COR_LOCAL_RATIO, COR_MINOR_LABEL_CNT, COR_MAJOR_DATA_RATIO,
COR_MINOR_DATA_RATIO, PDIST, COR_MODE, DATASET)
| true | true |
1c3517ba62458442d409f0861659c1c996a2b301 | 4,814 | py | Python | setup.py | minddistrict/zope.index | 7fd8bbad0584e21c0158e73681bcf99b6bacb699 | [
"ZPL-2.1"
] | null | null | null | setup.py | minddistrict/zope.index | 7fd8bbad0584e21c0158e73681bcf99b6bacb699 | [
"ZPL-2.1"
] | null | null | null | setup.py | minddistrict/zope.index | 7fd8bbad0584e21c0158e73681bcf99b6bacb699 | [
"ZPL-2.1"
] | 1 | 2021-09-29T19:54:14.000Z | 2021-09-29T19:54:14.000Z | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# This package is developed by the Zope Toolkit project, documented here:
# http://docs.zope.org/zopetoolkit
# When developing and releasing this package, please follow the documented
# Zope Toolkit policies as described by this documentation.
##############################################################################
"""Setup for zope.index package
"""
from __future__ import print_function
import sys
import os
from setuptools import setup, find_packages, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError
from distutils.errors import DistutilsExecError
from distutils.errors import DistutilsPlatformError
def read(*rnames):
    """Return the contents of the file at *rnames*, relative to this script."""
    path = os.path.join(os.path.dirname(__file__), *rnames)
    with open(path) as fp:
        return fp.read()
# Build the PyPI long description from the top-level docs. Use the read()
# helper (defined above) so the files are resolved relative to this script
# rather than the CWD and are properly closed -- the previous bare
# open(...).read() calls leaked the file handles.
long_description = (read('README.rst') +
                    '\n\n' +
                    read('CHANGES.rst'))
class optional_build_ext(build_ext):
    """This class subclasses build_ext and allows
    the building of C extensions to fail.

    If the optional C speedup cannot be compiled (e.g. no compiler on the
    host), installation proceeds with the pure-Python implementation
    instead of aborting.
    """
    def run(self):
        # Catch platform-level problems (e.g. no usable compiler toolchain).
        try:
            build_ext.run(self)
        except DistutilsPlatformError as e:
            self._unavailable(e)
    def build_extension(self, ext):
        # Catch per-extension compile/link failures.
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsExecError) as e:
            self._unavailable(e)
    def _unavailable(self, e):
        # Emit a loud, framed warning on stderr but do not fail the install.
        print('*' * 80, file=sys.stderr)
        print("""WARNING:
An optional code optimization (C extension) could not be compiled.
Optimizations for this package will not be available!""",
              file=sys.stderr)
        print('', file=sys.stderr)
        print(e, file=sys.stderr)
        print('*' * 80, file=sys.stderr)
def alltests():
    """Collect every test suite under ``src/`` into a single TestSuite.

    Discovery is delegated to the zope.testrunner machinery, pointed at
    the package's ``src`` directory.
    """
    import os
    import sys
    import unittest
    import zope.testrunner.find
    import zope.testrunner.options
    src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
    options = zope.testrunner.options.get_options(
        sys.argv[:], ["--test-path", src_dir])
    return unittest.TestSuite(list(zope.testrunner.find.find_suites(options)))
setup(name='zope.index',
      version='4.1.1.dev0',
      url='http://pypi.python.org/pypi/zope.index',
      license='ZPL 2.1',
      author='Zope Foundation and Contributors',
      author_email='zope-dev@zope.org',
      description="Indices for using with catalog like text, field, etc.",
      long_description=long_description,
      classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Zope Public License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development',
      ],
      # Source lives under src/ (src-layout).
      packages=find_packages('src'),
      package_dir = {'': 'src'},
      namespace_packages=['zope',],
      extras_require={
          'test': [],
          'tools': ['ZODB', 'transaction']},
      install_requires=[
          'persistent',
          'BTrees',
          'setuptools',
          'six',
          'zope.interface'],
      tests_require = ['zope.testrunner'],
      test_suite = '__main__.alltests',
      # Optional C speedup for okapi scoring; optional_build_ext (above) lets
      # this fail gracefully on hosts without a compiler.
      ext_modules=[
          Extension('zope.index.text.okascore',
                    [os.path.join('src', 'zope', 'index', 'text', 'okascore.c')]),
      ],
      cmdclass = {'build_ext':optional_build_ext},
      include_package_data = True,
      zip_safe=False,
      )
| 36.195489 | 78 | 0.613004 | true | true | |
1c35183f1cdcc7b0458bca42b837355b152e5542 | 443 | py | Python | libs/model/Num3.py | zyfccc/Spectral-Illumination-Correction-Achieving-Relative-Color-Constancy-Under-the-Spectral-Domain | 051af9662dbe53deaf2d493fe8dbf0c9adce7ccb | [
"MIT"
] | 8 | 2019-12-17T15:07:17.000Z | 2021-08-19T09:13:58.000Z | libs/model/Num3.py | zyfccc/Spectral-Illumination-Correction-Achieving-Relative-Color-Constancy-Under-the-Spectral-Domain | 051af9662dbe53deaf2d493fe8dbf0c9adce7ccb | [
"MIT"
] | null | null | null | libs/model/Num3.py | zyfccc/Spectral-Illumination-Correction-Achieving-Relative-Color-Constancy-Under-the-Spectral-Domain | 051af9662dbe53deaf2d493fe8dbf0c9adce7ccb | [
"MIT"
] | 3 | 2020-01-06T04:20:55.000Z | 2020-01-25T08:42:30.000Z |
class Num3:
    """A simple mutable 3-component value (x, y, z) with JSON round-tripping."""
    def __init__(self, x=0, y=0, z=0, json=None):
        # A supplied json mapping takes precedence over the positional values.
        if json is None:
            self.x = x
            self.y = y
            self.z = z
        else:
            self.fromJson(json)
    def toJson(self):
        """Serialize this value to a plain dict with keys 'x', 'y' and 'z'."""
        return dict(x=self.x, y=self.y, z=self.z)
    def fromJson(self, json):
        """Populate this instance in place from a dict carrying 'x', 'y', 'z'."""
        self.x, self.y, self.z = json['x'], json['y'], json['z']
class Num3:
    """Simple 3-component value (x, y, z) that can round-trip through JSON."""
    def __init__(self, x=0, y=0, z=0, json=None):
        # When a JSON dict is supplied it takes precedence over the
        # individual component arguments.
        if json is not None:
            self.fromJson(json)
        else:
            self.x = x
            self.y = y
            self.z = z
    def toJson(self):
        """Return the components as a plain dict with 'x', 'y', 'z' keys."""
        return {
            'x': self.x,
            'y': self.y,
            'z': self.z
        }
    def fromJson(self, json):
        """Load the components from a dict with 'x', 'y', 'z' keys."""
        self.x = json['x']
        self.y = json['y']
        self.z = json['z']
| true | true |
1c3518a52d1603a28901baeebc4e463bea27365a | 407 | py | Python | IncludeVisitor.py | ArmindoFlores/MineScript | 347d7dd61ac1e39e4707210ede98e9c3ca44c891 | [
"MIT"
] | 5 | 2019-07-31T19:20:07.000Z | 2022-02-16T09:48:06.000Z | IncludeVisitor.py | ArmindoFlores/MineScript | 347d7dd61ac1e39e4707210ede98e9c3ca44c891 | [
"MIT"
] | null | null | null | IncludeVisitor.py | ArmindoFlores/MineScript | 347d7dd61ac1e39e4707210ede98e9c3ca44c891 | [
"MIT"
] | null | null | null | from MineScriptVisitor import MineScriptVisitor
class IncludeVisitor(MineScriptVisitor):
    """Parse-tree visitor that collects the modules referenced by includes.

    Each recorded entry is a ``(module_name, line_number)`` tuple; a module
    is recorded only once, at the line where it is first included.
    """

    def __init__(self):
        # Ordered list of (module_name, line_number) tuples.
        self.modules = []

    def add_module(self, module, line):
        """Record *module* (first seen at *line*) unless already recorded.

        Fix: the previous membership test compared the module name string
        against the stored (name, line) tuples, so it never matched and
        duplicate includes were appended.
        """
        if module not in (name for name, _ in self.modules):
            self.modules.append((module, line))

    def visitInclude(self, ctx):
        """Record the module named in an include statement."""
        name = ctx.ID().getText()
        self.add_module(name, ctx.start.line)
| 25.4375 | 48 | 0.619165 | from MineScriptVisitor import MineScriptVisitor
class IncludeVisitor(MineScriptVisitor):
    """Parse-tree visitor that records included module names."""
    def __init__(self):
        # List of (module_name, line_number) tuples, in first-seen order.
        self.modules = []
    def add_module(self, module, line):
        # NOTE(review): 'module' is a string but self.modules holds
        # (name, line) tuples, so this membership test never matches and
        # duplicates are appended — confirm the intended de-duplication.
        if module not in self.modules:
            self.modules.append((module, line))
    def visitInclude(self, ctx):
        # Record the module referenced by this include statement.
        name = ctx.ID().getText()
        self.add_module(name, ctx.start.line)
| true | true |
1c3518d2f2a5ab734aa304864fde549bb568a3b1 | 1,262 | gyp | Python | binding.gyp | ismailrei/devisPattern | b20dd604dcfa609fec4dd1d4a14129f604b5870e | [
"MIT"
] | 1 | 2017-11-06T08:23:54.000Z | 2017-11-06T08:23:54.000Z | binding.gyp | ismailrei/devisPattern | b20dd604dcfa609fec4dd1d4a14129f604b5870e | [
"MIT"
] | null | null | null | binding.gyp | ismailrei/devisPattern | b20dd604dcfa609fec4dd1d4a14129f604b5870e | [
"MIT"
] | null | null | null | {
"targets": [
{
"target_name": "addon",
"sources": [
"addon.cpp",
"devisPattern.cpp"
],
"cflags" : [ "-std=c++11"],
"cflags!": [ '-fno-exceptions' ],
"cflags_cc!": [ '-fno-exceptions' ],
"conditions": [
[ 'OS!="win"', {
"cflags+": [ "-std=c++11" ],
"cflags_c+": [ "-std=c++11" ],
"cflags_cc+": [ "-std=c++11" ],
}],
[ 'OS=="mac"', {
"xcode_settings": {
"OTHER_CPLUSPLUSFLAGS" : [ "-std=c++11", "-stdlib=libc++" ],
"OTHER_LDFLAGS": [ "-stdlib=libc++" ],
"MACOSX_DEPLOYMENT_TARGET": "10.7"
},
}],
],
}
]
}
| 42.066667 | 108 | 0.211569 | {
"targets": [
{
"target_name": "addon",
"sources": [
"addon.cpp",
"devisPattern.cpp"
],
"cflags" : [ "-std=c++11"],
"cflags!": [ '-fno-exceptions' ],
"cflags_cc!": [ '-fno-exceptions' ],
"conditions": [
[ 'OS!="win"', {
"cflags+": [ "-std=c++11" ],
"cflags_c+": [ "-std=c++11" ],
"cflags_cc+": [ "-std=c++11" ],
}],
[ 'OS=="mac"', {
"xcode_settings": {
"OTHER_CPLUSPLUSFLAGS" : [ "-std=c++11", "-stdlib=libc++" ],
"OTHER_LDFLAGS": [ "-stdlib=libc++" ],
"MACOSX_DEPLOYMENT_TARGET": "10.7"
},
}],
],
}
]
}
| true | true |
1c3519e8931da5c128759a4b15790e3e7153cd00 | 11,342 | py | Python | src/microprobe/utils/objdump.py | TheArni/microprobe | 46d17a9744b943bb448fc5e2872f3521084d8bec | [
"Apache-2.0"
] | 13 | 2018-09-06T05:16:08.000Z | 2022-03-07T23:03:46.000Z | src/microprobe/utils/objdump.py | TheArni/microprobe | 46d17a9744b943bb448fc5e2872f3521084d8bec | [
"Apache-2.0"
] | 24 | 2018-07-10T01:56:10.000Z | 2022-02-22T22:38:25.000Z | src/microprobe/utils/objdump.py | TheArni/microprobe | 46d17a9744b943bb448fc5e2872f3521084d8bec | [
"Apache-2.0"
] | 12 | 2018-09-06T13:58:24.000Z | 2022-01-27T21:15:39.000Z | # Copyright 2011-2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`microprobe.utils.obdump` module
This module implements the required features to interpret objdump assembly
dumps and translate them into Microprobe internal represenation of instruction,
operands, labels and addreses, which can be translated to other formats (e.g.
MPT) afterwards.
The main elements of this module are the following:
- :func:`~.interpret_objdump` function parses the objdump output and
translates it into internal Microprobe represenation of instructions and
operands, etc.
"""
# Futures
from __future__ import absolute_import, print_function
# Built-in modules
import re
# Third party modules
from six.moves import zip
# Own modules
from microprobe import MICROPROBE_RC
from microprobe.code.address import Address
from microprobe.code.ins import MicroprobeInstructionDefinition
from microprobe.exceptions import MicroprobeAsmError, \
MicroprobeBinaryError, MicroprobeObjdumpError
from microprobe.utils.asm import interpret_asm
from microprobe.utils.logger import get_logger
from microprobe.utils.misc import Progress
from microprobe.utils.mpt import MicroprobeTestVariableDefinition
# Constants
LOG = get_logger(__name__)
__all__ = ["interpret_objdump"]
# Functions
def interpret_objdump(
    objdump_output,
    target,
    strict=False,
    sections=None,
    start_address=-1,
    end_address=float('+inf')
):
    """
    Returns a :class:`~.MicroprobeTestDefinition` object
    that results from interpreting the objdump output.

    The *target* object is used to validate the existence of the instruction
    and operands.

    :param objdump_output: Assembly to interpret (a string or a list of
        lines)
    :type objdump_output: Objdump textual output
    :param target: Target definition
    :type target: :class:`~.Target` object
    :param strict: If set, fail if an opcode can not be interpreted.
        (Default: False)
    :type strict: :class:`~.bool`
    :param sections: List of section names to parse
    :type sections: :class:`~.list` of :class:`~.str`
    :param start_address: Start address to interpret
    :type start_address: ::class:`~.int`
    :param end_address: End address to interpret
    :type end_address: ::class:`~.int`
    :return: A (variable definitions, required definitions, instruction
        definitions) tuple representing the microprobe test
    :rtype: :class:`~.tuple`
    :raise microprobe.exceptions.MicroprobeObjdumpError: if something is wrong
        during the interpretation of the objdump
    """
    if not strict:
        # Non-strict mode: allow raw binary fallbacks while decoding.
        MICROPROBE_RC['safe_bin'] = True

    if isinstance(objdump_output, str):
        # Normalize line endings and process line by line.
        objdump_output = objdump_output.replace('\r', '\n')
        objdump_output = objdump_output.split('\n')

    filtered_objdump_output = _objdump_cleanup(
        objdump_output, sections, start_address, end_address
    )

    # Code labels come from '<addr> <name>:' headers; variable labels are
    # the remaining '<name>' references found in operands.
    code_labels = _find_code_labels(filtered_objdump_output)
    var_labels = _find_var_labels(filtered_objdump_output, code_labels)
    labels = code_labels + var_labels
    label_pattern = _generate_label_pattern(labels)

    binary_format = _binary_reformat(filtered_objdump_output)
    asm_format = _asm_reformat(filtered_objdump_output)
    assert len(binary_format) == len(asm_format)

    instr_defs = []
    current_labels = var_labels[:]
    progress = Progress(len(binary_format), msg="Lines parsed:")
    for binary, asm in zip(binary_format, asm_format):
        instr_def = None
        if not label_pattern.search(asm):
            # No label reference: decode straight from the binary encoding.
            try:
                instr_def = interpret_asm(binary, target, current_labels)
            except MicroprobeBinaryError:
                if strict:
                    raise MicroprobeObjdumpError(
                        "Unable to interpret binary '%s' (asm:' %s')" %
                        (binary, asm)
                    )
                else:
                    LOG.warning(
                        "Skipping binary '%s' (asm:' %s')", binary, asm
                    )
                    instr_def = None
        else:
            # The assembly references a label: prefer interpreting the
            # assembly text (keeping the symbolic reference) and fall back
            # to the binary encoding if that fails.
            try:
                instr_def = interpret_asm(
                    asm, target, current_labels,
                    log=False
                )
            except MicroprobeAsmError:
                instr_def = interpret_asm(binary, target, current_labels)

        if instr_def is not None:
            fixed_instr_def = _fix_instr_definition(instr_def[0], asm, target)
            instr_defs.append(fixed_instr_def)
            if fixed_instr_def.label is not None:
                current_labels.append(fixed_instr_def.label)
        progress()

    variable_defs = []
    required_defs = []
    for var_label in var_labels:
        var_def = _interpret_variable(var_label, objdump_output)
        if var_def is not None:
            variable_defs.append(var_def)
        else:
            # Fix: pass 'var_label' so the '%s' placeholder is filled
            # (the argument was previously missing, which makes the
            # logging module report a formatting error).
            LOG.warning(
                "Variable label: '%s' referenced but not found "
                "in the dump", var_label
            )
            required_defs.append(_default_variable(var_label))

    return variable_defs, required_defs, instr_defs
def _asm_reformat(input_str):
output = []
symbol_line = ""
for line in input_str:
match = re.search("^[0-9a-fA-F]+ <(.*.)>:$", line)
if match is not None:
symbol_line = line
else:
mline = symbol_line + " " + " ".join(line.split('\t')[1:])
symbol_line = ""
output.append(mline.strip())
return output
def _binary_reformat(input_str):
output = []
symbol_line = ""
for line in input_str:
match = re.search("^[0-9a-fA-F]+ <(.*.)>:$", line)
if match is not None:
symbol_line = line
else:
mline = symbol_line + " 0x" + line.split('\t')[0].replace(" ", "")
symbol_line = ""
output.append(mline.strip())
return output
def _default_variable(var_name):
    """Return a minimal placeholder definition for *var_name*.

    Used for labels that are referenced by the code but whose contents were
    not found in the dump: a 1-byte 'char' with no address, alignment or
    initial value.
    """
    return MicroprobeTestVariableDefinition(
        var_name, "char", 1, None, None, None
    )
def _find_code_labels(input_text):
    """Collect unique symbol names from '<hexaddr> <name>:' header lines.

    Names are sanitized (trailing +0x/-0x offsets removed) and returned in
    first-seen order, without duplicates.
    """
    header_regex = re.compile("^[0-9a-fA-F]+ <(.*.)>:$")
    found = []
    for line in input_text:
        header = header_regex.search(line)
        if header is None:
            continue
        name = _sanitize_label(header.group(1))
        if name not in found:
            found.append(name)
    return found
def _find_var_labels(input_text, code_labels):
    """Collect '<name>' references that are not code labels.

    Any '<...>' reference whose sanitized name is not already in
    *code_labels* is assumed to name a variable; names are returned in
    first-seen order, without duplicates.
    """
    ref_regex = re.compile("^.*<(.*.)>.*$")
    found = []
    for line in input_text:
        ref = ref_regex.search(line)
        if ref is None:
            continue
        name = _sanitize_label(ref.group(1))
        if name not in found and name not in code_labels:
            found.append(name)
    return found
def _fix_instr_definition(instr_def, asm, target):
    """Re-resolve label-relative operands of a decoded instruction.

    When the objdump assembly *asm* references a symbol ('<label>'),
    address-relative operands are replaced by a symbolic :class:`~.Address`
    based on that label and the assembly string is regenerated via the
    *target*. Raw (undecoded) instructions are returned untouched.
    """
    # Raw byte blobs have no operands to fix.
    if instr_def.instruction_type.name == 'raw':
        return instr_def
    # Extract the referenced label from the assembly text, if any.
    labelmatch = re.search("^.*<(.*.)>.*$", asm)
    label = None
    if labelmatch is not None:
        label = labelmatch.group(1)
    instruction = target.new_instruction(instr_def.instruction_type.name)
    operands = list(instr_def.operands)
    for idx, operand in enumerate(instruction.operands()):
        # Replace relative operands with a symbolic address at the label
        # (labels are upper-cased for the internal representation).
        if operand.type.address_relative and label is not None:
            operands[idx] = Address(base_address=label.upper())
        operand.set_value(operands[idx])
    # Rebuild the definition with the fixed operands and the regenerated
    # assembly string.
    instr_def = MicroprobeInstructionDefinition(
        instr_def.instruction_type, operands, instr_def.label,
        instr_def.address, instruction.assembly(), instr_def.decorators,
        instr_def.comments
    )
    return instr_def
def _generate_label_pattern(labels):
regex = []
for label in labels:
regex.append("^.*<%s[+-]*[0-9a-fA-F]*.*>.*$" % label)
regex_str = "|".join(regex)
pattern = re.compile(regex_str)
return pattern
def _interpret_variable(var_name, input_str):
    """Extract the definition of variable *var_name* from the dump lines.

    Scans all sections for the '<addr> <var_name>:' header and collects the
    raw bytes that follow it (until the next symbol header).

    :return: A :class:`~.MicroprobeTestVariableDefinition`, or ``None`` when
        the label is not present in the dump.
    """
    input_str = _objdump_cleanup(input_str, ['ALL'])
    init_value = []
    address = None
    dump = False
    for line in input_str:
        if line.endswith(" <%s>:" % var_name):
            # Found the variable header; start collecting its bytes.
            dump = True
            address = int(line.split(' ')[0], 16)
            continue
        elif dump and line.endswith(':'):
            # Next symbol header: the variable contents are finished.
            break
        elif not dump:
            continue
        init_value.extend(
            int(elem, 16) for elem in line.split('\t')[0].strip().split(' ')
        )
    if dump is False:
        return None
    len_init_value = len(init_value)
    # Guard against an empty byte list (header with no data lines), which
    # previously raised an IndexError below.
    if init_value and init_value[1:] == init_value[:-1]:
        # All bytes identical: keep a single repeated value.
        init_value = [init_value[0]]
    if not init_value or init_value == [0]:
        # Zero-initialized (or empty) contents need no explicit init value.
        init_value = None
    return MicroprobeTestVariableDefinition(
        var_name, "char", len_init_value, address, None, init_value
    )
def _objdump_cleanup(
input_text,
sections, start_address=0,
end_address=float('+inf')
):
if sections is None:
sections = ['.text']
all_sections = "ALL" in sections
# Remove uneded output
output_text = []
current_section = ""
for line in input_text:
if (
line.strip() == "" or line.find("file format elf") > -1 or
line.find("file format aix") > -1 or line.find("...") > -1
):
continue
if line.startswith("Disassembly of section"):
current_section = line.split(" ")[3][:-1]
continue
if "file format " in line:
continue
if (current_section in sections) or all_sections:
output_text.append(line.replace('@', '_'))
# Remove uneded addresses
def _is_hex(mstr):
return re.search(r"^[0-9a-fA-F]+$", mstr.strip()) is not None
input_text = output_text[:]
output_text = []
for line in input_text:
line = line.strip()
tabbed = line.split("\t")
if tabbed[0][-1] == ':' and len(tabbed) == 1:
if (
int(
tabbed[0].split(' ')[0], 16
) >= start_address and int(
tabbed[0].split(' ')[0], 16
) <= end_address
):
output_text.append(line)
elif len(tabbed) in [3, 4] and _is_hex(tabbed[0][:-1]):
if (
int(
tabbed[0][:-1], 16
) >= start_address and int(
tabbed[0][:-1], 16
) <= end_address
):
output_text.append("\t".join(tabbed[1:]))
else:
raise MicroprobeObjdumpError(
"Unable to parse line '%s' from input file." % line
)
if not output_text:
raise MicroprobeObjdumpError(
"Empty input. Check if the address ranges and/or the section names"
" provided exist in the input"
)
return output_text
def _sanitize_label(mstr):
# mstr = mstr.split('@')[0]
match = re.search("(^.*)[+-]0x.*$", mstr)
if match is not None:
mstr = match.group(1)
return mstr
# Classes
| 28.21393 | 79 | 0.615059 |
from __future__ import absolute_import, print_function
import re
from six.moves import zip
from microprobe import MICROPROBE_RC
from microprobe.code.address import Address
from microprobe.code.ins import MicroprobeInstructionDefinition
from microprobe.exceptions import MicroprobeAsmError, \
MicroprobeBinaryError, MicroprobeObjdumpError
from microprobe.utils.asm import interpret_asm
from microprobe.utils.logger import get_logger
from microprobe.utils.misc import Progress
from microprobe.utils.mpt import MicroprobeTestVariableDefinition
LOG = get_logger(__name__)
__all__ = ["interpret_objdump"]
def interpret_objdump(
objdump_output,
target,
strict=False,
sections=None,
start_address=-1,
end_address=float('+inf')
):
if not strict:
MICROPROBE_RC['safe_bin'] = True
if isinstance(objdump_output, str):
objdump_output = objdump_output.replace('\r', '\n')
objdump_output = objdump_output.split('\n')
filtered_objdump_output = _objdump_cleanup(
objdump_output, sections, start_address, end_address
)
code_labels = _find_code_labels(filtered_objdump_output)
var_labels = _find_var_labels(filtered_objdump_output, code_labels)
labels = code_labels + var_labels
label_pattern = _generate_label_pattern(labels)
binary_format = _binary_reformat(filtered_objdump_output)
asm_format = _asm_reformat(filtered_objdump_output)
assert len(binary_format) == len(asm_format)
instr_defs = []
current_labels = var_labels[:]
progress = Progress(len(binary_format), msg="Lines parsed:")
for binary, asm in zip(binary_format, asm_format):
instr_def = None
if not label_pattern.search(asm):
try:
instr_def = interpret_asm(binary, target, current_labels)
except MicroprobeBinaryError:
if strict:
raise MicroprobeObjdumpError(
"Unable to interpret binary '%s' (asm:' %s')" %
(binary, asm)
)
else:
LOG.warning("Skiping binary '%s' (asm:' %s')", binary, asm)
instr_def = None
else:
try:
instr_def = interpret_asm(
asm, target, current_labels,
log=False
)
except MicroprobeAsmError:
instr_def = interpret_asm(binary, target, current_labels)
if instr_def is not None:
fixed_instr_def = _fix_instr_definition(instr_def[0], asm, target)
instr_defs.append(fixed_instr_def)
if fixed_instr_def.label is not None:
current_labels.append(fixed_instr_def.label)
progress()
variable_defs = []
required_defs = []
for var_label in var_labels:
var_def = _interpret_variable(var_label, objdump_output)
if var_def is not None:
variable_defs.append(var_def)
else:
LOG.warning(
"Variable label: '%s' referenced but not found "
"in the dump"
)
required_defs.append(_default_variable(var_label))
return variable_defs, required_defs, instr_defs
def _asm_reformat(input_str):
output = []
symbol_line = ""
for line in input_str:
match = re.search("^[0-9a-fA-F]+ <(.*.)>:$", line)
if match is not None:
symbol_line = line
else:
mline = symbol_line + " " + " ".join(line.split('\t')[1:])
symbol_line = ""
output.append(mline.strip())
return output
def _binary_reformat(input_str):
output = []
symbol_line = ""
for line in input_str:
match = re.search("^[0-9a-fA-F]+ <(.*.)>:$", line)
if match is not None:
symbol_line = line
else:
mline = symbol_line + " 0x" + line.split('\t')[0].replace(" ", "")
symbol_line = ""
output.append(mline.strip())
return output
def _default_variable(var_name):
return MicroprobeTestVariableDefinition(
var_name, "char", 1, None, None, None
)
def _find_code_labels(input_text):
labels = []
for line in input_text:
match = re.search("^[0-9a-fA-F]+ <(.*.)>:$", line)
if match is not None:
label = _sanitize_label(match.group(1))
if label not in labels:
labels.append(label)
return labels
def _find_var_labels(input_text, code_labels):
labels = []
for line in input_text:
match = re.search("^.*<(.*.)>.*$", line)
if match is not None:
label = _sanitize_label(match.group(1))
if label not in labels and label not in code_labels:
labels.append(label)
return labels
def _fix_instr_definition(instr_def, asm, target):
if instr_def.instruction_type.name == 'raw':
return instr_def
labelmatch = re.search("^.*<(.*.)>.*$", asm)
label = None
if labelmatch is not None:
label = labelmatch.group(1)
instruction = target.new_instruction(instr_def.instruction_type.name)
operands = list(instr_def.operands)
for idx, operand in enumerate(instruction.operands()):
if operand.type.address_relative and label is not None:
operands[idx] = Address(base_address=label.upper())
operand.set_value(operands[idx])
instr_def = MicroprobeInstructionDefinition(
instr_def.instruction_type, operands, instr_def.label,
instr_def.address, instruction.assembly(), instr_def.decorators,
instr_def.comments
)
return instr_def
def _generate_label_pattern(labels):
regex = []
for label in labels:
regex.append("^.*<%s[+-]*[0-9a-fA-F]*.*>.*$" % label)
regex_str = "|".join(regex)
pattern = re.compile(regex_str)
return pattern
def _interpret_variable(var_name, input_str):
input_str = _objdump_cleanup(input_str, ['ALL'])
init_value = []
address = None
dump = False
for line in input_str:
if line.endswith(" <%s>:" % var_name):
dump = True
address = int(line.split(' ')[0], 16)
continue
elif dump and line.endswith(':'):
break
elif not dump:
continue
init_value.extend(
int(elem, 16) for elem in line.split('\t')[0].strip().split(' ')
)
if dump is False:
return None
len_init_value = len(init_value)
if init_value[1:] == init_value[:-1]:
init_value = [init_value[0]]
if init_value == [0]:
init_value = None
return MicroprobeTestVariableDefinition(
var_name, "char", len_init_value, address, None, init_value
)
def _objdump_cleanup(
input_text,
sections, start_address=0,
end_address=float('+inf')
):
if sections is None:
sections = ['.text']
all_sections = "ALL" in sections
output_text = []
current_section = ""
for line in input_text:
if (
line.strip() == "" or line.find("file format elf") > -1 or
line.find("file format aix") > -1 or line.find("...") > -1
):
continue
if line.startswith("Disassembly of section"):
current_section = line.split(" ")[3][:-1]
continue
if "file format " in line:
continue
if (current_section in sections) or all_sections:
output_text.append(line.replace('@', '_'))
def _is_hex(mstr):
return re.search(r"^[0-9a-fA-F]+$", mstr.strip()) is not None
input_text = output_text[:]
output_text = []
for line in input_text:
line = line.strip()
tabbed = line.split("\t")
if tabbed[0][-1] == ':' and len(tabbed) == 1:
if (
int(
tabbed[0].split(' ')[0], 16
) >= start_address and int(
tabbed[0].split(' ')[0], 16
) <= end_address
):
output_text.append(line)
elif len(tabbed) in [3, 4] and _is_hex(tabbed[0][:-1]):
if (
int(
tabbed[0][:-1], 16
) >= start_address and int(
tabbed[0][:-1], 16
) <= end_address
):
output_text.append("\t".join(tabbed[1:]))
else:
raise MicroprobeObjdumpError(
"Unable to parse line '%s' from input file." % line
)
if not output_text:
raise MicroprobeObjdumpError(
"Empty input. Check if the address ranges and/or the section names"
" provided exist in the input"
)
return output_text
def _sanitize_label(mstr):
match = re.search("(^.*)[+-]0x.*$", mstr)
if match is not None:
mstr = match.group(1)
return mstr
| true | true |
1c3519f6988a89d9216812d575f2b01c4a4a00ec | 3,246 | py | Python | sigmaproject/computer_vision/convolution.py | k-zen/SigmaProject | b844766d28d142ed1fb4d2e20f4e9dbad0ad90a6 | [
"BSD-2-Clause"
] | null | null | null | sigmaproject/computer_vision/convolution.py | k-zen/SigmaProject | b844766d28d142ed1fb4d2e20f4e9dbad0ad90a6 | [
"BSD-2-Clause"
] | 8 | 2020-04-27T19:31:23.000Z | 2021-08-06T19:43:46.000Z | sigmaproject/computer_vision/convolution.py | k-zen/SigmaProject | b844766d28d142ed1fb4d2e20f4e9dbad0ad90a6 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Andreas Koenzen <akoenzen | uvic.ca>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from .utilities import Utilities
from typing import Dict
class Convolution(object):
    """
    Class for computing 2D convolutions with a set of predefined 3x3 kernels.
    """

    # Identifiers for the predefined kernels below.
    IDENTITY = 1
    EDGES_1 = 2
    EDGES_2 = 3
    EDGES_3 = 4

    # Predefined kernels, indexed by the identifiers above.
    KERNELS: Dict[int, np.array] = {
        IDENTITY: np.array([
            [+0, +0, +0],
            [+0, +1, +0],
            [+0, +0, +0]
        ]),
        EDGES_1: np.array([
            [+1, +0, -1],
            [+0, +0, +0],
            [-1, +0, +1]
        ]),
        EDGES_2: np.array([
            [+0, +1, +0],
            [+1, -4, +1],
            [+0, +1, +0]
        ]),
        EDGES_3: np.array([
            [-1, -1, -1],
            [-1, +8, -1],
            [-1, -1, -1]
        ])
    }

    # Flag to enable debug mode.
    DEBUG = False

    def __init__(self):
        pass

    @staticmethod
    def convolution2d(img: np.array,
                      kernel: np.array,
                      padding: int,
                      normalize_pixels: bool = True) -> np.array:
        """
        Performs a 'valid' convolution in 2 dimensions with stride 1.

        :param img: 2D input image.
        :param kernel: 2D convolution kernel (may be non-square).
        :param padding: kept for interface compatibility; the input is NOT
            actually padded, so the output always has 'valid' size
            (img_dim - kernel_dim + 1) per dimension.
        :param normalize_pixels: when True, normalize the output to pixel
            range via Utilities.normalize_pixels.
        :return: The convolved image.
        """
        stride: int = 1

        # Padded-size formula; subtracting 2*padding below cancels the
        # padding term, yielding the 'valid' output size.
        w = int(((img.shape[0] - kernel.shape[0] + 2 * padding) / stride) + 1)
        h = int(((img.shape[1] - kernel.shape[1] + 2 * padding) / stride) + 1)

        out = np.zeros((w - 2 * padding, h - 2 * padding))
        for i in range(0, img.shape[0] - kernel.shape[0] + 1, stride):
            for j in range(0, img.shape[1] - kernel.shape[1] + 1, stride):
                # Fix: the column slice must use kernel.shape[1]; using
                # shape[0] for both dimensions broke non-square kernels.
                rec_field = img[i: i + kernel.shape[0], j: j + kernel.shape[1]]
                out[i, j] = np.sum(rec_field * kernel)

        return Utilities.normalize_pixels(out) if normalize_pixels else out
| 32.46 | 108 | 0.603204 |
import numpy as np
from .utilities import Utilities
from typing import Dict
class Convolution(object):
    """2D convolution helper with a set of predefined 3x3 kernels."""
    # Identifiers for the predefined kernels below.
    IDENTITY = 1
    EDGES_1 = 2
    EDGES_2 = 3
    EDGES_3 = 4
    # Predefined kernels, indexed by the identifiers above.
    KERNELS: Dict[int, np.array] = {
        IDENTITY: np.array([
            [+0, +0, +0],
            [+0, +1, +0],
            [+0, +0, +0]
        ]),
        EDGES_1: np.array([
            [+1, +0, -1],
            [+0, +0, +0],
            [-1, +0, +1]
        ]),
        EDGES_2: np.array([
            [+0, +1, +0],
            [+1, -4, +1],
            [+0, +1, +0]
        ]),
        EDGES_3: np.array([
            [-1, -1, -1],
            [-1, +8, -1],
            [-1, -1, -1]
        ])
    }
    # Flag to enable debug mode.
    DEBUG = False
    def __init__(self):
        pass
    @staticmethod
    def convolution2d(img: np.array,
                      kernel: np.array,
                      padding: int,
                      normalize_pixels: bool = True) -> np.array:
        """Perform a stride-1 2D convolution and return the result.

        NOTE(review): 'padding' only enters the size formulas and is
        subtracted back out below — the input is never actually padded,
        so the parameter is effectively a no-op; confirm intent.
        """
        stride: int = 1
        w = int(((img.shape[0] - kernel.shape[0] + 2 * padding) / stride) + 1)
        h = int(((img.shape[1] - kernel.shape[1] + 2 * padding) / stride) + 1)
        out = np.zeros((w - 2 * padding, h - 2 * padding))
        for i in range(0, img.shape[0] - kernel.shape[0] + 1, stride):
            for j in range(0, img.shape[1] - kernel.shape[1] + 1, stride):
                # NOTE(review): both slice dimensions use kernel.shape[0],
                # which is only correct for square kernels — verify.
                rec_field = img[i: i + kernel.shape[0], j: j + kernel.shape[0]]
                out[i, j] = np.sum(rec_field * kernel)
        return Utilities.normalize_pixels(out) if normalize_pixels else out
| true | true |
1c351ba2cb26f12b8aa12dea1aabed6bfb5db532 | 790 | py | Python | allennlp/data/tokenizers/whitespace_tokenizer.py | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 | [
"Apache-2.0"
] | 2 | 2021-04-27T19:56:28.000Z | 2021-08-19T05:34:37.000Z | allennlp/data/tokenizers/whitespace_tokenizer.py | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 | [
"Apache-2.0"
] | 5 | 2021-05-03T14:40:33.000Z | 2021-05-03T14:40:34.000Z | allennlp/data/tokenizers/whitespace_tokenizer.py | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 | [
"Apache-2.0"
] | 2 | 2019-12-21T05:58:44.000Z | 2021-08-16T07:41:21.000Z | from typing import List
from overrides import overrides
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("whitespace")
@Tokenizer.register("just_spaces")
class WhitespaceTokenizer(Tokenizer):
    """
    Splits already-tokenized text on whitespace.

    The input is assumed to have been tokenized elsewhere, with tokens
    separated by whitespace; each whitespace-delimited chunk becomes one
    `Token`. Runs of whitespace are collapsed by `str.split()`, so spaces
    never appear as tokens themselves.
    """

    @overrides
    def tokenize(self, text: str) -> List[Token]:
        pieces = text.split()
        return [Token(piece) for piece in pieces]
| 32.916667 | 96 | 0.743038 | from typing import List
from overrides import overrides
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("whitespace")
@Tokenizer.register("just_spaces")
class WhitespaceTokenizer(Tokenizer):
    """Tokenizer for text that was already tokenized: splits on whitespace."""
    @overrides
    def tokenize(self, text: str) -> List[Token]:
        # str.split() with no argument splits on any whitespace run, so the
        # amount of whitespace between tokens does not matter.
        return [Token(t) for t in text.split()]
| true | true |
1c351c9423d609c42c4be79fbf5316d3b85b0cc5 | 17,886 | py | Python | corehq/apps/accounting/migrations/0010_auto__chg_field_billingaccount_name.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/apps/accounting/migrations/0010_auto__chg_field_billingaccount_name.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/accounting/migrations/0010_auto__chg_field_billingaccount_name.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: widen ``BillingAccount.name`` from 40 to 200 characters."""
        # Changing field 'BillingAccount.name'
        db.alter_column(u'accounting_billingaccount', 'name', self.gf('django.db.models.fields.CharField')(max_length=200))
def backwards(self, orm):
# Changing field 'BillingAccount.name'
db.alter_column(u'accounting_billingaccount', 'name', self.gf('django.db.models.fields.CharField')(max_length=40))
models = {
u'accounting.billingaccount': {
'Meta': {'object_name': 'BillingAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CONTRACT'", 'max_length': '25'}),
'billing_admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.BillingAccountAdmin']", 'null': 'True', 'symmetrical': 'False'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'created_by_domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto_invoiceable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'salesforce_account_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'accounting.billingaccountadmin': {
'Meta': {'object_name': 'BillingAccountAdmin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'web_user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'})
},
u'accounting.billingcontactinfo': {
'Meta': {'object_name': 'BillingContactInfo'},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounting.BillingAccount']", 'unique': 'True', 'primary_key': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'emails': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'first_line': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'second_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'state_province_region': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'accounting.billingrecord': {
'Meta': {'object_name': 'BillingRecord'},
'date_emailed': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'emailed_to': ('django.db.models.fields.CharField', [], {'max_length': '254', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'pdf_data_id': ('django.db.models.fields.CharField', [], {'max_length': '48'})
},
u'accounting.creditadjustment': {
'Meta': {'object_name': 'CreditAdjustment'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'credit_line': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.CreditLine']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.LineItem']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'MANUAL'", 'max_length': '25'}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'accounting.creditline': {
'Meta': {'object_name': 'CreditLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True', 'blank': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']", 'null': 'True', 'blank': 'True'})
},
u'accounting.currency': {
'Meta': {'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': '1.0', 'max_digits': '20', 'decimal_places': '9'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'accounting.defaultproductplan': {
'Meta': {'object_name': 'DefaultProductPlan'},
'edition': ('django.db.models.fields.CharField', [], {'default': "'Community'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'accounting.feature': {
'Meta': {'object_name': 'Feature'},
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'accounting.featurerate': {
'Meta': {'object_name': 'FeatureRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Feature']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'monthly_limit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'per_excess_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'})
},
u'accounting.invoice': {
'Meta': {'object_name': 'Invoice'},
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_due': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_paid': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'})
},
u'accounting.lineitem': {
'Meta': {'object_name': 'LineItem'},
'base_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'base_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'unit_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting.softwareplan': {
'Meta': {'object_name': 'SoftwarePlan'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'default': "'Enterprise'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '10'})
},
u'accounting.softwareplanversion': {
'Meta': {'object_name': 'SoftwarePlanVersion'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.FeatureRate']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.SoftwareProductRate']", 'symmetrical': 'False', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_prbac.Role']"})
},
u'accounting.softwareproduct': {
'Meta': {'object_name': 'SoftwareProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
u'accounting.softwareproductrate': {
'Meta': {'object_name': 'SoftwareProductRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProduct']"})
},
u'accounting.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'})
},
u'accounting.subscription': {
'Meta': {'object_name': 'Subscription'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlanVersion']"}),
'salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscriber']"})
},
u'accounting.subscriptionadjustment': {
'Meta': {'object_name': 'SubscriptionAdjustment'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '50'}),
'new_date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_start': ('django.db.models.fields.DateField', [], {}),
'new_salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'CREATE'", 'max_length': '50'}),
'related_subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptionadjustment_related'", 'null': 'True', 'to': u"orm['accounting.Subscription']"}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'django_prbac.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parameters': ('django_prbac.fields.StringSetField', [], {'default': '[]', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
}
}
complete_apps = ['accounting']
| 81.3 | 198 | 0.570558 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.alter_column(u'accounting_billingaccount', 'name', self.gf('django.db.models.fields.CharField')(max_length=200))
def backwards(self, orm):
db.alter_column(u'accounting_billingaccount', 'name', self.gf('django.db.models.fields.CharField')(max_length=40))
models = {
u'accounting.billingaccount': {
'Meta': {'object_name': 'BillingAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CONTRACT'", 'max_length': '25'}),
'billing_admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.BillingAccountAdmin']", 'null': 'True', 'symmetrical': 'False'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'created_by_domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto_invoiceable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'salesforce_account_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'accounting.billingaccountadmin': {
'Meta': {'object_name': 'BillingAccountAdmin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'web_user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'})
},
u'accounting.billingcontactinfo': {
'Meta': {'object_name': 'BillingContactInfo'},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounting.BillingAccount']", 'unique': 'True', 'primary_key': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'emails': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'first_line': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'second_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'state_province_region': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'accounting.billingrecord': {
'Meta': {'object_name': 'BillingRecord'},
'date_emailed': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'emailed_to': ('django.db.models.fields.CharField', [], {'max_length': '254', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'pdf_data_id': ('django.db.models.fields.CharField', [], {'max_length': '48'})
},
u'accounting.creditadjustment': {
'Meta': {'object_name': 'CreditAdjustment'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'credit_line': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.CreditLine']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.LineItem']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'MANUAL'", 'max_length': '25'}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'accounting.creditline': {
'Meta': {'object_name': 'CreditLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True', 'blank': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']", 'null': 'True', 'blank': 'True'})
},
u'accounting.currency': {
'Meta': {'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': '1.0', 'max_digits': '20', 'decimal_places': '9'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'accounting.defaultproductplan': {
'Meta': {'object_name': 'DefaultProductPlan'},
'edition': ('django.db.models.fields.CharField', [], {'default': "'Community'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'accounting.feature': {
'Meta': {'object_name': 'Feature'},
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'accounting.featurerate': {
'Meta': {'object_name': 'FeatureRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Feature']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'monthly_limit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'per_excess_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'})
},
u'accounting.invoice': {
'Meta': {'object_name': 'Invoice'},
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_due': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_paid': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'})
},
u'accounting.lineitem': {
'Meta': {'object_name': 'LineItem'},
'base_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'base_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'unit_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting.softwareplan': {
'Meta': {'object_name': 'SoftwarePlan'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'default': "'Enterprise'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '10'})
},
u'accounting.softwareplanversion': {
'Meta': {'object_name': 'SoftwarePlanVersion'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.FeatureRate']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.SoftwareProductRate']", 'symmetrical': 'False', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_prbac.Role']"})
},
u'accounting.softwareproduct': {
'Meta': {'object_name': 'SoftwareProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
u'accounting.softwareproductrate': {
'Meta': {'object_name': 'SoftwareProductRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProduct']"})
},
u'accounting.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'})
},
u'accounting.subscription': {
'Meta': {'object_name': 'Subscription'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlanVersion']"}),
'salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscriber']"})
},
u'accounting.subscriptionadjustment': {
'Meta': {'object_name': 'SubscriptionAdjustment'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '50'}),
'new_date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_start': ('django.db.models.fields.DateField', [], {}),
'new_salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'CREATE'", 'max_length': '50'}),
'related_subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptionadjustment_related'", 'null': 'True', 'to': u"orm['accounting.Subscription']"}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'django_prbac.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parameters': ('django_prbac.fields.StringSetField', [], {'default': '[]', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
}
}
complete_apps = ['accounting']
| true | true |
1c351d62c4ba7e4ef4c5a90fea7c627e5ac13ffc | 1,096 | py | Python | sanaviron/src/3rd/pycha/tests/runner.py | StetHD/sanaviron | dcb5d3ac6725771942e669a29961ba3f811b7fd4 | [
"Apache-2.0"
] | null | null | null | sanaviron/src/3rd/pycha/tests/runner.py | StetHD/sanaviron | dcb5d3ac6725771942e669a29961ba3f811b7fd4 | [
"Apache-2.0"
] | null | null | null | sanaviron/src/3rd/pycha/tests/runner.py | StetHD/sanaviron | dcb5d3ac6725771942e669a29961ba3f811b7fd4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2007-2008 by Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of PyCha.
#
# PyCha is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyCha. If not, see <http://www.gnu.org/licenses/>.
import unittest
import bar
import chart
import color
import line
import pie
def test_suite():
    """Aggregate the suites of every pycha test module into one suite."""
    modules = (bar, chart, color, line, pie)
    return unittest.TestSuite(m.test_suite() for m in modules)
if __name__ == '__main__':
    # Run the aggregated suite when this module is executed as a script.
    unittest.main(defaultTest='test_suite')
| 29.621622 | 80 | 0.720803 |
import unittest
import bar
import chart
import color
import line
import pie
def test_suite():
    """Combine the suites from all pycha test modules (bar/chart/color/line/pie)."""
    return unittest.TestSuite((
        bar.test_suite(),
        chart.test_suite(),
        color.test_suite(),
        line.test_suite(),
        pie.test_suite(),
    ))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| true | true |
1c351d91fd998c54cbe1f7db8cc1ef7c336525d6 | 1,633 | py | Python | month01/面向对象/类和对象/day02/homework01.py | chaofan-zheng/python_leanring_code | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | [
"Apache-2.0"
] | 4 | 2021-01-07T14:25:10.000Z | 2021-02-01T10:36:01.000Z | month01/面向对象/类和对象/day02/homework01.py | chaofan-zheng/python_leanring_code | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | [
"Apache-2.0"
] | null | null | null | month01/面向对象/类和对象/day02/homework01.py | chaofan-zheng/python_leanring_code | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | [
"Apache-2.0"
] | null | null | null | """
以面向对象的思想,描述下列情景.
(1)需求:小明使用手机打电话
(2)小明一次请多个保洁打扫卫生
效果:调用一次小明通知方法,可以有多个保洁在打扫卫生.
(3)张无忌教赵敏九阳神功
赵敏教张无忌玉女心经
张无忌工作挣了5000元
赵敏工作挣了10000元
"""
class Person:
    """A person who can place a call through any phone-like object."""

    def __init__(self, name):
        self.name = name

    def use_phone(self, phone):
        # Delegate the actual call to the phone (duck typing: anything
        # with a `call()` method works).
        phone.call()
class Phone:
    """A phone; making a call just announces the action on stdout."""
    def call(self):
        # Prints "打电话" ("making a call").
        print("打电话")
# Scenario (1): Xiao Ming makes a call with his phone.
xiaoming = Person("小明")
phone = Phone()
xiaoming.use_phone(phone)
# Scenario (2): one person hires several cleaners at once.
print()
class People:
    """A person who can hire any number of cleaners in a single call."""

    def __init__(self, name):
        self.name = name

    def ask_for_housekeeping(self, *args):
        """Send every named cleaner off to work.

        Variadic on purpose: callers pass names directly instead of
        having to build a list first.
        """
        for cleaner_name in args:
            Cleaner(cleaner_name).clean()
class Cleaner:
    """A cleaner who reports on stdout while working."""

    def __init__(self, name):
        self.name = name

    def clean(self):
        # Same output as the original f-string version.
        print("{}正在工作".format(self.name))
# Scenario (2) demo: hire three cleaners with one variadic call.
xiaoming = People("小明")
cleaner_list = ["小阿giao", "小药水", "老马"]
# xiaoming.ask_for_housekeeping(cleaner_list)
xiaoming.ask_for_housekeeping("小阿giao", "小药水", "老马")
"""
张无忌教赵敏九阳神功
赵敏教张无忌玉女心经
张无忌工作挣了5000元
赵敏工作挣了10000元
"""
# Scenario (3) follows below.
print()
class Character:
    """A wuxia character who can teach a skill and earn wages."""

    def __init__(self, name):
        self.name = name

    def teach(self, student, course):
        # Output byte-identical to the original f-string form.
        print("{}教{}{}".format(self.name, student, course))

    def go_to_work(self, salary):
        print("{}工作挣了{}元".format(self.name, salary))
# Scenario (3) demo: two characters teach each other and earn wages.
zwj = Character("张无忌")
zm = Character("赵敏")
zwj.teach("赵敏", "九阳神功")
zm.teach("张无忌", "玉女心经")
zwj.go_to_work(5000)
zm.go_to_work(10000)
class Person:
def __init__(self, name):
self.name = name
def use_phone(self, phone):
phone.call()
class Phone:
def call(self):
print("打电话")
xiaoming = Person("小明")
phone = Phone()
xiaoming.use_phone(phone)
print()
class People:
def __init__(self, name):
self.name = name
def ask_for_housekeeping(self, *args):
for cleaner_name in args:
cleaner = Cleaner(cleaner_name)
cleaner.clean()
class Cleaner:
def __init__(self, name):
self.name = name
def clean(self):
print(f"{self.name}正在工作")
xiaoming = People("小明")
cleaner_list = ["小阿giao", "小药水", "老马"]
xiaoming.ask_for_housekeeping("小阿giao", "小药水", "老马")
print()
class Character:
def __init__(self, name):
self.name = name
def teach(self, student, course):
print(f"{self.name}教{student}{course}")
def go_to_work(self, salary):
print(f"{self.name}工作挣了{salary}元")
zwj = Character("张无忌")
zm = Character("赵敏")
zwj.teach("赵敏", "九阳神功")
zm.teach("张无忌", "玉女心经")
zwj.go_to_work(5000)
zm.go_to_work(10000)
| true | true |
1c351dfa7f2fc57cc605e7d19dfea18fbc4b39ed | 748 | py | Python | var/spack/repos/builtin/packages/bc/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/bc/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/bc/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Bc(AutotoolsPackage, GNUMirrorPackage):
    """bc is an arbitrary precision numeric processing language. Syntax is
    similar to C, but differs in many substantial areas. It supports
    interactive execution of statements."""
    homepage = "https://www.gnu.org/software/bc"
    gnu_mirror_path = "bc/bc-1.07.tar.gz"
    version('1.07', sha256='55cf1fc33a728d7c3d386cc7b0cb556eb5bacf8e0cb5a3fcca7f109fc61205ad')
    # Build-time tool dependencies for the autotools build.
    depends_on('ed', type='build')
    depends_on('texinfo', type='build')
    # Disable parallel make for this package.
    parallel = False
| 32.521739 | 94 | 0.741979 |
from spack.package import *
class Bc(AutotoolsPackage, GNUMirrorPackage):
homepage = "https://www.gnu.org/software/bc"
gnu_mirror_path = "bc/bc-1.07.tar.gz"
version('1.07', sha256='55cf1fc33a728d7c3d386cc7b0cb556eb5bacf8e0cb5a3fcca7f109fc61205ad')
depends_on('ed', type='build')
depends_on('texinfo', type='build')
parallel = False
| true | true |
1c3521323cf7d57dc8b2b240d95a181b90cc3144 | 1,188 | py | Python | src/recognizeDigit.py | RsTaK/Sudoku | 8daa0a06906ce61d9a71586a8d28a3931ca4e5e3 | [
"MIT"
] | 2 | 2020-01-22T14:32:40.000Z | 2021-12-23T20:42:52.000Z | src/recognizeDigit.py | RsTaK/Sudoku | 8daa0a06906ce61d9a71586a8d28a3931ca4e5e3 | [
"MIT"
] | 4 | 2020-11-13T18:54:24.000Z | 2022-02-10T02:10:00.000Z | src/recognizeDigit.py | RsTaK/Sudoku | 8daa0a06906ce61d9a71586a8d28a3931ca4e5e3 | [
"MIT"
] | 1 | 2020-01-22T14:02:50.000Z | 2020-01-22T14:02:50.000Z | from keras.models import load_model
import cv2
import pickle
import keras.backend as K
import numpy as np
from src.model_path import MODEL_PATH
'''def predict(self, cell):
model = load_model('./model/Model.h5')
f = K.function([model.layers[0].input, K.learning_phase()],[model.layers[-1].output])
rescaled_cell = self.rescale(cell)
result = []
for _ in range(10):
result.append(f([rescaled_cell, 1]))
result = np.array(result)
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
if uncertainty.argmax() > 3:
new_prediction = 0
print(prediction.argmax(),uncertainty.argmax(),new_prediction)
else:
print(prediction.argmax(),uncertainty.argmax())'''
class recognizeDigit:
    """Classify a single cell image as a digit using the pretrained Keras model.

    The prediction is computed eagerly in ``__init__`` and exposed via the
    read-only ``prediction`` property.
    """
    def __init__(self, cell):
        # cell: a single-channel (grayscale) image array — assumed 2-D so the
        # reshape in rescale() can add batch and channel axes; TODO confirm.
        self._prediction = self.predict(cell)

    def predict(self, cell):
        # NOTE(review): the model is re-loaded from disk on every call;
        # caching it at module level would avoid repeated file I/O.
        model = load_model(MODEL_PATH)
        rescaled_cell = self.rescale(cell)
        pred = model.predict(rescaled_cell)
        # argmax over class scores -> predicted digit index.
        return pred.argmax()

    def rescale(self, cell):
        # Resize to the 28x28 input the network expects, then reshape to
        # (batch=1, height, width, channels=1).
        resized_cell = cv2.resize(cell, (28, 28))
        return resized_cell.reshape(1, resized_cell.shape[0], resized_cell.shape[1], 1)

    @property
    def prediction(self):
        # Digit class index computed at construction time.
        return self._prediction
import cv2
import pickle
import keras.backend as K
import numpy as np
from src.model_path import MODEL_PATH
class recognizeDigit:
def __init__(self, cell):
self._prediction = self.predict(cell)
def predict(self, cell):
model = load_model(MODEL_PATH)
rescaled_cell = self.rescale(cell)
pred = model.predict(rescaled_cell)
return pred.argmax()
def rescale(self, cell):
resized_cell = cv2.resize(cell, (28, 28))
return resized_cell.reshape(1, resized_cell.shape[0], resized_cell.shape[1], 1)
@property
def prediction(self):
return self._prediction | true | true |
1c3521b67dec540553facf88ad2e9e97f1fee4e7 | 14,075 | py | Python | python/fate_test/fate_test/scripts/pipeline_conversion_cli.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/fate_test/fate_test/scripts/pipeline_conversion_cli.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/fate_test/fate_test/scripts/pipeline_conversion_cli.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | import copy
import os
import shutil
import sys
import time
import uuid
import json
import click
import importlib
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
@click.group(name="convert")
def convert_group():
"""
Converting pipeline files to dsl v2
"""
...
@convert_group.command("pipeline-to-dsl")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
              help="include *pipeline.py under these paths")
@click.option('-o', '--output-path', type=click.Path(exists=True), help="DSL output path, default to *pipeline.py path")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def to_dsl(ctx, include, output_path, **kwargs):
    """
    This command will run pipeline, make sure data is uploaded
    """
    # Merge CLI options into the shared context and resolve derived settings.
    ctx.obj.update(**kwargs)
    ctx.obj.post_process()
    namespace = ctx.obj["namespace"]
    config_inst = ctx.obj["config"]
    yes = ctx.obj["yes"]
    echo.welcome()
    echo.echo(f"converting namespace: {namespace}", fg='red')
    for path in include:
        echo.echo(f"pipeline path: {os.path.abspath(path)}")
    # Interactive confirmation unless --yes was given.
    if not yes and not click.confirm("running?"):
        return
    config_yaml_file = './examples/config.yaml'
    # Scratch file the pipeline script is rewritten into before execution.
    temp_file_path = f'./logs/{namespace}/temp_pipeline.py'
    for i in include:
        try:
            convert(i, temp_file_path, config_yaml_file, output_path, config_inst)
        except Exception:
            # Tag each failure with a unique id so logs can be correlated.
            exception_id = uuid.uuid1()
            echo.echo(f"exception_id={exception_id}")
            LOGGER.exception(f"exception id: {exception_id}")
        finally:
            echo.stdout_newline()
    echo.farewell()
    echo.echo(f"converting namespace: {namespace}", fg='red')
@convert_group.command("pipeline-testsuite-to-dsl-testsuite")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), metavar="<include>",
              help="include is the pipeline test folder containing *testsuite.py")
@click.option('-t', '--template-path', required=False, type=click.Path(exists=True), metavar="<include>",
              help="specify the test template to use")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def to_testsuite(ctx, include, template_path, **kwargs):
    """
    convert pipeline testsuite to dsl testsuite
    """
    # Merge CLI options into the shared context and resolve derived settings.
    ctx.obj.update(**kwargs)
    ctx.obj.post_process()
    namespace = ctx.obj["namespace"]
    config_inst = ctx.obj["config"]
    yes = ctx.obj["yes"]
    echo.welcome()
    if not os.path.isdir(include):
        raise Exception("Please fill in a folder.")
    echo.echo(f"testsuite namespace: {namespace}", fg='red')
    echo.echo(f"pipeline path: {os.path.abspath(include)}")
    if not yes and not click.confirm("running?"):
        return
    input_path = os.path.abspath(include)
    # Breadth-first traversal collecting every sub-directory of the input.
    input_list = [input_path]
    i = 0
    while i < len(input_list):
        dirs = os.listdir(input_list[i])
        for d in dirs:
            # BUG FIX: the original tested os.path.isdir(d) on the bare entry
            # name (resolved against the CWD) and appended the bare name, so
            # sub-directories were never traversed correctly. Join with the
            # parent directory before testing and appending.
            sub_path = os.path.join(input_list[i], d)
            if os.path.isdir(sub_path):
                input_list.append(sub_path)
        i += 1
    for file_path in input_list:
        try:
            module_name = os.path.basename(file_path)
            do_generated(file_path, module_name, template_path, config_inst)
        except Exception:
            # Tag each failure with a unique id so logs can be correlated.
            exception_id = uuid.uuid1()
            echo.echo(f"exception_id={exception_id}")
            LOGGER.exception(f"exception id: {exception_id}")
        finally:
            echo.stdout_newline()
    echo.farewell()
    echo.echo(f"converting namespace: {namespace}", fg='red')
def make_temp_pipeline(pipeline_file, temp_file_path, folder_name):
    """Rewrite a pipeline script into a temp module whose main() returns the pipeline.

    Text-level surgery: parameter/config paths mentioned in the source are
    rewritten to absolute paths under ``folder_name``, and a
    ``return pipeline`` line is injected at the end of ``def main``.
    The result is written to ``temp_file_path``.
    """
    def _conf_file_update(_line, k, end, conf_file=None):
        # Rewrite one split fragment of a `param=...`/`default=...` expression
        # so its path argument becomes absolute under folder_name.
        if ")" in _line[0]:
            if conf_file is None:
                conf_file = os.path.abspath(folder_name + "/" + _line[0].replace("'", "").replace('"', "").
                                            replace(")", "").replace(":", "").replace("\n", ""))
            _line = k + conf_file + end
        else:
            if conf_file is None:
                conf_file = os.path.abspath(folder_name + "/" + _line[0].replace('"', ""))
            _line = k + conf_file + '",' + _line[-1]
        return conf_file, _line

    def _get_conf_file(_lines):
        # Scan for a `--param` CLI option and return the absolute path of its
        # `default=` value, or None if no such option exists.
        param_default = False
        conf_file = None
        for _line in _lines:
            if "--param" in _line or param_default:
                if "default" in _line:
                    _line_start = _line.split("default=")
                    _line_end = _line_start[1].split(",")
                    conf_file, _ = _conf_file_update(_line_end, 'default="', '")')
                    param_default = False
                else:
                    # `--param` seen but `default=` is on a later line.
                    param_default = True
        return conf_file

    code_list = []
    with open(pipeline_file, 'r') as f:
        lines = f.readlines()
    start_main = False      # True while we are inside def main(...)
    has_returned = False
    space_num = 0           # indentation of the `def main` line
    conf_file_dir = _get_conf_file(lines)
    for line in lines:
        if line is None:
            continue
        elif "def main" in line:
            # Record main()'s indentation so the injected return matches.
            for char in line:
                if char.isspace():
                    space_num += 1
                else:
                    break
            start_main = True
            if "param=" in line:
                # Rewrite the inline param= path to an absolute one.
                line_start = line.split("param=")
                line_end = line_start[1].split(",")
                conf_file_dir, line = _conf_file_update(line_end, 'param="', '")', conf_file_dir)
                line = line_start[0] + line
        elif start_main and "def " in line and not has_returned:
            # main() ended (next def begins): inject the return before it.
            code_list.append(" " * (space_num + 4) + "return pipeline\n")
            start_main = False
        elif start_main and "return " in line:
            # Replace main()'s own return with `return pipeline` (original
            # return line is dropped via `continue`).
            code_lines = None  # NOTE(review): unused; kept out — see continue below
            code_list.append(" " * (space_num + 4) + "return pipeline\n")
            start_main = False
            continue
        elif start_main and 'if __name__ ==' in line:
            code_list.append(" " * (space_num + 4) + "return pipeline\n")
            start_main = False
        code_list.append(line)
    if start_main:
        # main() ran to end-of-file without a return: inject one.
        code_list.append(" " * (space_num + 4) + "return pipeline\n")
    with open(temp_file_path, 'w') as f:
        f.writelines(code_list)
def convert(pipeline_file, temp_file_path, config_yaml_file, output_path, config: Config):
    """Execute one pipeline script and dump its train conf/dsl as JSON files.

    The script is first rewritten by make_temp_pipeline() so that main()
    returns the pipeline object, then imported and run; the resulting
    conf/dsl dicts are written next to the source (or into output_path).
    """
    folder_name, file_name = os.path.split(pipeline_file)
    if output_path is not None:
        folder_name = output_path
    echo.echo(f"folder_name: {os.path.abspath(folder_name)}, file_name: {file_name}")
    conf_name = file_name.replace('.py', '_conf.json')
    dsl_name = file_name.replace('.py', '_dsl.json')
    conf_name = os.path.join(folder_name, conf_name)
    dsl_name = os.path.join(folder_name, dsl_name)
    make_temp_pipeline(pipeline_file, temp_file_path, folder_name)
    # Make the pipeline's grandparent directory importable so its relative
    # imports resolve when the temp module executes.
    additional_path = os.path.realpath(os.path.join(os.path.curdir, pipeline_file, os.pardir, os.pardir))
    if additional_path not in sys.path:
        sys.path.append(additional_path)
    # Import the rewritten script as module "main" and execute it.
    loader = importlib.machinery.SourceFileLoader("main", str(temp_file_path))
    spec = importlib.util.spec_from_loader(loader.name, loader)
    mod = importlib.util.module_from_spec(spec)
    loader.exec_module(mod)
    my_pipeline = mod.main(os.path.join(config.data_base_dir, config_yaml_file))
    conf = my_pipeline.get_train_conf()
    dsl = my_pipeline.get_train_dsl()
    # The temp module has served its purpose; clean it up.
    os.remove(temp_file_path)
    with open(conf_name, 'w') as f:
        json.dump(conf, f, indent=4)
    echo.echo('conf name is {}'.format(os.path.abspath(conf_name)))
    with open(dsl_name, 'w') as f:
        json.dump(dsl, f, indent=4)
    echo.echo('dsl name is {}'.format(os.path.abspath(dsl_name)))
def insert_extract_code(file_path):
    """Return the lines of a pipeline script, rewritten for profile extraction.

    Prepends a helper `extract()` that dumps the pipeline's conf/dsl to
    dsl_testsuite/, comments out fit/predict/deploy calls (they are not
    needed for extraction), and replaces `return` with a call to extract().
    """
    code_lines = []
    # Helper injected at the top of every rewritten script.
    code = \
        """
import json
import os
def extract(my_pipeline, file_name, output_path='dsl_testsuite'):
    out_name = file_name.split('/')[-1]
    out_name = out_name.replace('pipeline-', '').replace('.py', '').replace('-', '_')
    conf = my_pipeline.get_train_conf()
    dsl = my_pipeline.get_train_dsl()
    cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    conf_name = os.path.join(cur_dir, output_path, f"{out_name}_conf.json")
    dsl_name = os.path.join(cur_dir, output_path, f"{out_name}_dsl.json")
    json.dump(conf, open(conf_name, 'w'), indent=4)
    json.dump(dsl, open(dsl_name, 'w'), indent=4)
"""
    code_lines.append(code)
    # Any line containing one of these is commented out (possibly spanning
    # multiple lines until the closing parenthesis).
    screen_keywords = [".predict(", ".fit(", ".deploy_component(", "predict_pipeline ",
                       "predict_pipeline."]
    continue_to_screen = False   # True while inside a multi-line screened call
    has_return = False           # True once a `return` has been replaced
    with open(file_path, 'r') as f:
        lines = f.readlines()
    for l in lines:
        if ".predict(" in l or ".fit(" in l:
            code_lines.append(f"# {l}")
        elif 'if __name__ == "__main__":' in l:
            # No explicit return was seen: inject extract() before the guard.
            if not has_return:
                code_lines.append("    extract(pipeline, __file__)\n")
            code_lines.append(l)
        elif 'return' in l:
            # Replace the return with the extract() call; drop the original.
            code_lines.append("    extract(pipeline, __file__)\n")
            # code_lines.append(l)
            has_return = True
        elif "get_summary()" in l:
            continue
        elif continue_to_screen:
            # Still inside a screened multi-line call: keep commenting.
            code_lines.append(f"# {l}")
            if ")" in l:
                continue_to_screen = False
        else:
            should_append = True
            for key_word in screen_keywords:
                if key_word in l:
                    code_lines.append(f"# {l}")
                    should_append = False
                    if ")" not in l:
                        # Call continues on following lines.
                        continue_to_screen = True
            if should_append:
                code_lines.append(l)
    return code_lines
def get_testsuite_file(testsuite_file_path):
    """Load a testsuite JSON file with any task sections stripped.

    Removes "tasks" and "pipeline_tasks" (if present) so the caller can
    repopulate them from the converted pipelines.
    """
    echo.echo(f"testsuite_file_path: {testsuite_file_path}")
    with open(testsuite_file_path, 'r', encoding='utf-8') as fin:
        suite = json.load(fin)
    for section in ("tasks", "pipeline_tasks"):
        suite.pop(section, None)
    return suite
def do_generated(file_path, fold_name, template_path, config: Config):
    """Rewrite and execute every pipeline-*.py under file_path to generate DSL.

    Each rewritten script is run as a subprocess; the conf/dsl JSON files it
    dumps are collected into a generated testsuite. Writes
    `<fold_name>_testsuite.json` (dsl) and `<fold_name>_pipeline_testsuite.json`
    (pipeline) and removes the temporary replaced_code directory.
    """
    yaml_file = os.path.join(config.data_base_dir, "./examples/config.yaml")
    # BUG FIX: default to "" when PYTHONPATH is unset — the original
    # `os.environ.get('PYTHONPATH') + ":"` raised TypeError on None.
    PYTHONPATH = os.environ.get('PYTHONPATH', '') + ":" + str(config.data_base_dir)
    os.environ['PYTHONPATH'] = PYTHONPATH
    if not os.path.isdir(file_path):
        return
    files = os.listdir(file_path)
    if template_path is None:
        # Fall back to the first *testsuite* template found in the folder.
        for f in files:
            if "testsuite" in f and "generated_testsuite" not in f:
                template_path = os.path.join(file_path, f)
                break
        if template_path is None:
            return
    suite_json = get_testsuite_file(template_path)
    pipeline_suite = copy.deepcopy(suite_json)
    suite_json["tasks"] = {}
    pipeline_suite["pipeline_tasks"] = {}
    replaced_path = os.path.join(file_path, 'replaced_code')
    generated_path = os.path.join(file_path, 'dsl_testsuite')
    # Use os.makedirs instead of shelling out `mkdir` (portable, no subshell).
    os.makedirs(replaced_path, exist_ok=True)
    os.makedirs(generated_path, exist_ok=True)
    for f in files:
        if not f.startswith("pipeline"):
            continue
        echo.echo(f)
        task_name = f.replace(".py", "")
        task_name = "-".join(task_name.split('-')[1:])
        pipeline_suite["pipeline_tasks"][task_name] = {
            "script": f
        }
        f_path = os.path.join(file_path, f)
        code_str = insert_extract_code(f_path)
        pipeline_file_path = os.path.join(replaced_path, f)
        # FIX: close the file handle deterministically (was a leaked
        # `open(...).writelines(...)`).
        with open(pipeline_file_path, 'w') as fout:
            fout.writelines(code_str)
    exe_files = os.listdir(replaced_path)
    fail_job_count = 0
    task_type_list = []
    exe_conf_file = None
    exe_dsl_file = None
    for i, f in enumerate(exe_files):
        abs_file = os.path.join(replaced_path, f)
        echo.echo('\n' + '[{}/{}] executing {}'.format(i + 1, len(exe_files), abs_file), fg='red')
        result = os.system(f"python {abs_file} -config {yaml_file}")
        if not result:
            # Give the subprocess a moment to flush its JSON dumps.
            time.sleep(3)
            conf_files = os.listdir(generated_path)
            # Map "<task_type>" -> file name for dsl and conf dumps.
            f_dsl = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'dsl.json' in f}
            f_conf = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'conf.json' in f}
            for task_type, dsl_file in f_dsl.items():
                if task_type not in task_type_list:
                    exe_dsl_file = dsl_file
                    task_type_list.append(task_type)
                    exe_conf_file = f_conf[task_type]
                    suite_json['tasks'][task_type] = {
                        "conf": exe_conf_file,
                        "dsl": exe_dsl_file
                    }
                    echo.echo('conf name is {}'.format(os.path.join(file_path, "dsl_testsuite", exe_conf_file)))
                    echo.echo('dsl name is {}'.format(os.path.join(file_path, "dsl_testsuite", exe_dsl_file)))
        else:
            echo.echo('profile generation failed')
            fail_job_count += 1
    suite_path = os.path.join(generated_path, f"{fold_name}_testsuite.json")
    with open(suite_path, 'w', encoding='utf-8') as json_file:
        json.dump(suite_json, json_file, ensure_ascii=False, indent=4)
    suite_path = os.path.join(file_path, f"{fold_name}_pipeline_testsuite.json")
    with open(suite_path, 'w', encoding='utf-8') as json_file:
        json.dump(pipeline_suite, json_file, ensure_ascii=False, indent=4)
    # The rewritten scripts are no longer needed.
    shutil.rmtree(replaced_path)
    if not fail_job_count:
        echo.echo("Generate testsuite and dsl&conf finished!")
    else:
        echo.echo("Generate testsuite and dsl&conf finished! {} failures".format(fail_job_count))
| 38.881215 | 120 | 0.601634 | import copy
import os
import shutil
import sys
import time
import uuid
import json
import click
import importlib
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
@click.group(name="convert")
def convert_group():
...
@convert_group.command("pipeline-to-dsl")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *pipeline.py under these paths")
@click.option('-o', '--output-path', type=click.Path(exists=True), help="DSL output path, default to *pipeline.py path")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def to_dsl(ctx, include, output_path, **kwargs):
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"converting namespace: {namespace}", fg='red')
for path in include:
echo.echo(f"pipeline path: {os.path.abspath(path)}")
if not yes and not click.confirm("running?"):
return
config_yaml_file = './examples/config.yaml'
temp_file_path = f'./logs/{namespace}/temp_pipeline.py'
for i in include:
try:
convert(i, temp_file_path, config_yaml_file, output_path, config_inst)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"converting namespace: {namespace}", fg='red')
@convert_group.command("pipeline-testsuite-to-dsl-testsuite")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), metavar="<include>",
help="include is the pipeline test folder containing *testsuite.py")
@click.option('-t', '--template-path', required=False, type=click.Path(exists=True), metavar="<include>",
help="specify the test template to use")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def to_testsuite(ctx, include, template_path, **kwargs):
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
if not os.path.isdir(include):
raise Exception("Please fill in a folder.")
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo(f"pipeline path: {os.path.abspath(include)}")
if not yes and not click.confirm("running?"):
return
input_path = os.path.abspath(include)
input_list = [input_path]
i = 0
while i < len(input_list):
dirs = os.listdir(input_list[i])
for d in dirs:
if os.path.isdir(d):
input_list.append(d)
i += 1
for file_path in input_list:
try:
module_name = os.path.basename(file_path)
do_generated(file_path, module_name, template_path, config_inst)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"converting namespace: {namespace}", fg='red')
def make_temp_pipeline(pipeline_file, temp_file_path, folder_name):
def _conf_file_update(_line, k, end, conf_file=None):
if ")" in _line[0]:
if conf_file is None:
conf_file = os.path.abspath(folder_name + "/" + _line[0].replace("'", "").replace('"', "").
replace(")", "").replace(":", "").replace("\n", ""))
_line = k + conf_file + end
else:
if conf_file is None:
conf_file = os.path.abspath(folder_name + "/" + _line[0].replace('"', ""))
_line = k + conf_file + '",' + _line[-1]
return conf_file, _line
def _get_conf_file(_lines):
param_default = False
conf_file = None
for _line in _lines:
if "--param" in _line or param_default:
if "default" in _line:
_line_start = _line.split("default=")
_line_end = _line_start[1].split(",")
conf_file, _ = _conf_file_update(_line_end, 'default="', '")')
param_default = False
else:
param_default = True
return conf_file
code_list = []
with open(pipeline_file, 'r') as f:
lines = f.readlines()
start_main = False
has_returned = False
space_num = 0
conf_file_dir = _get_conf_file(lines)
for line in lines:
if line is None:
continue
elif "def main" in line:
for char in line:
if char.isspace():
space_num += 1
else:
break
start_main = True
if "param=" in line:
line_start = line.split("param=")
line_end = line_start[1].split(",")
conf_file_dir, line = _conf_file_update(line_end, 'param="', '")', conf_file_dir)
line = line_start[0] + line
elif start_main and "def " in line and not has_returned:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
start_main = False
elif start_main and "return " in line:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
start_main = False
continue
elif start_main and 'if __name__ ==' in line:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
start_main = False
code_list.append(line)
if start_main:
code_list.append(" " * (space_num + 4) + "return pipeline\n")
with open(temp_file_path, 'w') as f:
f.writelines(code_list)
def convert(pipeline_file, temp_file_path, config_yaml_file, output_path, config: Config):
folder_name, file_name = os.path.split(pipeline_file)
if output_path is not None:
folder_name = output_path
echo.echo(f"folder_name: {os.path.abspath(folder_name)}, file_name: {file_name}")
conf_name = file_name.replace('.py', '_conf.json')
dsl_name = file_name.replace('.py', '_dsl.json')
conf_name = os.path.join(folder_name, conf_name)
dsl_name = os.path.join(folder_name, dsl_name)
make_temp_pipeline(pipeline_file, temp_file_path, folder_name)
additional_path = os.path.realpath(os.path.join(os.path.curdir, pipeline_file, os.pardir, os.pardir))
if additional_path not in sys.path:
sys.path.append(additional_path)
loader = importlib.machinery.SourceFileLoader("main", str(temp_file_path))
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
my_pipeline = mod.main(os.path.join(config.data_base_dir, config_yaml_file))
conf = my_pipeline.get_train_conf()
dsl = my_pipeline.get_train_dsl()
os.remove(temp_file_path)
with open(conf_name, 'w') as f:
json.dump(conf, f, indent=4)
echo.echo('conf name is {}'.format(os.path.abspath(conf_name)))
with open(dsl_name, 'w') as f:
json.dump(dsl, f, indent=4)
echo.echo('dsl name is {}'.format(os.path.abspath(dsl_name)))
def insert_extract_code(file_path):
code_lines = []
code = \
"""
import json
import os
def extract(my_pipeline, file_name, output_path='dsl_testsuite'):
out_name = file_name.split('/')[-1]
out_name = out_name.replace('pipeline-', '').replace('.py', '').replace('-', '_')
conf = my_pipeline.get_train_conf()
dsl = my_pipeline.get_train_dsl()
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
conf_name = os.path.join(cur_dir, output_path, f"{out_name}_conf.json")
dsl_name = os.path.join(cur_dir, output_path, f"{out_name}_dsl.json")
json.dump(conf, open(conf_name, 'w'), indent=4)
json.dump(dsl, open(dsl_name, 'w'), indent=4)
"""
code_lines.append(code)
screen_keywords = [".predict(", ".fit(", ".deploy_component(", "predict_pipeline ",
"predict_pipeline."]
continue_to_screen = False
has_return = False
with open(file_path, 'r') as f:
lines = f.readlines()
for l in lines:
if ".predict(" in l or ".fit(" in l:
code_lines.append(f"# {l}")
elif 'if __name__ == "__main__":' in l:
if not has_return:
code_lines.append(" extract(pipeline, __file__)\n")
code_lines.append(l)
elif 'return' in l:
code_lines.append(" extract(pipeline, __file__)\n")
# code_lines.append(l)
has_return = True
elif "get_summary()" in l:
continue
elif continue_to_screen:
code_lines.append(f"# {l}")
if ")" in l:
continue_to_screen = False
else:
should_append = True
for key_word in screen_keywords:
if key_word in l:
code_lines.append(f"# {l}")
should_append = False
if ")" not in l:
continue_to_screen = True
if should_append:
code_lines.append(l)
return code_lines
def get_testsuite_file(testsuite_file_path):
echo.echo(f"testsuite_file_path: {testsuite_file_path}")
with open(testsuite_file_path, 'r', encoding='utf-8') as load_f:
testsuite_json = json.load(load_f)
if "tasks" in testsuite_json:
del testsuite_json["tasks"]
if "pipeline_tasks" in testsuite_json:
del testsuite_json["pipeline_tasks"]
return testsuite_json
def do_generated(file_path, fold_name, template_path, config: Config):
yaml_file = os.path.join(config.data_base_dir, "./examples/config.yaml")
PYTHONPATH = os.environ.get('PYTHONPATH') + ":" + str(config.data_base_dir)
os.environ['PYTHONPATH'] = PYTHONPATH
if not os.path.isdir(file_path):
return
files = os.listdir(file_path)
if template_path is None:
for f in files:
if "testsuite" in f and "generated_testsuite" not in f:
template_path = os.path.join(file_path, f)
break
if template_path is None:
return
suite_json = get_testsuite_file(template_path)
pipeline_suite = copy.deepcopy(suite_json)
suite_json["tasks"] = {}
pipeline_suite["pipeline_tasks"] = {}
replaced_path = os.path.join(file_path, 'replaced_code')
generated_path = os.path.join(file_path, 'dsl_testsuite')
if not os.path.exists(replaced_path):
os.system('mkdir {}'.format(replaced_path))
if not os.path.exists(generated_path):
os.system('mkdir {}'.format(generated_path))
for f in files:
if not f.startswith("pipeline"):
continue
echo.echo(f)
task_name = f.replace(".py", "")
task_name = "-".join(task_name.split('-')[1:])
pipeline_suite["pipeline_tasks"][task_name] = {
"script": f
}
f_path = os.path.join(file_path, f)
code_str = insert_extract_code(f_path)
pipeline_file_path = os.path.join(replaced_path, f)
open(pipeline_file_path, 'w').writelines(code_str)
exe_files = os.listdir(replaced_path)
fail_job_count = 0
task_type_list = []
exe_conf_file = None
exe_dsl_file = None
for i, f in enumerate(exe_files):
abs_file = os.path.join(replaced_path, f)
echo.echo('\n' + '[{}/{}] executing {}'.format(i + 1, len(exe_files), abs_file), fg='red')
result = os.system(f"python {abs_file} -config {yaml_file}")
if not result:
time.sleep(3)
conf_files = os.listdir(generated_path)
f_dsl = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'dsl.json' in f}
f_conf = {"_".join(f.split('_')[:-1]): f for f in conf_files if 'conf.json' in f}
for task_type, dsl_file in f_dsl.items():
if task_type not in task_type_list:
exe_dsl_file = dsl_file
task_type_list.append(task_type)
exe_conf_file = f_conf[task_type]
suite_json['tasks'][task_type] = {
"conf": exe_conf_file,
"dsl": exe_dsl_file
}
echo.echo('conf name is {}'.format(os.path.join(file_path, "dsl_testsuite", exe_conf_file)))
echo.echo('dsl name is {}'.format(os.path.join(file_path, "dsl_testsuite", exe_dsl_file)))
else:
echo.echo('profile generation failed')
fail_job_count += 1
suite_path = os.path.join(generated_path, f"{fold_name}_testsuite.json")
with open(suite_path, 'w', encoding='utf-8') as json_file:
json.dump(suite_json, json_file, ensure_ascii=False, indent=4)
suite_path = os.path.join(file_path, f"{fold_name}_pipeline_testsuite.json")
with open(suite_path, 'w', encoding='utf-8') as json_file:
json.dump(pipeline_suite, json_file, ensure_ascii=False, indent=4)
shutil.rmtree(replaced_path)
if not fail_job_count:
echo.echo("Generate testsuite and dsl&conf finished!")
else:
echo.echo("Generate testsuite and dsl&conf finished! {} failures".format(fail_job_count))
| true | true |
1c35235d2354af6cc9a378696a72c8e3440fb543 | 5,315 | py | Python | cnn_architectures/augmentation_4/eval_model_c10_leilaclip_aug4_560.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | [
"MIT"
] | 2 | 2019-10-29T03:26:20.000Z | 2021-03-07T10:02:39.000Z | cnn_architectures/augmentation_4/eval_model_c10_leilaclip_aug4_560.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | [
"MIT"
] | null | null | null | cnn_architectures/augmentation_4/eval_model_c10_leilaclip_aug4_560.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | [
"MIT"
] | null | null | null | # Load in model weights and evaluate its goodness (ECE, MCE, error) also saves logits.
# ResNet model from https://github.com/BIGBALLON/cifar-10-cnn/blob/master/4_Residual_Network/ResNet_keras.py
import keras
import numpy as np
from keras.datasets import cifar10, cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.models import Model
from keras import optimizers, regularizers
from sklearn.model_selection import train_test_split
import pickle
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
# Training/architecture hyperparameters.
stack_n = 18  # residual blocks per stage; depth = stack_n * 3 * 2 + 2 = 110 (ResNet-110)
num_classes10 = 10  # CIFAR-10 label count (used by this script)
num_classes100 = 100  # CIFAR-100 label count (declared but unused here)
img_rows, img_cols = 32, 32  # CIFAR image spatial size
img_channels = 3  # RGB
batch_size = 128
epochs = 200
iterations = 45560 // batch_size  # batches per epoch over the 45k split + augmentations
weight_decay = 0.0001  # L2 penalty applied to every conv/dense kernel below
mean = [125.307, 122.95, 113.865]  # per-channel means (unused; per-pixel stats computed later)
std = [62.9932, 62.0887, 66.7048]  # per-channel stds (unused; per-pixel stats computed later)
seed = 333  # fixed train/validation split seed for reproducibility
weights_file_10 = "resnet_110_45kclip_aug_interpol4_560.h5"  # pre-trained weights to evaluate
def scheduler(epoch):
    """Piecewise-constant learning-rate schedule.

    Returns 0.1 before epoch 80, 0.01 before epoch 150, and 0.001
    afterwards (a 10x decay at each boundary).
    """
    for boundary, rate in ((80, 0.1), (150, 0.01)):
        if epoch < boundary:
            return rate
    return 0.001
def residual_network(img_input,classes_num=10,stack_n=5):
    """Build a pre-activation ResNet graph on top of ``img_input``.

    Total depth is stack_n * 3 * 2 + 2 layers (stack_n=18 gives ResNet-110).
    Returns the softmax output tensor with ``classes_num`` units.
    """
    def residual_block(intput,out_channel,increase=False):
        # One pre-activation block: BN -> ReLU -> conv, twice.  When
        # ``increase`` is True the spatial size halves and a 1x1 projection
        # conv matches the shortcut's shape before the add.
        if increase:
            stride = (2,2)
        else:
            stride = (1,1)
        pre_bn = BatchNormalization()(intput)
        pre_relu = Activation('relu')(pre_bn)
        conv_1 = Conv2D(out_channel,kernel_size=(3,3),strides=stride,padding='same',
                        kernel_initializer="he_normal",
                        kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
        bn_1 = BatchNormalization()(conv_1)
        relu1 = Activation('relu')(bn_1)
        conv_2 = Conv2D(out_channel,kernel_size=(3,3),strides=(1,1),padding='same',
                        kernel_initializer="he_normal",
                        kernel_regularizer=regularizers.l2(weight_decay))(relu1)
        if increase:
            projection = Conv2D(out_channel,
                                kernel_size=(1,1),
                                strides=(2,2),
                                padding='same',
                                kernel_initializer="he_normal",
                                kernel_regularizer=regularizers.l2(weight_decay))(intput)
            block = add([conv_2, projection])
        else:
            block = add([intput,conv_2])
        return block
    # build model
    # total layers = stack_n * 3 * 2 + 2
    # stack_n = 5 by default, total layers = 32
    # stem conv -- input: 32x32x3 output: 32x32x16
    x = Conv2D(filters=16,kernel_size=(3,3),strides=(1,1),padding='same',
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(weight_decay))(img_input)
    # stage 1 -- input: 32x32x16 output: 32x32x16
    for _ in range(stack_n):
        x = residual_block(x,16,False)
    # stage 2 (downsamples once) -- input: 32x32x16 output: 16x16x32
    x = residual_block(x,32,True)
    for _ in range(1,stack_n):
        x = residual_block(x,32,False)
    # stage 3 (downsamples once) -- input: 16x16x32 output: 8x8x64
    x = residual_block(x,64,True)
    for _ in range(1,stack_n):
        x = residual_block(x,64,False)
    # final BN/ReLU before pooling (pre-activation nets leave the last block raw)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    # classifier head -- input: 64 output: classes_num
    x = Dense(classes_num,activation='softmax',
              kernel_initializer="he_normal",
              kernel_regularizer=regularizers.l2(weight_decay))(x)
    return x
if __name__ == '__main__':
    # load data: CIFAR-10 test labels are one-hot encoded for evaluation
    print("Cifar-10 evaluation")
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_test = keras.utils.to_categorical(y_test, num_classes10)
    # pre-generated augmentation arrays produced by the companion scripts
    x_train_additions = np.load('Augmentation_Sets/x_augmentation_array_interpol4_560.npy')
    y_train_additions = np.load('Augmentation_Sets/y_augmentation_array_interpol4_560.npy')
    # 45k/5k train/validation split; the same seed is used at training time
    x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
    # augmentation arrays are stored channels-first; move channels last (NHWC)
    x_train_additions = x_train_additions.transpose(0,2,3,1)
    y_train45 = keras.utils.to_categorical(y_train45, num_classes10)
    y_train_additions = y_train_additions.reshape(-1, num_classes10)
    # append the augmented samples to the 45k training split
    x_train45 = np.concatenate((x_train45, x_train_additions),axis=0)
    y_train45 = np.concatenate((y_train45, y_train_additions),axis=0)
    # color preprocessing: standardise every split with the TRAIN per-pixel stats
    img_mean = x_train45.mean(axis=0)  # per-pixel mean
    img_std = x_train45.std(axis=0)
    x_train45 = (x_train45-img_mean)/img_std
    x_val = (x_val-img_mean)/img_std
    x_test = (x_test-img_mean)/img_std
    # build network and evaluate the stored weights (ECE/MCE/error + logits pickle)
    img_input = Input(shape=(img_rows,img_cols,img_channels))
    output = residual_network(img_input,num_classes10,stack_n)
    model = Model(img_input, output)
    evaluate_model(model, weights_file_10, x_test, y_test, bins = 15, verbose = True,
                   pickle_file = "probs_resnet110_c10clip_aug_interpol4_560", x_val = x_val, y_val = y_val)
| 38.23741 | 132 | 0.660207 |
import keras
import numpy as np
from keras.datasets import cifar10, cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.models import Model
from keras import optimizers, regularizers
from sklearn.model_selection import train_test_split
import pickle
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
stack_n = 18
num_classes10 = 10
num_classes100 = 100
img_rows, img_cols = 32, 32
img_channels = 3
batch_size = 128
epochs = 200
iterations = 45560 // batch_size
weight_decay = 0.0001
mean = [125.307, 122.95, 113.865]
std = [62.9932, 62.0887, 66.7048]
seed = 333
weights_file_10 = "resnet_110_45kclip_aug_interpol4_560.h5"
def scheduler(epoch):
if epoch < 80:
return 0.1
if epoch < 150:
return 0.01
return 0.001
def residual_network(img_input,classes_num=10,stack_n=5):
def residual_block(intput,out_channel,increase=False):
if increase:
stride = (2,2)
else:
stride = (1,1)
pre_bn = BatchNormalization()(intput)
pre_relu = Activation('relu')(pre_bn)
conv_1 = Conv2D(out_channel,kernel_size=(3,3),strides=stride,padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
bn_1 = BatchNormalization()(conv_1)
relu1 = Activation('relu')(bn_1)
conv_2 = Conv2D(out_channel,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(relu1)
if increase:
projection = Conv2D(out_channel,
kernel_size=(1,1),
strides=(2,2),
padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(intput)
block = add([conv_2, projection])
else:
block = add([intput,conv_2])
return block
x = Conv2D(filters=16,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(img_input)
for _ in range(stack_n):
x = residual_block(x,16,False)
x = residual_block(x,32,True)
for _ in range(1,stack_n):
x = residual_block(x,32,False)
x = residual_block(x,64,True)
for _ in range(1,stack_n):
x = residual_block(x,64,False)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(classes_num,activation='softmax',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(x)
return x
if __name__ == '__main__':
print("Cifar-10 evaluation")
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_test = keras.utils.to_categorical(y_test, num_classes10)
x_train_additions = np.load('Augmentation_Sets/x_augmentation_array_interpol4_560.npy')
y_train_additions = np.load('Augmentation_Sets/y_augmentation_array_interpol4_560.npy')
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed)
x_train_additions = x_train_additions.transpose(0,2,3,1)
y_train45 = keras.utils.to_categorical(y_train45, num_classes10)
y_train_additions = y_train_additions.reshape(-1, num_classes10)
x_train45 = np.concatenate((x_train45, x_train_additions),axis=0)
y_train45 = np.concatenate((y_train45, y_train_additions),axis=0)
img_mean = x_train45.mean(axis=0)
img_std = x_train45.std(axis=0)
x_train45 = (x_train45-img_mean)/img_std
x_val = (x_val-img_mean)/img_std
x_test = (x_test-img_mean)/img_std
img_input = Input(shape=(img_rows,img_cols,img_channels))
output = residual_network(img_input,num_classes10,stack_n)
model = Model(img_input, output)
evaluate_model(model, weights_file_10, x_test, y_test, bins = 15, verbose = True,
pickle_file = "probs_resnet110_c10clip_aug_interpol4_560", x_val = x_val, y_val = y_val)
| true | true |
1c35238991c89b2303596c6026d78ebc7dc792de | 155 | py | Python | libtcodpy.py | Rakaneth/python-tcod | 70ff1895fd7ae87bf66f16e388211db389d983fd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | libtcodpy.py | Rakaneth/python-tcod | 70ff1895fd7ae87bf66f16e388211db389d983fd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | libtcodpy.py | Rakaneth/python-tcod | 70ff1895fd7ae87bf66f16e388211db389d983fd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """This module just an alias for tcod"""
import warnings
# Backwards-compatibility shim: warn importers to migrate to ``import tcod``.
# stacklevel=2 attributes the warning to the module doing the import, not here.
warnings.warn("`import tcod` is preferred.", DeprecationWarning, stacklevel=2)
# Re-export the whole tcod API under the legacy ``libtcodpy`` name.
from tcod import *
| 31 | 78 | 0.76129 | import warnings
warnings.warn("`import tcod` is preferred.", DeprecationWarning, stacklevel=2)
from tcod import *
| true | true |
1c3523fd8d02a1516ef4dd75b146e5fa3c73adca | 5,803 | py | Python | Supermicro/benchmarks/maskrcnn/implementations/pytorch_SYS-420GP-TNAR/maskrcnn_benchmark/modeling/matcher.py | gglin001/training_results_v1.1 | 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24 | [
"Apache-2.0"
] | 48 | 2020-07-29T18:09:23.000Z | 2021-10-09T01:53:33.000Z | Supermicro/benchmarks/maskrcnn/implementations/pytorch_SYS-420GP-TNAR/maskrcnn_benchmark/modeling/matcher.py | gglin001/training_results_v1.1 | 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24 | [
"Apache-2.0"
] | 21 | 2021-08-31T08:34:50.000Z | 2022-03-17T11:42:10.000Z | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/modeling/matcher.py | lablup/training_results_v0.7 | f5bb59aa0f8b18b602763abe47d1d24d0d54b197 | [
"Apache-2.0"
] | 42 | 2020-08-01T06:41:24.000Z | 2022-01-20T10:33:08.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
import torch
from maskrcnn_benchmark import _C
class Matcher(object):
    """Assign each predicted "element" (e.g. a box) to at most one ground truth.

    Matching is driven by an MxN ``match_quality_matrix`` holding the pairwise
    quality (for boxes, typically IoU) between M ground-truth elements and N
    predictions.  Each prediction receives the index of the ground-truth
    element it matches, or one of the negative sentinels below when no
    acceptable match exists.  A ground truth may be matched by zero or more
    predictions.
    """

    # Sentinels written into the match vector for unmatched predictions.
    BELOW_LOW_THRESHOLD = -1
    BETWEEN_THRESHOLDS = -2

    def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
        """Store the matching thresholds.

        Args:
            high_threshold (float): qualities >= this value are real matches.
            low_threshold (float): qualities in [low_threshold, high_threshold)
                become BETWEEN_THRESHOLDS; below low_threshold become
                BELOW_LOW_THRESHOLD.
            allow_low_quality_matches (bool): if True, additionally keep, for
                every ground truth, its best prediction even when that quality
                falls under the thresholds (see ``set_low_quality_matches_``).
        """
        assert low_threshold <= high_threshold
        self.high_threshold = high_threshold
        self.low_threshold = low_threshold
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, match_quality_matrix, batched=0):
        """Return an N-vector of matched gt indices (or negative sentinels).

        Args:
            match_quality_matrix (Tensor[float]): MxN pairwise quality between
                M ground-truth elements and N predictions.
            batched (int): when non-zero on CUDA, the matrix already carries a
                batch dimension expected by the fused kernel.
        """
        if match_quality_matrix.numel() == 0:
            # Distinguish "no ground truth" from "no proposals" so the
            # training failure is diagnosable.
            if match_quality_matrix.shape[0] == 0:
                raise ValueError(
                    "No ground-truth boxes available for one of the images "
                    "during training")
            raise ValueError(
                "No proposal boxes available for one of the images "
                "during training")

        if match_quality_matrix.is_cuda:
            # The fused CUDA kernel performs thresholding and (optionally) the
            # low-quality rescue in one pass; it expects a batch dimension.
            if batched:
                return _C.match_proposals(
                    match_quality_matrix, self.allow_low_quality_matches,
                    self.low_threshold, self.high_threshold)
            return _C.match_proposals(
                match_quality_matrix.unsqueeze(0),
                self.allow_low_quality_matches,
                self.low_threshold,
                self.high_threshold).squeeze(0)

        # CPU fallback: best gt candidate for each prediction (max over dim 0).
        matched_vals, matches = match_quality_matrix.max(dim=0)
        if self.allow_low_quality_matches:
            # Snapshot the raw assignments before any are masked out.
            all_matches = matches.clone()

        # Demote candidates whose best quality is too low.
        low_quality = matched_vals < self.low_threshold
        mid_quality = (matched_vals >= self.low_threshold) & (
            matched_vals < self.high_threshold)
        matches.masked_fill_(low_quality, Matcher.BELOW_LOW_THRESHOLD)
        matches.masked_fill_(mid_quality, Matcher.BETWEEN_THRESHOLDS)

        if self.allow_low_quality_matches:
            self.set_low_quality_matches_(matches, all_matches,
                                          match_quality_matrix)
        return matches

    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
        """Rescue, in place, the best prediction(s) for every ground truth.

        For each gt, every prediction tied for that gt's maximum quality is
        restored to its pre-threshold assignment, so no gt is left completely
        unmatched merely because all of its overlaps were low.
        """
        best_per_gt, _ = match_quality_matrix.max(dim=1)
        # Rows of ``tied_pairs`` are (gt index, prediction index); ties for a
        # gt's maximum are all kept.
        tied_pairs = torch.nonzero(
            match_quality_matrix == best_per_gt[:, None])
        preds_to_restore = tied_pairs[:, 1]
        matches[preds_to_restore] = all_matches[preds_to_restore]
| 47.958678 | 161 | 0.639152 |
import torch
from maskrcnn_benchmark import _C
class Matcher(object):
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
assert low_threshold <= high_threshold
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix, batched = 0):
if match_quality_matrix.numel() == 0:
if match_quality_matrix.shape[0] == 0:
raise ValueError(
"No ground-truth boxes available for one of the images "
"during training")
else:
raise ValueError(
"No proposal boxes available for one of the images "
"during training")
if match_quality_matrix.is_cuda:
if batched:
matches = _C.match_proposals(match_quality_matrix,self.allow_low_quality_matches, self.low_threshold, self.high_threshold)
else:
match_quality_matrix_unsqueezed = match_quality_matrix.unsqueeze(0)
matches = _C.match_proposals(match_quality_matrix_unsqueezed, self.allow_low_quality_matches, self.low_threshold, self.high_threshold).squeeze(0)
else:
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (
matched_vals < self.high_threshold
)
matches.masked_fill_(below_low_threshold, Matcher.BELOW_LOW_THRESHOLD)
matches.masked_fill_(between_thresholds, Matcher.BETWEEN_THRESHOLDS)
if self.allow_low_quality_matches:
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
gt_pred_pairs_of_highest_quality = torch.nonzero(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
| true | true |
1c3524bc34295eb32e34de4b5a895a6a729a82bc | 469 | py | Python | Algorithms/Implementation/Apple_and_Orange/main.py | ugurcan-sonmez-95/HackerRank_Problems | 187d83422128228c241f279096386df5493d539d | [
"MIT"
] | null | null | null | Algorithms/Implementation/Apple_and_Orange/main.py | ugurcan-sonmez-95/HackerRank_Problems | 187d83422128228c241f279096386df5493d539d | [
"MIT"
] | null | null | null | Algorithms/Implementation/Apple_and_Orange/main.py | ugurcan-sonmez-95/HackerRank_Problems | 187d83422128228c241f279096386df5493d539d | [
"MIT"
] | null | null | null | ### Apple and Orange - Solution
def countApplesAndOranges(*args):
    """Print, one per line, how many apples/oranges land on the house [s, t].

    Positional args: s, t, a, b, list_apple, list_orange -- the house range,
    the apple/orange tree positions, and each fruit's fall distance from its
    tree.  Fixed to read its inputs from *args rather than silently relying
    on same-named module globals (the original ignored its arguments).
    """
    s, t, a, b, list_apple, list_orange = args
    apples_on_house = sum(s <= a + d <= t for d in list_apple)
    oranges_on_house = sum(s <= b + d <= t for d in list_orange)
    print(apples_on_house, oranges_on_house, sep='\n')
# Read the house range [s, t], the apple/orange tree positions a and b,
# the fruit counts, and each fruit's fall distance from its tree.
s, t = map(int, input().split())
a, b = map(int, input().split())
apples, oranges = map(int, input().split())
# Keep only the declared number of distances from each line.
list_apple = tuple(map(int, input().split()[:apples]))
list_orange = tuple(map(int, input().split()[:oranges]))
countApplesAndOranges(s,t,a,b,list_apple,list_orange) | 39.083333 | 97 | 0.646055 | a+i) <= t for i in list_apple), sum(s <= (b+j) <= t for j in list_orange))
print(*count, sep='\n')
s, t = map(int, input().split())
a, b = map(int, input().split())
apples, oranges = map(int, input().split())
list_apple = tuple(map(int, input().split()[:apples]))
list_orange = tuple(map(int, input().split()[:oranges]))
countApplesAndOranges(s,t,a,b,list_apple,list_orange) | true | true |
1c3524d3b7d89fd9ea3d4786c0b184c1ef7629b3 | 598 | py | Python | pbx_gs_python_utils/utils/Process.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 3 | 2018-12-14T15:43:46.000Z | 2019-04-25T07:44:58.000Z | pbx_gs_python_utils/utils/Process.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 1 | 2019-05-11T14:19:37.000Z | 2019-05-11T14:51:04.000Z | pbx_gs_python_utils/utils/Process.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 4 | 2018-12-27T04:54:14.000Z | 2019-05-11T14:07:47.000Z | import os
import signal
import subprocess
class Process:
    """Minimal helpers for running and force-killing external processes."""

    @staticmethod
    def run(executable, params=None, cwd='.'):
        """Run ``executable`` with ``params`` in ``cwd`` and capture output.

        Args:
            executable (str): program to execute.
            params (list, optional): extra command-line arguments.
            cwd (str): working directory for the child process.

        Returns:
            dict: full command line, decoded stdout/stderr, and the child's
            exit code under the (new, backward-compatible) "returncode" key.
        """
        # ``params=None`` replaces the original mutable ``[]`` default, which
        # is the classic shared-mutable-default pitfall.
        run_params = [executable] + (params or [])
        result = subprocess.run(run_params, cwd=cwd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        return {
            "runParams": run_params,
            "stdout": result.stdout.decode(),
            "stderr": result.stderr.decode(),
            "returncode": result.returncode,
        }

    @staticmethod
    def stop(pid):
        """Forcibly terminate ``pid`` with SIGKILL (POSIX only)."""
        print('killing process {0} with {1}'.format(pid, signal.SIGKILL))
        # os.kill returns None; it is printed only to preserve the original's
        # console output.
        print(os.kill(pid, signal.SIGKILL))
| 27.181818 | 107 | 0.593645 | import os
import signal
import subprocess
class Process:
@staticmethod
def run(executable, params = [], cwd='.'):
run_params = [executable] + params
result = subprocess.run(run_params, cwd = cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return {
"runParams" : run_params,
"stdout" : result.stdout.decode(),
"stderr" : result.stderr.decode(),
}
@staticmethod
def stop(pid):
print('killing process {0} with {1}'.format(pid, signal.SIGKILL))
print(os.kill(pid, signal.SIGKILL))
| true | true |
1c35250043d955c5b423451551b1a24c66b4c5d2 | 24,594 | py | Python | unit_tests/test_charms_openstack_devices_pci.py | cloud-padawan/charms.openstack | 1fe4e411cf1ebc5b89e69af0cbac5e4045811ef8 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_charms_openstack_devices_pci.py | cloud-padawan/charms.openstack | 1fe4e411cf1ebc5b89e69af0cbac5e4045811ef8 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_charms_openstack_devices_pci.py | cloud-padawan/charms.openstack | 1fe4e411cf1ebc5b89e69af0cbac5e4045811ef8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note that the unit_tests/__init__.py has the following lines to stop
# side effects from the imorts from charm helpers.
# sys.path.append('./lib')
# mock out some charmhelpers libraries as they have apt install side effects
# sys.modules['charmhelpers.contrib.openstack.utils'] = mock.MagicMock()
# sys.modules['charmhelpers.contrib.network.ip'] = mock.MagicMock()
from __future__ import absolute_import
import mock
import charms_openstack.devices.pci as pci
import unit_tests.pci_responses as pci_responses
import unit_tests.utils as utils
def mocked_subprocess(subproc_map=None):
    """Build a stand-in for subprocess.check_output driven by canned data.

    The returned callable looks the requested command line up in
    pci_responses.COMMANDS and serves the matching canned output from
    *subproc_map* (defaulting to pci_responses.NET_SETUP).  A command that
    matches with its final argument stripped is treated as parameterised,
    and the canned output is selected by that final argument.  Unknown
    commands fall through and return None.
    """
    if not subproc_map:
        subproc_map = pci_responses.NET_SETUP

    def fake_check_output(cmd, stdin=None):
        for name, known_cmd in pci_responses.COMMANDS.items():
            if known_cmd == cmd:
                return subproc_map[name]
            if known_cmd == cmd[:-1]:
                return subproc_map[cmd[-1]][name]

    return fake_check_output
class mocked_filehandle(object):
    # Minimal file-object stand-in: records the path handed to open() and
    # serves canned content from pci_responses.FILE_CONTENTS for that path.
    def _setfilename(self, fname, omode):
        # open() replacement; the open mode is accepted but ignored.
        self.FILENAME = fname

    def _getfilecontents_read(self):
        # read() replacement: the whole canned file body.
        return pci_responses.FILE_CONTENTS[self.FILENAME]

    def _getfilecontents_readlines(self):
        # readlines() replacement: canned body split on newlines.
        return pci_responses.FILE_CONTENTS[self.FILENAME].split('\n')
class PCIDevTest(utils.BaseTestCase):
    """Tests for the module-level helpers in charms_openstack.devices.pci."""

    def test_format_pci_addr(self):
        """Addresses are zero-padded into the canonical dddd:dd:dd.d form."""
        cases = (('0:0:1.1', '0000:00:01.1'),
                 ('0000:00:02.1', '0000:00:02.1'))
        for raw, canonical in cases:
            self.assertEqual(pci.format_pci_addr(raw), canonical)
class PCINetDeviceTest(utils.BaseTestCase):
    def test_init(self):
        """__init__ stores the PCI address and refreshes device attributes."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        a = pci.PCINetDevice('pciaddr')
        self.update_attributes.assert_called_once_with()
        self.assertEqual(a.pci_address, 'pciaddr')
    def test_update_attributes(self):
        """update_attributes refreshes both kmod and interface information."""
        self.patch_object(pci.PCINetDevice, '__init__')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod')
        self.patch_object(pci.PCINetDevice, 'update_modalias_kmod')
        self.patch_object(pci.PCINetDevice, 'update_interface_info')
        a = pci.PCINetDevice('pciaddr')
        a.update_attributes()
        self.update_modalias_kmod.assert_called_once_with()
        self.update_interface_info.assert_called_once_with()
    def test_loaded_kmod(self):
        """loaded_kmod reports the driver bound per the canned lspci output."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = mocked_subprocess()
        device = pci.PCINetDevice('0000:06:00.0')
        self.assertEqual(device.loaded_kmod, 'igb_uio')
    def test_update_modalias_kmod(self):
        """update_modalias_kmod derives the kernel module from sysfs modalias."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        device = pci.PCINetDevice('0000:07:00.0')
        self.subprocess.check_output.side_effect = mocked_subprocess()
        with utils.patch_open() as (_open, _file):
            # Serve canned sysfs file contents keyed on the opened path.
            super_fh = mocked_filehandle()
            _file.readlines = mock.MagicMock()
            _open.side_effect = super_fh._setfilename
            _file.read.side_effect = super_fh._getfilecontents_read
            _file.readlines.side_effect = super_fh._getfilecontents_readlines
            device.update_modalias_kmod()
            self.assertEqual(device.modalias_kmod, 'enic')
    def test_update_interface_info_call_vpeinfo(self):
        """A DPDK-bound device (igb_uio) is inspected via the VPE path only."""
        self.patch_object(pci.PCINetDevice, 'update_interface_info_eth')
        self.patch_object(pci.PCINetDevice, 'update_interface_info_vpe')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_kernel_name')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod', new='igb_uio')
        self.patch_object(pci, 'subprocess')
        self.get_kernel_name.return_value = '3.13.0-77-generic'
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev6 = pci.PCINetDevice('0000:06:00.0')
        dev6.update_interface_info()
        self.update_interface_info_vpe.assert_called_with()
        self.assertFalse(self.update_interface_info_eth.called)
    def test_update_interface_info_call_ethinfo(self):
        """A kernel-bound device (igb) is inspected via the eth path only."""
        self.patch_object(pci.PCINetDevice, 'update_interface_info_eth')
        self.patch_object(pci.PCINetDevice, 'update_interface_info_vpe')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_kernel_name')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod', new='igb')
        self.patch_object(pci, 'subprocess')
        self.get_kernel_name.return_value = '3.13.0-77-generic'
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:10:00.0')
        dev.update_interface_info()
        self.update_interface_info_eth.assert_called_with()
        self.assertFalse(self.update_interface_info_vpe.called)
    def test_test_update_interface_info_orphan(self):
        """A device with no bound driver leaves name/mac unset (orphan)."""
        self.patch_object(pci.PCINetDevice, 'update_interface_info_eth')
        self.patch_object(pci.PCINetDevice, 'update_interface_info_vpe')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_kernel_name')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess(
                subproc_map=pci_responses.NET_SETUP_ORPHAN)
        dev = pci.PCINetDevice('0000:07:00.0')
        dev.update_interface_info()
        self.assertFalse(self.update_interface_info_vpe.called)
        self.assertFalse(self.update_interface_info_eth.called)
        self.assertEqual(dev.interface_name, None)
        self.assertEqual(dev.mac_address, None)
    def test_get_kernel_name(self):
        """get_kernel_name returns the running kernel release string."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        dev = pci.PCINetDevice('0000:07:00.0')
        self.subprocess.check_output.return_value = '3.13.0-55-generic'
        self.assertEqual(dev.get_kernel_name(), '3.13.0-55-generic')
    def test_pci_rescan(self):
        """pci_rescan writes '1' to the sysfs bus-wide rescan trigger."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        dev = pci.PCINetDevice('0000:07:00.0')
        with utils.patch_open() as (_open, _file):
            dev.pci_rescan()
            _open.assert_called_with('/sys/bus/pci/rescan', 'w')
            _file.write.assert_called_with('1')
    def test_bind(self):
        """bind writes the PCI address to the driver's sysfs bind file."""
        self.patch_object(pci.PCINetDevice, 'pci_rescan')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:07:00.0')
        with utils.patch_open() as (_open, _file):
            dev.bind('enic')
            _open.assert_called_with('/sys/bus/pci/drivers/enic/bind', 'w')
            _file.write.assert_called_with('0000:07:00.0')
        # binding triggers a bus rescan and an attribute refresh
        self.pci_rescan.assert_called_with()
        self.update_attributes.assert_called_with()
    def test_unbind(self):
        """unbind writes the PCI address to the loaded driver's unbind file."""
        self.patch_object(pci.PCINetDevice, 'pci_rescan')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod', new='igb_uio')
        dev = pci.PCINetDevice('0000:07:00.0')
        with utils.patch_open() as (_open, _file):
            dev.unbind()
            _open.assert_called_with(
                '/sys/bus/pci/drivers/igb_uio/unbind', 'w')
            _file.write.assert_called_with('0000:07:00.0')
        # unbinding also triggers a bus rescan and an attribute refresh
        self.pci_rescan.assert_called_with()
        self.update_attributes.assert_called_with()
    def test_update_interface_info_vpe(self):
        """The VPE entry matching this device's PCI address is adopted."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_vpe_interfaces_and_macs')
        self.get_vpe_interfaces_and_macs.return_value = [
            {
                'interface': 'TenGigabitEthernet6/0/0',
                'macAddress': '84:b8:02:2a:5f:c3',
                'pci_address': '0000:06:00.0'},
            {
                'interface': 'TenGigabitEthernet7/0/0',
                'macAddress': '84:b8:02:2a:5f:c4',
                'pci_address': '0000:07:00.0'}]
        dev = pci.PCINetDevice('0000:07:00.0')
        dev.update_interface_info_vpe()
        self.assertEqual('TenGigabitEthernet7/0/0', dev.interface_name)
        self.assertEqual('84:b8:02:2a:5f:c4', dev.mac_address)
        self.assertEqual('vpebound', dev.state)
    def test_update_interface_info_vpe_orphan(self):
        """With no matching VPE entry, name/mac/state all remain unset."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_vpe_interfaces_and_macs')
        self.get_vpe_interfaces_and_macs.return_value = [
            {
                'interface': 'TenGigabitEthernet6/0/0',
                'macAddress': '84:b8:02:2a:5f:c3',
                'pci_address': '0000:06:00.0'}]
        dev = pci.PCINetDevice('0000:07:00.0')
        dev.update_interface_info_vpe()
        self.assertEqual(None, dev.interface_name)
        self.assertEqual(None, dev.mac_address)
        self.assertEqual(None, dev.state)
    def test_get_vpe_cli_out(self):
        """get_vpe_cli_out returns the raw confd CLI dump (incl. local0)."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:07:00.0')
        self.assertTrue('local0' in dev.get_vpe_cli_out())
    def test_get_vpe_interfaces_and_macs(self):
        """CLI output is parsed into interface/mac/pci-address dicts."""
        self.patch_object(pci.PCINetDevice, 'get_vpe_cli_out')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        self.get_vpe_cli_out.return_value = pci_responses.CONFD_CLI
        dev = pci.PCINetDevice('0000:07:00.0')
        vpe_devs = dev.get_vpe_interfaces_and_macs()
        expect = [
            {
                'interface': 'TenGigabitEthernet6/0/0',
                'macAddress': '84:b8:02:2a:5f:c3',
                'pci_address': '0000:06:00.0'
            },
            {
                'interface': 'TenGigabitEthernet7/0/0',
                'macAddress': '84:b8:02:2a:5f:c4',
                'pci_address': '0000:07:00.0'
            },
        ]
        self.assertEqual(vpe_devs, expect)
    def test_get_vpe_interfaces_and_macs_invalid_cli(self):
        """CLI output missing the local0 marker raises VPECLIException."""
        self.patch_object(pci.PCINetDevice, 'get_vpe_cli_out')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:07:00.0')
        self.get_vpe_cli_out.return_value = pci_responses.CONFD_CLI_NOLOCAL
        with self.assertRaises(pci.VPECLIException):
            dev.get_vpe_interfaces_and_macs()
    def test_get_vpe_interfaces_and_macs_invmac(self):
        """Entries with an invalid MAC are dropped from the parsed result."""
        self.patch_object(pci.PCINetDevice, 'get_vpe_cli_out')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:07:00.0')
        self.get_vpe_cli_out.return_value = pci_responses.CONFD_CLI_INVMAC
        vpe_devs = dev.get_vpe_interfaces_and_macs()
        expect = [
            {
                'interface': 'TenGigabitEthernet7/0/0',
                'macAddress': '84:b8:02:2a:5f:c4',
                'pci_address': '0000:07:00.0'
            },
        ]
        self.assertEqual(vpe_devs, expect)
    def test_extract_pci_addr_from_vpe_interface(self):
        """The bus/slot/function triple in a VPE name maps to a PCI address."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:07:00.0')
        self.assertEqual(dev.extract_pci_addr_from_vpe_interface(
            'TenGigabitEthernet1/1/1'), '0000:01:01.1')
        # components may be hex digits
        self.assertEqual(dev.extract_pci_addr_from_vpe_interface(
            'TenGigabitEtherneta/0/0'), '0000:0a:00.0')
        # GigabitEthernet names are handled the same way
        self.assertEqual(dev.extract_pci_addr_from_vpe_interface(
            'GigabitEthernet0/2/0'), '0000:00:02.0')
    def test_update_interface_info_eth(self):
        """The sysfs entry matching this device's PCI address is adopted."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_interfaces_and_macs')
        dev = pci.PCINetDevice('0000:10:00.0')
        self.get_sysnet_interfaces_and_macs.return_value = [
            {
                'interface': 'eth2',
                'macAddress': 'a8:9d:21:cf:93:fc',
                'pci_address': '0000:10:00.0',
                'state': 'up'
            },
            {
                'interface': 'eth3',
                'macAddress': 'a8:9d:21:cf:93:fd',
                'pci_address': '0000:10:00.1',
                'state': 'down'
            }
        ]
        dev.update_interface_info_eth()
        self.assertEqual(dev.interface_name, 'eth2')
    def test_get_sysnet_interfaces_and_macs_virtio(self):
        """A virtio device's PCI address comes from its parent sysfs dir."""
        self.patch_object(pci.glob, 'glob')
        self.patch_object(pci.os.path, 'islink')
        self.patch_object(pci.os.path, 'realpath')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_device_state')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_mac')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_interface')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:06:00.0')
        self.glob.return_value = ['/sys/class/net/eth2']
        self.get_sysnet_interface.return_value = 'eth2'
        self.get_sysnet_mac.return_value = 'a8:9d:21:cf:93:fc'
        self.get_sysnet_device_state.return_value = 'up'
        # the trailing virtio5 component must be stripped off
        self.realpath.return_value = ('/sys/devices/pci0000:00/0000:00:07.0/'
                                      'virtio5')
        self.islink.return_value = True
        expect = {
            'interface': 'eth2',
            'macAddress': 'a8:9d:21:cf:93:fc',
            'pci_address': '0000:00:07.0',
            'state': 'up',
        }
        self.assertEqual(dev.get_sysnet_interfaces_and_macs(), [expect])
    def test_get_sysnet_interfaces_and_macs(self):
        """The PCI address is the last element of the resolved sysfs device path."""
        self.patch_object(pci.glob, 'glob')
        self.patch_object(pci.os.path, 'islink')
        self.patch_object(pci.os.path, 'realpath')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_device_state')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_mac')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_interface')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:06:00.0')
        self.glob.return_value = ['/sys/class/net/eth2']
        self.get_sysnet_interface.return_value = 'eth2'
        self.get_sysnet_mac.return_value = 'a8:9d:21:cf:93:fc'
        self.get_sysnet_device_state.return_value = 'up'
        # Deeply nested bridge path: only the final 0000:07:00.0 is reported.
        self.realpath.return_value = (
            '/sys/devices/pci0000:00/0000:00:02.0/0000:02:00.0/0000:03:00.0/'
            '0000:04:00.0/0000:05:01.0/0000:07:00.0')
        self.islink.return_value = True
        expect = {
            'interface': 'eth2',
            'macAddress': 'a8:9d:21:cf:93:fc',
            'pci_address': '0000:07:00.0',
            'state': 'up',
        }
        self.assertEqual(dev.get_sysnet_interfaces_and_macs(), [expect])
    def test_get_sysnet_mac(self):
        """get_sysnet_mac returns the canned MAC for the opened sysfs path."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        device = pci.PCINetDevice('0000:10:00.1')
        with utils.patch_open() as (_open, _file):
            # Route open()/read() through the canned FILE_CONTENTS map.
            super_fh = mocked_filehandle()
            _file.readlines = mock.MagicMock()
            _open.side_effect = super_fh._setfilename
            _file.read.side_effect = super_fh._getfilecontents_read
            macaddr = device.get_sysnet_mac('/sys/class/net/eth3')
        self.assertEqual(macaddr, 'a8:9d:21:cf:93:fd')
    def test_get_sysnet_device_state(self):
        """get_sysnet_device_state returns the canned state ('down') for eth3."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        device = pci.PCINetDevice('0000:10:00.1')
        with utils.patch_open() as (_open, _file):
            # Route open()/read() through the canned FILE_CONTENTS map.
            super_fh = mocked_filehandle()
            _file.readlines = mock.MagicMock()
            _open.side_effect = super_fh._setfilename
            _file.read.side_effect = super_fh._getfilecontents_read
            state = device.get_sysnet_device_state('/sys/class/net/eth3')
        self.assertEqual(state, 'down')
def test_get_sysnet_interface(self):
self.patch_object(pci.PCINetDevice, 'update_attributes')
device = pci.PCINetDevice('0000:10:00.1')
self.assertEqual(
device.get_sysnet_interface('/sys/class/net/eth3'), 'eth3')
class PCINetDevicesTest(utils.BaseTestCase):
    """Tests for pci.PCINetDevices, the collection wrapper over PCINetDevice."""

    def test_init(self):
        """Construction creates one PCINetDevice per discovered PCI address."""
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pci.PCINetDevices()
        self.PCINetDevice.assert_called_once_with('pciaddr')

    def test_get_pci_ethernet_addresses(self):
        """Addresses are parsed from the canned subprocess (lspci-style) output."""
        self.patch_object(pci, 'subprocess')
        self.patch_object(pci, 'PCINetDevice')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        a = pci.PCINetDevices()
        self.assertEqual(
            a.get_pci_ethernet_addresses(),
            ['0000:06:00.0', '0000:07:00.0', '0000:10:00.0', '0000:10:00.1'])

    def test_update_devices(self):
        """update_devices refreshes attributes on every managed device."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        a = pci.PCINetDevices()
        a.update_devices()
        pcinetdev.update_attributes.assert_called_once_with()

    def test_get_macs(self):
        """get_macs collects the mac_address of each managed device."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.mac_address = 'mac1'
        a = pci.PCINetDevices()
        self.assertEqual(a.get_macs(), ['mac1'])

    def test_get_device_from_mac(self):
        """Devices can be looked up by MAC address."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.mac_address = 'mac1'
        a = pci.PCINetDevices()
        self.assertEqual(a.get_device_from_mac('mac1'), pcinetdev)

    def test_get_device_from_pci_address(self):
        """Devices can be looked up by PCI address."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.pci_address = 'pciaddr'
        a = pci.PCINetDevices()
        self.assertEqual(a.get_device_from_pci_address('pciaddr'), pcinetdev)

    def test_rebind_orphans(self):
        """rebind_orphans unbinds then binds orphaned devices."""
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci.PCINetDevices, 'unbind_orphans')
        self.patch_object(pci.PCINetDevices, 'bind_orphans')
        self.patch_object(pci, 'PCINetDevice')
        self.get_pci_ethernet_addresses.return_value = []
        a = pci.PCINetDevices()
        a.rebind_orphans()
        self.unbind_orphans.assert_called_once_with()
        self.bind_orphans.assert_called_once_with()

    def test_unbind_orphans(self):
        """Each orphan is unbound and device state is refreshed afterwards."""
        orphan = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        self.patch_object(pci.PCINetDevices, 'get_orphans')
        self.patch_object(pci.PCINetDevices, 'update_devices')
        self.patch_object(pci, 'PCINetDevice')
        self.get_orphans.return_value = [orphan]
        a = pci.PCINetDevices()
        a.unbind_orphans()
        orphan.unbind.assert_called_once_with()
        self.update_devices.assert_called_once_with()

    def test_bind_orphans(self):
        """Each orphan is bound to its modalias kernel module, then refreshed."""
        orphan = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        self.patch_object(pci.PCINetDevices, 'get_orphans')
        self.patch_object(pci.PCINetDevices, 'update_devices')
        self.patch_object(pci, 'PCINetDevice')
        self.get_orphans.return_value = [orphan]
        orphan.modalias_kmod = 'kmod'
        a = pci.PCINetDevices()
        a.bind_orphans()
        orphan.bind.assert_called_once_with('kmod')
        self.update_devices.assert_called_once_with()

    def test_get_orphans(self):
        """A device with no loaded kmod, interface name nor MAC is an orphan."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.loaded_kmod = None
        pcinetdev.interface_name = None
        pcinetdev.mac_address = None
        a = pci.PCINetDevices()
        self.assertEqual(a.get_orphans(), [pcinetdev])
class PCIInfoTest(utils.BaseTestCase):
    """Tests for pci.PCIInfo, which maps user-requested config onto devices."""

    def dev_mock(self, state, pci_address, interface_name):
        """Return a MagicMock device carrying the given attribute values."""
        dev = mock.MagicMock()
        dev.state = state
        dev.pci_address = pci_address
        dev.interface_name = interface_name
        return dev

    def test_init(self):
        """Only 'down' devices are claimed; the 'up' device (mac3) is excluded."""
        net_dev_mocks = {
            'mac1': self.dev_mock('down', 'pciaddr0', 'eth0'),
            'mac2': self.dev_mock('down', 'pciaddr1', 'eth1'),
            'mac3': self.dev_mock('up', 'pciaddr3', 'eth2'),
        }
        net_devs = mock.MagicMock()
        self.patch_object(pci.PCIInfo, 'get_user_requested_config')
        self.patch_object(pci, 'PCINetDevices')
        self.PCINetDevices.return_value = net_devs
        net_devs.get_macs.return_value = net_dev_mocks.keys()
        net_devs.get_device_from_mac.side_effect = lambda x: net_dev_mocks[x]
        self.get_user_requested_config.return_value = {
            'mac1': [{'net': 'net1'}, {'net': 'net2'}],
            'mac2': [{'net': 'net1'}],
            'mac3': [{'net': 'net1'}]}
        a = pci.PCIInfo()
        # mac3 is 'up', so it appears neither in local_mac_nets nor in the
        # vpe_dev_string of claimed PCI addresses.
        expect = {
            'mac1': [{'interface': 'eth0', 'net': 'net1'},
                     {'interface': 'eth0', 'net': 'net2'}],
            'mac2': [{'interface': 'eth1', 'net': 'net1'}]}
        self.assertEqual(a.local_mac_nets, expect)
        self.assertEqual(a.vpe_dev_string, 'dev pciaddr0 dev pciaddr1')

    def test_get_user_requested_config(self):
        """'mac=..;net=..' tokens are grouped into a dict keyed by MAC."""
        self.patch_object(pci.PCIInfo, '__init__')
        self.patch_object(pci.hookenv, 'config')
        self.config.return_value = ('mac=mac1;net=net1 mac=mac1;net=net2'
                                    ' mac=mac2;net=net1')
        a = pci.PCIInfo()
        expect = {
            'mac1': [{'net': 'net1'}, {'net': 'net2'}],
            'mac2': [{'net': 'net1'}]}
        self.assertEqual(a.get_user_requested_config(), expect)

    def test_get_user_requested_invalid_entries(self):
        """Malformed entries ('ac=...', 'randomstuff') are silently dropped."""
        self.patch_object(pci.PCIInfo, '__init__')
        self.patch_object(pci.hookenv, 'config')
        self.config.return_value = ('ac=mac1;net=net1 randomstuff'
                                    ' mac=mac2;net=net1')
        a = pci.PCIInfo()
        expect = {'mac2': [{'net': 'net1'}]}
        self.assertEqual(a.get_user_requested_config(), expect)

    def test_get_user_requested_config_empty(self):
        """Unset charm config yields an empty mapping."""
        self.patch_object(pci.PCIInfo, '__init__')
        self.patch_object(pci.hookenv, 'config')
        self.config.return_value = None
        a = pci.PCIInfo()
        expect = {}
        self.assertEqual(a.get_user_requested_config(), expect)
| 43.996422 | 77 | 0.65703 |
from __future__ import absolute_import
import mock
import charms_openstack.devices.pci as pci
import unit_tests.pci_responses as pci_responses
import unit_tests.utils as utils
def mocked_subprocess(subproc_map=None):
    """Build a fake subprocess.check_output keyed on pci_responses.COMMANDS.

    A command equal to a known command yields the canned response for that
    key; a command whose last argument was appended to a known command
    yields the response nested under that argument.  Anything else falls
    through and yields None.
    """
    if not subproc_map:
        subproc_map = pci_responses.NET_SETUP

    def _subproc(cmd, stdin=None):
        for key, known_cmd in pci_responses.COMMANDS.items():
            if known_cmd == cmd:
                return subproc_map[key]
            if known_cmd == cmd[:-1]:
                return subproc_map[cmd[-1]][key]

    return _subproc
class mocked_filehandle(object):
    """File-object stand-in backed by the pci_responses.FILE_CONTENTS map."""

    def _setfilename(self, fname, omode):
        # Wired to open()'s side_effect in tests: records which path was
        # "opened".  omode is accepted for signature compatibility, ignored.
        self.FILENAME = fname

    def _getfilecontents_read(self):
        # Emulates file.read() for the last recorded path.
        return pci_responses.FILE_CONTENTS[self.FILENAME]

    def _getfilecontents_readlines(self):
        # Emulates file.readlines() by splitting the canned contents.
        return pci_responses.FILE_CONTENTS[self.FILENAME].split('\n')
class PCIDevTest(utils.BaseTestCase):
    """Tests for module-level helpers in charms_openstack.devices.pci."""

    def test_format_pci_addr(self):
        """Short-form addresses are zero-padded; canonical ones pass through."""
        self.assertEqual(pci.format_pci_addr('0:0:1.1'), '0000:00:01.1')
        self.assertEqual(pci.format_pci_addr(
            '0000:00:02.1'), '0000:00:02.1')
class PCINetDeviceTest(utils.BaseTestCase):
    """Tests for pci.PCINetDevice: binding, sysfs and VPE CLI interrogation."""

    def test_init(self):
        """Construction records the address and refreshes attributes."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        a = pci.PCINetDevice('pciaddr')
        self.update_attributes.assert_called_once_with()
        self.assertEqual(a.pci_address, 'pciaddr')

    def test_update_attributes(self):
        """update_attributes refreshes both the modalias kmod and interface info."""
        self.patch_object(pci.PCINetDevice, '__init__')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod')
        self.patch_object(pci.PCINetDevice, 'update_modalias_kmod')
        self.patch_object(pci.PCINetDevice, 'update_interface_info')
        a = pci.PCINetDevice('pciaddr')
        a.update_attributes()
        self.update_modalias_kmod.assert_called_once_with()
        self.update_interface_info.assert_called_once_with()

    def test_loaded_kmod(self):
        """loaded_kmod reports the driver currently bound ('igb_uio' here)."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = mocked_subprocess()
        device = pci.PCINetDevice('0000:06:00.0')
        self.assertEqual(device.loaded_kmod, 'igb_uio')

    def test_update_modalias_kmod(self):
        """The canned modalias contents map the device to the 'enic' kmod."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        device = pci.PCINetDevice('0000:07:00.0')
        self.subprocess.check_output.side_effect = mocked_subprocess()
        with utils.patch_open() as (_open, _file):
            super_fh = mocked_filehandle()
            _file.readlines = mock.MagicMock()
            _open.side_effect = super_fh._setfilename
            _file.read.side_effect = super_fh._getfilecontents_read
            _file.readlines.side_effect = super_fh._getfilecontents_readlines
            device.update_modalias_kmod()
            self.assertEqual(device.modalias_kmod, 'enic')

    def test_update_interface_info_call_vpeinfo(self):
        """A device on the igb_uio driver is interrogated via the VPE path."""
        self.patch_object(pci.PCINetDevice, 'update_interface_info_eth')
        self.patch_object(pci.PCINetDevice, 'update_interface_info_vpe')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_kernel_name')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod', new='igb_uio')
        self.patch_object(pci, 'subprocess')
        self.get_kernel_name.return_value = '3.13.0-77-generic'
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev6 = pci.PCINetDevice('0000:06:00.0')
        dev6.update_interface_info()
        self.update_interface_info_vpe.assert_called_with()
        self.assertFalse(self.update_interface_info_eth.called)

    def test_update_interface_info_call_ethinfo(self):
        """A device on a regular kernel driver is interrogated via sysfs."""
        self.patch_object(pci.PCINetDevice, 'update_interface_info_eth')
        self.patch_object(pci.PCINetDevice, 'update_interface_info_vpe')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_kernel_name')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod', new='igb')
        self.patch_object(pci, 'subprocess')
        self.get_kernel_name.return_value = '3.13.0-77-generic'
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:10:00.0')
        dev.update_interface_info()
        self.update_interface_info_eth.assert_called_with()
        self.assertFalse(self.update_interface_info_vpe.called)

    def test_test_update_interface_info_orphan(self):
        """An unbound (orphan) device gets no interface name and no MAC."""
        self.patch_object(pci.PCINetDevice, 'update_interface_info_eth')
        self.patch_object(pci.PCINetDevice, 'update_interface_info_vpe')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_kernel_name')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess(
                subproc_map=pci_responses.NET_SETUP_ORPHAN)
        dev = pci.PCINetDevice('0000:07:00.0')
        dev.update_interface_info()
        self.assertFalse(self.update_interface_info_vpe.called)
        self.assertFalse(self.update_interface_info_eth.called)
        self.assertEqual(dev.interface_name, None)
        self.assertEqual(dev.mac_address, None)

    def test_get_kernel_name(self):
        """get_kernel_name returns the subprocess output unchanged."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        dev = pci.PCINetDevice('0000:07:00.0')
        self.subprocess.check_output.return_value = '3.13.0-55-generic'
        self.assertEqual(dev.get_kernel_name(), '3.13.0-55-generic')

    def test_pci_rescan(self):
        """pci_rescan writes '1' to the PCI bus rescan trigger file."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        dev = pci.PCINetDevice('0000:07:00.0')
        with utils.patch_open() as (_open, _file):
            dev.pci_rescan()
            _open.assert_called_with('/sys/bus/pci/rescan', 'w')
            _file.write.assert_called_with('1')

    def test_bind(self):
        """bind writes the PCI address to the driver's bind file, then rescans."""
        self.patch_object(pci.PCINetDevice, 'pci_rescan')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:07:00.0')
        with utils.patch_open() as (_open, _file):
            dev.bind('enic')
            _open.assert_called_with('/sys/bus/pci/drivers/enic/bind', 'w')
            _file.write.assert_called_with('0000:07:00.0')
        self.pci_rescan.assert_called_with()
        self.update_attributes.assert_called_with()

    def test_unbind(self):
        """unbind writes the PCI address to the loaded driver's unbind file."""
        self.patch_object(pci.PCINetDevice, 'pci_rescan')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'loaded_kmod', new='igb_uio')
        dev = pci.PCINetDevice('0000:07:00.0')
        with utils.patch_open() as (_open, _file):
            dev.unbind()
            _open.assert_called_with(
                '/sys/bus/pci/drivers/igb_uio/unbind', 'w')
            _file.write.assert_called_with('0000:07:00.0')
        self.pci_rescan.assert_called_with()
        self.update_attributes.assert_called_with()

    def test_update_interface_info_vpe(self):
        """The VPE entry matching our address is adopted; state becomes vpebound."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_vpe_interfaces_and_macs')
        self.get_vpe_interfaces_and_macs.return_value = [
            {
                'interface': 'TenGigabitEthernet6/0/0',
                'macAddress': '84:b8:02:2a:5f:c3',
                'pci_address': '0000:06:00.0'},
            {
                'interface': 'TenGigabitEthernet7/0/0',
                'macAddress': '84:b8:02:2a:5f:c4',
                'pci_address': '0000:07:00.0'}]
        dev = pci.PCINetDevice('0000:07:00.0')
        dev.update_interface_info_vpe()
        self.assertEqual('TenGigabitEthernet7/0/0', dev.interface_name)
        self.assertEqual('84:b8:02:2a:5f:c4', dev.mac_address)
        self.assertEqual('vpebound', dev.state)

    def test_update_interface_info_vpe_orphan(self):
        """With no matching VPE entry all attributes stay None."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_vpe_interfaces_and_macs')
        self.get_vpe_interfaces_and_macs.return_value = [
            {
                'interface': 'TenGigabitEthernet6/0/0',
                'macAddress': '84:b8:02:2a:5f:c3',
                'pci_address': '0000:06:00.0'}]
        dev = pci.PCINetDevice('0000:07:00.0')
        dev.update_interface_info_vpe()
        self.assertEqual(None, dev.interface_name)
        self.assertEqual(None, dev.mac_address)
        self.assertEqual(None, dev.state)

    def test_get_vpe_cli_out(self):
        """The confd CLI output is returned and contains the local0 interface."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:07:00.0')
        self.assertTrue('local0' in dev.get_vpe_cli_out())

    def test_get_vpe_interfaces_and_macs(self):
        """All interfaces in the CLI output are parsed with MAC and PCI address."""
        self.patch_object(pci.PCINetDevice, 'get_vpe_cli_out')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        self.get_vpe_cli_out.return_value = pci_responses.CONFD_CLI
        dev = pci.PCINetDevice('0000:07:00.0')
        vpe_devs = dev.get_vpe_interfaces_and_macs()
        expect = [
            {
                'interface': 'TenGigabitEthernet6/0/0',
                'macAddress': '84:b8:02:2a:5f:c3',
                'pci_address': '0000:06:00.0'
            },
            {
                'interface': 'TenGigabitEthernet7/0/0',
                'macAddress': '84:b8:02:2a:5f:c4',
                'pci_address': '0000:07:00.0'
            },
        ]
        self.assertEqual(vpe_devs, expect)

    def test_get_vpe_interfaces_and_macs_invalid_cli(self):
        """The CONFD_CLI_NOLOCAL canned output makes parsing raise VPECLIException."""
        self.patch_object(pci.PCINetDevice, 'get_vpe_cli_out')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:07:00.0')
        self.get_vpe_cli_out.return_value = pci_responses.CONFD_CLI_NOLOCAL
        with self.assertRaises(pci.VPECLIException):
            dev.get_vpe_interfaces_and_macs()

    def test_get_vpe_interfaces_and_macs_invmac(self):
        """With the CONFD_CLI_INVMAC canned output only the valid entry remains."""
        self.patch_object(pci.PCINetDevice, 'get_vpe_cli_out')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci, 'subprocess')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        dev = pci.PCINetDevice('0000:07:00.0')
        self.get_vpe_cli_out.return_value = pci_responses.CONFD_CLI_INVMAC
        vpe_devs = dev.get_vpe_interfaces_and_macs()
        expect = [
            {
                'interface': 'TenGigabitEthernet7/0/0',
                'macAddress': '84:b8:02:2a:5f:c4',
                'pci_address': '0000:07:00.0'
            },
        ]
        self.assertEqual(vpe_devs, expect)

    def test_extract_pci_addr_from_vpe_interface(self):
        """VPE names encode the PCI bus/slot/function as hex digits after the prefix."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:07:00.0')
        self.assertEqual(dev.extract_pci_addr_from_vpe_interface(
            'TenGigabitEthernet1/1/1'), '0000:01:01.1')
        self.assertEqual(dev.extract_pci_addr_from_vpe_interface(
            'TenGigabitEtherneta/0/0'), '0000:0a:00.0')
        self.assertEqual(dev.extract_pci_addr_from_vpe_interface(
            'GigabitEthernet0/2/0'), '0000:00:02.0')

    def test_update_interface_info_eth(self):
        """The device adopts the sysfs entry whose pci_address matches its own."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_interfaces_and_macs')
        dev = pci.PCINetDevice('0000:10:00.0')
        self.get_sysnet_interfaces_and_macs.return_value = [
            {
                'interface': 'eth2',
                'macAddress': 'a8:9d:21:cf:93:fc',
                'pci_address': '0000:10:00.0',
                'state': 'up'
            },
            {
                'interface': 'eth3',
                'macAddress': 'a8:9d:21:cf:93:fd',
                'pci_address': '0000:10:00.1',
                'state': 'down'
            }
        ]
        dev.update_interface_info_eth()
        self.assertEqual(dev.interface_name, 'eth2')

    def test_get_sysnet_interfaces_and_macs_virtio(self):
        """For a virtio node the PCI address comes from its parent path element."""
        self.patch_object(pci.glob, 'glob')
        self.patch_object(pci.os.path, 'islink')
        self.patch_object(pci.os.path, 'realpath')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_device_state')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_mac')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_interface')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:06:00.0')
        self.glob.return_value = ['/sys/class/net/eth2']
        self.get_sysnet_interface.return_value = 'eth2'
        self.get_sysnet_mac.return_value = 'a8:9d:21:cf:93:fc'
        self.get_sysnet_device_state.return_value = 'up'
        self.realpath.return_value = ('/sys/devices/pci0000:00/0000:00:07.0/'
                                      'virtio5')
        self.islink.return_value = True
        expect = {
            'interface': 'eth2',
            'macAddress': 'a8:9d:21:cf:93:fc',
            'pci_address': '0000:00:07.0',
            'state': 'up',
        }
        self.assertEqual(dev.get_sysnet_interfaces_and_macs(), [expect])

    def test_get_sysnet_interfaces_and_macs(self):
        """The PCI address is the last element of the resolved sysfs device path."""
        self.patch_object(pci.glob, 'glob')
        self.patch_object(pci.os.path, 'islink')
        self.patch_object(pci.os.path, 'realpath')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_device_state')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_mac')
        self.patch_object(pci.PCINetDevice, 'get_sysnet_interface')
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        dev = pci.PCINetDevice('0000:06:00.0')
        self.glob.return_value = ['/sys/class/net/eth2']
        self.get_sysnet_interface.return_value = 'eth2'
        self.get_sysnet_mac.return_value = 'a8:9d:21:cf:93:fc'
        self.get_sysnet_device_state.return_value = 'up'
        self.realpath.return_value = (
            '/sys/devices/pci0000:00/0000:00:02.0/0000:02:00.0/0000:03:00.0/'
            '0000:04:00.0/0000:05:01.0/0000:07:00.0')
        self.islink.return_value = True
        expect = {
            'interface': 'eth2',
            'macAddress': 'a8:9d:21:cf:93:fc',
            'pci_address': '0000:07:00.0',
            'state': 'up',
        }
        self.assertEqual(dev.get_sysnet_interfaces_and_macs(), [expect])

    def test_get_sysnet_mac(self):
        """get_sysnet_mac returns the canned MAC for the opened sysfs path."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        device = pci.PCINetDevice('0000:10:00.1')
        with utils.patch_open() as (_open, _file):
            super_fh = mocked_filehandle()
            _file.readlines = mock.MagicMock()
            _open.side_effect = super_fh._setfilename
            _file.read.side_effect = super_fh._getfilecontents_read
            macaddr = device.get_sysnet_mac('/sys/class/net/eth3')
        self.assertEqual(macaddr, 'a8:9d:21:cf:93:fd')

    def test_get_sysnet_device_state(self):
        """get_sysnet_device_state returns the canned state ('down') for eth3."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        device = pci.PCINetDevice('0000:10:00.1')
        with utils.patch_open() as (_open, _file):
            super_fh = mocked_filehandle()
            _file.readlines = mock.MagicMock()
            _open.side_effect = super_fh._setfilename
            _file.read.side_effect = super_fh._getfilecontents_read
            state = device.get_sysnet_device_state('/sys/class/net/eth3')
        self.assertEqual(state, 'down')

    def test_get_sysnet_interface(self):
        """The interface name is the basename of the sysfs path."""
        self.patch_object(pci.PCINetDevice, 'update_attributes')
        device = pci.PCINetDevice('0000:10:00.1')
        self.assertEqual(
            device.get_sysnet_interface('/sys/class/net/eth3'), 'eth3')
class PCINetDevicesTest(utils.BaseTestCase):
    """Tests for pci.PCINetDevices, the collection wrapper over PCINetDevice."""

    def test_init(self):
        """Construction creates one PCINetDevice per discovered PCI address."""
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pci.PCINetDevices()
        self.PCINetDevice.assert_called_once_with('pciaddr')

    def test_get_pci_ethernet_addresses(self):
        """Addresses are parsed from the canned subprocess (lspci-style) output."""
        self.patch_object(pci, 'subprocess')
        self.patch_object(pci, 'PCINetDevice')
        self.subprocess.check_output.side_effect = \
            mocked_subprocess()
        a = pci.PCINetDevices()
        self.assertEqual(
            a.get_pci_ethernet_addresses(),
            ['0000:06:00.0', '0000:07:00.0', '0000:10:00.0', '0000:10:00.1'])

    def test_update_devices(self):
        """update_devices refreshes attributes on every managed device."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        a = pci.PCINetDevices()
        a.update_devices()
        pcinetdev.update_attributes.assert_called_once_with()

    def test_get_macs(self):
        """get_macs collects the mac_address of each managed device."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.mac_address = 'mac1'
        a = pci.PCINetDevices()
        self.assertEqual(a.get_macs(), ['mac1'])

    def test_get_device_from_mac(self):
        """Devices can be looked up by MAC address."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.mac_address = 'mac1'
        a = pci.PCINetDevices()
        self.assertEqual(a.get_device_from_mac('mac1'), pcinetdev)

    def test_get_device_from_pci_address(self):
        """Devices can be looked up by PCI address."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.pci_address = 'pciaddr'
        a = pci.PCINetDevices()
        self.assertEqual(a.get_device_from_pci_address('pciaddr'), pcinetdev)

    def test_rebind_orphans(self):
        """rebind_orphans unbinds then binds orphaned devices."""
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci.PCINetDevices, 'unbind_orphans')
        self.patch_object(pci.PCINetDevices, 'bind_orphans')
        self.patch_object(pci, 'PCINetDevice')
        self.get_pci_ethernet_addresses.return_value = []
        a = pci.PCINetDevices()
        a.rebind_orphans()
        self.unbind_orphans.assert_called_once_with()
        self.bind_orphans.assert_called_once_with()

    def test_unbind_orphans(self):
        """Each orphan is unbound and device state is refreshed afterwards."""
        orphan = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        self.patch_object(pci.PCINetDevices, 'get_orphans')
        self.patch_object(pci.PCINetDevices, 'update_devices')
        self.patch_object(pci, 'PCINetDevice')
        self.get_orphans.return_value = [orphan]
        a = pci.PCINetDevices()
        a.unbind_orphans()
        orphan.unbind.assert_called_once_with()
        self.update_devices.assert_called_once_with()

    def test_bind_orphans(self):
        """Each orphan is bound to its modalias kernel module, then refreshed."""
        orphan = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        self.patch_object(pci.PCINetDevices, 'get_orphans')
        self.patch_object(pci.PCINetDevices, 'update_devices')
        self.patch_object(pci, 'PCINetDevice')
        self.get_orphans.return_value = [orphan]
        orphan.modalias_kmod = 'kmod'
        a = pci.PCINetDevices()
        a.bind_orphans()
        orphan.bind.assert_called_once_with('kmod')
        self.update_devices.assert_called_once_with()

    def test_get_orphans(self):
        """A device with no loaded kmod, interface name nor MAC is an orphan."""
        pcinetdev = mock.MagicMock()
        self.patch_object(pci.PCINetDevices, 'get_pci_ethernet_addresses')
        self.patch_object(pci, 'PCINetDevice')
        self.PCINetDevice.return_value = pcinetdev
        self.get_pci_ethernet_addresses.return_value = ['pciaddr']
        pcinetdev.loaded_kmod = None
        pcinetdev.interface_name = None
        pcinetdev.mac_address = None
        a = pci.PCINetDevices()
        self.assertEqual(a.get_orphans(), [pcinetdev])
class PCIInfoTest(utils.BaseTestCase):
    """Tests for pci.PCIInfo, which maps user-requested config onto devices."""

    def dev_mock(self, state, pci_address, interface_name):
        """Return a MagicMock device carrying the given attribute values."""
        dev = mock.MagicMock()
        dev.state = state
        dev.pci_address = pci_address
        dev.interface_name = interface_name
        return dev

    def test_init(self):
        """Only 'down' devices are claimed; the 'up' device (mac3) is excluded."""
        net_dev_mocks = {
            'mac1': self.dev_mock('down', 'pciaddr0', 'eth0'),
            'mac2': self.dev_mock('down', 'pciaddr1', 'eth1'),
            'mac3': self.dev_mock('up', 'pciaddr3', 'eth2'),
        }
        net_devs = mock.MagicMock()
        self.patch_object(pci.PCIInfo, 'get_user_requested_config')
        self.patch_object(pci, 'PCINetDevices')
        self.PCINetDevices.return_value = net_devs
        net_devs.get_macs.return_value = net_dev_mocks.keys()
        net_devs.get_device_from_mac.side_effect = lambda x: net_dev_mocks[x]
        self.get_user_requested_config.return_value = {
            'mac1': [{'net': 'net1'}, {'net': 'net2'}],
            'mac2': [{'net': 'net1'}],
            'mac3': [{'net': 'net1'}]}
        a = pci.PCIInfo()
        # mac3 is 'up', so it appears neither in local_mac_nets nor in the
        # vpe_dev_string of claimed PCI addresses.
        expect = {
            'mac1': [{'interface': 'eth0', 'net': 'net1'},
                     {'interface': 'eth0', 'net': 'net2'}],
            'mac2': [{'interface': 'eth1', 'net': 'net1'}]}
        self.assertEqual(a.local_mac_nets, expect)
        self.assertEqual(a.vpe_dev_string, 'dev pciaddr0 dev pciaddr1')

    def test_get_user_requested_config(self):
        """'mac=..;net=..' tokens are grouped into a dict keyed by MAC."""
        self.patch_object(pci.PCIInfo, '__init__')
        self.patch_object(pci.hookenv, 'config')
        self.config.return_value = ('mac=mac1;net=net1 mac=mac1;net=net2'
                                    ' mac=mac2;net=net1')
        a = pci.PCIInfo()
        expect = {
            'mac1': [{'net': 'net1'}, {'net': 'net2'}],
            'mac2': [{'net': 'net1'}]}
        self.assertEqual(a.get_user_requested_config(), expect)

    def test_get_user_requested_invalid_entries(self):
        """Malformed entries ('ac=...', 'randomstuff') are silently dropped."""
        self.patch_object(pci.PCIInfo, '__init__')
        self.patch_object(pci.hookenv, 'config')
        self.config.return_value = ('ac=mac1;net=net1 randomstuff'
                                    ' mac=mac2;net=net1')
        a = pci.PCIInfo()
        expect = {'mac2': [{'net': 'net1'}]}
        self.assertEqual(a.get_user_requested_config(), expect)

    def test_get_user_requested_config_empty(self):
        """Unset charm config yields an empty mapping."""
        self.patch_object(pci.PCIInfo, '__init__')
        self.patch_object(pci.hookenv, 'config')
        self.config.return_value = None
        a = pci.PCIInfo()
        expect = {}
        self.assertEqual(a.get_user_requested_config(), expect)
| true | true |
1c352504ad483c694312e26f2b9ee3495b840de3 | 450 | py | Python | cui/symbols.py | clandgraf/cui | 2073e56e6f0a6d1278207b583bfc3f15a08a5ca5 | [
"BSD-3-Clause"
] | null | null | null | cui/symbols.py | clandgraf/cui | 2073e56e6f0a6d1278207b583bfc3f15a08a5ca5 | [
"BSD-3-Clause"
] | null | null | null | cui/symbols.py | clandgraf/cui | 2073e56e6f0a6d1278207b583bfc3f15a08a5ca5 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017 Christoph Landgraf. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Symbol(object):
    """Opaque sentinel value: equal only to itself, hashed by identity."""

    def __hash__(self):
        # Hash on identity so each sentinel is a distinct dict/set key.
        return id(self)

    def __eq__(self, other):
        # Identity comparison: a Symbol only ever equals itself.
        return self is other
# Drawing-symbol sentinels; each is a distinct identity-compared marker
# rather than a character value (presumably mapped to concrete glyphs,
# e.g. curses ACS characters, elsewhere -- confirm against the renderer).
SYM_VLINE = Symbol()      # vertical line
SYM_HLINE = Symbol()      # horizontal line
SYM_LLCORNER = Symbol()   # lower-left corner
SYM_LTEE = Symbol()       # left tee
SYM_RARROW = Symbol()     # right-pointing arrow
SYM_DARROW = Symbol()     # down-pointing arrow
| 25 | 73 | 0.666667 |
class Symbol(object):
    """Opaque sentinel value: equal only to itself, hashed by identity."""

    def __hash__(self):
        # Hash on identity so each sentinel is a distinct dict/set key.
        return id(self)

    def __eq__(self, other):
        # Identity comparison: a Symbol only ever equals itself.
        return other is self
# Drawing-symbol sentinels; each is a distinct identity-compared marker
# rather than a character value (presumably mapped to concrete glyphs,
# e.g. curses ACS characters, elsewhere -- confirm against the renderer).
SYM_VLINE = Symbol()      # vertical line
SYM_HLINE = Symbol()      # horizontal line
SYM_LLCORNER = Symbol()   # lower-left corner
SYM_LTEE = Symbol()       # left tee
SYM_RARROW = Symbol()     # right-pointing arrow
SYM_DARROW = Symbol()     # down-pointing arrow
| true | true |
1c352709362d8ae28f08359cb49eba77cb85c6cb | 4,638 | py | Python | auth-api/src/auth_api/__init__.py | thorwolpert/sbc-auth | 5da50cde2e5625d1b0ceea090c3656ee374c9b71 | [
"Apache-2.0"
] | 11 | 2019-09-26T06:58:25.000Z | 2022-01-26T06:19:39.000Z | auth-api/src/auth_api/__init__.py | thorwolpert/sbc-auth | 5da50cde2e5625d1b0ceea090c3656ee374c9b71 | [
"Apache-2.0"
] | 1,622 | 2019-05-07T21:08:38.000Z | 2022-03-28T17:07:15.000Z | auth-api/src/auth_api/__init__.py | thorwolpert/sbc-auth | 5da50cde2e5625d1b0ceea090c3656ee374c9b71 | [
"Apache-2.0"
] | 98 | 2019-03-01T21:36:15.000Z | 2021-12-01T22:11:25.000Z | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Authroization API service.
This module is the API for the Authroization system.
"""
import json
import os
import sentry_sdk # noqa: I001; pylint: disable=ungrouped-imports,wrong-import-order; conflicts with Flake8
from flask import Flask, g, request
from humps.main import camelize
from sbc_common_components.exception_handling.exception_handler import ExceptionHandler # noqa: I001
from sentry_sdk.integrations.flask import FlaskIntegration # noqa: I001
import auth_api.config as config # pylint:disable=consider-using-from-import
from auth_api import models
from auth_api.auth import jwt
from auth_api.config import _Config
from auth_api.extensions import mail
from auth_api.models import db, ma
from auth_api.utils.cache import cache
from auth_api.utils.run_version import get_run_version
from auth_api.utils.util_logging import setup_logging
setup_logging(os.path.join(_Config.PROJECT_ROOT, 'logging.conf')) # important to do this first
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
    """Return a configured Flask App using the Factory method.

    :param run_mode: key into config.CONFIGURATION selecting the config
        class.  NOTE: the default is resolved from FLASK_ENV once, at
        module import time, because it is a default argument value.
    """
    app = Flask(__name__)
    app.config.from_object(config.CONFIGURATION[run_mode])
    # Configure Sentry
    if app.config.get('SENTRY_DSN', None):
        sentry_sdk.init(  # pylint: disable=abstract-class-instantiated
            dsn=app.config.get('SENTRY_DSN'),
            integrations=[FlaskIntegration()]
        )
    # Blueprints are imported inside the factory (pylint silenced);
    # presumably to avoid an import cycle with the resources package -- confirm.
    from auth_api.resources import TEST_BLUEPRINT  # pylint: disable=import-outside-toplevel
    from auth_api.resources import API_BLUEPRINT, OPS_BLUEPRINT  # pylint: disable=import-outside-toplevel
    db.init_app(app)
    ma.init_app(app)
    mail.init_app(app)
    app.register_blueprint(API_BLUEPRINT)
    app.register_blueprint(OPS_BLUEPRINT)
    # NOTE(review): the two gates below re-read FLASK_ENV from the environment
    # instead of using run_mode, so an explicitly passed run_mode does not
    # affect them -- confirm this is intentional.
    if os.getenv('FLASK_ENV', 'production') in ['development', 'testing']:
        app.register_blueprint(TEST_BLUEPRINT)
    if os.getenv('FLASK_ENV', 'production') != 'testing':
        setup_jwt_manager(app, jwt)
    ExceptionHandler(app)

    @app.before_request
    def set_origin():
        # Remember the caller's Origin header for the rest of the request.
        g.origin_url = request.environ.get('HTTP_ORIGIN', 'localhost')

    @app.after_request
    def handle_after_request(response):  # pylint: disable=unused-variable
        # Post-process every response: version header + camelCase JSON keys.
        add_version(response)
        camelize_json(response)
        return response

    def add_version(response):
        # Stamp the response with the running auth_api version.
        version = get_run_version()
        response.headers['API'] = f'auth_api/{version}'

    def camelize_json(response):
        # Re-serialize JSON bodies with snake_case keys converted to camelCase.
        if response.headers['Content-Type'] == 'application/json':
            response.set_data(json.dumps(camelize(json.loads(response.get_data()))))

    register_shellcontext(app)
    build_cache(app)
    return app
def setup_jwt_manager(app, jwt_manager):
    """Use flask app to configure the JWTManager to work for a particular Realm.

    Installs a role-extraction callback into the app config and then binds
    the JWT manager to the application.
    """
    def _extract_realm_roles(token_claims):
        """Pull the role list out of the token's realm_access claim."""
        return token_claims['realm_access']['roles']  # pragma: no cover

    app.config['JWT_ROLE_CALLBACK'] = _extract_realm_roles
    jwt_manager.init_app(app)
def register_shellcontext(app):
    """Register shell context objects."""
    def _shell_objects():
        """Shell context objects."""
        return dict(app=app, jwt=jwt, db=db, models=models)  # pragma: no cover

    app.shell_context_processor(_shell_objects)
def build_cache(app):
    """Build cache.

    Initializes the cache extension, clears any stale entries and, outside
    of test runs, pre-warms the permission and product caches.
    """
    cache.init_app(app)
    with app.app_context():
        cache.clear()
        if not app.config.get('TESTING', False):
            try:
                # Imported lazily; flagged by pylint, presumably to avoid a
                # circular import at module load time -- TODO confirm.
                from auth_api.services.permissions import \
                    Permissions as PermissionService  # pylint: disable=import-outside-toplevel
                from auth_api.services.products import \
                    Product as ProductService  # pylint: disable=import-outside-toplevel
                PermissionService.build_all_permission_cache()
                ProductService.build_all_products_cache()
            except Exception as e:  # NOQA # pylint:disable=broad-except
                # Cache warming is best-effort: log and continue so a cache
                # failure cannot prevent application start-up.
                app.logger.error('Error on caching ')
                app.logger.error(e)
| 35.136364 | 108 | 0.709142 |
import json
import os
import sentry_sdk
from flask import Flask, g, request
from humps.main import camelize
from sbc_common_components.exception_handling.exception_handler import ExceptionHandler
from sentry_sdk.integrations.flask import FlaskIntegration
import auth_api.config as config
from auth_api import models
from auth_api.auth import jwt
from auth_api.config import _Config
from auth_api.extensions import mail
from auth_api.models import db, ma
from auth_api.utils.cache import cache
from auth_api.utils.run_version import get_run_version
from auth_api.utils.util_logging import setup_logging
setup_logging(os.path.join(_Config.PROJECT_ROOT, 'logging.conf'))
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
app = Flask(__name__)
app.config.from_object(config.CONFIGURATION[run_mode])
if app.config.get('SENTRY_DSN', None):
sentry_sdk.init(
dsn=app.config.get('SENTRY_DSN'),
integrations=[FlaskIntegration()]
)
from auth_api.resources import TEST_BLUEPRINT
from auth_api.resources import API_BLUEPRINT, OPS_BLUEPRINT
db.init_app(app)
ma.init_app(app)
mail.init_app(app)
app.register_blueprint(API_BLUEPRINT)
app.register_blueprint(OPS_BLUEPRINT)
if os.getenv('FLASK_ENV', 'production') in ['development', 'testing']:
app.register_blueprint(TEST_BLUEPRINT)
if os.getenv('FLASK_ENV', 'production') != 'testing':
setup_jwt_manager(app, jwt)
ExceptionHandler(app)
@app.before_request
def set_origin():
g.origin_url = request.environ.get('HTTP_ORIGIN', 'localhost')
@app.after_request
def handle_after_request(response):
add_version(response)
camelize_json(response)
return response
def add_version(response):
version = get_run_version()
response.headers['API'] = f'auth_api/{version}'
def camelize_json(response):
if response.headers['Content-Type'] == 'application/json':
response.set_data(json.dumps(camelize(json.loads(response.get_data()))))
register_shellcontext(app)
build_cache(app)
return app
def setup_jwt_manager(app, jwt_manager):
def get_roles(a_dict):
return a_dict['realm_access']['roles']
app.config['JWT_ROLE_CALLBACK'] = get_roles
jwt_manager.init_app(app)
def register_shellcontext(app):
def shell_context():
return {'app': app, 'jwt': jwt, 'db': db, 'models': models}
app.shell_context_processor(shell_context)
def build_cache(app):
cache.init_app(app)
with app.app_context():
cache.clear()
if not app.config.get('TESTING', False):
try:
from auth_api.services.permissions import \
Permissions as PermissionService
from auth_api.services.products import \
Product as ProductService
PermissionService.build_all_permission_cache()
ProductService.build_all_products_cache()
except Exception as e: rror('Error on caching ')
app.logger.error(e)
| true | true |
1c35273f22524824a1badec9f7b8ab86d3ec0258 | 12,655 | py | Python | Staff.py | iGuan7u/HRScript | 53ac7b6865623713ffadb22ff0620d63f87c3313 | [
"MIT"
] | null | null | null | Staff.py | iGuan7u/HRScript | 53ac7b6865623713ffadb22ff0620d63f87c3313 | [
"MIT"
] | null | null | null | Staff.py | iGuan7u/HRScript | 53ac7b6865623713ffadb22ff0620d63f87c3313 | [
"MIT"
] | null | null | null | from SheetHelper import SheetHelper
class Staff:
    """One employee record parsed from the fixed-layout HR information Excel sheet.

    The cell addresses used in ``__init__`` (e.g. 'B3') refer to the standard
    personal-information form read through ``SheetHelper``.  Every field has a
    class-level default ('' or 0) so a partially filled sheet still yields a
    usable record.
    """

    # Name
    name = ''
    # Work location (city)
    workPlace = ''
    # Center
    center = ''
    # Department
    department = ''
    # Section / office
    administration = ''
    # Team / group
    group = ''
    # First-level supervisor
    leader = ''
    # User name
    userName = ''
    # OA (office automation) account name
    OAName = ''
    # English name
    englishName = ''
    # Employment status (default '在职' = currently employed)
    state = '在职'
    # Ethnicity
    nation = ''
    # Political status
    politcInfo = ''
    # Gender
    gender = 0
    # Native place
    nativePlace = ''
    # Marital status
    mariageState = 0
    # Date of birth
    birthDate = 0
    # Highest education level
    education = ''
    # Hobbies
    hobby = ''
    # Work e-mail
    workEmail = ''
    # Entry (hire) date
    entryTime = 0
    # Special skills
    skill = ''
    # National ID number
    identityNumber = ''
    # Full address of the registered household
    householdPlace = ''
    # Household registration type
    householdType = ''
    # Mobile phone number
    phoneNumber = 0
    # Home phone number
    housePhoneNumber = 0
    # Current residential address
    livingPlace = ''
    # Education history (joined text)
    educationInfo = ''
    # Training history (joined text)
    trainInfo = ''
    # Professional qualification certificates
    professionInfo = ''
    # Work history (joined text)
    workHistory = ''
    # Family members (joined text)
    familyMember = ''
    # Emergency contacts (joined text)
    urgentContact = ''
    # Salary bank card number
    bankCard = ''
    # Bank branch where the card was opened
    bankPosition = ''
    # Submitted documents
    submitInfo = 0
    # Date of first employment (career start)
    startWorkTime = 0
    # Employee type
    staffType = ''
    # Position
    position = ''
    # Position level
    positionLevel = ''
    # Grade / rank
    grade = ''
    # Personal e-mail
    personEmail = ''
    # Contract period start
    contractStartTime = ''
    # Contract period end
    contractEndTime = ''
    # Contract expiry reminder date
    contractRemindTime = ''
    # Probation period
    probation = ''
    # Confirmation (end of probation) date
    fullTime = ''
    # Resignation date
    leaveTime = ''
    # Resignation reason
    leaveReason = ''
    # Housing fund purchase date
    fundTime = ''
    # Housing fund contribution ratio
    fundPercent = ''
    # Housing fund type
    fundType = ''
    # Housing fund sealing date
    fundXXTime = ''
    # Housing fund contribution base
    fundXXNum = ''
    # Housing fund account number
    fundNumber = ''
    # Social insurance enrollment date
    insuranceDate = ''
    # Whether a new local social insurance account is opened
    insuranceType = ''
    # Social insurance removal date
    insuranceDate2 = ''
    # Social insurance contribution base
    insuranceXXNum = ''
    # Social insurance remarks
    insuranceMemo = ''
    # Commercial insurance
    businessInsurance = ''
    # Transfer / change history
    moveMemo = ''
    # Reward and punishment record
    record = 0
    # Labor contract signing status
    laborContract = ''
    # Physical examination
    bodyCheck = ''
    # Other remarks
    other = ''
    # Employee number
    workNum = ''
    # Seniority start date
    workingYearsStartTime = ''
    # Contract duration
    contractDuration = ''
    # Age
    age = ''
    # Insured child's name
    insuranceKid = ''
    # Insured child's gender
    insuranceKidGender = ''
    # Insured child's ID number
    insuranceKidIdentifierNumber = ''
    # Attendance rule
    rule = ''
    # Child 1 name
    kid1 = ''
    # Child 1 gender
    kid1Gender = ''
    # Child 1 ID number
    kid1Identify = ''
    # Number of children
    kidCount = ''
    # Archive file number
    fileNumber = ''

    @property
    def company(self):
        """Return the legal employing entity derived from the work location."""
        if self.workPlace == '广州':
            return '广州威尔森信息科技有限公司'
        elif self.workPlace == '北京' or self.workPlace == '长春':
            return '广州威尔森信息科技有限公司北京分公司'
        elif self.workPlace == '上海':
            return '广州威尔森信息科技有限公司上海分公司'
        else:
            return 'something is wrong'

    def __init__(self, sheet):
        """Populate this record from one worksheet of the HR information form.

        :param sheet: Worksheet object understood by SheetHelper.
        """
        self.name = SheetHelper.getValueFromSheet(sheet, 'B3')
        self.center = SheetHelper.getValueFromSheet(sheet, 'B53')
        self.department = SheetHelper.getValueFromSheet(sheet, 'D53')
        self.administration = SheetHelper.getValueFromSheet(sheet, 'F53')
        self.group = SheetHelper.getValueFromSheet(sheet, 'H53')
        self.workPlace = SheetHelper.getValueFromSheet(sheet, 'B56')
        self.leader = SheetHelper.getValueFromSheet(sheet, 'H54')
        self.englishName = SheetHelper.getValueFromSheet(sheet, 'D3')
        self.nation = SheetHelper.getValueFromSheet(sheet, 'F3')
        self.politcInfo = SheetHelper.getValueFromSheet(sheet, 'F5')
        self.gender = SheetHelper.getValueFromSheet(sheet, 'F4')
        self.nativePlace = SheetHelper.getValueFromSheet(sheet, 'B4')
        self.mariageState = SheetHelper.getValueFromSheet(sheet, 'B5')
        self.birthDate = SheetHelper.getValueFromSheet(sheet, 'D4')
        self.hobby = SheetHelper.getValueFromSheet(sheet, 'B6')
        self.workEmail = SheetHelper.getValueFromSheet(sheet, 'D56')
        self.entryTime = SheetHelper.getValueFromSheet(sheet, 'B55')
        self.education = SheetHelper.getValueFromSheet(sheet, 'D6')
        # NOTE(review): reads the same cell 'B6' as hobby -- verify mapping.
        self.skill = SheetHelper.getValueFromSheet(sheet, 'B6')
        self.identityNumber = SheetHelper.getValueFromSheet(sheet, 'F7')
        self.householdPlace = SheetHelper.getValueFromSheet(sheet, 'F9')
        self.householdType = SheetHelper.getValueFromSheet(sheet, 'F6')
        self.phoneNumber = SheetHelper.getValueFromSheet(sheet, 'C8')
        self.livingPlace = SheetHelper.getValueFromSheet(sheet, 'F8')
        # Education history: one row per school, joined into a single string.
        educationInfos = SheetHelper.getValueFromBounds(sheet, 'B11', 'H13')
        educationInfosStrings = []
        for educationInfo in educationInfos:
            educationInfosStrings.append('起止时间:{0[0]};学校:{0[1]};专业:{0[2]};证书:{0[3]}'.format(educationInfo))
        self.educationInfo = ';'.join(educationInfosStrings)
        # Training history. BUGFIX: validate the field count of the current
        # record (len(trainInfo)), not the number of records (len(trainInfos));
        # the old check discarded every record whenever fewer than four
        # training rows existed.
        trainInfos = SheetHelper.getValueFromBounds(sheet, 'B15', 'H17')
        trainInfosStrings = []
        for trainInfo in trainInfos:
            if len(trainInfo) < 4:
                # BUGFIX: the message previously passed self.name as a second
                # print() argument instead of formatting it into the string.
                print("%s 培训记录不合法,记录将抛弃" % self.name)
                continue
            trainInfosStrings.append('起止时间:{0[0]};培训机构:{0[1]};培训课程:{0[2]};证书:{0[3]}'.format(trainInfo))
        self.trainInfo = ';'.join(trainInfosStrings)
        self.professionInfo = SheetHelper.getValueFromSheet(sheet, 'B18')
        # Work history: one row per previous employer.
        workHistorys = SheetHelper.getValueFromBounds(sheet, 'B22', 'H25')
        workHistoryStrings = []
        for workHistory in workHistorys:
            workHistoryStrings.append('起止时间:{0[0]};工作单位:{0[1]};职位:{0[2]};薪资状况:{0[3]};离职原因:{0[4]}'.format(workHistory))
        self.workHistory = ';'.join(workHistoryStrings)
        # Family members: one row per person.
        familyMembers = SheetHelper.getValueFromBounds(sheet, 'B27', 'H29')
        familyMemberStrings = []
        for familyMember in familyMembers:
            familyMemberStrings.append('姓名:{0[0]};关系:{0[1]};出生年月:{0[2]};工作单位:{0[3]};职务:{0[4]}'.format(familyMember))
        self.familyMember = ';'.join(familyMemberStrings)
        # Up to two emergency contacts; each is only added when the name cell
        # is non-empty.
        urgentContact = ''
        if SheetHelper.getValueFromSheet(sheet, 'B35') != '':
            urgentContact = '紧急联系人1:%s;双方关系:%s;联系电话:%s' % (
                SheetHelper.getValueFromSheet(sheet, 'B35'),
                SheetHelper.getValueFromSheet(sheet, 'D35'),
                SheetHelper.getValueFromSheet(sheet, 'G35'))
        if SheetHelper.getValueFromSheet(sheet, 'B36') != '':
            urgentContact += ';紧急联系人2:%s;双方关系:%s;联系电话:%s' % (
                SheetHelper.getValueFromSheet(sheet, 'B36'),
                SheetHelper.getValueFromSheet(sheet, 'D36'),
                SheetHelper.getValueFromSheet(sheet, 'G36'))
        self.urgentContact = urgentContact
        self.bankCard = SheetHelper.getValueFromSheet(sheet, 'C37')
        self.bankPosition = SheetHelper.getValueFromSheet(sheet, 'F37')
        self.staffType = SheetHelper.getValueFromSheet(sheet, 'H55')
        self.position = SheetHelper.getValueFromSheet(sheet, 'B54')
        self.positionLevel = SheetHelper.getValueFromSheet(sheet, 'D54')
        self.grade = SheetHelper.getValueFromSheet(sheet, 'F54')
        self.personEmail = SheetHelper.getValueFromSheet(sheet, 'C9')
        self.contractStartTime = SheetHelper.getValueFromSheet(sheet, 'B55')
        self.probation = SheetHelper.getValueFromSheet(sheet, 'D55')
        self.fundPercent = SheetHelper.getValueFromSheet(sheet, 'C44')
        self.fundType = SheetHelper.getValueFromSheet(sheet, 'G43')
        self.fundNumber = SheetHelper.getValueFromSheet(sheet, 'G44')
        # '可不填' ("optional") is the form's placeholder, not a real number.
        if self.fundNumber == '可不填':
            self.fundNumber = ''
        self.insuranceType = SheetHelper.getValueFromSheet(sheet, 'C43')
        # NOTE(review): reads the same cell 'F6' as householdType -- verify mapping.
        self.insuranceMemo = SheetHelper.getValueFromSheet(sheet, 'F6')
        self.workingYearsStartTime = SheetHelper.getValueFromSheet(sheet, 'B55')
        self.contractDuration = SheetHelper.getValueFromSheet(sheet, 'F55')
        self.insuranceKid = SheetHelper.getValueFromSheet(sheet, 'C46')
        self.insuranceKidGender = SheetHelper.getValueFromSheet(sheet, 'G46')
        self.insuranceKidIdentifierNumber = SheetHelper.getValueFromSheet(sheet, 'C47')
        self.kid1 = SheetHelper.getValueFromSheet(sheet, 'B49')
        # NOTE(review): reads the same cell 'C47' as insuranceKidIdentifierNumber
        # -- looks like it should be a gender cell; verify mapping.
        self.kid1Gender = SheetHelper.getValueFromSheet(sheet, 'C47')
        self.kid1Identify = SheetHelper.getValueFromSheet(sheet, 'F49')
        self.kidCount = SheetHelper.getValueFromSheet(sheet, 'D5')
        # (four redundant duplicate assignments of insuranceKidIdentifierNumber
        # removed -- they re-read the same cell into the same attribute)

    def writeToSheet(self, sheet, row):
        """Write this record as one row of the consolidated staff workbook.

        :param sheet: Writable worksheet exposing write(row, col, value).
        :param row: 0-based row index to write into.
        """
        sheet.write(row, 1, self.company)
        sheet.write(row, 2, self.center)
        sheet.write(row, 3, self.department)
        sheet.write(row, 4, self.administration)
        sheet.write(row, 5, self.group)
        sheet.write(row, 6, self.workPlace)
        sheet.write(row, 7, self.leader)
        sheet.write(row, 8, self.name)
        sheet.write(row, 9, self.OAName)
        sheet.write(row, 10, self.englishName)
        sheet.write(row, 11, self.state)
        sheet.write(row, 12, self.nation)
        sheet.write(row, 13, self.politcInfo)
        sheet.write(row, 14, self.gender)
        sheet.write(row, 15, self.nativePlace)
        sheet.write(row, 16, self.mariageState)
        sheet.write(row, 17, self.birthDate)
        sheet.write(row, 18, self.education)
        sheet.write(row, 19, self.hobby)
        sheet.write(row, 20, self.workEmail)
        sheet.write(row, 21, self.entryTime)
        sheet.write(row, 22, self.skill)
        sheet.write(row, 23, self.identityNumber)
        sheet.write(row, 24, self.householdPlace)
        sheet.write(row, 25, self.householdType)
        sheet.write(row, 26, self.phoneNumber)
        sheet.write(row, 27, self.housePhoneNumber)
        sheet.write(row, 28, self.livingPlace)
        sheet.write(row, 29, self.educationInfo)
        sheet.write(row, 30, self.trainInfo)
        sheet.write(row, 31, self.professionInfo)
        sheet.write(row, 32, self.workHistory)
        sheet.write(row, 33, self.familyMember)
        sheet.write(row, 34, self.urgentContact)
        sheet.write(row, 35, self.bankCard)
        sheet.write(row, 36, self.bankPosition)
        sheet.write(row, 37, self.submitInfo)
        sheet.write(row, 38, self.startWorkTime)
        sheet.write(row, 39, self.staffType)
        sheet.write(row, 40, self.position)
        sheet.write(row, 41, self.positionLevel)
        sheet.write(row, 42, self.grade)
        sheet.write(row, 43, self.personEmail)
        sheet.write(row, 44, self.contractStartTime)
        sheet.write(row, 45, self.contractEndTime)
        sheet.write(row, 46, self.contractRemindTime)
        sheet.write(row, 47, self.probation)
        sheet.write(row, 48, self.fullTime)
        sheet.write(row, 49, self.leaveTime)
        sheet.write(row, 50, self.leaveReason)
        sheet.write(row, 51, self.fundTime)
        sheet.write(row, 52, self.fundPercent)
        sheet.write(row, 53, self.fundType)
        sheet.write(row, 54, self.fundXXTime)
        sheet.write(row, 55, self.fundXXNum)
        sheet.write(row, 56, self.fundNumber)
        sheet.write(row, 57, self.insuranceDate)
        sheet.write(row, 58, self.insuranceType)
        sheet.write(row, 59, self.insuranceDate2)
        sheet.write(row, 60, self.insuranceXXNum)
        sheet.write(row, 61, self.insuranceMemo)
        sheet.write(row, 62, self.businessInsurance)
        sheet.write(row, 63, self.moveMemo)
        sheet.write(row, 64, self.record)
        sheet.write(row, 65, self.laborContract)
        sheet.write(row, 66, self.bodyCheck)
        sheet.write(row, 67, self.other)
        sheet.write(row, 68, self.workNum)
        # Column 69 ('员工生日' / employee birthday) is intentionally left blank.
        sheet.write(row, 70, self.workingYearsStartTime)
        sheet.write(row, 71, self.contractDuration)
        sheet.write(row, 72, self.age)
        sheet.write(row, 73, self.insuranceKid)
        sheet.write(row, 74, self.insuranceKidGender)
        sheet.write(row, 75, self.insuranceKidIdentifierNumber)
        sheet.write(row, 76, self.rule)
        sheet.write(row, 77, self.kid1)
        sheet.write(row, 78, self.kid1Gender)
        sheet.write(row, 79, self.kid1Identify)
        sheet.write(row, 80, self.kidCount)
        sheet.write(row, 81, self.fileNumber)
class Staff:
name = ''
workPlace = ''
center = ''
department = ''
administration = ''
group = ''
leader = ''
userName = ''
OAName = ''
englishName = ''
state = '在职'
nation = ''
politcInfo = ''
gender = 0
nativePlace = ''
mariageState = 0
birthDate = 0
education = ''
hobby = ''
workEmail = ''
entryTime = 0
skill = ''
identityNumber = ''
householdPlace = ''
householdType = ''
phoneNumber = 0
housePhoneNumber = 0
livingPlace = ''
educationInfo = ''
trainInfo = ''
professionInfo = ''
workHistory = ''
familyMember = ''
urgentContact = ''
bankCard = ''
bankPosition = ''
submitInfo = 0
startWorkTime = 0
staffType = ''
position = ''
positionLevel = ''
grade = ''
personEmail = ''
contractStartTime = ''
contractEndTime = ''
contractRemindTime = ''
probation = ''
fullTime = ''
leaveTime = ''
leaveReason = ''
fundTime = ''
fundPercent = ''
fundType = ''
fundXXTime = ''
fundXXNum = ''
fundNumber = ''
insuranceDate = ''
insuranceType = ''
insuranceDate2 = ''
insuranceXXNum = ''
insuranceMemo = ''
businessInsurance = ''
moveMemo = ''
record = 0
laborContract = ''
bodyCheck = ''
other = ''
workNum = ''
workingYearsStartTime = ''
contractDuration = ''
age = ''
insuranceKid = ''
insuranceKidGender = ''
insuranceKidIdentifierNumber = ''
rule = ''
kid1 = ''
kid1Gender = ''
kid1Identify = ''
kidCount = ''
fileNumber = ''
@property
def company(self):
if self.workPlace == '广州':
return '广州威尔森信息科技有限公司'
elif self.workPlace == '北京' or self.workPlace == '长春':
return '广州威尔森信息科技有限公司北京分公司'
elif self.workPlace == '上海':
return '广州威尔森信息科技有限公司上海分公司'
else:
return 'something is wrong'
def __init__(self, sheet):
self.name = SheetHelper.getValueFromSheet(sheet, 'B3')
self.center = SheetHelper.getValueFromSheet(sheet, 'B53')
self.department = SheetHelper.getValueFromSheet(sheet, 'D53')
self.administration = SheetHelper.getValueFromSheet(sheet, 'F53')
self.group = SheetHelper.getValueFromSheet(sheet, 'H53')
self.workPlace = SheetHelper.getValueFromSheet(sheet, 'B56')
self.leader = SheetHelper.getValueFromSheet(sheet, 'H54')
self.englishName = SheetHelper.getValueFromSheet(sheet, 'D3')
self.nation = SheetHelper.getValueFromSheet(sheet, 'F3')
self.politcInfo = SheetHelper.getValueFromSheet(sheet, 'F5')
self.gender = SheetHelper.getValueFromSheet(sheet, 'F4')
self.nativePlace = SheetHelper.getValueFromSheet(sheet, 'B4')
self.mariageState = SheetHelper.getValueFromSheet(sheet, 'B5')
self.birthDate = SheetHelper.getValueFromSheet(sheet, 'D4')
self.hobby = SheetHelper.getValueFromSheet(sheet, 'B6')
self.workEmail = SheetHelper.getValueFromSheet(sheet, 'D56')
self.entryTime = SheetHelper.getValueFromSheet(sheet, 'B55')
self.education = SheetHelper.getValueFromSheet(sheet, 'D6')
self.skill = SheetHelper.getValueFromSheet(sheet, 'B6')
self.identityNumber = SheetHelper.getValueFromSheet(sheet, 'F7')
self.householdPlace = SheetHelper.getValueFromSheet(sheet, 'F9')
self.householdType = SheetHelper.getValueFromSheet(sheet, 'F6')
self.phoneNumber = SheetHelper.getValueFromSheet(sheet, 'C8')
self.livingPlace = SheetHelper.getValueFromSheet(sheet, 'F8')
educationInfos = SheetHelper.getValueFromBounds(sheet, 'B11', 'H13')
educationInfosStrings = []
for educationInfo in educationInfos:
educationInfosStrings.append('起止时间:{0[0]};学校:{0[1]};专业:{0[2]};证书:{0[3]}'.format(educationInfo))
self.educationInfo = ';'.join(educationInfosStrings)
trainInfos = SheetHelper.getValueFromBounds(sheet, 'B15', 'H17')
trainInfosStrings = []
for trainInfo in trainInfos:
if len(trainInfos) < 4:
print("%s 培训记录不合法,记录将抛弃", self.name)
continue
trainInfosStrings.append('起止时间:{0[0]};培训机构:{0[1]};培训课程:{0[2]};证书:{0[3]}'.format(trainInfo))
self.trainInfo = ';'.join(trainInfosStrings)
self.professionInfo = SheetHelper.getValueFromSheet(sheet, 'B18')
workHistorys = SheetHelper.getValueFromBounds(sheet, 'B22', 'H25')
workHistoryStrings = []
for workHistory in workHistorys:
workHistoryStrings.append('起止时间:{0[0]};工作单位:{0[1]};职位:{0[2]};薪资状况:{0[3]};离职原因:{0[4]}'.format(workHistory))
self.workHistory = ';'.join(workHistoryStrings)
familyMembers = SheetHelper.getValueFromBounds(sheet, 'B27', 'H29')
familyMemberStrings = []
for familyMember in familyMembers:
familyMemberStrings.append('姓名:{0[0]};关系:{0[1]};出生年月:{0[2]};工作单位:{0[3]};职务:{0[4]}'.format(familyMember))
self.familyMember = ';'.join(familyMemberStrings)
urgentContact = ''
if SheetHelper.getValueFromSheet(sheet, 'B35') != '':
urgentContact = '紧急联系人1:%s;双方关系:%s;联系电话:%s' % (SheetHelper.getValueFromSheet(sheet, 'B35'),SheetHelper.getValueFromSheet(sheet, 'D35'),
SheetHelper.getValueFromSheet(sheet, 'G35'))
if SheetHelper.getValueFromSheet(sheet, 'B36') != '':
urgentContact += ';紧急联系人2:%s;双方关系:%s;联系电话:%s' % (SheetHelper.getValueFromSheet(sheet, 'B36'),SheetHelper.getValueFromSheet(sheet, 'D36'),
SheetHelper.getValueFromSheet(sheet, 'G36'))
self.urgentContact = urgentContact
self.bankCard = SheetHelper.getValueFromSheet(sheet, 'C37')
self.bankPosition = SheetHelper.getValueFromSheet(sheet, 'F37')
self.staffType = SheetHelper.getValueFromSheet(sheet, 'H55')
self.position = SheetHelper.getValueFromSheet(sheet, 'B54')
self.positionLevel = SheetHelper.getValueFromSheet(sheet, 'D54')
self.grade = SheetHelper.getValueFromSheet(sheet, 'F54')
self.personEmail = SheetHelper.getValueFromSheet(sheet, 'C9')
self.contractStartTime = SheetHelper.getValueFromSheet(sheet, 'B55')
self.probation = SheetHelper.getValueFromSheet(sheet, 'D55')
self.fundPercent = SheetHelper.getValueFromSheet(sheet, 'C44')
self.fundType = SheetHelper.getValueFromSheet(sheet, 'G43')
self.fundNumber = SheetHelper.getValueFromSheet(sheet, 'G44')
if self.fundNumber == '可不填':
self.fundNumber = ''
self.insuranceType = SheetHelper.getValueFromSheet(sheet, 'C43')
self.insuranceMemo = SheetHelper.getValueFromSheet(sheet, 'F6')
self.workingYearsStartTime = SheetHelper.getValueFromSheet(sheet, 'B55')
self.contractDuration = SheetHelper.getValueFromSheet(sheet, 'F55')
self.insuranceKid = SheetHelper.getValueFromSheet(sheet, 'C46')
self.insuranceKidGender = SheetHelper.getValueFromSheet(sheet, 'G46')
self.insuranceKidIdentifierNumber = SheetHelper.getValueFromSheet(sheet, 'C47')
self.kid1 = SheetHelper.getValueFromSheet(sheet, 'B49')
self.kid1Gender = SheetHelper.getValueFromSheet(sheet, 'C47')
self.kid1Identify = SheetHelper.getValueFromSheet(sheet, 'F49')
self.kidCount = SheetHelper.getValueFromSheet(sheet, 'D5')
self.insuranceKidIdentifierNumber = SheetHelper.getValueFromSheet(sheet, 'C47')
self.insuranceKidIdentifierNumber = SheetHelper.getValueFromSheet(sheet, 'C47')
self.insuranceKidIdentifierNumber = SheetHelper.getValueFromSheet(sheet, 'C47')
self.insuranceKidIdentifierNumber = SheetHelper.getValueFromSheet(sheet, 'C47')
def writeToSheet(self, sheet, row):
sheet.write(row, 1, self.company)
sheet.write(row, 2, self.center)
sheet.write(row, 3, self.department)
sheet.write(row, 4, self.administration)
sheet.write(row, 5, self.group)
sheet.write(row, 6, self.workPlace)
sheet.write(row, 7, self.leader)
sheet.write(row, 8, self.name)
sheet.write(row, 9, self.OAName)
sheet.write(row, 10, self.englishName)
sheet.write(row, 11, self.state)
sheet.write(row, 12, self.nation)
sheet.write(row, 13, self.politcInfo)
sheet.write(row, 14, self.gender)
sheet.write(row, 15, self.nativePlace)
sheet.write(row, 16, self.mariageState)
sheet.write(row, 17, self.birthDate)
sheet.write(row, 18, self.education)
sheet.write(row, 19, self.hobby)
sheet.write(row, 20, self.workEmail)
sheet.write(row, 21, self.entryTime)
sheet.write(row, 22, self.skill)
sheet.write(row, 23, self.identityNumber)
sheet.write(row, 24, self.householdPlace)
sheet.write(row, 25, self.householdType)
sheet.write(row, 26, self.phoneNumber)
sheet.write(row, 27, self.housePhoneNumber)
sheet.write(row, 28, self.livingPlace)
sheet.write(row, 29, self.educationInfo)
sheet.write(row, 30, self.trainInfo)
sheet.write(row, 31, self.professionInfo)
sheet.write(row, 32, self.workHistory)
sheet.write(row, 33, self.familyMember)
sheet.write(row, 34, self.urgentContact)
sheet.write(row, 35, self.bankCard)
sheet.write(row, 36, self.bankPosition)
sheet.write(row, 37, self.submitInfo)
sheet.write(row, 38, self.startWorkTime)
sheet.write(row, 39, self.staffType)
sheet.write(row, 40, self.position)
sheet.write(row, 41, self.positionLevel)
sheet.write(row, 42, self.grade)
sheet.write(row, 43, self.personEmail)
sheet.write(row, 44, self.contractStartTime)
sheet.write(row, 45, self.contractEndTime)
sheet.write(row, 46, self.contractRemindTime)
sheet.write(row, 47, self.probation)
sheet.write(row, 48, self.fullTime)
sheet.write(row, 49, self.leaveTime)
sheet.write(row, 50, self.leaveReason)
sheet.write(row, 51, self.fundTime)
sheet.write(row, 52, self.fundPercent)
sheet.write(row, 53, self.fundType)
sheet.write(row, 54, self.fundXXTime)
sheet.write(row, 55, self.fundXXNum)
sheet.write(row, 56, self.fundNumber)
sheet.write(row, 57, self.insuranceDate)
sheet.write(row, 58, self.insuranceType)
sheet.write(row, 59, self.insuranceDate2)
sheet.write(row, 60, self.insuranceXXNum)
sheet.write(row, 61, self.insuranceMemo)
sheet.write(row, 62, self.businessInsurance)
sheet.write(row, 63, self.moveMemo)
sheet.write(row, 64, self.record)
sheet.write(row, 65, self.laborContract)
sheet.write(row, 66, self.bodyCheck)
sheet.write(row, 67, self.other)
sheet.write(row, 68, self.workNum)
sheet.write(row, 70, self.workingYearsStartTime)
sheet.write(row, 71, self.contractDuration)
sheet.write(row, 72, self.age)
sheet.write(row, 73, self.insuranceKid)
sheet.write(row, 74, self.insuranceKidGender)
sheet.write(row, 75, self.insuranceKidIdentifierNumber)
sheet.write(row, 76, self.rule)
sheet.write(row, 77, self.kid1)
sheet.write(row, 78, self.kid1Gender)
sheet.write(row, 79, self.kid1Identify)
sheet.write(row, 80, self.kidCount)
sheet.write(row, 81, self.fileNumber) | true | true |
1c3527b72a8c9842e102fa8eb962fbb3b769d1c1 | 3,224 | py | Python | pytmatrix/tmatrix_aux.py | DaveOri/pytmatrix | 0287a41d49ff3a34d5309f5f832183f37b24276d | [
"MIT"
] | 64 | 2015-03-09T18:35:52.000Z | 2022-02-28T22:01:40.000Z | pytmatrix/tmatrix_aux.py | DaveOri/pytmatrix | 0287a41d49ff3a34d5309f5f832183f37b24276d | [
"MIT"
] | 22 | 2015-03-09T19:08:47.000Z | 2022-01-19T08:10:10.000Z | pytmatrix/tmatrix_aux.py | DaveOri/pytmatrix | 0287a41d49ff3a34d5309f5f832183f37b24276d | [
"MIT"
] | 38 | 2015-01-29T13:22:32.000Z | 2022-02-17T07:47:06.000Z | """
Copyright (C) 2009-2015 Jussi Leinonen, Finnish Meteorological Institute,
California Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# current pytmatrix version string
VERSION = "0.3.2"
# typical radar wavelengths [mm] at the S, C, X, Ku, Ka and W bands
wl_S = 111.0
wl_C = 53.5
wl_X = 33.3
wl_Ku = 22.0
wl_Ka = 8.43
wl_W = 3.19
# typical values of K_w_sqr (the |K_w|^2 water dielectric factor) at each
# band, keyed by the wavelengths defined above
K_w_sqr = {wl_S: 0.93, wl_C: 0.93, wl_X: 0.93, wl_Ku: 0.93, wl_Ka: 0.92,
    wl_W: 0.75}
# preset scattering geometries; each is a 6-tuple of angles in degrees
geom_horiz_back = (90.0, 90.0, 0.0, 180.0, 0.0, 0.0) #horizontal backscatter
geom_horiz_forw = (90.0, 90.0, 0.0, 0.0, 0.0, 0.0) #horizontal forward scatter
geom_vert_back = (0.0, 180.0, 0.0, 0.0, 0.0, 0.0) #vertical backscatter
geom_vert_forw = (180.0, 180.0, 0.0, 0.0, 0.0, 0.0) #vertical forward scatter
#Drop Shape Relationship Functions
def dsr_thurai_2007(D_eq):
    """Drop shape relationship from the Thurai 2007
    (http://dx.doi.org/10.1175/JTECH2051.1) paper.

    Arguments:
        D_eq: Drop volume-equivalent diameter (mm)

    Returns:
        r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer
        class expects horizontal to vertical, so you should pass
        1/dsr_thurai_2007
    """
    # Small drops are spherical; two quartic fits cover the larger sizes.
    if D_eq < 0.7:
        return 1.0
    if D_eq < 1.5:
        poly = (1.173, -0.5165, 0.4698, -0.1317, -8.5e-3)
    else:
        poly = (1.065, -6.25e-2, -3.99e-3, 7.66e-4, -4.095e-5)
    return sum(coeff * D_eq**power for power, coeff in enumerate(poly))
def dsr_pb(D_eq):
    """Pruppacher and Beard drop shape relationship function.

    Arguments:
        D_eq: Drop volume-equivalent diameter (mm)

    Returns:
        r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer
        class expects horizontal to vertical, so you should pass 1/dsr_pb
    """
    # Linear fit: aspect ratio decreases with drop size.
    axis_ratio = 1.03 - 0.062 * D_eq
    return axis_ratio
def dsr_bc(D_eq):
    """Beard and Chuang drop shape relationship function.

    Arguments:
        D_eq: Drop volume-equivalent diameter (mm)

    Returns:
        r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer
        class expects horizontal to vertical, so you should pass 1/dsr_bc
    """
    # Quartic polynomial fit in the equivalent diameter.
    poly = (1.0048, 5.7e-04, -2.628e-02, 3.682e-03, -1.677e-04)
    return sum(coeff * D_eq**power for power, coeff in enumerate(poly))
| 34.297872 | 80 | 0.694479 |
VERSION = "0.3.2"
wl_S = 111.0
wl_C = 53.5
wl_X = 33.3
wl_Ku = 22.0
wl_Ka = 8.43
wl_W = 3.19
K_w_sqr = {wl_S: 0.93, wl_C: 0.93, wl_X: 0.93, wl_Ku: 0.93, wl_Ka: 0.92,
wl_W: 0.75}
geom_horiz_back = (90.0, 90.0, 0.0, 180.0, 0.0, 0.0)
geom_horiz_forw = (90.0, 90.0, 0.0, 0.0, 0.0, 0.0)
geom_vert_back = (0.0, 180.0, 0.0, 0.0, 0.0, 0.0)
geom_vert_forw = (180.0, 180.0, 0.0, 0.0, 0.0, 0.0)
def dsr_thurai_2007(D_eq):
if D_eq < 0.7:
return 1.0
elif D_eq < 1.5:
return 1.173 - 0.5165*D_eq + 0.4698*D_eq**2 - 0.1317*D_eq**3 - \
8.5e-3*D_eq**4
else:
return 1.065 - 6.25e-2*D_eq - 3.99e-3*D_eq**2 + 7.66e-4*D_eq**3 - \
4.095e-5*D_eq**4
def dsr_pb(D_eq):
return 1.03 - 0.062*D_eq
def dsr_bc(D_eq):
return 1.0048 + 5.7e-04*D_eq - 2.628e-02 * D_eq**2 + \
3.682e-03*D_eq**3 - 1.677e-04 * D_eq**4
| true | true |
1c3528b330d6d52f1eafee2cbedada808d989220 | 3,093 | py | Python | src/iago/Parser.py | ferchault/iago | fc853eab7820df18d20b653acdc09c156dc152e1 | [
"MIT"
] | null | null | null | src/iago/Parser.py | ferchault/iago | fc853eab7820df18d20b653acdc09c156dc152e1 | [
"MIT"
] | 18 | 2016-10-09T14:48:28.000Z | 2017-05-08T06:34:24.000Z | src/iago/Parser.py | ferchault/iago | fc853eab7820df18d20b653acdc09c156dc152e1 | [
"MIT"
] | null | null | null | # standard modules
import os
import re
# custom modules
import Reader
class Parser(object):
def __init__(self):
self._readers = dict()
self.path = None
self.runmatch = dict()
self._runcache = None
def get_atom_indices(self, selector):
"""
:param selector: Valid selection string.
:return: List of 0-based atom indices.
"""
u = self._readers.itervalues().next().get_universe()
if isinstance(u, Reader.EmptyUniverse):
return []
ag = u.select_atoms(selector)
return [_.index for _ in ag]
def get_runs(self):
""" Discovers all available runs in this bucket.
:return: Dict of run names available in this bucket. Keys: paths, values: names.
"""
if self._runcache is not None:
return self._runcache
# regular runs
inodes = os.listdir(self.path)
directories = [_ for _ in inodes if os.path.isdir(os.path.join(self.path, _))]
runs = {_: _ for _ in directories if _.startswith('run-')}
# alternative run directories
for root, dirs, files in os.walk(self.path):
relpath = os.path.relpath(root, self.path)
for regex, replace in self.runmatch.iteritems():
g = re.match(regex, relpath)
if g is not None:
runs[relpath] = replace.format(**g.groupdict())
self._runcache = runs
return self._runcache
def get_universe(self, run):
return self._readers[run].get_universe()
def get_input(self, run):
return self._readers[run].get_input()
def get_output(self, run, alias):
o = self._readers[run].get_output()
o['run'] = alias
return o
def get_groups(self, run, groups):
u = self.get_universe(run)
if isinstance(u, Reader.EmptyUniverse):
return {key: [] for (key, value) in groups.iteritems()}
return {key: u.atoms[value] for (key, value) in groups.iteritems()}
def get_trajectory_frames(self, run):
return self._readers[run].get_trajectory_frames()
def get_run_code(self, runpath, topologyfiles, configfiles, logfiles):
readers = {'cp2k': Reader.CP2KReader, 'namd': Reader.NAMDReader}
for label, reader in readers.iteritems():
r = reader(runpath)
if 'inputnames' in r.get_options():
r.inputnames = configfiles + r.inputnames
if 'topologies' in r.get_options():
r.topologies = topologyfiles + r.topologies
if 'logs' in r.get_options():
r.logs = logfiles + r.logs
if r.claims():
return label
def run(self, path, runmatch=None, topologyfiles=None, configfiles=None, logfiles=None):
    """ Parses all runs of a certain bucket.

    :param path: Basepath of all runs in this bucket.
    :param runmatch: For run autodiscovery: dict of regular expressions matching relative paths from bucket root as
    keys and named group replacements as values.
    :param topologyfiles: Extra topology file names offered to every candidate reader.
    :param configfiles: Extra input/config file names offered to every candidate reader.
    :param logfiles: Extra log file names offered to every candidate reader.
    :raises NotImplementedError: when a discovered run is claimed by no known reader.
    """
    # FIX: the previous signature used mutable default arguments ({} / []),
    # which are shared between calls in Python; use None sentinels and create
    # fresh objects per call instead. Behavior for callers is unchanged.
    if runmatch is None:
        runmatch = dict()
    if topologyfiles is None:
        topologyfiles = []
    if configfiles is None:
        configfiles = []
    if logfiles is None:
        logfiles = []

    self.path = path
    self.runmatch = runmatch
    for run in self.get_runs():
        code = self.get_run_code(os.path.join(path, run), topologyfiles, configfiles, logfiles)
        if code == 'cp2k':
            self._readers[run] = Reader.CP2KReader(os.path.join(path, run))
        elif code == 'namd':
            self._readers[run] = Reader.NAMDReader(os.path.join(path, run))
        else:
            raise NotImplementedError()
        self._readers[run].read()
| 29.457143 | 113 | 0.696088 |
import os
import re
import Reader
class Parser(object):
    """Parses a 'bucket' directory containing one or more simulation runs and
    exposes their inputs, outputs, atom groups and trajectories through
    per-run Reader instances (CP2K or NAMD)."""

    def __init__(self):
        # run name -> Reader instance, populated by run()
        self._readers = dict()
        # bucket base path, set by run()
        self.path = None
        # regex -> named-group replacement format string, for run autodiscovery
        self.runmatch = dict()
        # memoized result of get_runs()
        self._runcache = None

    def get_atom_indices(self, selector):
        """Return the 0-based atom indices matched by *selector*, evaluated
        against an arbitrary run's universe; empty list when no universe is
        available."""
        u = self._readers.itervalues().next().get_universe()
        if isinstance(u, Reader.EmptyUniverse):
            return []
        ag = u.select_atoms(selector)
        return [_.index for _ in ag]

    def get_runs(self):
        """Discover all available runs in this bucket (memoized).

        :return: Dict of runs; keys are relative paths, values display names.
        """
        if self._runcache is not None:
            return self._runcache
        # Immediate subdirectories named 'run-*' count as regular runs.
        inodes = os.listdir(self.path)
        directories = [_ for _ in inodes if os.path.isdir(os.path.join(self.path, _))]
        runs = {_: _ for _ in directories if _.startswith('run-')}
        # Any relative path matching a runmatch regex is also a run; its name
        # is built from the regex's named groups.
        for root, dirs, files in os.walk(self.path):
            relpath = os.path.relpath(root, self.path)
            for regex, replace in self.runmatch.iteritems():
                g = re.match(regex, relpath)
                if g is not None:
                    runs[relpath] = replace.format(**g.groupdict())
        self._runcache = runs
        return self._runcache

    def get_universe(self, run):
        """Return the universe of the reader registered for *run*."""
        return self._readers[run].get_universe()

    def get_input(self, run):
        """Return the parsed input of the reader registered for *run*."""
        return self._readers[run].get_input()

    def get_output(self, run, alias):
        """Return the reader's output for *run*, tagged with *alias* under 'run'."""
        o = self._readers[run].get_output()
        o['run'] = alias
        return o

    def get_groups(self, run, groups):
        """Map group name -> atom selection for *run*; all groups become empty
        lists when the run has no loadable universe."""
        u = self.get_universe(run)
        if isinstance(u, Reader.EmptyUniverse):
            return {key: [] for (key, value) in groups.iteritems()}
        return {key: u.atoms[value] for (key, value) in groups.iteritems()}

    def get_trajectory_frames(self, run):
        """Return the trajectory frame numbers known for *run*."""
        return self._readers[run].get_trajectory_frames()

    def get_run_code(self, runpath, topologyfiles, configfiles, logfiles):
        """Probe which reader ('cp2k' or 'namd') claims *runpath*; implicitly
        returns None when no reader claims it."""
        readers = {'cp2k': Reader.CP2KReader, 'namd': Reader.NAMDReader}
        for label, reader in readers.iteritems():
            r = reader(runpath)
            if 'inputnames' in r.get_options():
                r.inputnames = configfiles + r.inputnames
            if 'topologies' in r.get_options():
                r.topologies = topologyfiles + r.topologies
            if 'logs' in r.get_options():
                r.logs = logfiles + r.logs
            if r.claims():
                return label

    def run(self, path, runmatch=dict(), topologyfiles=[], configfiles=[], logfiles=[]):
        """Parse all runs of this bucket, instantiating one reader per run.

        :param path: Basepath of all runs in this bucket.
        :param runmatch: Run autodiscovery: dict of regexes over relative
            paths -> named-group replacement format strings.
        :raises NotImplementedError: for runs claimed by no known reader.
        """
        # NOTE(review): mutable default arguments are shared between calls;
        # they are not mutated here, but None sentinels would be safer.
        self.path = path
        self.runmatch = runmatch
        for run in self.get_runs():
            code = self.get_run_code(os.path.join(path, run), topologyfiles, configfiles, logfiles)
            if code == 'cp2k':
                self._readers[run] = Reader.CP2KReader(os.path.join(path, run))
            elif code == 'namd':
                self._readers[run] = Reader.NAMDReader(os.path.join(path, run))
            else:
                raise NotImplementedError()
            self._readers[run].read()
| true | true |
1c3528b5e4cac10d724e001938a99d7a640d8dbd | 684 | py | Python | core/pages/header/header.py | mmihailicenko/selenium-pytest-framework | 9487ae4911e8ac7f6a69028603d31b347f182f47 | [
"MIT"
] | 1 | 2021-07-26T06:28:30.000Z | 2021-07-26T06:28:30.000Z | core/pages/header/header.py | mmihailicenko/selenium-pytest-framework | 9487ae4911e8ac7f6a69028603d31b347f182f47 | [
"MIT"
] | null | null | null | core/pages/header/header.py | mmihailicenko/selenium-pytest-framework | 9487ae4911e8ac7f6a69028603d31b347f182f47 | [
"MIT"
] | null | null | null | from selenium.webdriver.common.by import By
from core.pages.base_page import BasePage
from core.pages.main.main_page import MainPage
class Header(BasePage):
LOGO_TITLE = (By.CSS_SELECTOR, "#logo .title")
SEARCH_FIELD = (By.CSS_SELECTOR, ".search-field")
SEARCH_SUBMIT_BTN = (By.CSS_SELECTOR, ".search-submit")
ADD_TO_CART_BTN = (By.CSS_SELECTOR, ".add-to-cart-button")
def and_get_main_page(self) -> MainPage:
return MainPage(self)
def set_search(self, value: str):
self.set_text(*self.SEARCH_FIELD, value)
return Header(self)
def submit_search(self):
self.submit(*self.SEARCH_SUBMIT_BTN)
return Header(self)
| 28.5 | 62 | 0.701754 | from selenium.webdriver.common.by import By
from core.pages.base_page import BasePage
from core.pages.main.main_page import MainPage
class Header(BasePage):
    """Page object exposing the global header controls (logo, search, cart)."""

    LOGO_TITLE = (By.CSS_SELECTOR, "#logo .title")
    SEARCH_FIELD = (By.CSS_SELECTOR, ".search-field")
    SEARCH_SUBMIT_BTN = (By.CSS_SELECTOR, ".search-submit")
    ADD_TO_CART_BTN = (By.CSS_SELECTOR, ".add-to-cart-button")

    def and_get_main_page(self) -> MainPage:
        """Hand back a MainPage built on top of this page object."""
        main_page = MainPage(self)
        return main_page

    def set_search(self, value: str):
        """Fill the search input with *value*."""
        by, selector = self.SEARCH_FIELD
        self.set_text(by, selector, value)
        return Header(self)

    def submit_search(self):
        """Trigger the search submit control."""
        by, selector = self.SEARCH_SUBMIT_BTN
        self.submit(by, selector)
        return Header(self)
| true | true |
1c35292b72892e1eb6c4d0cd70aa67d1eef3aca7 | 3,085 | py | Python | Codes/recognition/lib/models/crnn.py | hsupengbo/201800130086_spring_NNML | c51d074c2d33650cc923ccc4297ecbce31c83df7 | [
"MIT"
] | 3 | 2021-12-15T06:57:46.000Z | 2022-03-16T06:26:16.000Z | Codes/recognition/lib/models/crnn.py | pengbohsu/201800130086_spring_NNML | c51d074c2d33650cc923ccc4297ecbce31c83df7 | [
"MIT"
] | 2 | 2021-12-15T07:34:34.000Z | 2022-03-16T06:24:21.000Z | Codes/recognition/lib/models/crnn.py | pengbohsu/201800130086_spring_NNML | c51d074c2d33650cc923ccc4297ecbce31c83df7 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM followed by a per-timestep linear projection."""

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Forward and backward hidden states are concatenated: 2 * nHidden in.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        features, _ = self.rnn(input)
        seq_len, batch, hidden = features.size()
        # Flatten time/batch so the Linear hits every timestep in one matmul.
        flat = features.view(seq_len * batch, hidden)
        projected = self.embedding(flat)  # [seq_len * batch, nOut]
        return projected.view(seq_len, batch, -1)
class CRNN(nn.Module):
    """CRNN recognizer: 7-layer CNN feature extractor + 2-layer BiLSTM head.

    Expects input images of shape (batch, nc, imgH, W) with imgH a multiple
    of 16; forward() returns log-probabilities of shape (W', batch, nclass),
    with width serving as the time axis.
    """

    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        # NOTE(review): n_rnn is accepted but never used -- the recurrent
        # head is hard-wired to two BidirectionalLSTM layers below.
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'

        # Per conv layer: kernel sizes, paddings, strides, output channels.
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]

        cnn = nn.Sequential()

        def convRelu(i, batchNormalization=False):
            # Append conv layer i (+ optional BatchNorm) and a ReLU variant.
            nIn = nc if i == 0 else nm[i - 1]
            nOut = nm[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))

        # Shape comments assume a 1x32x128 input (channels x H x W).
        convRelu(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))  # 64x16x64
        convRelu(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))  # 128x8x32
        convRelu(2, True)
        convRelu(3)
        # Asymmetric pooling: stride (2, 1) halves height but keeps width
        # (the time axis) at full resolution for the sequence model.
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 256x4x16
        convRelu(4, True)
        convRelu(5)
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 512x2x16
        convRelu(6, True)  # 512x1x16

        self.cnn = cnn
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))

    def forward(self, input):
        # conv features: the CNN must reduce height to exactly 1; width then
        # becomes the sequence dimension fed to the LSTMs.
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)  # b *512 * width
        conv = conv.permute(2, 0, 1)  # [w, b, c]
        output = F.log_softmax(self.rnn(conv), dim=2)

        return output
def weights_init(m):
    """DCGAN-style initializer: N(0, 0.02) for modules whose class name
    contains 'Conv'; N(1, 0.02) weights and zero bias for 'BatchNorm'."""
    name = type(m).__name__
    if 'Conv' in name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_crnn(config):
    """Build a single-channel CRNN from *config* and apply weights_init."""
    net = CRNN(config.MODEL.IMAGE_SIZE.H, 1, config.MODEL.NUM_CLASSES + 1,
               config.MODEL.NUM_HIDDEN)
    net.apply(weights_init)
    return net
| 32.135417 | 101 | 0.540357 | import torch.nn as nn
import torch.nn.functional as F
class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM with a per-timestep linear projection to nOut."""

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # 2 * nHidden: forward and backward hidden states are concatenated.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        recurrent, _ = self.rnn(input)
        T, b, h = recurrent.size()
        # Flatten time/batch so the Linear is applied to all timesteps at once.
        t_rec = recurrent.view(T * b, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(T, b, -1)
        return output
class CRNN(nn.Module):
    """CNN feature extractor followed by a two-layer BiLSTM sequence head;
    emits (width, batch, nclass) log-probabilities."""

    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        # NOTE(review): n_rnn is unused; the head is fixed at two BiLSTMs.
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
        # kernel sizes / paddings / strides / channel counts per conv layer
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]
        cnn = nn.Sequential()

        def convRelu(i, batchNormalization=False):
            # conv + optional BatchNorm + (Leaky)ReLU for layer i
            nIn = nc if i == 0 else nm[i - 1]
            nOut = nm[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))

        convRelu(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))
        convRelu(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))
        convRelu(2, True)
        convRelu(3)
        # stride (2, 1): halve height only, preserving width (the time axis)
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
        convRelu(4, True)
        convRelu(5)
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
        convRelu(6, True)
        self.cnn = cnn
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))

    def forward(self, input):
        # The CNN must reduce height to exactly 1; width becomes the
        # sequence axis for the recurrent head.
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)
        conv = conv.permute(2, 0, 1)  # (width, batch, channels)
        output = F.log_softmax(self.rnn(conv), dim=2)
        return output
return output
def weights_init(m):
    """Initialize conv / batch-norm modules DCGAN-style, matched by class name."""
    classname = m.__class__.__name__
    is_conv = classname.find('Conv') != -1
    is_bn = classname.find('BatchNorm') != -1
    if is_conv:
        m.weight.data.normal_(0.0, 0.02)
    elif is_bn:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_crnn(config):
    """Construct a 1-channel CRNN from *config* and apply weights_init.

    NUM_CLASSES + 1 -- presumably the extra slot is the CTC blank; confirm.
    """
    model = CRNN(config.MODEL.IMAGE_SIZE.H, 1, config.MODEL.NUM_CLASSES + 1, config.MODEL.NUM_HIDDEN)
    model.apply(weights_init)
    return model
| true | true |
1c35298fcc6b85b9e74eaf2e56505004f4529ad7 | 14,679 | py | Python | pandas/core/arrays/numpy_.py | kadekillary/pandas | f6a5dd4b8c450d73f3bec964b05cca32cef4bb71 | [
"BSD-3-Clause"
] | 1 | 2019-12-27T01:54:53.000Z | 2019-12-27T01:54:53.000Z | pandas/core/arrays/numpy_.py | kadekillary/pandas | f6a5dd4b8c450d73f3bec964b05cca32cef4bb71 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/arrays/numpy_.py | kadekillary/pandas | f6a5dd4b8c450d73f3bec964b05cca32cef4bb71 | [
"BSD-3-Clause"
] | null | null | null | import numbers
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_array_like, is_list_like
from pandas.core.dtypes.missing import isna
from pandas import compat
from pandas.core import nanops
from pandas.core.algorithms import searchsorted, take, unique
from pandas.core.construction import extract_array
from pandas.core.missing import backfill_1d, pad_1d
from .base import ExtensionArray, ExtensionOpsMixin
class PandasDtype(ExtensionDtype):
    """
    A Pandas ExtensionDtype for NumPy dtypes.

    .. versionadded:: 0.24.0

    This is mostly for internal compatibility, and is not especially
    useful on its own.

    Parameters
    ----------
    dtype : numpy.dtype
    """

    _metadata = ("_dtype",)

    def __init__(self, dtype):
        dtype = np.dtype(dtype)
        self._dtype = dtype
        self._name = dtype.name
        self._type = dtype.type

    def __repr__(self):
        return "PandasDtype({!r})".format(self.name)

    @property
    def numpy_dtype(self):
        """The NumPy dtype this PandasDtype wraps."""
        return self._dtype

    @property
    def name(self):
        """The dtype's string name, e.g. 'int64'."""
        return self._name

    @property
    def type(self):
        """The scalar type for this dtype, e.g. numpy.int64."""
        return self._type

    @property
    def _is_numeric(self):
        # exclude object, str, unicode, void.
        return self.kind in set("biufc")

    @property
    def _is_boolean(self):
        return self.kind == "b"

    @classmethod
    def construct_from_string(cls, string):
        return cls(np.dtype(string))

    @classmethod
    def construct_array_type(cls):
        # FIX: this must be a classmethod, matching construct_from_string
        # above and the ExtensionDtype.construct_array_type contract;
        # without the decorator, PandasDtype.construct_array_type() called
        # on the class raises TypeError (missing 'cls').
        return PandasArray

    @property
    def kind(self):
        """The numpy kind character ('i', 'f', 'b', ...)."""
        return self._dtype.kind

    @property
    def itemsize(self):
        """The element size of this data-type object."""
        return self._dtype.itemsize
class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
"""
A pandas ExtensionArray for NumPy data.
.. versionadded :: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
values : ndarray
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
Attributes
----------
None
Methods
-------
None
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
# that _typ to ensure that that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
# ------------------------------------------------------------------------
# Constructors
def __init__(self, values, copy=False):
    """Wrap *values* (a 1-D ndarray, or another PandasArray to unwrap)."""
    if isinstance(values, type(self)):
        # Unwrap nested PandasArray so self._ndarray is always a raw ndarray.
        values = values._ndarray
    if not isinstance(values, np.ndarray):
        raise ValueError("'values' must be a NumPy array.")

    if values.ndim != 1:
        raise ValueError("PandasArray must be 1-dimensional.")

    if copy:
        values = values.copy()

    self._ndarray = values
    self._dtype = PandasDtype(values.dtype)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
result = np.asarray(scalars, dtype=dtype)
if copy and result is scalars:
result = result.copy()
return cls(result)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate(to_concat))
# ------------------------------------------------------------------------
# Data
@property
def dtype(self):
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(self, dtype=None):
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# Lightly modified version of
# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/\
# numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in PandasArray, since pandas' ExtensionArrays are 1-d.
out = kwargs.get("out", ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use PandasArray instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle PandasArray objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x for x in inputs)
if out:
kwargs["out"] = tuple(
x._ndarray if isinstance(x, PandasArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple and len(result):
# multiple return values
if not lib.is_scalar(result[0]):
# re-box array-like results
return tuple(type(self)(x) for x in result)
else:
# but not scalar reductions
return result
elif method == "at":
# no return value
return None
else:
# one return value
if not lib.is_scalar(result):
# re-box array-like results, but not scalar reductions
result = type(self)(result)
return result
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def __getitem__(self, item):
if isinstance(item, type(self)):
item = item._ndarray
result = self._ndarray[item]
if not lib.is_scalar(item):
result = type(self)(result)
return result
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
key = np.asarray(key)
if not lib.is_scalar(value):
value = np.asarray(value)
values = self._ndarray
t = np.result_type(value, values)
if t != self._ndarray.dtype:
values = values.astype(t, casting="safe")
values[key] = value
self._dtype = PandasDtype(t)
self._ndarray = values
else:
self._ndarray[key] = value
def __len__(self):
return len(self._ndarray)
@property
def nbytes(self):
return self._ndarray.nbytes
def isna(self):
return isna(self._ndarray)
def fillna(self, value=None, method=None, limit=None):
    """Fill NA values with *value*, or by propagation when *method* is
    'pad'/'backfill' (optionally capped by *limit*).

    Always returns a new PandasArray, even when nothing was filled.
    """
    # TODO(_values_for_fillna): remove this
    value, method = validate_fillna_kwargs(value, method)

    mask = self.isna()

    if is_array_like(value):
        # An array-valued fill must align 1:1 with this array; keep only
        # the entries at NA positions.
        if len(value) != len(self):
            raise ValueError(
                "Length of 'value' does not match. Got ({}) "
                " expected {}".format(len(value), len(self))
            )
        value = value[mask]

    if mask.any():
        if method is not None:
            func = pad_1d if method == "pad" else backfill_1d
            new_values = func(self._ndarray, limit=limit, mask=mask)
            new_values = self._from_sequence(new_values, dtype=self.dtype)
        else:
            # fill with value
            new_values = self.copy()
            new_values[mask] = value
    else:
        new_values = self.copy()
    return new_values
def take(self, indices, allow_fill=False, fill_value=None):
result = take(
self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(result)
def copy(self):
return type(self)(self._ndarray.copy())
def _values_for_argsort(self):
return self._ndarray
def _values_for_factorize(self):
return self._ndarray, -1
def unique(self):
return type(self)(unique(self._ndarray))
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name, skipna=True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = "'{}' does not implement reduction '{}'"
raise TypeError(msg.format(type(self).__name__, name))
def any(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), dict(out=out, keepdims=keepdims))
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
def all(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_all((), dict(out=out, keepdims=keepdims))
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
def min(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_min((), dict(out=out, keepdims=keepdims))
return nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
def max(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_max((), dict(out=out, keepdims=keepdims))
return nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
def sum(
self,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
skipna=True,
min_count=0,
):
nv.validate_sum(
(), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)
)
return nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
def prod(
self,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
skipna=True,
min_count=0,
):
nv.validate_prod(
(), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)
)
return nanops.nanprod(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_mean((), dict(dtype=dtype, out=out, keepdims=keepdims))
return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
def median(
self, axis=None, out=None, overwrite_input=False, keepdims=False, skipna=True
):
nv.validate_median(
(), dict(out=out, overwrite_input=overwrite_input, keepdims=keepdims)
)
return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
def std(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="std"
)
return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="var"
)
return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
def sem(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="sem"
)
return nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
def kurt(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="kurt"
)
return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
def skew(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="skew"
)
return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
# ------------------------------------------------------------------------
# Additional Methods
def to_numpy(self, dtype=None, copy=False):
    """
    Convert the PandasArray to a :class:`numpy.ndarray`.

    By default, this requires no coercion or copying of data.

    Parameters
    ----------
    dtype : numpy.dtype
        The NumPy dtype to pass to :func:`numpy.asarray`.
    copy : bool, default False
        Whether to copy the underlying data.

    Returns
    -------
    ndarray
    """
    arr = np.asarray(self._ndarray, dtype=dtype)
    # np.asarray hands back the wrapped array itself when no conversion was
    # needed; only then must we copy explicitly to honor copy=True.
    if copy and arr is self._ndarray:
        return arr.copy()
    return arr
@Appender(ExtensionArray.searchsorted.__doc__)
def searchsorted(self, value, side="left", sorter=None):
return searchsorted(self.to_numpy(), value, side=side, sorter=sorter)
# ------------------------------------------------------------------------
# Ops
def __invert__(self):
return type(self)(~self._ndarray)
@classmethod
def _create_arithmetic_method(cls, op):
    """Build a dunder (e.g. ``__add__``) applying *op* to the wrapped ndarray.

    Defers to Series/Index (returns NotImplemented) so their ops take
    precedence, unwraps other PandasArrays, and re-boxes the result.
    """
    def arithmetic_method(self, other):
        if isinstance(other, (ABCIndexClass, ABCSeries)):
            return NotImplemented

        elif isinstance(other, cls):
            other = other._ndarray

        with np.errstate(all="ignore"):
            result = op(self._ndarray, other)

        if op is divmod:
            # divmod yields a pair; box each half separately.
            a, b = result
            return cls(a), cls(b)

        return cls(result)

    return compat.set_function_name(
        arithmetic_method, "__{}__".format(op.__name__), cls
    )

# Comparison dunders share the same wrapping logic as arithmetic ones.
_create_comparison_method = _create_arithmetic_method
# Generate and attach the arithmetic/comparison dunders declared by
# ExtensionOpsMixin, using the factory methods defined on PandasArray.
PandasArray._add_arithmetic_ops()
PandasArray._add_comparison_ops()
| 31.635776 | 88 | 0.592138 | import numbers
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_array_like, is_list_like
from pandas.core.dtypes.missing import isna
from pandas import compat
from pandas.core import nanops
from pandas.core.algorithms import searchsorted, take, unique
from pandas.core.construction import extract_array
from pandas.core.missing import backfill_1d, pad_1d
from .base import ExtensionArray, ExtensionOpsMixin
class PandasDtype(ExtensionDtype):
    """ExtensionDtype wrapping a plain NumPy dtype (internal compatibility shim)."""

    _metadata = ("_dtype",)

    def __init__(self, dtype):
        dtype = np.dtype(dtype)
        self._dtype = dtype
        self._name = dtype.name
        self._type = dtype.type

    def __repr__(self):
        return "PandasDtype({!r})".format(self.name)

    @property
    def numpy_dtype(self):
        """The wrapped numpy.dtype."""
        return self._dtype

    @property
    def name(self):
        return self._name

    @property
    def type(self):
        return self._type

    @property
    def _is_numeric(self):
        # bool, (un)signed int, float, complex; excludes object/str/void.
        return self.kind in set("biufc")

    @property
    def _is_boolean(self):
        return self.kind == "b"

    @classmethod
    def construct_from_string(cls, string):
        return cls(np.dtype(string))

    @classmethod
    def construct_array_type(cls):
        # FIX: must be a classmethod like construct_from_string above and
        # per the ExtensionDtype contract; otherwise calling it on the
        # class raises TypeError (missing 'cls').
        return PandasArray

    @property
    def kind(self):
        return self._dtype.kind

    @property
    def itemsize(self):
        return self._dtype.itemsize
class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
# ------------------------------------------------------------------------
# Constructors
def __init__(self, values, copy=False):
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError("'values' must be a NumPy array.")
if values.ndim != 1:
raise ValueError("PandasArray must be 1-dimensional.")
if copy:
values = values.copy()
self._ndarray = values
self._dtype = PandasDtype(values.dtype)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
result = np.asarray(scalars, dtype=dtype)
if copy and result is scalars:
result = result.copy()
return cls(result)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate(to_concat))
# ------------------------------------------------------------------------
# Data
@property
def dtype(self):
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(self, dtype=None):
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# Lightly modified version of
# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/\
# numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in PandasArray, since pandas' ExtensionArrays are 1-d.
out = kwargs.get("out", ())
for x in inputs + out:
# handle PandasArray objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x for x in inputs)
if out:
kwargs["out"] = tuple(
x._ndarray if isinstance(x, PandasArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple and len(result):
# multiple return values
if not lib.is_scalar(result[0]):
# re-box array-like results
return tuple(type(self)(x) for x in result)
else:
# but not scalar reductions
return result
elif method == "at":
# no return value
return None
else:
# one return value
if not lib.is_scalar(result):
# re-box array-like results, but not scalar reductions
result = type(self)(result)
return result
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def __getitem__(self, item):
if isinstance(item, type(self)):
item = item._ndarray
result = self._ndarray[item]
if not lib.is_scalar(item):
result = type(self)(result)
return result
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
key = np.asarray(key)
if not lib.is_scalar(value):
value = np.asarray(value)
values = self._ndarray
t = np.result_type(value, values)
if t != self._ndarray.dtype:
values = values.astype(t, casting="safe")
values[key] = value
self._dtype = PandasDtype(t)
self._ndarray = values
else:
self._ndarray[key] = value
def __len__(self):
return len(self._ndarray)
@property
def nbytes(self):
return self._ndarray.nbytes
def isna(self):
return isna(self._ndarray)
def fillna(self, value=None, method=None, limit=None):
# TODO(_values_for_fillna): remove this
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
"Length of 'value' does not match. Got ({}) "
" expected {}".format(len(value), len(self))
)
value = value[mask]
if mask.any():
if method is not None:
func = pad_1d if method == "pad" else backfill_1d
new_values = func(self._ndarray, limit=limit, mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
def take(self, indices, allow_fill=False, fill_value=None):
result = take(
self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(result)
def copy(self):
return type(self)(self._ndarray.copy())
def _values_for_argsort(self):
return self._ndarray
def _values_for_factorize(self):
return self._ndarray, -1
def unique(self):
return type(self)(unique(self._ndarray))
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name, skipna=True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = "'{}' does not implement reduction '{}'"
raise TypeError(msg.format(type(self).__name__, name))
def any(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), dict(out=out, keepdims=keepdims))
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
def all(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_all((), dict(out=out, keepdims=keepdims))
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
def min(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_min((), dict(out=out, keepdims=keepdims))
return nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
def max(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_max((), dict(out=out, keepdims=keepdims))
return nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
def sum(
self,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
skipna=True,
min_count=0,
):
nv.validate_sum(
(), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)
)
return nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
def prod(
self,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
skipna=True,
min_count=0,
):
nv.validate_prod(
(), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)
)
return nanops.nanprod(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_mean((), dict(dtype=dtype, out=out, keepdims=keepdims))
return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
def median(
self, axis=None, out=None, overwrite_input=False, keepdims=False, skipna=True
):
nv.validate_median(
(), dict(out=out, overwrite_input=overwrite_input, keepdims=keepdims)
)
return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
def std(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="std"
)
return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="var"
)
return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
def sem(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="sem"
)
return nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
def kurt(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="kurt"
)
return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
def skew(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="skew"
)
return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
# ------------------------------------------------------------------------
# Additional Methods
    def to_numpy(self, dtype=None, copy=False):
        """Convert to a numpy ndarray, optionally casting and/or copying.

        ``np.asarray`` returns the underlying array unchanged when no cast is
        needed, so ``copy=True`` is honored with an explicit ``.copy()`` in
        that case.
        """
        result = np.asarray(self._ndarray, dtype=dtype)
        if copy and result is self._ndarray:
            result = result.copy()
        return result
    @Appender(ExtensionArray.searchsorted.__doc__)
    def searchsorted(self, value, side="left", sorter=None):
        # Delegates to the module-level `searchsorted` helper (which this
        # method name shadows inside the class) on the materialized ndarray.
        return searchsorted(self.to_numpy(), value, side=side, sorter=sorter)
# ------------------------------------------------------------------------
# Ops
    def __invert__(self):
        # Elementwise NOT (~) on the backing ndarray, rewrapped in the same
        # array type so the result stays a PandasArray-like object.
        return type(self)(~self._ndarray)
    @classmethod
    def _create_arithmetic_method(cls, op):
        """Build a dunder arithmetic method that applies ufunc-style ``op``.

        The generated method is renamed to ``__<op>__`` via
        ``compat.set_function_name`` so reprs/tracebacks look right.
        """
        def arithmetic_method(self, other):
            # Let Index/Series unbox us and dispatch back with aligned data.
            if isinstance(other, (ABCIndexClass, ABCSeries)):
                return NotImplemented
            elif isinstance(other, cls):
                # Operate on the raw ndarrays of two wrapped arrays.
                other = other._ndarray
            # Suppress numpy floating-point warnings; pandas handles
            # invalid results (inf/nan) itself.
            with np.errstate(all="ignore"):
                result = op(self._ndarray, other)
            if op is divmod:
                # divmod returns a (quotient, remainder) pair; wrap both.
                a, b = result
                return cls(a), cls(b)
            return cls(result)
        return compat.set_function_name(
            arithmetic_method, "__{}__".format(op.__name__), cls
        )
    # Comparison dunders are generated the same way as arithmetic ones.
    _create_comparison_method = _create_arithmetic_method
# Generate and attach the full set of arithmetic and comparison dunder
# methods onto PandasArray using the factories defined on the class.
PandasArray._add_arithmetic_ops()
PandasArray._add_comparison_ops()
| true | true |
1c352997b3488b4665bffd9224d7f607f1c9e05d | 8,158 | py | Python | lib/datasets/voc_eval.py | zyuerugou/tf-faster-rcnn | 6d1e3d9691ad3dd570e56a77304fc307969dc0f3 | [
"MIT"
] | null | null | null | lib/datasets/voc_eval.py | zyuerugou/tf-faster-rcnn | 6d1e3d9691ad3dd570e56a77304fc307969dc0f3 | [
"MIT"
] | null | null | null | lib/datasets/voc_eval.py | zyuerugou/tf-faster-rcnn | 6d1e3d9691ad3dd570e56a77304fc307969dc0f3 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import xml.etree.ElementTree as ET
import os
import pickle
import numpy as np
def parse_rec(filename):
  """Parse a PASCAL VOC annotation XML file.

  Returns a list of dicts, one per <object> element, with keys
  'name' (lower-cased), 'pose', 'truncated', 'difficult' and
  'bbox' ([xmin, ymin, xmax, ymax] as ints).
  """
  root = ET.parse(filename)
  parsed = []
  # Debug trace of every class label encountered in this file.
  print('obj class:')
  for node in root.findall('object'):
    box = node.find('bndbox')
    record = {
        'name': node.find('name').text.lower(),
        'pose': node.find('pose').text,
        'truncated': int(node.find('truncated').text),
        'difficult': int(node.find('difficult').text),
        'bbox': [int(box.find(tag).text)
                 for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
    }
    print(record['name'])
    parsed.append(record)
  return parsed
def voc_ap(rec, prec, use_07_metric=False):
  """Compute VOC average precision from recall/precision arrays.

  With use_07_metric=True the VOC07 11-point interpolation is used,
  otherwise the exact area under the interpolated PR curve.
  """
  if use_07_metric:
    # Average the max precision at 11 evenly spaced recall thresholds.
    total = 0.
    for t in np.arange(0., 1.1, 0.1):
      above = rec >= t
      p = np.max(prec[above]) if np.sum(above) != 0 else 0
      total = total + p / 11.
    return total
  # Pad with sentinels so the envelope/area computation covers [0, 1].
  mrec = np.concatenate(([0.], rec, [1.]))
  mpre = np.concatenate(([0.], prec, [0.]))
  # Make precision monotonically non-increasing (right-to-left envelope).
  for k in range(mpre.size - 1, 0, -1):
    mpre[k - 1] = np.maximum(mpre[k - 1], mpre[k])
  # Integrate precision * delta-recall wherever recall actually changes.
  steps = np.where(mrec[1:] != mrec[:-1])[0]
  return np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False,
             use_diff=False):
  """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                              cachedir, [ovthresh], [use_07_metric])

  Top level function that does the PASCAL VOC evaluation.

  detpath: Path to detections;
      detpath.format(classname) should produce the detection results file.
  annopath: Path to annotations;
      annopath.format(imagename) should be the xml annotations file.
  imagesetfile: Text file containing the list of images, one image per line.
  classname: Category name to evaluate.
  cachedir: Directory for caching the parsed annotations.
  [ovthresh]: IoU overlap threshold for a true positive (default = 0.5).
  [use_07_metric]: Whether to use VOC07's 11 point AP computation
      (default False).
  [use_diff]: Whether to also count 'difficult' objects (default False).
  """
  # Assumes detections are in detpath.format(classname), annotations in
  # annopath.format(imagename), and imagesetfile lists one image per line.
  # Parsed annotations are cached as a pickle keyed by the image-set name
  # so different splits do not clash.
  if not os.path.isdir(cachedir):
    os.mkdir(cachedir)
  cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile.split("/")[-1].split(".")[0])
  # Read list of images.
  with open(imagesetfile, 'r') as f:
    lines = f.readlines()
  imagenames = [x.strip() for x in lines]
  print('cachefile:')
  print(cachefile)
  if not os.path.isfile(cachefile):
    # Parse annotations once and cache them.
    recs = {}
    for i, imagename in enumerate(imagenames):
      recs[imagename] = parse_rec(annopath.format(imagename))
      if i % 100 == 0:
        print('Reading annotation for {:d}/{:d}'.format(
          i + 1, len(imagenames)))
    print('Saving cached annotations to {:s}'.format(cachefile))
    with open(cachefile, 'wb') as f:
      pickle.dump(recs, f)
  else:
    # Load cached annotations; caches written by Python 2 need
    # encoding='bytes'. Rewind before retrying, since the failed load
    # leaves the file position mid-stream.
    with open(cachefile, 'rb') as f:
      try:
        recs = pickle.load(f)
      except Exception:
        f.seek(0)
        recs = pickle.load(f, encoding='bytes')
  # Extract ground-truth objects for this class.
  class_recs = {}
  npos = 0
  for imagename in imagenames:
    R = [obj for obj in recs[imagename] if obj['name'] == classname]
    bbox = np.array([x['bbox'] for x in R])
    # Note: np.bool was removed from NumPy; plain bool is equivalent.
    if use_diff:
      difficult = np.array([False for x in R]).astype(bool)
    else:
      difficult = np.array([x['difficult'] for x in R]).astype(bool)
    det = [False] * len(R)
    # Only non-difficult ground truths count toward recall.
    npos = npos + sum(~difficult)
    class_recs[imagename] = {'bbox': bbox,
                             'difficult': difficult,
                             'det': det}
  # Read detections: each line is "image_id confidence x1 y1 x2 y2".
  detfile = detpath.format(classname)
  with open(detfile, 'r') as f:
    lines = f.readlines()
  splitlines = [x.strip().split(' ') for x in lines]
  image_ids = [x[0] for x in splitlines]
  confidence = np.array([float(x[1]) for x in splitlines])
  BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
  nd = len(image_ids)
  tp = np.zeros(nd)
  fp = np.zeros(nd)
  if BB.shape[0] > 0:
    # Sort detections by descending confidence.
    sorted_ind = np.argsort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]
    # Go down detections and mark TPs and FPs.
    for d in range(nd):
      R = class_recs[image_ids[d]]
      bb = BB[d, :].astype(float)
      ovmax = -np.inf
      BBGT = R['bbox'].astype(float)
      if BBGT.size > 0:
        # IoU against every ground-truth box (pixel-inclusive coords,
        # hence the +1 terms).
        ixmin = np.maximum(BBGT[:, 0], bb[0])
        iymin = np.maximum(BBGT[:, 1], bb[1])
        ixmax = np.minimum(BBGT[:, 2], bb[2])
        iymax = np.minimum(BBGT[:, 3], bb[3])
        iw = np.maximum(ixmax - ixmin + 1., 0.)
        ih = np.maximum(iymax - iymin + 1., 0.)
        inters = iw * ih
        uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
               (BBGT[:, 2] - BBGT[:, 0] + 1.) *
               (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
        overlaps = inters / uni
        ovmax = np.max(overlaps)
        jmax = np.argmax(overlaps)
      if ovmax > ovthresh:
        if not R['difficult'][jmax]:
          if not R['det'][jmax]:
            tp[d] = 1.
            R['det'][jmax] = 1
          else:
            # Duplicate detection of an already-matched ground truth.
            fp[d] = 1.
      else:
        fp[d] = 1.
  # Compute precision/recall curves.
  fp = np.cumsum(fp)
  tp = np.cumsum(tp)
  rec = tp / float(npos)
  # Avoid divide by zero in case the first detection matches a difficult
  # ground truth.
  prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
  ap = voc_ap(rec, prec, use_07_metric)
  return rec, prec, ap
| 33.02834 | 97 | 0.511522 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import xml.etree.ElementTree as ET
import os
import pickle
import numpy as np
def parse_rec(filename):
  """Parse a PASCAL VOC annotation XML file into a list of object dicts."""
  tree = ET.parse(filename)
  objects = []
  # Debug trace of class labels seen in this file.
  print('obj class:')
  for obj in tree.findall('object'):
    obj_struct = {}
    # Class label, normalized to lower case for comparisons downstream.
    obj_struct['name'] = obj.find('name').text.lower()
    print(obj_struct['name'])
    obj_struct['pose'] = obj.find('pose').text
    obj_struct['truncated'] = int(obj.find('truncated').text)
    obj_struct['difficult'] = int(obj.find('difficult').text)
    bbox = obj.find('bndbox')
    # Bounding box as [xmin, ymin, xmax, ymax] ints.
    obj_struct['bbox'] = [int(bbox.find('xmin').text),
                          int(bbox.find('ymin').text),
                          int(bbox.find('xmax').text),
                          int(bbox.find('ymax').text)]
    objects.append(obj_struct)
  return objects
def voc_ap(rec, prec, use_07_metric=False):
  """Compute VOC AP from recall/precision arrays.

  If use_07_metric is true, uses the VOC07 11-point interpolation;
  otherwise the exact area under the interpolated PR curve.
  """
  if use_07_metric:
    # 11-point metric: average max precision at recall 0.0, 0.1, ..., 1.0.
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # Append sentinel values so the curve spans recall [0, 1].
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Compute the precision envelope (monotone non-increasing, right to left).
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum precision * delta-recall where recall changes value.
    i = np.where(mrec[1:] != mrec[:-1])[0]
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False,
             use_diff=False):
  """Top-level PASCAL VOC evaluation; returns (rec, prec, ap).

  detpath.format(classname) is the detection results file;
  annopath.format(imagename) is the xml annotation file; imagesetfile
  lists one image name per line; parsed annotations are cached as a
  pickle under cachedir.
  """
  if not os.path.isdir(cachedir):
    os.mkdir(cachedir)
  # Cache is keyed by the image-set file name so splits don't collide.
  cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile.split("/")[-1].split(".")[0])
  with open(imagesetfile, 'r') as f:
    lines = f.readlines()
  imagenames = [x.strip() for x in lines]
  print('cachefile:')
  print(cachefile)
  if not os.path.isfile(cachefile):
    # Parse all annotations once and cache them.
    recs = {}
    for i, imagename in enumerate(imagenames):
      recs[imagename] = parse_rec(annopath.format(imagename))
      if i % 100 == 0:
        print('Reading annotation for {:d}/{:d}'.format(
          i + 1, len(imagenames)))
    print('Saving cached annotations to {:s}'.format(cachefile))
    with open(cachefile, 'wb') as f:
      pickle.dump(recs, f)
  else:
    # Load the cached annotations; retry with bytes encoding for
    # pickles written under Python 2.
    with open(cachefile, 'rb') as f:
      try:
        recs = pickle.load(f)
      except:
        recs = pickle.load(f, encoding='bytes')
  # Extract the ground-truth objects belonging to this class.
  class_recs = {}
  npos = 0
  for imagename in imagenames:
    R = []
    for obj in recs[imagename]:
      if obj['name'] == classname:
        R.append(obj)
    bbox = np.array([x['bbox'] for x in R])
    if use_diff:
      difficult = np.array([False for x in R]).astype(np.bool)
    else:
      difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
    det = [False] * len(R)
    # Only non-difficult ground truths count toward recall.
    npos = npos + sum(~difficult)
    class_recs[imagename] = {'bbox': bbox,
                             'difficult': difficult,
                             'det': det}
  # Read detections: each line is "image_id confidence x1 y1 x2 y2".
  detfile = detpath.format(classname)
  with open(detfile, 'r') as f:
    lines = f.readlines()
  splitlines = [x.strip().split(' ') for x in lines]
  image_ids = [x[0] for x in splitlines]
  confidence = np.array([float(x[1]) for x in splitlines])
  BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
  nd = len(image_ids)
  tp = np.zeros(nd)
  fp = np.zeros(nd)
  if BB.shape[0] > 0:
    # Sort detections by descending confidence.
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]
    # Walk detections, marking each as true or false positive.
    for d in range(nd):
      R = class_recs[image_ids[d]]
      bb = BB[d, :].astype(float)
      ovmax = -np.inf
      BBGT = R['bbox'].astype(float)
      if BBGT.size > 0:
        # IoU against every ground-truth box (pixel-inclusive, hence +1).
        ixmin = np.maximum(BBGT[:, 0], bb[0])
        iymin = np.maximum(BBGT[:, 1], bb[1])
        ixmax = np.minimum(BBGT[:, 2], bb[2])
        iymax = np.minimum(BBGT[:, 3], bb[3])
        iw = np.maximum(ixmax - ixmin + 1., 0.)
        ih = np.maximum(iymax - iymin + 1., 0.)
        inters = iw * ih
        uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
               (BBGT[:, 2] - BBGT[:, 0] + 1.) *
               (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
        overlaps = inters / uni
        ovmax = np.max(overlaps)
        jmax = np.argmax(overlaps)
      if ovmax > ovthresh:
        if not R['difficult'][jmax]:
          if not R['det'][jmax]:
            tp[d] = 1.
            R['det'][jmax] = 1
          else:
            # Duplicate detection of an already-matched ground truth.
            fp[d] = 1.
      else:
        fp[d] = 1.
  # Compute cumulative precision/recall and the final AP.
  fp = np.cumsum(fp)
  tp = np.cumsum(tp)
  rec = tp / float(npos)
  # Guard against divide-by-zero when the first detections are all FPs.
  prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
  ap = voc_ap(rec, prec, use_07_metric)
  return rec, prec, ap
| true | true |
1c352997d01a22505572c0627be711f3de4d8a1d | 737 | py | Python | kubragen2/tests/test_options.py | RangelReale/kubragen2 | 2118f1429a9b9da937582db1f41d4f12b78773e2 | [
"MIT"
] | 1 | 2022-02-14T07:31:57.000Z | 2022-02-14T07:31:57.000Z | kubragen2/tests/test_options.py | RangelReale/kubragen2 | 2118f1429a9b9da937582db1f41d4f12b78773e2 | [
"MIT"
] | null | null | null | kubragen2/tests/test_options.py | RangelReale/kubragen2 | 2118f1429a9b9da937582db1f41d4f12b78773e2 | [
"MIT"
] | null | null | null | import copy
import unittest
from kubragen2.options import Options, OptionValue, OptionsBuildData
class TestUtil(unittest.TestCase):
    """Unit tests for Options merging and OptionValue indirection."""

    def test_option_merge(self):
        """An override dict passed as the second Options arg wins on lookup."""
        options = Options({
            'x': {
                'y': OptionValue('x.z'),
                'z': 14,
            }
        }, {
            'x': {
                'y': 99,
            },
        })
        self.assertEqual(options.option_get('x.y'), 99)

    def test_option_value(self):
        """OptionValue('x.z') resolves to the value stored at x.z on build."""
        options = Options({
            'x': {
                'y': OptionValue('x.z'),
                'z': 14,
            }
        })
        data = OptionsBuildData(options, copy.deepcopy(options.options))
        self.assertEqual(data, {'x': {'y': 14, 'z': 14}})
| 24.566667 | 72 | 0.466757 | import copy
import unittest
from kubragen2.options import Options, OptionValue, OptionsBuildData
class TestUtil(unittest.TestCase):
    """Unit tests for Options merging and OptionValue indirection."""

    def test_option_merge(self):
        # The override dict (second Options argument) takes precedence.
        options = Options({
            'x': {
                'y': OptionValue('x.z'),
                'z': 14,
            }
        }, {
            'x': {
                'y': 99,
            },
        })
        self.assertEqual(options.option_get('x.y'), 99)

    def test_option_value(self):
        # OptionValue('x.z') resolves to the value stored at x.z on build.
        options = Options({
            'x': {
                'y': OptionValue('x.z'),
                'z': 14,
            }
        })
        data = OptionsBuildData(options, copy.deepcopy(options.options))
        self.assertEqual(data, {'x': {'y': 14, 'z': 14}})
| true | true |
1c352a37c5aeb60e9672cf6ce32cb450212169fe | 501 | py | Python | example/NaCl/NaCl-yaml.py | ladyteam/phonopy | 455ef61dfa15c01fb6b516461b52f15aefbf92b3 | [
"BSD-3-Clause"
] | 127 | 2015-01-21T17:50:58.000Z | 2020-02-04T13:46:13.000Z | example/NaCl/NaCl-yaml.py | ladyteam/phonopy | 455ef61dfa15c01fb6b516461b52f15aefbf92b3 | [
"BSD-3-Clause"
] | 100 | 2015-02-07T15:32:50.000Z | 2020-02-23T02:09:08.000Z | example/NaCl/NaCl-yaml.py | ladyteam/phonopy | 455ef61dfa15c01fb6b516461b52f15aefbf92b3 | [
"BSD-3-Clause"
] | 122 | 2015-02-07T15:39:28.000Z | 2020-02-10T22:33:16.000Z | """Example to obtain PhonopyYaml instance."""
import phonopy

from phonopy.interface.phonopy_yaml import PhonopyYaml

# Load NaCl phonon data: 2x2x2 supercell, FCC primitive-cell matrix,
# VASP-format inputs (unit cell, force sets, Born effective charges).
phonon = phonopy.load(
    supercell_matrix=[[2, 0, 0], [0, 2, 0], [0, 0, 2]],
    primitive_matrix=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
    unitcell_filename="POSCAR-unitcell",
    force_sets_filename="FORCE_SETS",
    born_filename="BORN",
)
# Serialize to phonopy.yaml format, including force constants.
phpy_yaml = PhonopyYaml(calculator="vasp", settings={"force_constants": True})
phpy_yaml.set_phonon_info(phonon)
print(phpy_yaml)
| 33.4 | 78 | 0.700599 | import phonopy
from phonopy.interface.phonopy_yaml import PhonopyYaml
phonon = phonopy.load(
supercell_matrix=[[2, 0, 0], [0, 2, 0], [0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
unitcell_filename="POSCAR-unitcell",
force_sets_filename="FORCE_SETS",
born_filename="BORN",
)
phpy_yaml = PhonopyYaml(calculator="vasp", settings={"force_constants": True})
phpy_yaml.set_phonon_info(phonon)
print(phpy_yaml)
| true | true |
1c352afc280d330d2ed09810169d30a1a948c30d | 19,808 | py | Python | infra/bots/recipe_modules/flavor/gn_android_flavor.py | despairblue/esy-skia | 1c81aac298602f8e872c1079db92868199b6394f | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | infra/bots/recipe_modules/flavor/gn_android_flavor.py | despairblue/esy-skia | 1c81aac298602f8e872c1079db92868199b6394f | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | infra/bots/recipe_modules/flavor/gn_android_flavor.py | despairblue/esy-skia | 1c81aac298602f8e872c1079db92868199b6394f | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
import default_flavor
import re
import subprocess
"""GN Android flavor utils, used for building Skia for Android with GN."""
class GNAndroidFlavorUtils(default_flavor.DefaultFlavorUtils):
def __init__(self, m):
super(GNAndroidFlavorUtils, self).__init__(m)
self._ever_ran_adb = False
self.ADB_BINARY = '/usr/bin/adb.1.0.35'
self.ADB_PUB_KEY = '/home/chrome-bot/.android/adbkey'
self._golo_devices = ['Nexus5x']
if self.m.vars.builder_cfg.get('model') in self._golo_devices:
self.ADB_BINARY = '/opt/infra-android/tools/adb'
self.ADB_PUB_KEY = ('/home/chrome-bot/.android/'
'chrome_infrastructure_adbkey')
# Data should go in android_data_dir, which may be preserved across runs.
android_data_dir = '/sdcard/revenge_of_the_skiabot/'
self.device_dirs = default_flavor.DeviceDirs(
bin_dir = '/data/local/tmp/',
dm_dir = android_data_dir + 'dm_out',
perf_data_dir = android_data_dir + 'perf',
resource_dir = android_data_dir + 'resources',
images_dir = android_data_dir + 'images',
skp_dir = android_data_dir + 'skps',
svg_dir = android_data_dir + 'svgs',
tmp_dir = android_data_dir)
# A list of devices we can't root. If rooting fails and a device is not
# on the list, we fail the task to avoid perf inconsistencies.
self.rootable_blacklist = ['GalaxyS6', 'GalaxyS7_G930A', 'GalaxyS7_G930FD',
'MotoG4', 'NVIDIA_Shield']
# Maps device type -> CPU ids that should be scaled for nanobench.
# Many devices have two (or more) different CPUs (e.g. big.LITTLE
# on Nexus5x). The CPUs listed are the biggest cpus on the device.
# The CPUs are grouped together, so we only need to scale one of them
# (the one listed) in order to scale them all.
# E.g. Nexus5x has cpu0-3 as one chip and cpu4-5 as the other. Thus,
# if one wants to run a single-threaded application (e.g. nanobench), one
# can disable cpu0-3 and scale cpu 4 to have only cpu4 and 5 at the same
# frequency. See also disable_for_nanobench.
self.cpus_to_scale = {
'Nexus5x': [4],
'NexusPlayer': [0, 2], # has 2 identical chips, so scale them both.
'Pixel': [2],
'Pixel2XL': [4]
}
# Maps device type -> CPU ids that should be turned off when running
# single-threaded applications like nanobench. The devices listed have
# multiple, differnt CPUs. We notice a lot of noise that seems to be
# caused by nanobench running on the slow CPU, then the big CPU. By
# disabling this, we see less of that noise by forcing the same CPU
# to be used for the performance testing every time.
self.disable_for_nanobench = {
'Nexus5x': range(0, 4),
'Pixel': range(0, 2),
'Pixel2XL': range(0, 4),
'PixelC': range(0, 2)
}
self.gpu_scaling = {
"Nexus5": 450000000,
"Nexus5x": 600000000,
}
def _run(self, title, *cmd, **kwargs):
with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
return self.m.run(self.m.step, title, cmd=list(cmd), **kwargs)
def _adb(self, title, *cmd, **kwargs):
# The only non-infra adb steps (dm / nanobench) happen to not use _adb().
if 'infra_step' not in kwargs:
kwargs['infra_step'] = True
self._ever_ran_adb = True
attempts = 1
flaky_devices = ['NexusPlayer', 'PixelC']
if self.m.vars.builder_cfg.get('model') in flaky_devices:
attempts = 3
def wait_for_device(attempt):
self.m.run(self.m.step,
'kill adb server after failure of \'%s\' (attempt %d)' % (
title, attempt),
cmd=[self.ADB_BINARY, 'kill-server'],
infra_step=True, timeout=30, abort_on_failure=False,
fail_build_on_failure=False)
self.m.run(self.m.step,
'wait for device after failure of \'%s\' (attempt %d)' % (
title, attempt),
cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True,
timeout=180, abort_on_failure=False,
fail_build_on_failure=False)
with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
with self.m.env({'ADB_VENDOR_KEYS': self.ADB_PUB_KEY}):
return self.m.run.with_retry(self.m.step, title, attempts,
cmd=[self.ADB_BINARY]+list(cmd),
between_attempts_fn=wait_for_device,
**kwargs)
def _scale_for_dm(self):
device = self.m.vars.builder_cfg.get('model')
if (device in self.rootable_blacklist or
self.m.vars.internal_hardware_label):
return
# This is paranoia... any CPUs we disabled while running nanobench
# ought to be back online now that we've restarted the device.
for i in self.disable_for_nanobench.get(device, []):
self._set_cpu_online(i, 1) # enable
scale_up = self.cpus_to_scale.get(device, [0])
# For big.LITTLE devices, make sure we scale the LITTLE cores up;
# there is a chance they are still in powersave mode from when
# swarming slows things down for cooling down and charging.
if 0 not in scale_up:
scale_up.append(0)
for i in scale_up:
# AndroidOne doesn't support ondemand governor. hotplug is similar.
if device == 'AndroidOne':
self._set_governor(i, 'hotplug')
else:
self._set_governor(i, 'ondemand')
def _scale_for_nanobench(self):
device = self.m.vars.builder_cfg.get('model')
if (device in self.rootable_blacklist or
self.m.vars.internal_hardware_label):
return
for i in self.cpus_to_scale.get(device, [0]):
self._set_governor(i, 'userspace')
self._scale_cpu(i, 0.6)
for i in self.disable_for_nanobench.get(device, []):
self._set_cpu_online(i, 0) # disable
if device in self.gpu_scaling:
#https://developer.qualcomm.com/qfile/28823/lm80-p0436-11_adb_commands.pdf
# Section 3.2.1 Commands to put the GPU in performance mode
# Nexus 5 is 320000000 by default
# Nexus 5x is 180000000 by default
gpu_freq = self.gpu_scaling[device]
self.m.run.with_retry(self.m.python.inline,
"Lock GPU to %d (and other perf tweaks)" % gpu_freq,
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
freq = sys.argv[2]
idle_timer = "10000"
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
subprocess.check_output([ADB, 'shell', 'stop', 'thermald'])
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/class/kgsl/kgsl-3d0/gpuclk' % freq])
actual_freq = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/gpuclk']).strip()
if actual_freq != freq:
raise Exception('Frequency (actual, expected) (%s, %s)'
% (actual_freq, freq))
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/class/kgsl/kgsl-3d0/idle_timer' % idle_timer])
actual_timer = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/idle_timer']).strip()
if actual_timer != idle_timer:
raise Exception('idle_timer (actual, expected) (%s, %s)'
% (actual_timer, idle_timer))
for s in ['force_bus_on', 'force_rail_on', 'force_clk_on']:
subprocess.check_output([ADB, 'shell', 'echo "1" > '
'/sys/class/kgsl/kgsl-3d0/%s' % s])
actual_set = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/%s' % s]).strip()
if actual_set != "1":
raise Exception('%s (actual, expected) (%s, 1)'
% (s, actual_set))
""",
args = [self.ADB_BINARY, gpu_freq],
infra_step=True,
timeout=30)
def _set_governor(self, cpu, gov):
self._ever_ran_adb = True
self.m.run.with_retry(self.m.python.inline,
"Set CPU %d's governor to %s" % (cpu, gov),
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
cpu = int(sys.argv[2])
gov = sys.argv[3]
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % (gov, cpu)])
actual_gov = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu]).strip()
if actual_gov != gov:
raise Exception('(actual, expected) (%s, %s)'
% (actual_gov, gov))
""",
args = [self.ADB_BINARY, cpu, gov],
infra_step=True,
timeout=30)
def _set_cpu_online(self, cpu, value):
"""Set /sys/devices/system/cpu/cpu{N}/online to value (0 or 1)."""
self._ever_ran_adb = True
msg = 'Disabling'
if value:
msg = 'Enabling'
self.m.run.with_retry(self.m.python.inline,
'%s CPU %d' % (msg, cpu),
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
cpu = int(sys.argv[2])
value = int(sys.argv[3])
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
# If we try to echo 1 to an already online cpu, adb returns exit code 1.
# So, check the value before trying to write it.
prior_status = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/online' % cpu]).strip()
if prior_status == str(value):
print 'CPU %d online already %d' % (cpu, value)
sys.exit()
subprocess.check_output([ADB, 'shell', 'echo %s > '
'/sys/devices/system/cpu/cpu%d/online' % (value, cpu)])
actual_status = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/online' % cpu]).strip()
if actual_status != str(value):
raise Exception('(actual, expected) (%s, %d)'
% (actual_status, value))
""",
args = [self.ADB_BINARY, cpu, value],
infra_step=True,
timeout=30)
def _scale_cpu(self, cpu, target_percent):
self._ever_ran_adb = True
self.m.run.with_retry(self.m.python.inline,
'Scale CPU %d to %f' % (cpu, target_percent),
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
target_percent = float(sys.argv[2])
cpu = int(sys.argv[3])
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
root = '/sys/devices/system/cpu/cpu%d/cpufreq' %cpu
# All devices we test on give a list of their available frequencies.
available_freqs = subprocess.check_output([ADB, 'shell',
'cat %s/scaling_available_frequencies' % root])
# Check for message like '/system/bin/sh: file not found'
if available_freqs and '/system/bin/sh' not in available_freqs:
available_freqs = sorted(
int(i) for i in available_freqs.strip().split())
else:
raise Exception('Could not get list of available frequencies: %s' %
available_freqs)
maxfreq = available_freqs[-1]
target = int(round(maxfreq * target_percent))
freq = maxfreq
for f in reversed(available_freqs):
if f <= target:
freq = f
break
print 'Setting frequency to %d' % freq
# If scaling_max_freq is lower than our attempted setting, it won't take.
# We must set min first, because if we try to set max to be less than min
# (which sometimes happens after certain devices reboot) it returns a
# perplexing permissions error.
subprocess.check_output([ADB, 'shell', 'echo 0 > '
'%s/scaling_min_freq' % root])
subprocess.check_output([ADB, 'shell', 'echo %d > '
'%s/scaling_max_freq' % (freq, root)])
subprocess.check_output([ADB, 'shell', 'echo %d > '
'%s/scaling_setspeed' % (freq, root)])
time.sleep(5)
actual_freq = subprocess.check_output([ADB, 'shell', 'cat '
'%s/scaling_cur_freq' % root]).strip()
if actual_freq != str(freq):
raise Exception('(actual, expected) (%s, %d)'
% (actual_freq, freq))
""",
args = [self.ADB_BINARY, str(target_percent), cpu],
infra_step=True,
timeout=30)
def install(self):
self._adb('mkdir ' + self.device_dirs.resource_dir,
'shell', 'mkdir', '-p', self.device_dirs.resource_dir)
if 'ASAN' in self.m.vars.extra_tokens:
asan_setup = self.m.vars.slave_dir.join(
'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt',
'linux-x86_64', 'lib64', 'clang', '6.0.2', 'bin',
'asan_device_setup')
self.m.run(self.m.python.inline, 'Setting up device to run ASAN',
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
ASAN_SETUP = sys.argv[2]
def wait_for_device():
while True:
time.sleep(5)
print 'Waiting for device'
subprocess.check_output([ADB, 'wait-for-device'])
bit1 = subprocess.check_output([ADB, 'shell', 'getprop',
'dev.bootcomplete'])
bit2 = subprocess.check_output([ADB, 'shell', 'getprop',
'sys.boot_completed'])
if '1' in bit1 and '1' in bit2:
print 'Device detected'
break
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
output = subprocess.check_output([ADB, 'disable-verity'])
print output
if 'already disabled' not in output:
print 'Rebooting device'
subprocess.check_output([ADB, 'reboot'])
wait_for_device()
def installASAN(revert=False):
# ASAN setup script is idempotent, either it installs it or
# says it's installed. Returns True on success, false otherwise.
out = subprocess.check_output([ADB, 'wait-for-device'])
print out
cmd = [ASAN_SETUP]
if revert:
cmd = [ASAN_SETUP, '--revert']
process = subprocess.Popen(cmd, env={'ADB': ADB},
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# this also blocks until command finishes
(stdout, stderr) = process.communicate()
print stdout
print 'Stderr: %s' % stderr
return process.returncode == 0
if not installASAN():
print 'Trying to revert the ASAN install and then re-install'
# ASAN script sometimes has issues if it was interrupted or partially applied
# Try reverting it, then re-enabling it
if not installASAN(revert=True):
raise Exception('reverting ASAN install failed')
# Sleep because device does not reboot instantly
time.sleep(10)
if not installASAN():
raise Exception('Tried twice to setup ASAN and failed.')
# Sleep because device does not reboot instantly
time.sleep(10)
wait_for_device()
""",
args = [self.ADB_BINARY, asan_setup],
infra_step=True,
timeout=300,
abort_on_failure=True)
def cleanup_steps(self):
if self._ever_ran_adb:
self.m.run(self.m.python.inline, 'dump log', program="""
import os
import subprocess
import sys
out = sys.argv[1]
log = subprocess.check_output(['%s', 'logcat', '-d'])
for line in log.split('\\n'):
tokens = line.split()
if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':
addr, path = tokens[-2:]
local = os.path.join(out, os.path.basename(path))
if os.path.exists(local):
sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])
line = line.replace(addr, addr + ' ' + sym.strip())
print line
""" % self.ADB_BINARY,
args=[self.m.vars.skia_out],
infra_step=True,
timeout=300,
abort_on_failure=False)
# Only quarantine the bot if the first failed step
# is an infra step. If, instead, we did this for any infra failures, we
# would do this too much. For example, if a Nexus 10 died during dm
# and the following pull step would also fail "device not found" - causing
# us to run the shutdown command when the device was probably not in a
# broken state; it was just rebooting.
if (self.m.run.failed_steps and
isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)):
bot_id = self.m.vars.swarming_bot_id
self.m.file.write_text('Quarantining Bot',
'/home/chrome-bot/%s.force_quarantine' % bot_id,
' ')
if self._ever_ran_adb:
self._adb('kill adb server', 'kill-server')
def step(self, name, cmd, **kwargs):
if (cmd[0] == 'nanobench'):
self._scale_for_nanobench()
else:
self._scale_for_dm()
app = self.m.vars.skia_out.join(cmd[0])
self._adb('push %s' % cmd[0],
'push', app, self.device_dirs.bin_dir)
sh = '%s.sh' % cmd[0]
self.m.run.writefile(self.m.vars.tmp_dir.join(sh),
'set -x; %s%s; echo $? >%src' % (
self.device_dirs.bin_dir, subprocess.list2cmdline(map(str, cmd)),
self.device_dirs.bin_dir))
self._adb('push %s' % sh,
'push', self.m.vars.tmp_dir.join(sh), self.device_dirs.bin_dir)
self._adb('clear log', 'logcat', '-c')
self.m.python.inline('%s' % cmd[0], """
import subprocess
import sys
bin_dir = sys.argv[1]
sh = sys.argv[2]
subprocess.check_call(['%s', 'shell', 'sh', bin_dir + sh])
try:
sys.exit(int(subprocess.check_output(['%s', 'shell', 'cat',
bin_dir + 'rc'])))
except ValueError:
print "Couldn't read the return code. Probably killed for OOM."
sys.exit(1)
""" % (self.ADB_BINARY, self.ADB_BINARY),
args=[self.device_dirs.bin_dir, sh])
  def copy_file_to_device(self, host, device):
    """Copy a single file from the host to the device via 'adb push'."""
    self._adb('push %s %s' % (host, device), 'push', host, device)
  def copy_directory_contents_to_device(self, host, device):
    """Recursively push the contents of a host directory to the device.

    Hidden directories are skipped, and each file is pushed individually
    through os.path.realpath so that symlinks are transferred as their
    targets rather than as links.
    """
    # Copy the tree, avoiding hidden directories and resolving symlinks.
    self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device),
               program="""
    import os
    import subprocess
    import sys
    host = sys.argv[1]
    device = sys.argv[2]
    for d, _, fs in os.walk(host):
      p = os.path.relpath(d, host)
      if p != '.' and p.startswith('.'):
        continue
      for f in fs:
        print os.path.join(p,f)
        subprocess.check_call(['%s', 'push',
                               os.path.realpath(os.path.join(host, p, f)),
                               os.path.join(device, p, f)])
    """ % self.ADB_BINARY, args=[host, device], infra_step=True)
  def copy_directory_contents_to_host(self, device, host):
    """Pull a directory tree from the device to the host via 'adb pull'."""
    self._adb('pull %s %s' % (device, host), 'pull', device, host)
  def read_file_on_device(self, path, **kwargs):
    """Return the contents of a file on the device, or None if unreadable.

    Trailing whitespace is stripped from the output. Extra kwargs are
    forwarded to the adb step (e.g. abort_on_failure).
    """
    rv = self._adb('read %s' % path,
                   'shell', 'cat', path, stdout=self.m.raw_io.output(),
                   **kwargs)
    # rv may be falsy when the step failed and was allowed to continue.
    return rv.stdout.rstrip() if rv and rv.stdout else None
  def remove_file_on_device(self, path):
    """Delete a single file on the device ('rm -f': no error if absent)."""
    self._adb('rm %s' % path, 'shell', 'rm', '-f', path)
  def create_clean_device_dir(self, path):
    """Ensure `path` exists on the device as a fresh, empty directory."""
    # Remove any previous contents first, then recreate the directory.
    self._adb('rm %s' % path, 'shell', 'rm', '-rf', path)
    self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)
| 36.613678 | 82 | 0.632977 |
from recipe_engine import recipe_api
import default_flavor
import re
import subprocess
class GNAndroidFlavorUtils(default_flavor.DefaultFlavorUtils):
def __init__(self, m):
super(GNAndroidFlavorUtils, self).__init__(m)
self._ever_ran_adb = False
self.ADB_BINARY = '/usr/bin/adb.1.0.35'
self.ADB_PUB_KEY = '/home/chrome-bot/.android/adbkey'
self._golo_devices = ['Nexus5x']
if self.m.vars.builder_cfg.get('model') in self._golo_devices:
self.ADB_BINARY = '/opt/infra-android/tools/adb'
self.ADB_PUB_KEY = ('/home/chrome-bot/.android/'
'chrome_infrastructure_adbkey')
android_data_dir = '/sdcard/revenge_of_the_skiabot/'
self.device_dirs = default_flavor.DeviceDirs(
bin_dir = '/data/local/tmp/',
dm_dir = android_data_dir + 'dm_out',
perf_data_dir = android_data_dir + 'perf',
resource_dir = android_data_dir + 'resources',
images_dir = android_data_dir + 'images',
skp_dir = android_data_dir + 'skps',
svg_dir = android_data_dir + 'svgs',
tmp_dir = android_data_dir)
# on the list, we fail the task to avoid perf inconsistencies.
self.rootable_blacklist = ['GalaxyS6', 'GalaxyS7_G930A', 'GalaxyS7_G930FD',
'MotoG4', 'NVIDIA_Shield']
# Maps device type -> CPU ids that should be scaled for nanobench.
# Many devices have two (or more) different CPUs (e.g. big.LITTLE
# on Nexus5x). The CPUs listed are the biggest cpus on the device.
# The CPUs are grouped together, so we only need to scale one of them
# (the one listed) in order to scale them all.
# E.g. Nexus5x has cpu0-3 as one chip and cpu4-5 as the other. Thus,
# if one wants to run a single-threaded application (e.g. nanobench), one
# can disable cpu0-3 and scale cpu 4 to have only cpu4 and 5 at the same
# frequency. See also disable_for_nanobench.
self.cpus_to_scale = {
'Nexus5x': [4],
'NexusPlayer': [0, 2], # has 2 identical chips, so scale them both.
'Pixel': [2],
'Pixel2XL': [4]
}
# Maps device type -> CPU ids that should be turned off when running
# single-threaded applications like nanobench. The devices listed have
# multiple, differnt CPUs. We notice a lot of noise that seems to be
# caused by nanobench running on the slow CPU, then the big CPU. By
# disabling this, we see less of that noise by forcing the same CPU
# to be used for the performance testing every time.
self.disable_for_nanobench = {
'Nexus5x': range(0, 4),
'Pixel': range(0, 2),
'Pixel2XL': range(0, 4),
'PixelC': range(0, 2)
}
self.gpu_scaling = {
"Nexus5": 450000000,
"Nexus5x": 600000000,
}
def _run(self, title, *cmd, **kwargs):
with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
return self.m.run(self.m.step, title, cmd=list(cmd), **kwargs)
def _adb(self, title, *cmd, **kwargs):
# The only non-infra adb steps (dm / nanobench) happen to not use _adb().
if 'infra_step' not in kwargs:
kwargs['infra_step'] = True
self._ever_ran_adb = True
attempts = 1
flaky_devices = ['NexusPlayer', 'PixelC']
if self.m.vars.builder_cfg.get('model') in flaky_devices:
attempts = 3
def wait_for_device(attempt):
self.m.run(self.m.step,
'kill adb server after failure of \'%s\' (attempt %d)' % (
title, attempt),
cmd=[self.ADB_BINARY, 'kill-server'],
infra_step=True, timeout=30, abort_on_failure=False,
fail_build_on_failure=False)
self.m.run(self.m.step,
'wait for device after failure of \'%s\' (attempt %d)' % (
title, attempt),
cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True,
timeout=180, abort_on_failure=False,
fail_build_on_failure=False)
with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
with self.m.env({'ADB_VENDOR_KEYS': self.ADB_PUB_KEY}):
return self.m.run.with_retry(self.m.step, title, attempts,
cmd=[self.ADB_BINARY]+list(cmd),
between_attempts_fn=wait_for_device,
**kwargs)
def _scale_for_dm(self):
device = self.m.vars.builder_cfg.get('model')
if (device in self.rootable_blacklist or
self.m.vars.internal_hardware_label):
return
# This is paranoia... any CPUs we disabled while running nanobench
# ought to be back online now that we've restarted the device.
for i in self.disable_for_nanobench.get(device, []):
self._set_cpu_online(i, 1)
scale_up = self.cpus_to_scale.get(device, [0])
if 0 not in scale_up:
scale_up.append(0)
for i in scale_up:
if device == 'AndroidOne':
self._set_governor(i, 'hotplug')
else:
self._set_governor(i, 'ondemand')
def _scale_for_nanobench(self):
device = self.m.vars.builder_cfg.get('model')
if (device in self.rootable_blacklist or
self.m.vars.internal_hardware_label):
return
for i in self.cpus_to_scale.get(device, [0]):
self._set_governor(i, 'userspace')
self._scale_cpu(i, 0.6)
for i in self.disable_for_nanobench.get(device, []):
self._set_cpu_online(i, 0) # disable
if device in self.gpu_scaling:
#https://developer.qualcomm.com/qfile/28823/lm80-p0436-11_adb_commands.pdf
# Section 3.2.1 Commands to put the GPU in performance mode
# Nexus 5 is 320000000 by default
# Nexus 5x is 180000000 by default
gpu_freq = self.gpu_scaling[device]
self.m.run.with_retry(self.m.python.inline,
"Lock GPU to %d (and other perf tweaks)" % gpu_freq,
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
freq = sys.argv[2]
idle_timer = "10000"
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
subprocess.check_output([ADB, 'shell', 'stop', 'thermald'])
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/class/kgsl/kgsl-3d0/gpuclk' % freq])
actual_freq = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/gpuclk']).strip()
if actual_freq != freq:
raise Exception('Frequency (actual, expected) (%s, %s)'
% (actual_freq, freq))
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/class/kgsl/kgsl-3d0/idle_timer' % idle_timer])
actual_timer = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/idle_timer']).strip()
if actual_timer != idle_timer:
raise Exception('idle_timer (actual, expected) (%s, %s)'
% (actual_timer, idle_timer))
for s in ['force_bus_on', 'force_rail_on', 'force_clk_on']:
subprocess.check_output([ADB, 'shell', 'echo "1" > '
'/sys/class/kgsl/kgsl-3d0/%s' % s])
actual_set = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/%s' % s]).strip()
if actual_set != "1":
raise Exception('%s (actual, expected) (%s, 1)'
% (s, actual_set))
""",
args = [self.ADB_BINARY, gpu_freq],
infra_step=True,
timeout=30)
def _set_governor(self, cpu, gov):
self._ever_ran_adb = True
self.m.run.with_retry(self.m.python.inline,
"Set CPU %d's governor to %s" % (cpu, gov),
3,
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
cpu = int(sys.argv[2])
gov = sys.argv[3]
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % (gov, cpu)])
actual_gov = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu]).strip()
if actual_gov != gov:
raise Exception('(actual, expected) (%s, %s)'
% (actual_gov, gov))
""",
args = [self.ADB_BINARY, cpu, gov],
infra_step=True,
timeout=30)
def _set_cpu_online(self, cpu, value):
self._ever_ran_adb = True
msg = 'Disabling'
if value:
msg = 'Enabling'
self.m.run.with_retry(self.m.python.inline,
'%s CPU %d' % (msg, cpu),
3,
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
cpu = int(sys.argv[2])
value = int(sys.argv[3])
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
# If we try to echo 1 to an already online cpu, adb returns exit code 1.
# So, check the value before trying to write it.
prior_status = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/online' % cpu]).strip()
if prior_status == str(value):
print 'CPU %d online already %d' % (cpu, value)
sys.exit()
subprocess.check_output([ADB, 'shell', 'echo %s > '
'/sys/devices/system/cpu/cpu%d/online' % (value, cpu)])
actual_status = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/online' % cpu]).strip()
if actual_status != str(value):
raise Exception('(actual, expected) (%s, %d)'
% (actual_status, value))
""",
args = [self.ADB_BINARY, cpu, value],
infra_step=True,
timeout=30)
def _scale_cpu(self, cpu, target_percent):
self._ever_ran_adb = True
self.m.run.with_retry(self.m.python.inline,
'Scale CPU %d to %f' % (cpu, target_percent),
3,
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
target_percent = float(sys.argv[2])
cpu = int(sys.argv[3])
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
root = '/sys/devices/system/cpu/cpu%d/cpufreq' %cpu
# All devices we test on give a list of their available frequencies.
available_freqs = subprocess.check_output([ADB, 'shell',
'cat %s/scaling_available_frequencies' % root])
# Check for message like '/system/bin/sh: file not found'
if available_freqs and '/system/bin/sh' not in available_freqs:
available_freqs = sorted(
int(i) for i in available_freqs.strip().split())
else:
raise Exception('Could not get list of available frequencies: %s' %
available_freqs)
maxfreq = available_freqs[-1]
target = int(round(maxfreq * target_percent))
freq = maxfreq
for f in reversed(available_freqs):
if f <= target:
freq = f
break
print 'Setting frequency to %d' % freq
# If scaling_max_freq is lower than our attempted setting, it won't take.
# We must set min first, because if we try to set max to be less than min
# (which sometimes happens after certain devices reboot) it returns a
# perplexing permissions error.
subprocess.check_output([ADB, 'shell', 'echo 0 > '
'%s/scaling_min_freq' % root])
subprocess.check_output([ADB, 'shell', 'echo %d > '
'%s/scaling_max_freq' % (freq, root)])
subprocess.check_output([ADB, 'shell', 'echo %d > '
'%s/scaling_setspeed' % (freq, root)])
time.sleep(5)
actual_freq = subprocess.check_output([ADB, 'shell', 'cat '
'%s/scaling_cur_freq' % root]).strip()
if actual_freq != str(freq):
raise Exception('(actual, expected) (%s, %d)'
% (actual_freq, freq))
""",
args = [self.ADB_BINARY, str(target_percent), cpu],
infra_step=True,
timeout=30)
def install(self):
self._adb('mkdir ' + self.device_dirs.resource_dir,
'shell', 'mkdir', '-p', self.device_dirs.resource_dir)
if 'ASAN' in self.m.vars.extra_tokens:
asan_setup = self.m.vars.slave_dir.join(
'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt',
'linux-x86_64', 'lib64', 'clang', '6.0.2', 'bin',
'asan_device_setup')
self.m.run(self.m.python.inline, 'Setting up device to run ASAN',
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
ASAN_SETUP = sys.argv[2]
def wait_for_device():
while True:
time.sleep(5)
print 'Waiting for device'
subprocess.check_output([ADB, 'wait-for-device'])
bit1 = subprocess.check_output([ADB, 'shell', 'getprop',
'dev.bootcomplete'])
bit2 = subprocess.check_output([ADB, 'shell', 'getprop',
'sys.boot_completed'])
if '1' in bit1 and '1' in bit2:
print 'Device detected'
break
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
output = subprocess.check_output([ADB, 'disable-verity'])
print output
if 'already disabled' not in output:
print 'Rebooting device'
subprocess.check_output([ADB, 'reboot'])
wait_for_device()
def installASAN(revert=False):
# ASAN setup script is idempotent, either it installs it or
# says it's installed. Returns True on success, false otherwise.
out = subprocess.check_output([ADB, 'wait-for-device'])
print out
cmd = [ASAN_SETUP]
if revert:
cmd = [ASAN_SETUP, '--revert']
process = subprocess.Popen(cmd, env={'ADB': ADB},
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# this also blocks until command finishes
(stdout, stderr) = process.communicate()
print stdout
print 'Stderr: %s' % stderr
return process.returncode == 0
if not installASAN():
print 'Trying to revert the ASAN install and then re-install'
# ASAN script sometimes has issues if it was interrupted or partially applied
# Try reverting it, then re-enabling it
if not installASAN(revert=True):
raise Exception('reverting ASAN install failed')
# Sleep because device does not reboot instantly
time.sleep(10)
if not installASAN():
raise Exception('Tried twice to setup ASAN and failed.')
# Sleep because device does not reboot instantly
time.sleep(10)
wait_for_device()
""",
args = [self.ADB_BINARY, asan_setup],
infra_step=True,
timeout=300,
abort_on_failure=True)
def cleanup_steps(self):
if self._ever_ran_adb:
self.m.run(self.m.python.inline, 'dump log', program="""
import os
import subprocess
import sys
out = sys.argv[1]
log = subprocess.check_output(['%s', 'logcat', '-d'])
for line in log.split('\\n'):
tokens = line.split()
if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':
addr, path = tokens[-2:]
local = os.path.join(out, os.path.basename(path))
if os.path.exists(local):
sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])
line = line.replace(addr, addr + ' ' + sym.strip())
print line
""" % self.ADB_BINARY,
args=[self.m.vars.skia_out],
infra_step=True,
timeout=300,
abort_on_failure=False)
if (self.m.run.failed_steps and
isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)):
bot_id = self.m.vars.swarming_bot_id
self.m.file.write_text('Quarantining Bot',
'/home/chrome-bot/%s.force_quarantine' % bot_id,
' ')
if self._ever_ran_adb:
self._adb('kill adb server', 'kill-server')
def step(self, name, cmd, **kwargs):
if (cmd[0] == 'nanobench'):
self._scale_for_nanobench()
else:
self._scale_for_dm()
app = self.m.vars.skia_out.join(cmd[0])
self._adb('push %s' % cmd[0],
'push', app, self.device_dirs.bin_dir)
sh = '%s.sh' % cmd[0]
self.m.run.writefile(self.m.vars.tmp_dir.join(sh),
'set -x; %s%s; echo $? >%src' % (
self.device_dirs.bin_dir, subprocess.list2cmdline(map(str, cmd)),
self.device_dirs.bin_dir))
self._adb('push %s' % sh,
'push', self.m.vars.tmp_dir.join(sh), self.device_dirs.bin_dir)
self._adb('clear log', 'logcat', '-c')
self.m.python.inline('%s' % cmd[0], """
import subprocess
import sys
bin_dir = sys.argv[1]
sh = sys.argv[2]
subprocess.check_call(['%s', 'shell', 'sh', bin_dir + sh])
try:
sys.exit(int(subprocess.check_output(['%s', 'shell', 'cat',
bin_dir + 'rc'])))
except ValueError:
print "Couldn't read the return code. Probably killed for OOM."
sys.exit(1)
""" % (self.ADB_BINARY, self.ADB_BINARY),
args=[self.device_dirs.bin_dir, sh])
def copy_file_to_device(self, host, device):
self._adb('push %s %s' % (host, device), 'push', host, device)
def copy_directory_contents_to_device(self, host, device):
# Copy the tree, avoiding hidden directories and resolving symlinks.
self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device),
program="""
import os
import subprocess
import sys
host = sys.argv[1]
device = sys.argv[2]
for d, _, fs in os.walk(host):
p = os.path.relpath(d, host)
if p != '.' and p.startswith('.'):
continue
for f in fs:
print os.path.join(p,f)
subprocess.check_call(['%s', 'push',
os.path.realpath(os.path.join(host, p, f)),
os.path.join(device, p, f)])
""" % self.ADB_BINARY, args=[host, device], infra_step=True)
def copy_directory_contents_to_host(self, device, host):
self._adb('pull %s %s' % (device, host), 'pull', device, host)
def read_file_on_device(self, path, **kwargs):
rv = self._adb('read %s' % path,
'shell', 'cat', path, stdout=self.m.raw_io.output(),
**kwargs)
return rv.stdout.rstrip() if rv and rv.stdout else None
def remove_file_on_device(self, path):
self._adb('rm %s' % path, 'shell', 'rm', '-f', path)
def create_clean_device_dir(self, path):
self._adb('rm %s' % path, 'shell', 'rm', '-rf', path)
self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)
| true | true |
1c352b0a9797bacca4a2027a8e3d37c33bdafbd4 | 204 | py | Python | dicodile/data/tests/test_gait.py | hndgzkn/dicodile | 799f3fe244609d4699109a42956bf1ab97778e6c | [
"BSD-3-Clause"
] | 15 | 2019-02-04T19:55:41.000Z | 2021-12-28T14:27:42.000Z | dicodile/data/tests/test_gait.py | hndgzkn/dicodile | 799f3fe244609d4699109a42956bf1ab97778e6c | [
"BSD-3-Clause"
] | 47 | 2021-01-12T09:41:15.000Z | 2022-03-10T10:33:48.000Z | dicodile/data/tests/test_gait.py | hndgzkn/dicodile | 799f3fe244609d4699109a42956bf1ab97778e6c | [
"BSD-3-Clause"
] | 7 | 2019-05-06T15:21:55.000Z | 2021-04-22T09:53:45.000Z | from dicodile.data.gait import get_gait_data
def test_get_gait():
    """Smoke-test the gait data loader on its default subject/trial."""
    trial = get_gait_data()
    # The default fetch corresponds to subject 1, trial 1.
    identifiers = (trial['Subject'], trial['Trial'])
    assert identifiers == (1, 1)
    # The recording exposes 16 data columns.
    assert len(trial['data'].columns) == 16
| 22.666667 | 44 | 0.666667 | from dicodile.data.gait import get_gait_data
def test_get_gait():
trial = get_gait_data()
assert trial['Subject'] == 1
assert trial['Trial'] == 1
assert len(trial['data'].columns) == 16
| true | true |
1c352b4e88322157cacafc5f554a325bea421e51 | 1,155 | py | Python | clients/client/python/test/test_submit_self_service_login_flow_with_lookup_secret_method_body.py | tobbbles/sdk | 017ca2fd46019bafd1853913b6c0f2b0fe687621 | [
"Apache-2.0"
] | null | null | null | clients/client/python/test/test_submit_self_service_login_flow_with_lookup_secret_method_body.py | tobbbles/sdk | 017ca2fd46019bafd1853913b6c0f2b0fe687621 | [
"Apache-2.0"
] | null | null | null | clients/client/python/test/test_submit_self_service_login_flow_with_lookup_secret_method_body.py | tobbbles/sdk | 017ca2fd46019bafd1853913b6c0f2b0fe687621 | [
"Apache-2.0"
] | null | null | null | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.18
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.submit_self_service_login_flow_with_lookup_secret_method_body import SubmitSelfServiceLoginFlowWithLookupSecretMethodBody
class TestSubmitSelfServiceLoginFlowWithLookupSecretMethodBody(unittest.TestCase):
    """SubmitSelfServiceLoginFlowWithLookupSecretMethodBody unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures are required yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testSubmitSelfServiceLoginFlowWithLookupSecretMethodBody(self):
        """Test SubmitSelfServiceLoginFlowWithLookupSecretMethodBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = SubmitSelfServiceLoginFlowWithLookupSecretMethodBody()  # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 31.216216 | 194 | 0.769697 |
import sys
import unittest
import ory_client
from ory_client.model.submit_self_service_login_flow_with_lookup_secret_method_body import SubmitSelfServiceLoginFlowWithLookupSecretMethodBody
class TestSubmitSelfServiceLoginFlowWithLookupSecretMethodBody(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSubmitSelfServiceLoginFlowWithLookupSecretMethodBody(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c352be446d9cc123b14ae4b032878c1de524b88 | 3,665 | py | Python | pycaret/tests/test_time_series_tune_base.py | AJarman/pycaret | e96fefbf95c9e0195ec07ea63ebe25a8ce98baf3 | [
"MIT"
] | null | null | null | pycaret/tests/test_time_series_tune_base.py | AJarman/pycaret | e96fefbf95c9e0195ec07ea63ebe25a8ce98baf3 | [
"MIT"
] | null | null | null | pycaret/tests/test_time_series_tune_base.py | AJarman/pycaret | e96fefbf95c9e0195ec07ea63ebe25a8ce98baf3 | [
"MIT"
] | null | null | null | """Module to test time_series "tune_model" BASE functionality
"""
import pytest
import numpy as np
import pandas as pd
from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
from .time_series_test_utils import _ALL_METRICS
##########################
#### Tests Start Here ####
##########################
def test_tune_custom_grid_and_choose_better(load_pos_and_neg_data):
    """Tests
    (1) passing a custom grid to tune_model, and
    (2) choose_better=True
    """
    experiment = TimeSeriesExperiment()
    experiment.setup(
        data=load_pos_and_neg_data,
        fh=np.arange(1, 13),
        fold=2,
        fold_strategy="expanding",
        verbose=False,
        session_id=42,
    )

    base_model = experiment.create_model("naive")

    # Restrict the search space to a single alternative strategy.
    forced_strategy = "mean"
    grid = {"strategy": [forced_strategy]}

    # choose_better defaults to True: keep whichever model scores better.
    best_of_both = experiment.tune_model(base_model, custom_grid=grid)
    # choose_better=False: return the tuned candidate unconditionally.
    tuned_only = experiment.tune_model(
        base_model, custom_grid=grid, choose_better=False
    )

    # With choose_better=True the original (better-scoring) strategy is kept.
    assert best_of_both.strategy == base_model.strategy
    # With choose_better=False only the grid's single value can be picked ...
    assert tuned_only.strategy == forced_strategy
    # ... even though it scores worse than the original model
    # (verified manually).
    assert tuned_only.strategy != base_model.strategy
def test_tune_model_custom_folds(load_pos_and_neg_data):
    """test custom folds in tune_model"""
    exp = TimeSeriesExperiment()
    default_folds = 3
    exp.setup(
        data=load_pos_and_neg_data,
        fold=default_folds,
        fh=12,
        fold_strategy="sliding",
        verbose=False,
    )

    model = exp.create_model("naive")

    # Tune with the fold count fixed at setup time.
    _ = exp.tune_model(model)
    default_metrics = exp.pull()

    # Tune again, overriding the fold count for this call only.
    override_folds = 5
    _ = exp.tune_model(model, fold=override_folds)
    override_metrics = exp.pull()

    # Each CV split contributes one row; Mean and SD add two more.
    assert len(default_metrics) == default_folds + 2
    assert len(override_metrics) == override_folds + 2
@pytest.mark.parametrize("metric", _ALL_METRICS)
def test_tune_model_alternate_metric(load_pos_and_neg_data, metric):
    """tests model selection using non default metric"""
    fh = 12
    exp = TimeSeriesExperiment()
    exp.setup(data=load_pos_and_neg_data, fold=2, fh=fh, fold_strategy="sliding")

    base_model = exp.create_model("naive")
    tuned = exp.tune_model(base_model, optimize=metric)
    forecast = exp.predict_model(tuned)

    assert isinstance(forecast, pd.Series)
    # Predictions must cover exactly the last `fh` periods of the input.
    assert np.all(forecast.index == load_pos_and_neg_data.iloc[-fh:].index)
def test_tune_model_raises(load_pos_and_neg_data):
    """Tests conditions that raise an error due to lack of data"""
    exp = TimeSeriesExperiment()
    exp.setup(
        data=load_pos_and_neg_data,
        fh=np.arange(1, 13),
        fold=2,
        fold_strategy="expanding",
        verbose=False,
        session_id=42,
    )

    model = exp.create_model("naive")

    # An unknown search algorithm must be rejected with a descriptive error.
    bad_algorithm = "wrong_algorithm"
    with pytest.raises(ValueError) as errmsg:
        _ = exp.tune_model(model, search_algorithm=bad_algorithm)

    exceptionmsg = errmsg.value.args[0]
    assert (
        exceptionmsg
        == f"`search_algorithm` must be one of 'None, random, grid'. You passed '{bad_algorithm}'."
    )
| 27.350746 | 102 | 0.655389 |
import pytest
import numpy as np
import pandas as pd
from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
from .time_series_test_utils import _ALL_METRICS
assert tuned_model2.strategy != model.strategy
def test_tune_model_custom_folds(load_pos_and_neg_data):
exp = TimeSeriesExperiment()
setup_fold = 3
exp.setup(
data=load_pos_and_neg_data,
fold=setup_fold,
fh=12,
fold_strategy="sliding",
verbose=False,
)
rid'. You passed '{search_algorithm}'."
)
| true | true |
1c352bf374538b5e35d40deb826562eb2decc543 | 2,226 | py | Python | handlers/handlers.py | waleko/libreta | 323d20c52d676a36f47df70f0909eb2bbb7ab753 | [
"Apache-2.0"
] | null | null | null | handlers/handlers.py | waleko/libreta | 323d20c52d676a36f47df70f0909eb2bbb7ab753 | [
"Apache-2.0"
] | null | null | null | handlers/handlers.py | waleko/libreta | 323d20c52d676a36f47df70f0909eb2bbb7ab753 | [
"Apache-2.0"
] | null | null | null | from typing import List
from telegram import Update
from telegram.ext import Handler, CallbackContext, ConversationHandler
from strings import Strings
from utils.dao import Dao
handlers: List[Handler] = []
def register_unprotected_handler(handler: Handler) -> None:
    """
    Adds given handler to `Bot` without any authentication guard.

    The handler is appended to the module-level ``handlers`` list, which the
    bot setup code is expected to consume.
    """
    handlers.append(handler)
def add_authguard_to_handler(handler: Handler) -> Handler:
    """
    Transforms handler to be accessible only to invited users.

    :param handler: handler without authguard
    :return: same handler with an authguard
    """
    # if handler is a ConversationHandler, there is no `.callback`
    if isinstance(handler, ConversationHandler):
        # recursively add authguard to every entry point
        new_entry_points = [
            add_authguard_to_handler(entry_point)
            for entry_point in handler.entry_points
        ]
        # construct new handler
        # NOTE(review): only the entry points are wrapped; handlers inside
        # `states` and `fallbacks` are reused unguarded. Presumably a
        # conversation is only reachable through a guarded entry point --
        # confirm that invariant holds.
        # NOTE(review): arguments are passed positionally, so this is coupled
        # to the exact ConversationHandler constructor signature of the
        # pinned python-telegram-bot version.
        new_handler = ConversationHandler(
            new_entry_points,
            handler.states,
            handler.fallbacks,
            handler.allow_reentry,
            handler.per_chat,
            handler.per_user,
            handler.per_message,
            handler.conversation_timeout,
            handler.name,
            handler.persistent,
            handler.map_to_parent,
            handler.run_async,
        )
        return new_handler
    else:
        # get default callback
        callback = handler.callback

        # custom callback that checks authorization before delegating
        def auth_guard_callback(update: Update, context: CallbackContext):
            # check user auth status
            if Dao.is_user_authorized(update.effective_user):
                # if authenticated, continue execution
                return callback(update, context)
            else:
                # if not authenticated, reply with failed
                update.effective_message.reply_text(Strings.unauthenticated)

        # apply custom callback (mutates the handler in place)
        handler.callback = auth_guard_callback
        return handler
def register_protected_handler(handler: Handler) -> None:
    """
    Adds an auth guard to the handler and registers the new (protected)
    handler with `Bot`.
    """
    register_unprotected_handler(add_authguard_to_handler(handler))
| 30.493151 | 76 | 0.651842 | from typing import List
from telegram import Update
from telegram.ext import Handler, CallbackContext, ConversationHandler
from strings import Strings
from utils.dao import Dao
handlers: List[Handler] = []
def register_unprotected_handler(handler: Handler):
handlers.append(handler)
def add_authguard_to_handler(handler: Handler) -> Handler:
if isinstance(handler, ConversationHandler):
new_entry_points = [
add_authguard_to_handler(entry_point)
for entry_point in handler.entry_points
]
new_handler = ConversationHandler(
new_entry_points,
handler.states,
handler.fallbacks,
handler.allow_reentry,
handler.per_chat,
handler.per_user,
handler.per_message,
handler.conversation_timeout,
handler.name,
handler.persistent,
handler.map_to_parent,
handler.run_async,
)
return new_handler
else:
callback = handler.callback
def auth_guard_callback(update: Update, context: CallbackContext):
if Dao.is_user_authorized(update.effective_user):
return callback(update, context)
else:
update.effective_message.reply_text(Strings.unauthenticated)
handler.callback = auth_guard_callback
return handler
def register_protected_handler(handler: Handler):
register_unprotected_handler(add_authguard_to_handler(handler))
| true | true |
1c352bfb1e5e73fc26b875533846ebf2be26997b | 43 | py | Python | pipex/storages/h5storage/__init__.py | Algy/pipex | 02b958f67b32cad4a492d098a2ed73f971c6ac5f | [
"MIT"
] | 3 | 2018-12-24T03:48:40.000Z | 2018-12-24T04:07:36.000Z | pipex/storages/h5storage/__init__.py | Algy/pipex | 02b958f67b32cad4a492d098a2ed73f971c6ac5f | [
"MIT"
] | 2 | 2021-03-18T21:56:12.000Z | 2021-09-08T00:47:14.000Z | pipex/storages/h5storage/__init__.py | Algy/pipex | 02b958f67b32cad4a492d098a2ed73f971c6ac5f | [
"MIT"
] | null | null | null | from .h5storage import H5Storage, H5Bucket
| 21.5 | 42 | 0.837209 | from .h5storage import H5Storage, H5Bucket
| true | true |
1c352c2f63bd95f0122ababeaffcc414d0aab268 | 271 | py | Python | tests/test_signals.py | marazmiki/django-disguise | 35ee8f883d198292911a3e996d7920ab4faa3db8 | [
"MIT"
] | 1 | 2015-04-04T22:14:53.000Z | 2015-04-04T22:14:53.000Z | tests/test_signals.py | marazmiki/django-disguise | 35ee8f883d198292911a3e996d7920ab4faa3db8 | [
"MIT"
] | 2 | 2019-10-03T04:54:52.000Z | 2020-02-11T23:57:02.000Z | tests/test_signals.py | marazmiki/django-disguise | 35ee8f883d198292911a3e996d7920ab4faa3db8 | [
"MIT"
] | 1 | 2018-03-05T17:41:48.000Z | 2018-03-05T17:41:48.000Z | from django.contrib.auth.models import Permission
from django.db.models.signals import post_save
def test_permission():
    """The 'can_disguise' permission is created by the post_save signal."""
    disguise_perms = Permission.objects.filter(codename='can_disguise')

    # The permission must not exist before the signal fires.
    assert not disguise_perms.exists()

    # Emitting post_save for the Permission model triggers its creation.
    post_save.send(sender=Permission)
    assert disguise_perms.exists()
| 24.636364 | 59 | 0.760148 | from django.contrib.auth.models import Permission
from django.db.models.signals import post_save
def test_permission():
qs = Permission.objects.filter(codename='can_disguise')
assert not qs.exists()
post_save.send(sender=Permission)
assert qs.exists()
| true | true |
1c352e16362790d4730bcd25c38138a601edc85d | 1,192 | py | Python | tests/utils/path.py | lise1020/pybinding | 921d5c2ac0ecc0ef317ba28b0bf68899ea30709a | [
"BSD-2-Clause"
] | 159 | 2016-01-20T17:40:48.000Z | 2022-03-24T06:08:55.000Z | tests/utils/path.py | deilynazar/pybinding | ec1128aaa84a1b43a74fb970479ce4544bd63179 | [
"BSD-2-Clause"
] | 36 | 2016-11-01T17:15:12.000Z | 2022-03-08T14:31:51.000Z | tests/utils/path.py | deilynazar/pybinding | ec1128aaa84a1b43a74fb970479ce4544bd63179 | [
"BSD-2-Clause"
] | 57 | 2016-04-23T22:12:01.000Z | 2022-03-08T12:33:04.000Z | import pathlib
def path_from_fixture(request, prefix, variant='', ext='', override_group=''):
"""Use a fixture's `request` argument to create a unique file path
The final return path will look like:
prefix/module_name/test_name[fixture_param]variant.ext
Parameters
----------
request
Pytest fixture argument.
prefix : str
Path prefix. If a relative path is given it's assumed to be inside the tests dir.
variant : str, optional
Appended to the path just before the suffix.
ext : str, optional
File name extension
override_group : str, optional
'test_name[fixture_param]' -> 'override_group[fixture_param]'
Returns
-------
pathlib.Path
"""
test_dir = pathlib.Path(str(request.fspath.join('..')))
module_name = request.module.__name__.split('.')[-1].replace('test_', '')
name = request.node.name.replace('test_', '') + variant
if override_group:
# 'test_name[fixture_param]' -> 'override_name[fixture_param]'
part = name.partition('[')
name = override_group + part[1] + part[2]
return (test_dir / prefix / module_name / name).with_suffix(ext)
| 32.216216 | 89 | 0.646812 | import pathlib
def path_from_fixture(request, prefix, variant='', ext='', override_group=''):
test_dir = pathlib.Path(str(request.fspath.join('..')))
module_name = request.module.__name__.split('.')[-1].replace('test_', '')
name = request.node.name.replace('test_', '') + variant
if override_group:
part = name.partition('[')
name = override_group + part[1] + part[2]
return (test_dir / prefix / module_name / name).with_suffix(ext)
| true | true |
1c35309bac56e128d6e052406d3a7d5cd066fd49 | 302 | py | Python | util/arrays.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | [
"MIT"
] | 3 | 2020-02-21T21:35:07.000Z | 2020-09-29T15:20:00.000Z | util/arrays.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | [
"MIT"
] | 27 | 2020-02-20T21:00:23.000Z | 2020-05-22T15:23:25.000Z | util/arrays.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | [
"MIT"
] | null | null | null | def slice_from_list_of_pairs(pair_list, null_offset=None):
slice_list = []
if null_offset is not None:
for _ in range(null_offset):
slice_list.append(slice(None))
for pair in pair_list:
slice_list.append(slice(pair[0], pair[1]))
return tuple(slice_list)
| 23.230769 | 58 | 0.662252 | def slice_from_list_of_pairs(pair_list, null_offset=None):
slice_list = []
if null_offset is not None:
for _ in range(null_offset):
slice_list.append(slice(None))
for pair in pair_list:
slice_list.append(slice(pair[0], pair[1]))
return tuple(slice_list)
| true | true |
1c35321a8dd538f3952867cc9a3e9b9162013ea8 | 1,043 | py | Python | machine-learning-az/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/categorical_data.py | tapiwam/dataSciProjects | 55d6fb348bc63acacfa0510ffd9787ecf49e0495 | [
"MIT"
] | null | null | null | machine-learning-az/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/categorical_data.py | tapiwam/dataSciProjects | 55d6fb348bc63acacfa0510ffd9787ecf49e0495 | [
"MIT"
] | null | null | null | machine-learning-az/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/categorical_data.py | tapiwam/dataSciProjects | 55d6fb348bc63acacfa0510ffd9787ecf49e0495 | [
"MIT"
] | null | null | null | # Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Taking care of missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# Encoding categorical data
# Encoding the Independent Variable
'''
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
'''
# New way
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([("State", OneHotEncoder(), [0])], remainder = 'passthrough')
X = ct.fit_transform(X) | 29.8 | 84 | 0.746884 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([("State", OneHotEncoder(), [0])], remainder = 'passthrough')
X = ct.fit_transform(X) | true | true |
1c35323ca460d4043b0045be8eb077516d2d70f2 | 1,658 | py | Python | src/config.py | shuu-tatsu/qagan | 15c76655cfecba4f6073940728d930b58a305eec | [
"MIT"
] | null | null | null | src/config.py | shuu-tatsu/qagan | 15c76655cfecba4f6073940728d930b58a305eec | [
"MIT"
] | 1 | 2019-04-02T06:13:33.000Z | 2019-04-02T06:13:33.000Z | src/config.py | shuu-tatsu/qagan | 15c76655cfecba4f6073940728d930b58a305eec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import torch
parser = argparse.ArgumentParser()
# GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# File
target_dir = '../data/'
train_file = target_dir + '/msmarco/train_v2.1.json'
dev_file = target_dir + '/msmarco/dev_v2.1.json'
eval_file = target_dir + '/msmarco/eval_v2.1_public.json'
quora_train_file = target_dir + '/quora/train.tsv'
quora_dev_file = target_dir + '/quora/dev.tsv'
train_data_pickled_file = '../data/pickled/train_data_pickled.pkl'
dev_data_pickled_file = '../data/pickled/dev_data_pickled.pkl'
vocab_pickled_file = '../data/pickled/vocab_pickled.pkl'
glove_pre_trained_pickled_file = '../data/pickled/glove_pre_trained_pickled.pkl'
# Data
parser.add_argument("--max_length", type=int, default=50, help="max_length")
parser.add_argument("--sos_token", type=int, default=0, help="sos_token")
parser.add_argument("--eos_token", type=int, default=1, help="eos_token")
parser.add_argument("--learning_rate", type=float, default=0.0001, help="learning_rate")
# Dimention
#parser.add_argument("--embedding_dim", type=int, default=50, help="embedding_dim")
parser.add_argument("--embedding_dim", type=int, default=100, help="embedding_dim")
#parser.add_argument("--embedding_dim", type=int, default=200, help="embedding_dim")
#parser.add_argument("--embedding_dim", type=int, default=300, help="embedding_dim")
#glove_file = '../data/embedding/glove.6B.50d.txt'
glove_file = '../data/embedding/glove.6B.100d.txt'
#glove_file = '../data/embedding/glove.6B.200d.txt'
#glove_file = '../data/embedding/glove.6B.300d.txt'
args = parser.parse_args()
| 37.681818 | 88 | 0.749095 |
import argparse
import torch
parser = argparse.ArgumentParser()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
target_dir = '../data/'
train_file = target_dir + '/msmarco/train_v2.1.json'
dev_file = target_dir + '/msmarco/dev_v2.1.json'
eval_file = target_dir + '/msmarco/eval_v2.1_public.json'
quora_train_file = target_dir + '/quora/train.tsv'
quora_dev_file = target_dir + '/quora/dev.tsv'
train_data_pickled_file = '../data/pickled/train_data_pickled.pkl'
dev_data_pickled_file = '../data/pickled/dev_data_pickled.pkl'
vocab_pickled_file = '../data/pickled/vocab_pickled.pkl'
glove_pre_trained_pickled_file = '../data/pickled/glove_pre_trained_pickled.pkl'
parser.add_argument("--max_length", type=int, default=50, help="max_length")
parser.add_argument("--sos_token", type=int, default=0, help="sos_token")
parser.add_argument("--eos_token", type=int, default=1, help="eos_token")
parser.add_argument("--learning_rate", type=float, default=0.0001, help="learning_rate")
parser.add_argument("--embedding_dim", type=int, default=100, help="embedding_dim")
glove_file = '../data/embedding/glove.6B.100d.txt'
args = parser.parse_args()
| true | true |
1c35325f170029556775761a4723544e43364755 | 18,617 | py | Python | analyzer/libs/pygments/pygments/lexers/html.py | oslab-swrc/juxta | 481cd6f01e87790041a07379805968bcf57d75f4 | [
"MIT"
] | 23 | 2016-01-06T07:01:46.000Z | 2022-02-12T15:53:20.000Z | analyzer/libs/pygments/pygments/lexers/html.py | oslab-swrc/juxta | 481cd6f01e87790041a07379805968bcf57d75f4 | [
"MIT"
] | 1 | 2019-04-02T00:42:29.000Z | 2019-04-02T00:42:29.000Z | analyzer/libs/pygments/pygments/lexers/html.py | oslab-swrc/juxta | 481cd6f01e87790041a07379805968bcf57d75f4 | [
"MIT"
] | 16 | 2016-01-06T07:01:46.000Z | 2021-11-29T11:43:16.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.html
~~~~~~~~~~~~~~~~~~~~
Lexers for HTML, XML and related markup.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'JadeLexer']
class HtmlLexer(RegexLexer):
"""
For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
by the appropriate lexer.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
# note: this allows tag names not used in HTML like <x:with-dash>,
# this is to support yet-unknown template engines and the like
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*=)(\s*)', bygroups(Name.Attribute, Text), 'attr'),
(r'[\w:-]+', Name.Attribute),
(r'/?\s*>', Name.Tag, '#pop'),
],
'script-content': [
(r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
],
'style-content': [
(r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL | re.UNICODE
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
"""
A lexer for XSLT.
.. versionadded:: 0.10
"""
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = set((
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
))
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
.. versionadded:: 1.3
"""
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accomodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
(r'\[' + _dot + '*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
.. versionadded:: 1.4
"""
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class JadeLexer(ExtendedRegexLexer):
"""
For Jade markup.
Jade is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
.. versionadded:: 1.4
"""
name = 'Jade'
aliases = ['jade']
filenames = ['*.jade']
mimetypes = ['text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
| 31.554237 | 83 | 0.41258 |
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'JadeLexer']
class HtmlLexer(RegexLexer):
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*=)(\s*)', bygroups(Name.Attribute, Text), 'attr'),
(r'[\w:-]+', Name.Attribute),
(r'/?\s*>', Name.Tag, '#pop'),
],
'script-content': [
(r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
],
'style-content': [
(r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
class DtdLexer(RegexLexer):
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)',
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
flags = re.MULTILINE | re.DOTALL | re.UNICODE
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = set((
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
))
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accomodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
(r'\[' + _dot + '*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class JadeLexer(ExtendedRegexLexer):
    """
    Lexer for Jade templates.

    NOTE(review): this token table mirrors the Scaml table above almost
    line-for-line -- embedded code is lexed with ScalaLexer and one comment
    state is still named 'scaml-comment-block'.  Jade normally embeds
    JavaScript, so confirm ScalaLexer is the intended embedded lexer here.
    """
    name = 'Jade'
    aliases = ['jade']
    filenames = ['*.jade']
    mimetypes = ['text/x-jade']
    flags = re.IGNORECASE
    # Building block for the composed regexes below; without re.DOTALL it
    # matches any character except a newline.
    _dot = r'.'
    tokens = {
        # Entry state: consume blank lines, then hand indentation handling
        # to the _indentation callback (defined elsewhere in this module).
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        # `.class` and `#id` shorthands both open a tag.
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],
        # Decide whether the rest of a line is evaluated code or plain text.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            default('plain'),
        ],
        # Body of an indented block: doctypes, comments, code lines, filters,
        # tags, and piped plain text.
        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            (r'[\w:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],
        # Inside a tag: attribute dicts/lists, parenthesised HTML attributes,
        # self-closing slash, and whitespace-control markers.
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        # Plain text, with #{...} interpolation handed to the embedded lexer.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        # Body of a :filter block; interpolation is still recognised.
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
| true | true |
1c3532f6e51f3e4e021302188e96be49eb686017 | 4,642 | py | Python | sdk/python/pulumi_aws/dynamodb/global_table.py | Dominik-K/pulumi-aws | efb5e2a48a86baba58e373ade5863c0f45389c29 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/dynamodb/global_table.py | Dominik-K/pulumi-aws | efb5e2a48a86baba58e373ade5863c0f45389c29 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/dynamodb/global_table.py | Dominik-K/pulumi-aws | efb5e2a48a86baba58e373ade5863c0f45389c29 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GlobalTable(pulumi.CustomResource):
    """A DynamoDB Global Table resource (generated by the Pulumi Terraform
    bridge per the file header -- do not hand-edit the logic)."""
    arn: pulumi.Output[str]
    """
    The ARN of the DynamoDB Global Table
    """
    name: pulumi.Output[str]
    """
    The name of the global table. Must match underlying DynamoDB Table names in all regions.
    """
    replicas: pulumi.Output[list]
    """
    Underlying DynamoDB Table. At least 1 replica must be defined. See below.
      * `regionName` (`str`) - AWS region name of replica DynamoDB Table. e.g. `us-east-1`
    """
    def __init__(__self__, resource_name, opts=None, name=None, replicas=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a resource to manage a DynamoDB Global Table. These are layered on top of existing DynamoDB Tables.
        > Note: There are many restrictions before you can properly create DynamoDB Global Tables in multiple regions. See the [AWS DynamoDB Global Table Requirements](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables_reqs_bestpractices.html) for more information.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: The name of the global table. Must match underlying DynamoDB Table names in all regions.
        :param pulumi.Input[list] replicas: Underlying DynamoDB Table. At least 1 replica must be defined. See below.
        The **replicas** object supports the following:
          * `regionName` (`pulumi.Input[str]`) - AWS region name of replica DynamoDB Table. e.g. `us-east-1`
        """
        # Legacy aliases: __name__/__opts__ predate the current API surface;
        # warn and remap them onto resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # opts.id set means we are looking up an existing resource; __props__
        # is only meaningful in that path (see the get() classmethod below).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['name'] = name
            # 'replicas' is the only required input.
            if replicas is None:
                raise TypeError("Missing required property 'replicas'")
            __props__['replicas'] = replicas
            # Output-only property; populated by the provider after creation.
            __props__['arn'] = None
        super(GlobalTable, __self__).__init__(
            'aws:dynamodb/globalTable:GlobalTable',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, arn=None, name=None, replicas=None):
        """
        Get an existing GlobalTable resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The ARN of the DynamoDB Global Table
        :param pulumi.Input[str] name: The name of the global table. Must match underlying DynamoDB Table names in all regions.
        :param pulumi.Input[list] replicas: Underlying DynamoDB Table. At least 1 replica must be defined. See below.
        The **replicas** object supports the following:
          * `regionName` (`pulumi.Input[str]`) - AWS region name of replica DynamoDB Table. e.g. `us-east-1`
        """
        # ('id' shadows the builtin, but it is part of the generated API.)
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["arn"] = arn
        __props__["name"] = name
        __props__["replicas"] = replicas
        return GlobalTable(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.067961 | 291 | 0.673847 |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GlobalTable(pulumi.CustomResource):
    """A DynamoDB Global Table resource layered over existing DynamoDB
    tables (Pulumi Terraform-bridge generated code)."""
    # arn: output-only ARN of the global table.
    arn: pulumi.Output[str]
    # name: must match the underlying table names in every region.
    name: pulumi.Output[str]
    # replicas: list of {'regionName': ...} dicts; at least one is required.
    replicas: pulumi.Output[list]
    def __init__(__self__, resource_name, opts=None, name=None, replicas=None, __props__=None, __name__=None, __opts__=None):
        """Create or look up a GlobalTable.

        :param resource_name: unique Pulumi name for this resource.
        :param opts: pulumi.ResourceOptions for the resource.
        :param name: global table name (must match tables in all regions).
        :param replicas: required list of replica descriptors.
        :param __props__: only valid together with opts.id (existing lookup).
        :param __name__/__opts__: deprecated aliases for the first two args.
        """
        # Remap deprecated aliases onto the current parameters.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id means we are creating a new resource: build the input
        # property bag and validate required inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['name'] = name
            if replicas is None:
                raise TypeError("Missing required property 'replicas'")
            __props__['replicas'] = replicas
            # Output-only; the provider fills this in after creation.
            __props__['arn'] = None
        super(GlobalTable, __self__).__init__(
            'aws:dynamodb/globalTable:GlobalTable',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, arn=None, name=None, replicas=None):
        """Rehydrate an existing GlobalTable from its provider ID plus any
        known output values ('id' shadows the builtin; generated API)."""
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["arn"] = arn
        __props__["name"] = name
        __props__["replicas"] = replicas
        return GlobalTable(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        """Map provider camelCase output names to Python snake_case."""
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Map Python snake_case input names to provider camelCase."""
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
1c3533381d19c145ed61b41772342c7e6675c738 | 5,199 | py | Python | tensorflow_datasets/image_classification/oxford_iiit_pet.py | daniel-trejobanos/tf-ds-321 | e3f5b1771a176dc552c3a99f51f3a5ffbe105852 | [
"Apache-2.0"
] | 2 | 2020-10-12T07:09:38.000Z | 2021-03-05T12:48:23.000Z | tensorflow_datasets/image_classification/oxford_iiit_pet.py | javierespinozat/datasets | 1465d97b2e8b2a030f5df7872e8390b90dba8926 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/image_classification/oxford_iiit_pet.py | javierespinozat/datasets | 1465d97b2e8b2a030f5df7872e8390b90dba8926 | [
"Apache-2.0"
] | 1 | 2021-06-30T17:45:23.000Z | 2021-06-30T17:45:23.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Oxford-IIIT pet dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The Oxford-IIIT pet dataset is a 37 category pet image dataset with roughly 200
images for each class. The images have large variations in scale, pose and
lighting. All images have an associated ground truth annotation of breed.
"""
_CITATION = """\
@InProceedings{parkhi12a,
author = "Parkhi, O. M. and Vedaldi, A. and Zisserman, A. and Jawahar, C.~V.",
title = "Cats and Dogs",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
year = "2012",
}
"""
_BASE_URL = "http://www.robots.ox.ac.uk/~vgg/data/pets/data"
_LABEL_CLASSES = [
"Abyssinian", "american_bulldog", "american_pit_bull_terrier",
"basset_hound", "beagle", "Bengal", "Birman", "Bombay", "boxer",
"British_Shorthair", "chihuahua", "Egyptian_Mau", "english_cocker_spaniel",
"english_setter", "german_shorthaired", "great_pyrenees", "havanese",
"japanese_chin", "keeshond", "leonberger", "Maine_Coon",
"miniature_pinscher", "newfoundland", "Persian", "pomeranian", "pug",
"Ragdoll", "Russian_Blue", "saint_bernard", "samoyed", "scottish_terrier",
"shiba_inu", "Siamese", "Sphynx", "staffordshire_bull_terrier",
"wheaten_terrier", "yorkshire_terrier"
]
_SPECIES_CLASSES = ["Cat", "Dog"]
class OxfordIIITPet(tfds.core.GeneratorBasedBuilder):
  """Oxford-IIIT pet dataset."""
  VERSION = tfds.core.Version("3.2.0")
  def _info(self):
    # Dataset schema: image + breed label + cat/dog species + file name +
    # single-channel trimap segmentation mask.
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            "image": tfds.features.Image(),
            "label": tfds.features.ClassLabel(names=_LABEL_CLASSES),
            "species": tfds.features.ClassLabel(names=_SPECIES_CLASSES),
            "file_name": tfds.features.Text(),
            "segmentation_mask": tfds.features.Image(shape=(None, None, 1))
        }),
        supervised_keys=("image", "label"),
        homepage="http://www.robots.ox.ac.uk/~vgg/data/pets/",
        citation=_CITATION,
    )
  def _split_generators(self, dl_manager):
    """Returns splits."""
    # Download images and annotations that come in separate archives.
    # Note, that the extension of archives is .tar.gz even though the actual
    # archives format is uncompressed tar.
    dl_paths = dl_manager.download_and_extract({
        "images": tfds.download.Resource(
            url=_BASE_URL + "/images.tar.gz",
            extract_method=tfds.download.ExtractMethod.TAR),
        "annotations": tfds.download.Resource(
            url=_BASE_URL + "/annotations.tar.gz",
            extract_method=tfds.download.ExtractMethod.TAR)
    })
    images_path_dir = os.path.join(dl_paths["images"], "images")
    annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
    # Setup train and test splits
    train_split = tfds.core.SplitGenerator(
        name="train",
        gen_kwargs={
            "images_dir_path": images_path_dir,
            "annotations_dir_path": annotations_path_dir,
            "images_list_file": os.path.join(annotations_path_dir,
                                             "trainval.txt"),
        },
    )
    test_split = tfds.core.SplitGenerator(
        name="test",
        gen_kwargs={
            "images_dir_path": images_path_dir,
            "annotations_dir_path": annotations_path_dir,
            "images_list_file": os.path.join(annotations_path_dir,
                                             "test.txt")
        },
    )
    return [train_split, test_split]
  def _generate_examples(self, images_dir_path, annotations_dir_path,
                         images_list_file):
    """Yields (key, example) pairs for every image listed in the split file."""
    with tf.io.gfile.GFile(images_list_file, "r") as images_list:
      for line in images_list:
        # Each listing line is "<stem> <label> <species> <extra>"; the code
        # subtracts 1, so label/species in the file appear to be 1-based.
        image_name, label, species, _ = line.strip().split(" ")
        trimaps_dir_path = os.path.join(annotations_dir_path, "trimaps")
        trimap_name = image_name + ".png"
        image_name += ".jpg"
        label = int(label) - 1
        species = int(species) - 1
        record = {
            "image": os.path.join(images_dir_path, image_name),
            "label": int(label),
            "species": species,
            "file_name": image_name,
            "segmentation_mask": os.path.join(trimaps_dir_path, trimap_name)
        }
        yield image_name, record
| 36.356643 | 86 | 0.655703 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The Oxford-IIIT pet dataset is a 37 category pet image dataset with roughly 200
images for each class. The images have large variations in scale, pose and
lighting. All images have an associated ground truth annotation of breed.
"""
_CITATION = """\
@InProceedings{parkhi12a,
author = "Parkhi, O. M. and Vedaldi, A. and Zisserman, A. and Jawahar, C.~V.",
title = "Cats and Dogs",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
year = "2012",
}
"""
_BASE_URL = "http://www.robots.ox.ac.uk/~vgg/data/pets/data"
_LABEL_CLASSES = [
"Abyssinian", "american_bulldog", "american_pit_bull_terrier",
"basset_hound", "beagle", "Bengal", "Birman", "Bombay", "boxer",
"British_Shorthair", "chihuahua", "Egyptian_Mau", "english_cocker_spaniel",
"english_setter", "german_shorthaired", "great_pyrenees", "havanese",
"japanese_chin", "keeshond", "leonberger", "Maine_Coon",
"miniature_pinscher", "newfoundland", "Persian", "pomeranian", "pug",
"Ragdoll", "Russian_Blue", "saint_bernard", "samoyed", "scottish_terrier",
"shiba_inu", "Siamese", "Sphynx", "staffordshire_bull_terrier",
"wheaten_terrier", "yorkshire_terrier"
]
_SPECIES_CLASSES = ["Cat", "Dog"]
class OxfordIIITPet(tfds.core.GeneratorBasedBuilder):
  """Oxford-IIIT pet dataset builder."""
  VERSION = tfds.core.Version("3.2.0")
  def _info(self):
    """Declares the dataset schema: image, breed label, species, file name
    and a single-channel segmentation mask."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            "image": tfds.features.Image(),
            "label": tfds.features.ClassLabel(names=_LABEL_CLASSES),
            "species": tfds.features.ClassLabel(names=_SPECIES_CLASSES),
            "file_name": tfds.features.Text(),
            "segmentation_mask": tfds.features.Image(shape=(None, None, 1))
        }),
        supervised_keys=("image", "label"),
        homepage="http://www.robots.ox.ac.uk/~vgg/data/pets/",
        citation=_CITATION,
    )
  def _split_generators(self, dl_manager):
    """Downloads the image/annotation archives and defines train/test splits
    from the trainval.txt and test.txt listings."""
    dl_paths = dl_manager.download_and_extract({
        "images": tfds.download.Resource(
            url=_BASE_URL + "/images.tar.gz",
            extract_method=tfds.download.ExtractMethod.TAR),
        "annotations": tfds.download.Resource(
            url=_BASE_URL + "/annotations.tar.gz",
            extract_method=tfds.download.ExtractMethod.TAR)
    })
    images_path_dir = os.path.join(dl_paths["images"], "images")
    annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
    train_split = tfds.core.SplitGenerator(
        name="train",
        gen_kwargs={
            "images_dir_path": images_path_dir,
            "annotations_dir_path": annotations_path_dir,
            "images_list_file": os.path.join(annotations_path_dir,
                                             "trainval.txt"),
        },
    )
    test_split = tfds.core.SplitGenerator(
        name="test",
        gen_kwargs={
            "images_dir_path": images_path_dir,
            "annotations_dir_path": annotations_path_dir,
            "images_list_file": os.path.join(annotations_path_dir,
                                             "test.txt")
        },
    )
    return [train_split, test_split]
  def _generate_examples(self, images_dir_path, annotations_dir_path,
                         images_list_file):
    """Yields (image_name, record) for every line in the split listing."""
    with tf.io.gfile.GFile(images_list_file, "r") as images_list:
      for line in images_list:
        image_name, label, species, _ = line.strip().split(" ")
        trimaps_dir_path = os.path.join(annotations_dir_path, "trimaps")
        trimap_name = image_name + ".png"
        image_name += ".jpg"
        # label/species are converted from 1-based listing values to
        # 0-based class indices here.
        label = int(label) - 1
        species = int(species) - 1
        record = {
            "image": os.path.join(images_dir_path, image_name),
            "label": int(label),
            "species": species,
            "file_name": image_name,
            "segmentation_mask": os.path.join(trimaps_dir_path, trimap_name)
        }
        yield image_name, record
| true | true |
1c3533c7e05e40ac6b283f54f736a327f5fdab87 | 154 | py | Python | Leetcode/0914. X of a Kind in a Deck of Cards/0914.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0914. X of a Kind in a Deck of Cards/0914.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0914. X of a Kind in a Deck of Cards/0914.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def hasGroupsSizeX(self, deck: List[int]) -> bool:
count = Counter(deck)
return functools.reduce(math.gcd, count.values()) >= 2
| 30.8 | 58 | 0.681818 | class Solution:
  def hasGroupsSizeX(self, deck: List[int]) -> bool:
    """True iff the GCD of all card counts is >= 2, i.e. the deck splits
    into equal-size groups (size >= 2) of identical cards."""
    count = Counter(deck)
    return functools.reduce(math.gcd, count.values()) >= 2
| true | true |
1c353430554f30c2e65b41c56de5fa9c108a644d | 853 | py | Python | selfservice-api/src/selfservice_api/services/external/models/__init__.py | bcgov/BCSC-BPS | 3bfe09c100a0f5b98d61228324336d5f45ad93ad | [
"Apache-2.0"
] | 2 | 2020-07-03T18:18:34.000Z | 2021-03-08T10:25:50.000Z | selfservice-api/src/selfservice_api/services/external/models/__init__.py | bcgov/BCSC-BPS | 3bfe09c100a0f5b98d61228324336d5f45ad93ad | [
"Apache-2.0"
] | 312 | 2020-01-10T23:00:08.000Z | 2022-03-29T22:07:00.000Z | selfservice-api/src/selfservice_api/services/external/models/__init__.py | bcgov/BCSC-BPS | 3bfe09c100a0f5b98d61228324336d5f45ad93ad | [
"Apache-2.0"
] | 2 | 2020-03-26T05:10:20.000Z | 2021-02-05T19:22:56.000Z | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This exports all of the models for external service."""
from .dynamic_client_create import CreateRequestModel, CreateResponseModel
from .dynamic_client_get import GetResponseModel
from .dynamic_client_update import UpdateRequestModel, UpdateResponseModel
| 44.894737 | 74 | 0.793669 |
from .dynamic_client_create import CreateRequestModel, CreateResponseModel
from .dynamic_client_get import GetResponseModel
from .dynamic_client_update import UpdateRequestModel, UpdateResponseModel
| true | true |
1c3534e51791e7e9bbfa8e8618d8af7c902e6529 | 6,211 | py | Python | billy/tests/importers/test_utils.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 33 | 2016-11-05T07:25:48.000Z | 2022-01-31T03:40:43.000Z | billy/tests/importers/test_utils.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 16 | 2015-02-05T21:25:58.000Z | 2015-09-18T20:27:06.000Z | billy/tests/importers/test_utils.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 22 | 2015-03-23T07:13:20.000Z | 2016-06-10T04:41:06.000Z | import re
import time
import datetime
from nose.tools import with_setup, assert_raises
from billy.core import db
from billy.importers import utils
def drop_everything():
    """Reset the test database by dropping every billy collection."""
    for collection in (db.metadata, db.legislators, db.bills, db.committees):
        collection.drop()
def test_insert_with_id_duplicate_id():
    """insert_with_id must reject documents that already carry an _id."""
    obj = {'_id': 'whatever'}
    assert_raises(ValueError, utils.insert_with_id, obj)
@with_setup(drop_everything)
def test_insert_with_id_increments():
    """Consecutive inserts get distinct EXL-prefixed ids, each recorded in
    the document's _all_ids list, with creation timestamps set."""
    obj1 = {'full_name': 'a test legislator',
            '_type': 'person',
            'state': 'ex'}
    obj2 = {'full_name': 'another legislator',
            '_type': 'person',
            'state': 'ex'}
    # ids look like EXL000001: state prefix + type letter + 6 digits
    leg_id_re = re.compile(r'^EXL\d{6,6}$')
    id1 = utils.insert_with_id(obj1)
    assert leg_id_re.match(id1)
    found = db.legislators.find_one({'_id': id1})
    assert found['_all_ids'] == [id1]
    id2 = utils.insert_with_id(obj2)
    assert leg_id_re.match(id2)
    assert id2 != id1
    found = db.legislators.find_one({'_id': id2})
    assert found
    assert found['_all_ids'] == [id2]
    # also check the timestamp creation
    assert found['created_at'] == found['updated_at']
    assert isinstance(found['created_at'], datetime.datetime)
@with_setup(drop_everything)
def test_insert_with_id_types():
    """The _type field selects the id prefix (L/C/B); unknown types raise."""
    person = {'_type': 'person', 'state': 'ex'}
    legislator = {'_type': 'person', 'state': 'ex'}
    committee = {'_type': 'committee', 'state': 'ex'}
    bill = {'_type': 'bill', 'state': 'ex'}
    other = {'_type': 'other', 'state': 'ex'}
    assert utils.insert_with_id(person).startswith('EXL')
    assert utils.insert_with_id(legislator).startswith('EXL')
    assert utils.insert_with_id(committee).startswith('EXC')
    assert utils.insert_with_id(bill).startswith('EXB')
    assert_raises(ValueError, utils.insert_with_id, other)
@with_setup(drop_everything)
def test_update():
    """utils.update only bumps timestamps on real changes and never touches
    fields listed in _locked_fields."""
    obj0 = {'_type': 'bill', 'state': 'ex', 'field1': 'stuff',
            'field2': 'original', '_locked_fields': ['field2']}
    id1 = utils.insert_with_id(obj0)
    obj1 = db.bills.find_one(id1)
    # Updating a bill with itself shouldn't cause 'updated_at' to be changed
    utils.update(obj1, obj1, db.bills)
    obj2 = db.bills.find_one({'_id': id1})
    assert obj2['created_at'] == obj2['updated_at'] == obj1['updated_at']
    initial_timestamp = obj2['created_at'] # we need this later
    # update with a few fields changed
    changes = {'field1': 'more stuff', 'field2': 'a change'}
    time.sleep(0.005) # sleep long enough to avoid created_at == updated_at
    utils.update(obj1, changes, db.bills)
    obj2 = db.bills.find_one({'_id': id1})
    # check that timestamps have updated
    assert obj2['created_at'] < obj2['updated_at']
    assert initial_timestamp < obj2['updated_at']
    # make sure field1 gets overwritten and field 2 doesn't
    assert obj2['field1'] == 'more stuff'
    assert obj2['field2'] == 'original'
@with_setup(drop_everything)
def test_update_sneaky_filter():
    """A per-field comparator ('sneaky filter') decides whether a field
    counts as changed -- here list order is ignored via set comparison."""
    obj = {'_type': 'bill', 'state': 'ex', 'normal_field': 1,
           'set_field': [1, 2, 3]}
    def _set_changed(old, new):
        return set(old) != set(new)
    sneaky_filter = {'set_field': _set_changed}
    id = utils.insert_with_id(obj)
    obj = db.bills.find_one(id)
    # the set will be the same, shouldn't update
    utils.update(obj, {'set_field': [3, 2, 1]}, db.bills, sneaky_filter)
    assert obj['set_field'] == [1, 2, 3]
    assert obj['updated_at'] == obj['created_at']
    # the set now differs, should update
    utils.update(obj, {'set_field': [4, 3, 2, 1]}, db.bills, sneaky_filter)
    assert obj['set_field'] == [4, 3, 2, 1]
    assert obj['updated_at'] > obj['created_at']
def test_convert_timestamps():
    """convert_timestamps should replace every epoch value with a datetime,
    both in the returned document and in place."""
    moment = datetime.datetime.now().replace(microsecond=0)
    epoch = time.mktime(moment.utctimetuple())
    def _doc(value):
        # Same document shape for input (epoch) and expectation (datetime).
        return {'date': value,
                'actions': [{'when': value}, {'date': value}],
                'sources': [{'when': value}, {'date': value}],
                'votes': [{'when': value}, {'date': value}]}
    obj = _doc(epoch)
    expect = _doc(moment)
    assert utils.convert_timestamps(obj) == expect
    # conversion also happens in place
    assert obj == expect
def test_split_name():
    """split_name derives first/last names for people only and never
    clobbers names that are already present."""
    person = {'_type': 'person', 'full_name': 'Michael Stephens'}
    expected = dict(person, first_name='Michael', last_name='Stephens',
                    suffixes='')
    assert utils.split_name(person) == expected
    # Existing first/last names are left untouched.
    already_split = {'_type': 'person', 'full_name': 'Michael Stephens',
                     'first_name': 'Another', 'last_name': 'Name',
                     'suffixes': ''}
    assert utils.split_name(already_split) == already_split
    # Non-person documents pass through unchanged.
    not_person = {'_type': 'not_a_person', 'full_name': 'A Name'}
    assert utils.split_name(not_person) == not_person
def test_make_plus_fields():
    """Fields outside the bill schema get a '+' prefix, including fields
    nested inside action entries."""
    bill = {'_type': 'bill', 'bill_id': 'AB 123',
            'title': 'An Awesome Bill',
            'extra_field': 'this is not normal',
            'actions': [{'actor': 'Tom Cruise',
                         'action': 'hero',
                         'date': 'now',
                         'superfluous': 42}]}
    plussed = utils.make_plus_fields(bill)
    assert plussed == {
        '_type': 'bill', 'bill_id': 'AB 123', 'title': 'An Awesome Bill',
        '+extra_field': 'this is not normal',
        'actions': [{'actor': 'Tom Cruise', 'action': 'hero',
                     'date': 'now', '+superfluous': 42}],
    }
def test_next_big_id():
    """next_big_id issues zero-padded sequential ids per collection, and the
    counter resets when the backing collection is dropped."""
    db.test_ids.drop()
    db.vote_ids.drop()
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000001'
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000002'
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000003'
    assert utils.next_big_id('xy', 'V', 'vote_ids') == 'XYV00000001'
    # dropping one collection must not affect the other's counter
    db.test_ids.drop()
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000001'
    assert utils.next_big_id('xy', 'V', 'vote_ids') == 'XYV00000002'
| 33.037234 | 79 | 0.603445 | import re
import time
import datetime
from nose.tools import with_setup, assert_raises
from billy.core import db
from billy.importers import utils
def drop_everything():
    """Drop every billy collection so each test starts from an empty DB."""
    db.metadata.drop()
    db.legislators.drop()
    db.bills.drop()
    db.committees.drop()
def test_insert_with_id_duplicate_id():
    """insert_with_id must reject documents that already carry an _id."""
    obj = {'_id': 'whatever'}
    assert_raises(ValueError, utils.insert_with_id, obj)
@with_setup(drop_everything)
def test_insert_with_id_increments():
    """Consecutive inserts get distinct EXL-prefixed ids, each recorded in
    _all_ids, with created_at == updated_at on creation."""
    obj1 = {'full_name': 'a test legislator',
            '_type': 'person',
            'state': 'ex'}
    obj2 = {'full_name': 'another legislator',
            '_type': 'person',
            'state': 'ex'}
    leg_id_re = re.compile(r'^EXL\d{6,6}$')
    id1 = utils.insert_with_id(obj1)
    assert leg_id_re.match(id1)
    found = db.legislators.find_one({'_id': id1})
    assert found['_all_ids'] == [id1]
    id2 = utils.insert_with_id(obj2)
    assert leg_id_re.match(id2)
    assert id2 != id1
    found = db.legislators.find_one({'_id': id2})
    assert found
    assert found['_all_ids'] == [id2]
    assert found['created_at'] == found['updated_at']
    assert isinstance(found['created_at'], datetime.datetime)
@with_setup(drop_everything)
def test_insert_with_id_types():
    """The _type field selects the id prefix (L/C/B); unknown types raise."""
    person = {'_type': 'person', 'state': 'ex'}
    legislator = {'_type': 'person', 'state': 'ex'}
    committee = {'_type': 'committee', 'state': 'ex'}
    bill = {'_type': 'bill', 'state': 'ex'}
    other = {'_type': 'other', 'state': 'ex'}
    assert utils.insert_with_id(person).startswith('EXL')
    assert utils.insert_with_id(legislator).startswith('EXL')
    assert utils.insert_with_id(committee).startswith('EXC')
    assert utils.insert_with_id(bill).startswith('EXB')
    assert_raises(ValueError, utils.insert_with_id, other)
@with_setup(drop_everything)
def test_update():
    """utils.update only bumps timestamps on real changes and never touches
    fields listed in _locked_fields."""
    obj0 = {'_type': 'bill', 'state': 'ex', 'field1': 'stuff',
            'field2': 'original', '_locked_fields': ['field2']}
    id1 = utils.insert_with_id(obj0)
    obj1 = db.bills.find_one(id1)
    # a no-op update must leave updated_at untouched
    utils.update(obj1, obj1, db.bills)
    obj2 = db.bills.find_one({'_id': id1})
    assert obj2['created_at'] == obj2['updated_at'] == obj1['updated_at']
    initial_timestamp = obj2['created_at'] # we need this later
    # update with a few fields changed
    changes = {'field1': 'more stuff', 'field2': 'a change'}
    time.sleep(0.005) # sleep long enough to avoid created_at == updated_at
    utils.update(obj1, changes, db.bills)
    obj2 = db.bills.find_one({'_id': id1})
    # check that timestamps have updated
    assert obj2['created_at'] < obj2['updated_at']
    assert initial_timestamp < obj2['updated_at']
    # make sure field1 gets overwritten and field 2 doesn't
    assert obj2['field1'] == 'more stuff'
    assert obj2['field2'] == 'original'
@with_setup(drop_everything)
def test_update_sneaky_filter():
    """A per-field comparator decides whether a field counts as changed --
    here list order is ignored via set comparison."""
    obj = {'_type': 'bill', 'state': 'ex', 'normal_field': 1,
           'set_field': [1, 2, 3]}
    def _set_changed(old, new):
        return set(old) != set(new)
    sneaky_filter = {'set_field': _set_changed}
    id = utils.insert_with_id(obj)
    obj = db.bills.find_one(id)
    # same set, different order: must not count as an update
    utils.update(obj, {'set_field': [3, 2, 1]}, db.bills, sneaky_filter)
    assert obj['set_field'] == [1, 2, 3]
    assert obj['updated_at'] == obj['created_at']
    # the set now differs, should update
    utils.update(obj, {'set_field': [4, 3, 2, 1]}, db.bills, sneaky_filter)
    assert obj['set_field'] == [4, 3, 2, 1]
    assert obj['updated_at'] > obj['created_at']
def test_convert_timestamps():
    """convert_timestamps replaces every epoch value with a datetime, both
    in the return value and in the input document itself."""
    dt = datetime.datetime.now().replace(microsecond=0)
    ts = time.mktime(dt.utctimetuple())
    obj = {'date': ts,
           'actions': [{'when': ts}, {'date': ts}],
           'sources': [{'when': ts}, {'date': ts}],
           'votes': [{'when': ts}, {'date': ts}],
           }
    expect = {'date': dt,
              'actions': [{'when': dt}, {'date': dt}],
              'sources': [{'when': dt}, {'date': dt}],
              'votes': [{'when': dt}, {'date': dt}],
              }
    assert utils.convert_timestamps(obj) == expect
    # also modifies obj in place
    assert obj == expect
def test_split_name():
    """split_name derives first/last names for people only and never
    clobbers names that are already present."""
    obj = {'_type': 'person', 'full_name': 'Michael Stephens'}
    expect = {'_type': 'person', 'full_name': 'Michael Stephens',
              'first_name': 'Michael', 'last_name': 'Stephens',
              'suffixes': ''}
    assert utils.split_name(obj) == expect
    # Don't overwrite existing first/last name
    obj = {'_type': 'person', 'full_name': 'Michael Stephens',
           'first_name': 'Another', 'last_name': 'Name',
           'suffixes': ''}
    assert utils.split_name(obj) == obj
    # non-person documents pass through unchanged
    obj = {'_type': 'not_a_person', 'full_name': 'A Name'}
    assert utils.split_name(obj) == obj
def test_make_plus_fields():
    """Fields outside the bill schema get a '+' prefix, including fields
    nested inside action entries."""
    bill = {'_type': 'bill', 'bill_id': 'AB 123',
            'title': 'An Awesome Bill',
            'extra_field': 'this is not normal',
            'actions': [{'actor': 'Tom Cruise',
                         'action': 'hero',
                         'date': 'now',
                         'superfluous': 42}]}
    expect = {'_type': 'bill', 'bill_id': 'AB 123', 'title': 'An Awesome Bill',
              '+extra_field': 'this is not normal',
              'actions': [{'actor': 'Tom Cruise', 'action': 'hero',
                           'date': 'now', '+superfluous': 42}]}
    plussed = utils.make_plus_fields(bill)
    assert plussed == expect
def test_next_big_id():
    """next_big_id issues zero-padded sequential ids per collection, and the
    counter resets when the backing collection is dropped."""
    db.test_ids.drop()
    db.vote_ids.drop()
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000001'
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000002'
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000003'
    assert utils.next_big_id('xy', 'V', 'vote_ids') == 'XYV00000001'
    # dropping one collection must not affect the other's counter
    db.test_ids.drop()
    assert utils.next_big_id('xy', 'D', 'test_ids') == 'XYD00000001'
    assert utils.next_big_id('xy', 'V', 'vote_ids') == 'XYV00000002'
| true | true |
1c353523dd72bb26442371df58b0ebb088eb80cd | 2,756 | py | Python | core/function.py | mc-nya/FedNest | 35405f4f9943488331eaada87bc9caf109ee6124 | [
"MIT"
] | null | null | null | core/function.py | mc-nya/FedNest | 35405f4f9943488331eaada87bc9caf109ee6124 | [
"MIT"
] | null | null | null | core/function.py | mc-nya/FedNest | 35405f4f9943488331eaada87bc9caf109ee6124 | [
"MIT"
] | 2 | 2022-02-23T10:46:28.000Z | 2022-02-24T16:19:50.000Z | from numpy import dtype
import torch.nn.functional as F
import torch
from torch.autograd import grad
def gather_flat_grad(loss_grad):
    """Flatten a sequence of gradient tensors into a single 1-D vector.

    ``None`` entries (parameters that received no gradient) are skipped.

    Args:
        loss_grad: iterable of tensors (or ``None``), e.g. the output of
            ``torch.autograd.grad``.

    Returns:
        A 1-D tensor containing all non-``None`` gradients concatenated in
        iteration order.
    """
    # `p is not None` is the idiomatic form (was `not p is None`).
    return torch.cat([p.contiguous().view(-1) for p in loss_grad if p is not None])
def neumann_hyperstep_preconditioner(d_val_loss_d_theta, d_train_loss_d_w, elementary_lr, num_neumann_terms, model):
    """Approximate the vector-inverse-Hessian product via a truncated
    Neumann series.

    Starting from the (detached) validation gradient, repeatedly applies
    ``v <- v - lr * H v`` using Hessian-vector products obtained from
    ``torch.autograd.grad``, and accumulates the partial sums.
    """
    preconditioner = d_val_loss_d_theta.detach()
    counter = preconditioner
    # Fixed-point iteration: each step multiplies by (I - lr * H).
    for _ in range(num_neumann_terms):
        hessian_vector_product = gather_flat_grad(
            grad(d_train_loss_d_w, model.parameters(),
                 grad_outputs=counter.view(-1), retain_graph=True))
        counter = counter - elementary_lr * hessian_vector_product
        preconditioner = preconditioner + counter
    return elementary_lr * preconditioner
def loss_adjust_cross_entropy(logits, targets, params, group_size=1):
    """Cross entropy on adjusted logits, for long-tail classification.

    Each logit is rescaled by ``sigmoid(dy)`` and shifted by ``ly``;
    ``dy``/``ly`` come from ``params``. When ``group_size > 1`` the
    per-group offsets are expanded to per-class via ``repeat_interleave``.

    Args:
        logits: (batch, num_classes) raw scores.
        targets: (batch,) class indices.
        params: dict with keys ``'dy'``, ``'ly'`` and optionally ``'wy'``
            (per-class weights for the cross entropy).
        group_size: number of classes sharing one ``dy``/``ly`` entry.

    Returns:
        Scalar cross-entropy loss tensor.
    """
    dy = params['dy']
    ly = params['ly']
    if group_size != 1:
        # Expand per-group parameters so one entry covers `group_size` classes.
        dy = dy.repeat_interleave(group_size)
        ly = ly.repeat_interleave(group_size)
    # torch.sigmoid replaces the deprecated F.sigmoid alias.
    adjusted = logits * torch.sigmoid(dy) + ly
    if len(params) == 3:
        # Bug fix: `params` is a string-keyed dict, so the optional class
        # weights live under 'wy' — the original `params[2]` raised KeyError.
        return F.cross_entropy(adjusted, targets, weight=params['wy'])
    return F.cross_entropy(adjusted, targets)
def get_trainable_hyper_params(params):
    """Return the hyper-parameters that require gradients.

    A dict is filtered down to a list of its trainable values (in insertion
    order); any other container is returned unchanged.
    """
    if not isinstance(params, dict):
        return params
    return [value for value in params.values() if value.requires_grad]
def gather_flat_hyper_params(params):
    """Concatenate all trainable hyper-parameters into one flat 1-D tensor.

    Accepts either a dict (values are used, in insertion order) or a plain
    iterable of tensors; entries with ``requires_grad == False`` are skipped.
    """
    if isinstance(params, dict):
        trainable = (v for v in params.values() if v.requires_grad)
    else:
        trainable = (p for p in params if p.requires_grad)
    return torch.cat([t.view(-1) for t in trainable])
def assign_hyper_gradient(params, gradient):
    """Scatter a flat gradient vector back into each parameter's ``.grad``.

    Consecutive slices of ``gradient`` (sized by each parameter's element
    count) are cloned, reshaped to the parameter's shape, and assigned to
    ``para.grad``. Parameters with ``requires_grad == False`` neither
    receive a gradient nor advance the slice offset (matching the original
    behavior).

    Args:
        params: dict of tensors (values used, in insertion order) or an
            iterable of tensors.
        gradient: 1-D tensor holding the concatenated gradients.
    """
    # The dict and list branches were duplicates; normalize to one iterable.
    if isinstance(params, dict):
        params = params.values()
    offset = 0
    max_len = gradient.shape[0]
    for para in params:
        if para.requires_grad:
            num = para.nelement()
            # Clone so the parameter's .grad does not alias `gradient`.
            # (The original also called torch.reshape and discarded the
            # result — dead code, removed.)
            grad = gradient[offset:min(offset + num, max_len)].clone()
            para.grad = grad.view(para.shape)
            offset += num
import torch.nn.functional as F
import torch
from torch.autograd import grad
def gather_flat_grad(loss_grad):
return torch.cat([p.contiguous().view(-1) for p in loss_grad if not p is None])
def neumann_hyperstep_preconditioner(d_val_loss_d_theta, d_train_loss_d_w, elementary_lr, num_neumann_terms, model):
preconditioner = d_val_loss_d_theta.detach()
counter = preconditioner
i = 0
while i < num_neumann_terms:
old_counter = counter
hessian_term = gather_flat_grad(
grad(d_train_loss_d_w, model.parameters(), grad_outputs=counter.view(-1), retain_graph=True))
counter = old_counter - elementary_lr * hessian_term
preconditioner = preconditioner + counter
i += 1
return elementary_lr * preconditioner
def loss_adjust_cross_entropy(logits, targets, params, group_size=1):
dy = params['dy']
ly = params['ly']
if group_size != 1:
new_dy = dy.repeat_interleave(group_size)
new_ly = ly.repeat_interleave(group_size)
x = logits*F.sigmoid(new_dy)+new_ly
else:
x = logits*F.sigmoid(dy)+ly
if len(params) == 3:
wy = params[2]
loss = F.cross_entropy(x, targets, weight=wy)
else:
loss = F.cross_entropy(x, targets)
return loss
def get_trainable_hyper_params(params):
if isinstance(params,dict):
return[params[k] for k in params if params[k].requires_grad]
else:
return params
def gather_flat_hyper_params(params):
if isinstance(params,dict):
return torch.cat([params[k].view(-1) for k in params if params[k].requires_grad])
else:
return torch.cat([k.view(-1) for k in params if k.requires_grad])
def assign_hyper_gradient(params, gradient):
i = 0
max_len=gradient.shape[0]
if isinstance(params, dict):
for k in params:
para=params[k]
if para.requires_grad:
num = para.nelement()
grad = gradient[i:min(i+num,max_len)].clone()
torch.reshape(grad, para.shape)
para.grad = grad.view(para.shape)
i += num
else:
for para in params:
if para.requires_grad:
num = para.nelement()
grad = gradient[i:min(i+num,max_len)].clone()
para.grad = grad.view(para.shape)
i += num | true | true |
1c35359516695468349b188bdbe2a5db70c4134c | 670 | py | Python | ProductService/manage.py | surajkendhey/Kart | 458bee955d1569372fc8b3facb2602063a6ec6f5 | [
"Apache-2.0"
] | null | null | null | ProductService/manage.py | surajkendhey/Kart | 458bee955d1569372fc8b3facb2602063a6ec6f5 | [
"Apache-2.0"
] | null | null | null | ProductService/manage.py | surajkendhey/Kart | 458bee955d1569372fc8b3facb2602063a6ec6f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProductService.settings')
    try:
        # Imported lazily so a missing Django install produces the friendly
        # message below instead of a bare ImportError at module import time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chained with `from exc` so the original traceback is preserved.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProductService.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c35364d7dc975462b487db0da5126d34f19d939 | 4,028 | py | Python | lib/surface/dataplex/lakes/create.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/dataplex/lakes/create.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/dataplex/lakes/create.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud dataplex lake create` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataplex import lake
from googlecloudsdk.api_lib.dataplex import util as dataplex_util
from googlecloudsdk.api_lib.util import exceptions as gcloud_exception
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataplex import resource_args
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Create(base.Command):
  """Creating a lake."""

  detailed_help = {
      'EXAMPLES':
          """\
          To create a Dataplex Lake, run:

            $ {command} projects/{project_id}/locations/{location}/lakes/{lake_id}
          """,
  }

  @staticmethod
  def Args(parser):
    """Register the command's flags and the lake resource argument."""
    resource_args.AddLakeResourceArg(parser, 'to create a Lake to.')
    parser.add_argument(
        '--validate-only',
        action='store_true',
        default=False,
        help='Validate the create action, but don\'t actually perform it.')
    # Optional group holding the Dataproc Metastore integration settings.
    metastore = parser.add_group(
        help='Settings to manage metadata publishing to a Hive Metastore from a lake.'
    )
    metastore.add_argument(
        '--metastore-service',
        help=""" A relative reference to the Dataproc Metastore
        (https://cloud.google.com/dataproc-metastore/docs) service instance into
        which metadata will be published. This is of the form:
        projects/{project_number}/locations/{location_id}/services/{service_id}
        where the location matches the location of the lake.""")
    parser.add_argument('--description', help='Description of the Lake')
    parser.add_argument('--display-name', help='Display Name')
    base.ASYNC_FLAG.AddToParser(parser)
    labels_util.AddCreateLabelsFlags(parser)

  @gcloud_exception.CatchHTTPErrorRaiseHTTPException(
      'Status code: {status_code}. {status_message}.')
  def Run(self, args):
    """Issue the lakes.Create request; wait for the operation unless --async."""
    lake_ref = args.CONCEPTS.lake.Parse()
    dataplex_client = dataplex_util.GetClientInstance()
    message = dataplex_util.GetMessageModule()
    # Build and send the create request (a long-running operation).
    create_req_op = dataplex_client.projects_locations_lakes.Create(
        message.DataplexProjectsLocationsLakesCreateRequest(
            lakeId=lake_ref.Name(),
            parent=lake_ref.Parent().RelativeName(),
            validateOnly=args.validate_only,
            googleCloudDataplexV1Lake=message.GoogleCloudDataplexV1Lake(
                description=args.description,
                displayName=args.display_name,
                labels=dataplex_util.CreateLabels(
                    message.GoogleCloudDataplexV1Lake, args),
                metastore=message.GoogleCloudDataplexV1LakeMetastore(
                    service=args.metastore_service))))
    # NOTE(review): args.validate_only was already read above; this getattr
    # re-read is redundant but harmless.
    validate_only = getattr(args, 'validate_only', False)
    if validate_only:
      log.status.Print('Validation complete.')
      return
    async_ = getattr(args, 'async_', False)
    if not async_:
      # Synchronous path: block until the operation finishes, then report.
      lake.WaitForOperation(create_req_op)
      log.CreatedResource(
          lake_ref.Name(),
          details='Lake created in project [{0}] with location [{1}]'.format(
              lake_ref.projectsId, lake_ref.locationsId))
      return
    # Asynchronous path: just surface the operation name.
    log.status.Print('Creating [{0}] with operation [{1}].'.format(
        lake_ref, create_req_op.name))
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataplex import lake
from googlecloudsdk.api_lib.dataplex import util as dataplex_util
from googlecloudsdk.api_lib.util import exceptions as gcloud_exception
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataplex import resource_args
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Create(base.Command):
detailed_help = {
'EXAMPLES':
"""\
To create a Dataplex Lake, run:
$ {command} projects/{project_id}/locations/{location}/lakes/{lake_id}
""",
}
@staticmethod
def Args(parser):
resource_args.AddLakeResourceArg(parser, 'to create a Lake to.')
parser.add_argument(
'--validate-only',
action='store_true',
default=False,
help='Validate the create action, but don\'t actually perform it.')
metastore = parser.add_group(
help='Settings to manage metadata publishing to a Hive Metastore from a lake.'
)
metastore.add_argument(
'--metastore-service',
help=""" A relative reference to the Dataproc Metastore
(https://cloud.google.com/dataproc-metastore/docs) service instance into
which metadata will be published. This is of the form:
projects/{project_number}/locations/{location_id}/services/{service_id}
where the location matches the location of the lake.""")
parser.add_argument('--description', help='Description of the Lake')
parser.add_argument('--display-name', help='Display Name')
base.ASYNC_FLAG.AddToParser(parser)
labels_util.AddCreateLabelsFlags(parser)
@gcloud_exception.CatchHTTPErrorRaiseHTTPException(
'Status code: {status_code}. {status_message}.')
def Run(self, args):
lake_ref = args.CONCEPTS.lake.Parse()
dataplex_client = dataplex_util.GetClientInstance()
message = dataplex_util.GetMessageModule()
create_req_op = dataplex_client.projects_locations_lakes.Create(
message.DataplexProjectsLocationsLakesCreateRequest(
lakeId=lake_ref.Name(),
parent=lake_ref.Parent().RelativeName(),
validateOnly=args.validate_only,
googleCloudDataplexV1Lake=message.GoogleCloudDataplexV1Lake(
description=args.description,
displayName=args.display_name,
labels=dataplex_util.CreateLabels(
message.GoogleCloudDataplexV1Lake, args),
metastore=message.GoogleCloudDataplexV1LakeMetastore(
service=args.metastore_service))))
validate_only = getattr(args, 'validate_only', False)
if validate_only:
log.status.Print('Validation complete.')
return
async_ = getattr(args, 'async_', False)
if not async_:
lake.WaitForOperation(create_req_op)
log.CreatedResource(
lake_ref.Name(),
details='Lake created in project [{0}] with location [{1}]'.format(
lake_ref.projectsId, lake_ref.locationsId))
return
log.status.Print('Creating [{0}] with operation [{1}].'.format(
lake_ref, create_req_op.name))
| true | true |
1c353653b40c7bfbb9044c05746afb39df3ff25f | 4,222 | py | Python | my_dlib/tsdlib.py | kiddkyd1412/find_av_by_face | e6071b9edbfb6a6ae1c833b13988b6262cc9aa55 | [
"Apache-2.0"
] | 4 | 2019-06-03T03:03:40.000Z | 2022-03-29T11:36:31.000Z | my_dlib/tsdlib.py | kiddkyd1412/find_av_by_face | e6071b9edbfb6a6ae1c833b13988b6262cc9aa55 | [
"Apache-2.0"
] | null | null | null | my_dlib/tsdlib.py | kiddkyd1412/find_av_by_face | e6071b9edbfb6a6ae1c833b13988b6262cc9aa55 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import base64
import json
import sys
import time
import warnings
from concurrent.futures import ThreadPoolExecutor, wait, as_completed
from operator import itemgetter
import dlib
import cv2
import os
import glob
import numpy as np
from iface import IFace
class FaceDlib(IFace):
    """Face comparison backend built on dlib's ResNet face descriptors.

    Descriptors are cached on disk under ``my_dlib/cache_data`` (keyed by
    image name + user name) and target images are compared against the
    source image on a thread pool. Matches (Euclidean distance below
    ``result_min_value``) are appended to ``result_list``.
    """

    def __init__(self):
        super().__init__()
        self.current_path = os.getcwd()  # project root
        self.predictor_path = self.current_path + "/my_dlib/model/shape_predictor_68_face_landmarks.dat"
        self.face_rec_model_path = self.current_path + "/my_dlib/model/dlib_face_recognition_resnet_model_v1.dat"
        self.dataPath = self.current_path + "/my_dlib/cache_data/"
        # Load the detector / landmark / recognition models once.
        self.detector = dlib.get_frontal_face_detector()
        self.shape_predictor = dlib.shape_predictor(self.predictor_path)
        self.face_rec_model = dlib.face_recognition_model_v1(self.face_rec_model_path)
        self.executor = ThreadPoolExecutor(max_workers=8)
        # Distances below this threshold count as "same person" (< 0.6 is
        # dlib's usual guideline; 0.5 is stricter).
        self.result_min_value = 0.5

    def init(self, source_img_info, target_img_list, result_list):
        """Prepare the cache directory and precompute the source descriptor."""
        os.makedirs(os.path.join(self.current_path, 'my_dlib/cache_data/'), exist_ok=True)
        self.result_list = result_list
        self.source_img_info = source_img_info
        self.target_img_list = target_img_list
        self.source_img_data = self.__get_tezheng(source_img_info)
        self.error_list = []
        self.thread_list = []
        return self

    def working(self):
        """Compare every target image against the source and log results."""
        try:
            print('开始处理数据,总共:' + str(len(self.target_img_list)) + '条')
            self.__start_thread(self.target_img_list)
            self.__show_thread_log()
            if len(self.result_list) > 0:
                # Sort by distance (ascending): best matches first.
                self.result_list.sort(key=itemgetter(2))
            print('---------任务结束------------')
        except Exception as ex:
            info = sys.exc_info()
            msg = '{}:{}'.format(info[0], info[1])
            warnings.warn(msg)
        finally:
            self.executor.shutdown(False)
            self.save_log(self.source_img_info['imgurl'].split('/')[-1].split('.')[0], self.result_list, "dlib")
            self.save_error_log(self.error_list)

    def __chk_photo_for(self, target_info):
        """Compute the target descriptor and record it if it matches the source."""
        target_vec = self.__get_tezheng(target_info)
        if target_vec.size == 0:
            # Bug fix: face extraction failed for this image. Without this
            # guard the distance loop ran zero iterations, yielding distance
            # 0 and a guaranteed false-positive match.
            return
        result = self.__compare_data(self.source_img_data, target_vec)
        if result < self.result_min_value:
            self.result_list.append((target_info['imgurl'], target_info['username'], result))

    def __start_thread(self, work_list):
        """Submit one comparison task per target image to the pool."""
        self.thread_list.clear()
        for img_info in work_list:
            self.thread_list.append(self.executor.submit(self.__chk_photo_for, img_info))

    def __show_thread_log(self):
        """Print progress as the submitted comparison futures complete."""
        for i, future in enumerate(as_completed(self.thread_list)):
            print('完成:' + str(i + 1))
        print('---------线程结束------------')

    def __get_tezheng(self, img_info):
        """Return the 128-d face descriptor for an image, using the disk cache.

        Returns an empty array (and records the error) when exactly one face
        cannot be detected.
        """
        # Reuse a cached descriptor if one exists for this image/user pair.
        filePath = self.dataPath + img_info['imgurl'].split('/')[-1].split('.')[0] + '_' + img_info["username"] + '.npy'
        if os.path.isfile(filePath):
            vectors = np.load(filePath)
            if vectors.size > 0:
                return vectors
        # Decode the base64 payload and compute the descriptor from scratch.
        img_data = base64.b64decode(img_info['buf'])
        # np.frombuffer replaces the deprecated np.fromstring (same bytes view).
        img_array = np.frombuffer(img_data, np.uint8)
        # NOTE(review): imdecode's second argument should be an IMREAD_* flag;
        # cv2.COLOR_BGR2RGB (== 4) happens to decode, but confirm the intent.
        img = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB)
        dets = self.detector(img, 1)  # face detection, 1x upsampling
        # `!= 1` replaces `is not 1` (identity comparison with an int literal).
        if len(dets) != 1:
            warnings.warn("图片检测的人脸数为: {}".format(len(dets)))
            self.error_list.append((img_info['username'], img_info['imgurl']))
            return np.array([])
        face = dets[0]
        shape = self.shape_predictor(img, face)
        vectors = np.array([])
        for i, num in enumerate(self.face_rec_model.compute_face_descriptor(img, shape)):
            vectors = np.append(vectors, num)
        np.save(filePath, vectors)
        return vectors

    def __compare_data(self, data1, data2):
        """Euclidean distance between two descriptors (lower = more similar)."""
        if len(data1) == 0 or len(data2) == 0:
            # Missing descriptor: report "infinitely far" so it never matches.
            return float('inf')
        diff = 0
        for i in range(len(data1)):
            diff += (data1[i] - data2[i]) ** 2
        diff = np.sqrt(diff)
        return diff
| 35.183333 | 120 | 0.623638 |
import base64
import json
import sys
import time
import warnings
from concurrent.futures import ThreadPoolExecutor, wait, as_completed
from operator import itemgetter
import dlib
import cv2
import os
import glob
import numpy as np
from iface import IFace
class FaceDlib(IFace):
def __init__(self):
super().__init__()
self.current_path = os.getcwd()
self.predictor_path = self.current_path + "/my_dlib/model/shape_predictor_68_face_landmarks.dat"
self.face_rec_model_path = self.current_path + "/my_dlib/model/dlib_face_recognition_resnet_model_v1.dat"
self.dataPath = self.current_path + "/my_dlib/cache_data/"
self.detector = dlib.get_frontal_face_detector()
self.shape_predictor = dlib.shape_predictor(self.predictor_path)
self.face_rec_model = dlib.face_recognition_model_v1(self.face_rec_model_path)
self.executor = ThreadPoolExecutor(max_workers=8)
self.result_min_value = 0.5
def init(self, source_img_info, target_img_list, result_list):
os.makedirs(os.path.join(self.current_path, 'my_dlib/cache_data/'), exist_ok=True)
self.result_list = result_list
self.source_img_info = source_img_info
self.target_img_list = target_img_list
self.source_img_data = self.__get_tezheng(source_img_info)
self.error_list = []
self.thread_list = []
return self
def working(self):
try:
print('开始处理数据,总共:' + str(len(self.target_img_list)) + '条')
self.__start_thread(self.target_img_list)
self.__show_thread_log()
if len(self.result_list) > 0:
self.result_list.sort(key=itemgetter(2))
print('---------任务结束------------')
except Exception as ex:
info = sys.exc_info()
msg = '{}:{}'.format(info[0], info[1])
warnings.warn(msg)
finally:
self.executor.shutdown(False)
self.save_log(self.source_img_info['imgurl'].split('/')[-1].split('.')[0], self.result_list, "dlib")
self.save_error_log(self.error_list)
def __chk_photo_for(self, target_info):
result = self.__compare_data(self.source_img_data, self.__get_tezheng(target_info))
if result < self.result_min_value:
self.result_list.append((target_info['imgurl'], target_info['username'], result))
def __start_thread(self, work_list):
self.thread_list.clear()
for img_info in work_list:
self.thread_list.append(self.executor.submit(self.__chk_photo_for, img_info))
def __show_thread_log(self):
for i, future in enumerate(as_completed(self.thread_list)):
print('完成:' + str(i + 1))
print('---------线程结束------------')
def __get_tezheng(self, img_info):
filePath = self.dataPath + img_info['imgurl'].split('/')[-1].split('.')[0] + '_' + img_info["username"] + '.npy'
if os.path.isfile(filePath):
vectors = np.load(filePath)
if vectors.size > 0:
return vectors
img_data = base64.b64decode(img_info['buf'])
img_array = np.fromstring(img_data, np.uint8)
img = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB)
dets = self.detector(img, 1)
if len(dets) is not 1:
warnings.warn("图片检测的人脸数为: {}".format(len(dets)))
self.error_list.append((img_info['username'], img_info['imgurl']))
return np.array([])
face = dets[0]
shape = self.shape_predictor(img, face)
vectors = np.array([])
for i, num in enumerate(self.face_rec_model.compute_face_descriptor(img, shape)):
vectors = np.append(vectors, num)
np.save(filePath, vectors)
return vectors
def __compare_data(self, data1, data2):
diff = 0
for i in range(len(data1)):
diff += (data1[i] - data2[i]) ** 2
diff = np.sqrt(diff)
return diff
| true | true |
1c3536c1300674f74f83f7fe74d13104432a24e3 | 287 | py | Python | third_party_logistics/third_party_logistics/doctype/third_party_logistics_settings/third_party_logistics_settings.py | hafeesk/third_party_logistics | 6b97c5ad1bbb8386ca93e480bcb55ed3bc784ac4 | [
"MIT"
] | 1 | 2021-09-10T03:47:53.000Z | 2021-09-10T03:47:53.000Z | third_party_logistics/third_party_logistics/doctype/third_party_logistics_settings/third_party_logistics_settings.py | hafeesk/third_party_logistics | 6b97c5ad1bbb8386ca93e480bcb55ed3bc784ac4 | [
"MIT"
] | null | null | null | third_party_logistics/third_party_logistics/doctype/third_party_logistics_settings/third_party_logistics_settings.py | hafeesk/third_party_logistics | 6b97c5ad1bbb8386ca93e480bcb55ed3bc784ac4 | [
"MIT"
] | 1 | 2022-02-05T10:16:53.000Z | 2022-02-05T10:16:53.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ThirdPartyLogisticsSettings(Document):
	"""Frappe DocType controller for Third Party Logistics Settings.

	No custom server-side behavior; all fields are handled by the framework.
	"""
	pass
| 26.090909 | 60 | 0.794425 |
from __future__ import unicode_literals
from frappe.model.document import Document
class ThirdPartyLogisticsSettings(Document):
pass
| true | true |
1c3536cd3198ab9694baf695fe2ae02d8f0eb8d2 | 5,751 | py | Python | Lista2.py | EnzoItaliano/calculoNumericoEmPython | be3161b823955620be71e0f94a3421288fd28ef0 | [
"MIT"
] | 1 | 2019-12-28T21:23:00.000Z | 2019-12-28T21:23:00.000Z | Lista2.py | EnzoItaliano/calculoNumericoEmPython | be3161b823955620be71e0f94a3421288fd28ef0 | [
"MIT"
] | null | null | null | Lista2.py | EnzoItaliano/calculoNumericoEmPython | be3161b823955620be71e0f94a3421288fd28ef0 | [
"MIT"
] | null | null | null | import math
import matplotlib.pyplot as plt
from prettytable import PrettyTable
from sympy import *
import numpy as np
x = symbols('x')
#Raízes de Equações
##Método da Bissecção
def plot2d(f, inicio, fim):
    """Plot the sympy expression ``f`` over [inicio, fim) with step 0.1.

    Uses the module-level symbol ``x`` for substitution. Note np.arange
    excludes ``fim`` itself.
    """
    z = np.arange(inicio, fim, 0.1)
    # Evaluate f at each sample point (comprehension replaces append loop).
    y = [f.subs(x, v) for v in z]
    fig, ax = plt.subplots()
    ax.set(title='Gráfico função f(x)=' + str(f))
    ax.plot(z, y)
    ax.grid()
    plt.show()
def bisseccao(f, e, a, b):
    """Bisection method for f(x) = 0 on [a, b] with tolerance e.

    ``f`` is a sympy expression in the module-level symbol ``x``. Prints an
    iteration table (PrettyTable) and the final approximation; returns None.
    """
    fa = f.subs(x,a)
    fb = f.subs(x,b)
    # The method requires a sign change on [a, b].
    if fa * fb >= 0:
        print("Não atende ao critério f(a) * f(b) < 0")
        return
    k = 0
    # Parallel per-iteration histories: interval ends, f values, midpoints.
    ak = []
    bk = []
    xk = []
    fak = []
    fbk = []
    # NOTE(review): xk is re-initialized here (duplicate of the line above).
    xk = []
    fxk = []
    xk_x = []
    ak.append(a)
    bk.append(b)
    # Number of iterations from the a-priori error bound (b-a)/2^(k+1) < e.
    kf = math.log((b-a)/e,2)-1
    times = math.ceil(kf) + 1
    for k in range(times):
        if k == 0:
            # First iteration: record the initial interval and its midpoint.
            y = ak[len(ak)-1]
            fak.append(round(f.subs(x,y),9))
            y = bk[len(bk)-1]
            fbk.append(round(f.subs(x,y),9))
            xk.append((ak[len(ak)-1] + bk[len(bk)-1])/2)
            y = xk[len(xk)-1]
            fxk.append(round(f.subs(x,y),9))
            xk_x.append('-')
        else:
            # Keep the half-interval where the sign change survives:
            # same sign at a and midpoint -> root is in [mid, b].
            if (fak[len(fak)-1] < 0 and fxk[len(fxk)-1] < 0) or (fak[len(fak)-1] > 0 and fxk[len(fxk)-1] > 0):
                ak.append(xk[len(xk)-1])
                bk.append(bk[len(bk)-1])
            else:
                ak.append(ak[len(ak)-1])
                bk.append(xk[len(xk)-1])
            y = ak[len(ak)-1]
            fak.append(round(f.subs(x,y),9))
            y = bk[len(bk)-1]
            fbk.append(round(f.subs(x,y),9))
            xk.append((ak[len(ak)-1] + bk[len(bk)-1])/2)
            y = xk[len(xk)-1]
            fxk.append(round(f.subs(x,y),9))
            # |x(k) - x(k-1)|, negated manually instead of abs().
            temp = xk[len(xk)-1] - xk[len(xk)-2]
            if temp < 0:
                temp = temp * -1
            xk_x.append(temp)
    Table = PrettyTable(["k", "a", "b", "f(a)", "f(b)", "x", "f(x)", "|x(k) - x(k-1)|"])
    for k in range(times):
        Table.add_row([k, ak[k], bk[k], fak[k], fbk[k], xk[k], fxk[k], xk_x[k]])
    print(Table)
    print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
# def f(x): return pow(x,2)-3
# plot2d(f(x), 0, 2)
# bisseccao(f(x), 0.01, 1, 2)
## Método do Ponto Fixo
def pontoFixo(f, e, xi):
    """Fixed-point iteration x_{k+1} = f(x_k) starting at xi, stopping when
    |x(k) - x(k-1)| < e. Prints an iteration table and the final value.

    ``f`` is a sympy expression in the module-level symbol ``x``.
    """
    iterates = [xi]
    deltas = ["-"]  # no delta for the initial guess
    while True:
        iterates.append(f.subs(x, iterates[-1]))
        deltas.append(abs(iterates[-1] - iterates[-2]))
        if deltas[-1] < e:
            break
    Table = PrettyTable(["k", "xk", "|x(k) - x(k-1)|"])
    for k, (value, delta) in enumerate(zip(iterates, deltas)):
        Table.add_row([k, value, delta])
    print(Table)
    print("Donde \u03B5 é aproximadamente " + str(iterates[-1]))
# def f(x): return cos(x)
# pontoFixo(f(x),10**(-2), math.pi/4)
## Método de Newton
def newton(f, e, a, b):
    """Newton's method for f(x) = 0 starting from b, tolerance e.

    ``f`` is a sympy expression in the module-level symbol ``x``.
    NOTE(review): parameter ``a`` is never used in the body.
    """
    xk = []
    xk.append(b)
    xk_x = []
    xk_x.append(0)
    end_condition = 0
    # Fourier-style convergence precondition: iterate only if
    # f(x0) * f''(x0) > 0 at the starting point.
    if f.subs(x,xk[len(xk)-1]) * diff(diff(f,x),x).subs(x,xk[len(xk)-1]) > 0:
        while not end_condition:
            func = f.subs(x,xk[len(xk)-1])
            derivate = diff(f,x).subs(x,xk[len(xk)-1])
            # Newton step: x - f(x)/f'(x); N() forces numeric evaluation.
            temp = xk[len(xk)-1] - func/derivate
            xk.append(N(temp))
            temp2 = xk[len(xk)-2] - xk[len(xk)-1]
            if temp2 < 0:
                temp2 = temp2 * -1
            xk_x.append(N(temp2))
            if xk_x[len(xk_x)-1] < e:
                end_condition = 1
    # NOTE(review): if the precondition above fails, xk holds only the
    # initial guess, so an empty table is printed and b is reported as the
    # root — confirm this is intended.
    Table = PrettyTable(["k", "xk", "|x(k) - x(k-1)|"])
    for k in range(1, len(xk)):
        Table.add_row([k, xk[k], xk_x[k]])
    print(Table)
    print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
# def f(x): return x**2-2
# newton(f(x), 0.00005, 1, 2)
## Método da Secante
def secante(f, e, a, b):
    """Secant method for f(x) = 0 with initial points a and b, tolerance e.

    ``f`` is a sympy expression in the module-level symbol ``x``. Prints an
    iteration table (from k = 2 on) and the final approximation.
    """
    approx = [a, b]
    deltas = [0, 0]  # placeholders for the two seed points
    while True:
        # Secant update: x_{k+1} = x_k - f(x_k) (x_k - x_{k-1}) / (f(x_k) - f(x_{k-1}))
        numerator = f.subs(x, approx[-1]) * (approx[-1] - approx[-2])
        denominator = f.subs(x, approx[-1]) - f.subs(x, approx[-2])
        approx.append(approx[-1] - numerator / denominator)
        deltas.append(abs(approx[-1] - approx[-2]))
        if deltas[-1] < e:
            break
    Table = PrettyTable(["k", "xk", "|x(k+1) - x(k)|"])
    for k in range(2, len(approx)):
        Table.add_row([k, approx[k], deltas[k]])
    print(Table)
    print("Donde \u03B5 é aproximadamente " + str(approx[-1]))
# Demo: root of 2x^3 - 5x^2 - 10x + 20 on [1.2, 1.7] via the secant method.
# Note: f's parameter shadows the sympy symbol; calling f(x) with the
# module-level symbol builds the sympy expression passed to secante.
print("Secante\n")
def f(x): return 2*x**3-5*x**2-10*x+20
secante(f(x), 10**(-5), 1.2, 1.7)
## Método Regula Falsi
def regulaFalsi(f, e, a, b):
    """Regula falsi (false position) for f(x) = 0 on [a, b], tolerance e.

    ``f`` is a sympy expression in the module-level symbol ``x``. Prints
    f(a), f(b), an iteration table (rows numbered from k = 2) and the final
    approximation.
    """
    xk = []
    xk_x = []
    x0 = a
    x1 = b
    # Debug output of the endpoint values.
    print(f.subs(x,a))
    print(f.subs(x,b))
    end_condition = 0
    while not end_condition:
        # False-position candidate: secant-line root between x0 and x1.
        temp = x1 - f.subs(x, x1) * (x1 - x0) / (f.subs(x, x1) - f.subs(x, x0))
        temp2 = temp - x1
        if temp2 < 0:
            temp2 = temp2 * -1
        if temp2 < e:
            # Converged: record the final candidate and stop.
            xk.append(temp)
            xk_x.append(temp2)
            end_condition = 1
            continue
        # NOTE(review): endpoints are advanced only when the sign changes
        # between the candidate and x1 — confirm the original nesting of
        # the two assignments below.
        k = f.subs(x, temp)
        if k*f.subs(x, x1) < 0:
            x0 = x1
            x1 = temp
        xk.append(temp)
        xk_x.append(temp2)
    Table = PrettyTable(["k", "xk", "|x(k) - x(k-1)|"])
    for k in range(len(xk)):
        Table.add_row([k+2, xk[k], xk_x[k]])
    print(Table)
    print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
# Demo: same polynomial as the secant demo, now via regula falsi
# (tolerance 1e-4 on [1.2, 1.7]).
print("\nRegula Falsi\n")
def f(x): return 2*x**3-5*x**2-10*x+20
regulaFalsi(f(x), 0.0001, 1.2, 1.7)
| 25.56 | 110 | 0.475569 | import math
import matplotlib.pyplot as plt
from prettytable import PrettyTable
from sympy import *
import numpy as np
x = symbols('x')
o, fim):
z = np.arange(inicio,fim,0.1)
y = []
for i in range(len(z)):
y.append(f.subs(x,z[i]))
fig, ax = plt.subplots()
ax.set(title='Gráfico função f(x)='+str(f))
ax.plot(z,y)
ax.grid()
plt.show()
def bisseccao(f, e, a, b):
fa = f.subs(x,a)
fb = f.subs(x,b)
if fa * fb >= 0:
print("Não atende ao critério f(a) * f(b) < 0")
return
k = 0
ak = []
bk = []
xk = []
fak = []
fbk = []
xk = []
fxk = []
xk_x = []
ak.append(a)
bk.append(b)
kf = math.log((b-a)/e,2)-1
times = math.ceil(kf) + 1
for k in range(times):
if k == 0:
y = ak[len(ak)-1]
fak.append(round(f.subs(x,y),9))
y = bk[len(bk)-1]
fbk.append(round(f.subs(x,y),9))
xk.append((ak[len(ak)-1] + bk[len(bk)-1])/2)
y = xk[len(xk)-1]
fxk.append(round(f.subs(x,y),9))
xk_x.append('-')
else:
if (fak[len(fak)-1] < 0 and fxk[len(fxk)-1] < 0) or (fak[len(fak)-1] > 0 and fxk[len(fxk)-1] > 0):
ak.append(xk[len(xk)-1])
bk.append(bk[len(bk)-1])
else:
ak.append(ak[len(ak)-1])
bk.append(xk[len(xk)-1])
y = ak[len(ak)-1]
fak.append(round(f.subs(x,y),9))
y = bk[len(bk)-1]
fbk.append(round(f.subs(x,y),9))
xk.append((ak[len(ak)-1] + bk[len(bk)-1])/2)
y = xk[len(xk)-1]
fxk.append(round(f.subs(x,y),9))
temp = xk[len(xk)-1] - xk[len(xk)-2]
if temp < 0:
temp = temp * -1
xk_x.append(temp)
Table = PrettyTable(["k", "a", "b", "f(a)", "f(b)", "x", "f(x)", "|x(k) - x(k-1)|"])
for k in range(times):
Table.add_row([k, ak[k], bk[k], fak[k], fbk[k], xk[k], fxk[k], xk_x[k]])
print(Table)
print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
:
xk = []
xk.append(xi)
xk_x = []
xk_x.append("-")
end_condition = 0
while not end_condition:
xk.append(f.subs(x,xk[len(xk)-1]))
xk_x.append(abs(xk[len(xk)-1]-xk[len(xk)-2]))
if xk_x[len(xk_x)-1] < e:
end_condition = 1
Table = PrettyTable(["k", "xk", "|x(k) - x(k-1)|"])
for k in range(0, len(xk)):
Table.add_row([k, xk[k], xk_x[k]])
print(Table)
print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
a, b):
xk = []
xk.append(b)
xk_x = []
xk_x.append(0)
end_condition = 0
if f.subs(x,xk[len(xk)-1]) * diff(diff(f,x),x).subs(x,xk[len(xk)-1]) > 0:
while not end_condition:
func = f.subs(x,xk[len(xk)-1])
derivate = diff(f,x).subs(x,xk[len(xk)-1])
temp = xk[len(xk)-1] - func/derivate
xk.append(N(temp))
temp2 = xk[len(xk)-2] - xk[len(xk)-1]
if temp2 < 0:
temp2 = temp2 * -1
xk_x.append(N(temp2))
if xk_x[len(xk_x)-1] < e:
end_condition = 1
Table = PrettyTable(["k", "xk", "|x(k) - x(k-1)|"])
for k in range(1, len(xk)):
Table.add_row([k, xk[k], xk_x[k]])
print(Table)
print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
a, b):
xk = []
xk.append(a)
xk.append(b)
xk_x = []
xk_x.append(0)
xk_x.append(0)
end_condition = 0
while not end_condition:
temp = f.subs(x, xk[len(xk)-1]) * (xk[len(xk)-1] - xk[len(xk)-2])
temp2 = f.subs(x, xk[len(xk)-1]) - f.subs(x,xk[len(xk)-2])
temp3 = xk[len(xk)-1] - (temp/temp2)
xk.append(temp3)
temp4 = xk[len(xk)-1] - xk[len(xk)-2]
if temp4 < 0:
temp4 = temp4 * -1
xk_x.append(temp4)
if xk_x[len(xk_x)-1] < e:
end_condition = 1
Table = PrettyTable(["k", "xk", "|x(k+1) - x(k)|"])
for k in range(2, len(xk)):
Table.add_row([k, xk[k], xk_x[k]])
print(Table)
print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
print("Secante\n")
def f(x): return 2*x**3-5*x**2-10*x+20
secante(f(x), 10**(-5), 1.2, 1.7)
, a, b):
xk = []
xk_x = []
x0 = a
x1 = b
print(f.subs(x,a))
print(f.subs(x,b))
end_condition = 0
while not end_condition:
temp = x1 - f.subs(x, x1) * (x1 - x0) / (f.subs(x, x1) - f.subs(x, x0))
temp2 = temp - x1
if temp2 < 0:
temp2 = temp2 * -1
if temp2 < e:
xk.append(temp)
xk_x.append(temp2)
end_condition = 1
continue
k = f.subs(x, temp)
if k*f.subs(x, x1) < 0:
x0 = x1
x1 = temp
xk.append(temp)
xk_x.append(temp2)
Table = PrettyTable(["k", "xk", "|x(k) - x(k-1)|"])
for k in range(len(xk)):
Table.add_row([k+2, xk[k], xk_x[k]])
print(Table)
print("Donde \u03B5 é aproximadamente " + str(xk[len(xk)-1]))
print("\nRegula Falsi\n")
def f(x): return 2*x**3-5*x**2-10*x+20
regulaFalsi(f(x), 0.0001, 1.2, 1.7)
| true | true |
1c35398eb40634581f7229ddd5711a7b1ff8f982 | 442 | py | Python | mainapp/migrations/0046_rescuecamp_total_people.py | reyasmohammed/rescuekerala | 68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c | [
"MIT"
] | 1 | 2021-12-09T17:59:01.000Z | 2021-12-09T17:59:01.000Z | mainapp/migrations/0046_rescuecamp_total_people.py | reyasmohammed/rescuekerala | 68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c | [
"MIT"
] | 1 | 2018-08-28T13:26:26.000Z | 2018-08-28T13:26:26.000Z | mainapp/migrations/0046_rescuecamp_total_people.py | reyasmohammed/rescuekerala | 68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c | [
"MIT"
] | 5 | 2019-11-07T11:34:56.000Z | 2019-11-07T11:36:00.000Z | # Generated by Django 2.1 on 2018-08-18 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0045_auto_20180818_1827'),
]
operations = [
migrations.AddField(
model_name='rescuecamp',
name='total_people',
field=models.IntegerField(blank=True, null=True, verbose_name='Total Number of People'),
),
]
| 23.263158 | 100 | 0.626697 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0045_auto_20180818_1827'),
]
operations = [
migrations.AddField(
model_name='rescuecamp',
name='total_people',
field=models.IntegerField(blank=True, null=True, verbose_name='Total Number of People'),
),
]
| true | true |
1c353a370d3284471c6a5674ae1c783f7f93b99a | 506 | py | Python | pylearn2/compat.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 2,045 | 2015-01-01T14:07:52.000Z | 2022-03-08T08:56:41.000Z | pylearn2/compat.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 305 | 2015-01-02T13:18:24.000Z | 2021-08-20T18:03:28.000Z | pylearn2/compat.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 976 | 2015-01-01T17:08:51.000Z | 2022-03-25T19:53:17.000Z | """
Compatibility layer
"""
from theano.compat import six
__all__ = ('OrderedDict', )
if six.PY3:
from collections import OrderedDict
else:
from theano.compat import OrderedDict
def first_key(obj):
    """Return the first key of a dict-like object.

    Parameters
    ----------
    obj: dict-like object
    """
    key_iterator = six.iterkeys(obj)
    return six.next(key_iterator)
def first_value(obj):
    """Return the first value of a dict-like object.

    Parameters
    ----------
    obj: dict-like object
    """
    value_iterator = six.itervalues(obj)
    return six.next(value_iterator)
| 14.882353 | 41 | 0.624506 | from theano.compat import six
__all__ = ('OrderedDict', )
if six.PY3:
from collections import OrderedDict
else:
from theano.compat import OrderedDict
def first_key(obj):
    """Return the first key of the dict-like object ``obj``."""
    return six.next(six.iterkeys(obj))
def first_value(obj):
    """Return the first value of the dict-like object ``obj``."""
    return six.next(six.itervalues(obj))
| true | true |
1c353af5f765e844e7a94e9172e7f1021fcced24 | 7,706 | py | Python | Tests/Data/Parabolic/T/3D_3BHEs_array/bcs_tespy.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | null | null | null | Tests/Data/Parabolic/T/3D_3BHEs_array/bcs_tespy.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | 1 | 2021-09-02T14:21:33.000Z | 2021-09-02T14:21:33.000Z | Tests/Data/Parabolic/T/3D_3BHEs_array/bcs_tespy.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | null | null | null | ###
# Copyright(c) 2012 - 2019, OpenGeoSys Community(http://www.opengeosys.org)
# Distributed under a Modified BSD License.
# See accompanying file LICENSE.txt or
# http://www.opengeosys.org/project/license
###
import sys
print(sys.version)
import os
import numpy as np
from pandas import read_csv
import OpenGeoSys
from tespy import cmp, con, nwk, hlp, cmp_char
from tespy import nwkr
# User setting +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# parameters
# refrigerant parameters
refrig_density = 992.92 # kg/m3
# switch for special boundary conditions
# 'on','off', switch of the function for dynamic thermal demand from consumer
switch_dyn_demand = 'on'
# 'on','off', switch of the function for dynamic flowrate in BHE
switch_dyn_frate = 'off'
# timecurve setting
def timerange(t):
    """Map a simulation time to a month counter and network on/off status.

    Parameters
    ----------
    t : float
        Simulation time in seconds (86400 s/day, 30-day months).

    Returns
    -------
    tuple
        ``(t_trans, t_trans_month, nw_status)`` where ``t_trans`` is the
        running month counter (1-based), ``t_trans_month`` the calendar
        month (1-12) and ``nw_status`` is ``'on'`` or ``'off'``.
    """
    # months (1-12) during which the network is switched off; -9999 = never
    timerange_nw_off_month = [-9999]
    nw_status = 'on'
    # t-1 to avoid the calculation problem at exact month boundaries,
    # e.g. t = 2592000.
    t_trans = int((t - 1) / 86400 / 30) + 1
    # Wrap the running month counter into calendar months 1..12.
    # Bug fix: the previous formula (t_trans - 12 * int(t_trans / 12))
    # returned 0 instead of 12 for every multiple of 12.
    t_trans_month = (t_trans - 1) % 12 + 1
    if t_trans_month in timerange_nw_off_month:
        nw_status = 'off'
    return t_trans, t_trans_month, nw_status
# consumer thermal load
# month demand
def consumer_demand(t):
    """Return the consumer thermal load for calendar month ``t`` (1-based).

    The load is the specific heat extraction rate (-25 W/m) times the BHE
    length (50 m) times the number of BHEs (3), identical for all months.
    """
    monthly_load = [-25 * 50 * 3] * 12
    return monthly_load[t - 1]
# dynamic hydraulic flow rate
# month demand
def dyn_frate(t):
    """Return the prescribed BHE flow rate (kg/s) for month ``t`` (1-based).

    The single -9999 entry is a placeholder; this curve is only consulted
    when the dynamic-flow-rate switch is enabled.
    """
    monthly_frate = (-9999,)
    return monthly_frate[t - 1]
# End User setting+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# create network dataframe
def create_dataframe():
    """Load the BHE network topology table from ``./pre/bhe_network.csv``.

    Returns
    -------
    pandas.DataFrame
        One row per BHE, indexed by the first CSV column, with the
        'data_index' column forced to string dtype.
    """
    network_csv = './pre/bhe_network.csv'
    return read_csv(network_csv,
                    delimiter=';',
                    index_col=[0],
                    dtype={'data_index': str})
# TESPy hydraulic calculation process
def get_hydraulics(t_trans):
    """Solve the TESPy network hydraulically and refresh the BHE dataframe.

    Parameters
    ----------
    t_trans : int
        Running month counter; forwarded to ``dyn_frate`` when the
        dynamic-flow-rate switch is enabled.

    Returns
    -------
    pandas.DataFrame
        The module-level ``df`` with updated 'flowrate' (kg/s) and
        'f_velocity' (volumetric flow, m^3/s) columns.
    """
    # apply the prescribed time-dependent flow rate, if enabled
    if switch_dyn_frate == 'on':
        cur_frate = dyn_frate(t_trans)
        localVars['inlet_name'].set_attr(m=cur_frate)
    # steady-state (design mode) solve of the imported network
    nw.solve(mode='design')
    # read back each BHE's mass flow rate (kg/s) from its inlet connection
    for i in range(n_BHE):
        for c in nw.conns.index:
            if c.t.label == data_index[i]:  # t: inlet comp, s: outlet comp
                df.loc[df.index[i], 'flowrate'] = c.get_attr('m').val_SI
    # convert mass flow (kg/s) to volumetric flow (m^3/s) via fluid density
    for i in range(n_BHE):
        df.loc[df.index[i],
               'f_velocity'] = df.loc[df.index[i], 'flowrate'] / refrig_density
    return df
# TESPy Thermal calculation process
def get_thermal(t):
    """Solve the TESPy network thermally for calendar month ``t``.

    Writes the solved inlet temperatures back into the module-level ``df``.

    Parameters
    ----------
    t : int
        Calendar month (1-12) used to look up the consumer demand.

    Returns
    -------
    list
        Inlet temperature ('Tin_val') per BHE, in dataframe order.
    """
    # re-parametrize the network with this month's consumer thermal load
    if switch_dyn_demand == 'on':
        cur_month_demand = consumer_demand(t)
        nw.busses[bus_name].set_attr(P=cur_month_demand)
    # impose the outlet temperatures received from OGS on each BHE
    for i in range(n_BHE):
        localVars['outlet_BHE' + str(i + 1)].set_attr(T=df.loc[data_index[i],
                                                               'Tout_val'])
    # steady-state (design mode) solve
    nw.solve(mode='design')
    # read back each BHE's solved inlet temperature (SI value)
    for i in range(n_BHE):
        df.loc[df.index[i],
               'Tin_val'] = localVars['inlet_BHE' +
                                      str(i + 1)].get_attr('T').val_SI
    return df['Tin_val'].tolist()
# OGS setting
# Dirichlet BCs
class BC(OpenGeoSys.BHENetwork):
    """OpenGeoSys callback object coupling the BHE array to the TESPy network.

    OGS invokes these hooks during the simulation; they exchange inlet/outlet
    temperatures and flow rates through the module-level dataframe ``df``.
    """
    def initializeDataContainer(self):
        """Return the initial ``(t, Tin, Tout, Tout_node_id, flow)`` container."""
        # convert dataframe columns to plain lists for OGS
        t = 0  # initial simulation time
        data_col_1 = df['Tin_val'].tolist()  # inlet temperatures
        data_col_2 = df['Tout_val'].tolist()  # outlet temperatures
        data_col_3 = df['Tout_node_id'].astype(int).tolist()  # outlet node ids
        get_hydraulics(0)
        data_col_4 = df['f_velocity'].tolist()  # BHE volumetric flow rates
        return (t, data_col_1, data_col_2, data_col_3, data_col_4)
    def tespyThermalSolver(self, t, Tin_val, Tout_val):
        """One thermal coupling iteration at simulation time ``t`` (seconds).

        Returns ``(True, converged, Tin)`` where ``converged`` reports whether
        the TESPy inlet temperatures changed by less than 1e-5 between calls.
        """
        # current month counter, calendar month and network status
        t_trans, t_trans_month, nw_status = timerange(t)
        # network switched off: nothing to solve, echo the outlet values back
        if nw_status == 'off':
            return (True, True, Tout_val)
        else:
            # push the OGS outlet temperatures into the dataframe
            for i in range(n_BHE):
                df.loc[df.index[i], 'Tout_val'] = Tout_val[i]
            # thermal TESPy solve for the current calendar month
            cur_cal_Tin_val = get_thermal(t_trans_month)
            # converged when the inlet temperatures stopped changing
            if_success = False
            pre_cal_Tin_val = Tin_val
            norm = np.linalg.norm(
                abs(np.asarray(pre_cal_Tin_val) - np.asarray(cur_cal_Tin_val)))
            if norm < 10e-6:
                if_success = True
            # return to OGS
            return (True, if_success, cur_cal_Tin_val)
    def tespyHydroSolver(self, t):
        """Return ``(dynamic_flow_enabled, per-BHE volumetric flow rates)`` at ``t``."""
        if_dyn_frate = False
        data_f_velocity = df['f_velocity'].tolist()
        if switch_dyn_frate == 'on':
            if_dyn_frate = True
        # current month counter and network status
        t_trans, t_trans_month, nw_status = timerange(t)
        if nw_status == 'off':
            # closed network: no circulation in any BHE
            for i in range(n_BHE):
                df.loc[df.index[i], 'f_velocity'] = 0
            data_f_velocity = df['f_velocity'].tolist()
        else:
            dataframe = get_hydraulics(t_trans)
            data_f_velocity = dataframe['f_velocity'].tolist()
        # return to OGS
        return (if_dyn_frate, data_f_velocity)
# main: one-time module setup executed when OGS loads this script
# load the pre-built TESPy network model from disk
project_dir = os.getcwd()
print("Project dir is: ", project_dir)
nw = nwkr.load_nwk('./pre/tespy_nw')
# silence TESPy's solver output
nw.set_printoptions(print_level='none')
# create the per-BHE dataframe of the network system from bhe_network.csv
df = create_dataframe()
n_BHE = np.size(df.iloc[:, 0])
# expose module globals so connections can be stored under generated names
# ('inlet_BHE1', 'outlet_BHE1', ...) used by get_thermal()/get_hydraulics()
localVars = locals()
data_index = df.index.tolist()
for i in range(n_BHE):
    for c in nw.conns.index:
        # locate each BHE's inlet and outlet connections
        if c.t.label == data_index[i]:  # inlet conns of bhe
            localVars['inlet_BHE' + str(i + 1)] = c
        if c.s.label == data_index[i]:  # outlet conns of bhe
            localVars['outlet_BHE' + str(i + 1)] = c
# time-dependent consumer thermal demand
if switch_dyn_demand == 'on':
    # import the name of the consumer bus from the network csv file
    bus_name = read_csv('./pre/tespy_nw/comps/bus.csv',
                        delimiter=';',
                        index_col=[0]).index[0]
# time-dependent flow rate
if switch_dyn_frate == 'on':
    # import the name of the inlet connection from the network csv file
    inlet_name = read_csv('./pre/tespy_nw/conn.csv',
                          delimiter=';',
                          index_col=[0]).iloc[0,0]
    for c in nw.conns.index:
        # locate the network inflow connection
        if c.s.label == inlet_name:  # inlet conns of bhe
            localVars['inlet_name'] = c
# instantiate the BC object referenced by OpenGeoSys
bc_bhe = BC()
| 34.711712 | 79 | 0.600831 |
mport sys
print(sys.version)
import os
import numpy as np
from pandas import read_csv
import OpenGeoSys
from tespy import cmp, con, nwk, hlp, cmp_char
from tespy import nwkr
refrig_density = 992.92
switch_dyn_demand = 'on'
switch_dyn_frate = 'off'
def timerange(t):
timerange_nw_off_month = [-9999]
nw_status = 'on'
t_trans = int((t - 1) / 86400 / 30) + 1
t_trans_month = t_trans
if t_trans_month > 12:
t_trans_month = t_trans - 12 * (int(t_trans / 12))
if t_trans_month in timerange_nw_off_month:
nw_status = 'off'
return t_trans, t_trans_month, nw_status
def consumer_demand(t):
month_demand = [
-25 * 50 * 3, -25 * 50 * 3, -25 * 50 * 3, -25 * 50 * 3, -25 * 50 * 3,
-25 * 50 * 3, -25 * 50 * 3, -25 * 50 * 3, -25 * 50 * 3, -25 * 50 * 3,
-25 * 50 * 3, -25 * 50 * 3
]
return month_demand[t - 1]
def dyn_frate(t):
month_frate = [-9999]
return month_frate[t - 1]
def create_dataframe():
df_nw = read_csv('./pre/bhe_network.csv',
delimiter=';',
index_col=[0],
dtype={'data_index': str})
return (df_nw)
def get_hydraulics(t_trans):
if switch_dyn_frate == 'on':
cur_frate = dyn_frate(t_trans)
localVars['inlet_name'].set_attr(m=cur_frate)
nw.solve(mode='design')
r i in range(n_BHE):
for c in nw.conns.index:
if c.t.label == data_index[i]:
df.loc[df.index[i], 'flowrate'] = c.get_attr('m').val_SI
in range(n_BHE):
df.loc[df.index[i],
'f_velocity'] = df.loc[df.index[i], 'flowrate'] / refrig_density
return df
def get_thermal(t):
if switch_dyn_demand == 'on':
cur_month_demand = consumer_demand(t)
nw.busses[bus_name].set_attr(P=cur_month_demand)
for i in range(n_BHE):
localVars['outlet_BHE' + str(i + 1)].set_attr(T=df.loc[data_index[i],
'Tout_val'])
nw.solve(mode='design')
for i in range(n_BHE):
df.loc[df.index[i],
'Tin_val'] = localVars['inlet_BHE' +
str(i + 1)].get_attr('T').val_SI
return df['Tin_val'].tolist()
class BC(OpenGeoSys.BHENetwork):
def initializeDataContainer(self):
t = 0
data_col_1 = df['Tin_val'].tolist()
data_col_2 = df['Tout_val'].tolist()
data_col_3 = df['Tout_node_id'].astype(int).tolist()
get_hydraulics(0)
data_col_4 = df['f_velocity'].tolist()
return (t, data_col_1, data_col_2, data_col_3, data_col_4)
def tespyThermalSolver(self, t, Tin_val, Tout_val):
t_trans, t_trans_month, nw_status = timerange(t)
if nw_status == 'off':
return (True, True, Tout_val)
else:
for i in range(n_BHE):
df.loc[df.index[i], 'Tout_val'] = Tout_val[i]
cur_cal_Tin_val = get_thermal(t_trans_month)
if_success = False
pre_cal_Tin_val = Tin_val
norm = np.linalg.norm(
abs(np.asarray(pre_cal_Tin_val) - np.asarray(cur_cal_Tin_val)))
if norm < 10e-6:
if_success = True
return (True, if_success, cur_cal_Tin_val)
def tespyHydroSolver(self, t):
if_dyn_frate = False
data_f_velocity = df['f_velocity'].tolist()
if switch_dyn_frate == 'on':
if_dyn_frate = True
t_trans, t_trans_month, nw_status = timerange(t)
if nw_status == 'off':
for i in range(n_BHE):
df.loc[df.index[i], 'f_velocity'] = 0
data_f_velocity = df['f_velocity'].tolist()
else:
dataframe = get_hydraulics(t_trans)
data_f_velocity = dataframe['f_velocity'].tolist()
return (if_dyn_frate, data_f_velocity)
project_dir = os.getcwd()
print("Project dir is: ", project_dir)
nw = nwkr.load_nwk('./pre/tespy_nw')
nw.set_printoptions(print_level='none')
df = create_dataframe()
n_BHE = np.size(df.iloc[:, 0])
localVars = locals()
data_index = df.index.tolist()
for i in range(n_BHE):
for c in nw.conns.index:
if c.t.label == data_index[i]:
localVars['inlet_BHE' + str(i + 1)] = c
if c.s.label == data_index[i]:
localVars['outlet_BHE' + str(i + 1)] = c
if switch_dyn_demand == 'on':
bus_name = read_csv('./pre/tespy_nw/comps/bus.csv',
delimiter=';',
index_col=[0]).index[0]
if switch_dyn_frate == 'on':
inlet_name = read_csv('./pre/tespy_nw/conn.csv',
delimiter=';',
index_col=[0]).iloc[0,0]
for c in nw.conns.index:
if c.s.label == inlet_name:
localVars['inlet_name'] = c
bc_bhe = BC()
| true | true |
1c353b4617bac86fc2666653def4885a80fa9857 | 3,115 | py | Python | django_optimizer/conf.py | robertxsuchocki/django-optimizer | 032a860285faaaab419ce06c5f015f32f85adb56 | [
"MIT"
] | null | null | null | django_optimizer/conf.py | robertxsuchocki/django-optimizer | 032a860285faaaab419ce06c5f015f32f85adb56 | [
"MIT"
] | null | null | null | django_optimizer/conf.py | robertxsuchocki/django-optimizer | 032a860285faaaab419ce06c5f015f32f85adb56 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Conf module containing app settings
"""
import os
import django
class DjangoOptimizerSettings(object):
    """
    Container for settings exclusive for an app, with possibility to replace any in project settings
    """
    def __getattribute__(self, item):
        # Project-level django settings take precedence; fall back to the
        # app defaults defined on this class only when the project does not
        # declare the attribute.
        try:
            return getattr(django.conf.settings, item)
        except AttributeError:
            return super(DjangoOptimizerSettings, self).__getattribute__(item)
DJANGO_OPTIMIZER_FIELD_REGISTRY = {
'BACKEND': 'django_optimizer.cache.PersistentFileBasedCache',
'LOCATION': os.path.join(django.conf.settings.BASE_DIR, '.django_optimizer_field_registry')
}
"""
Cache to be used in field registry (which contains tuples of fields gathered and used to optimize queries)
Defaults to PersistentFileBasedCache (FileBasedCache, but with no-ops for functions clearing any keys in cache)
Its' default path is equal to ``os.path.join(django.conf.settings.BASE_DIR, '.django_optimizer_field_registry')``
Keep in mind that cache shouldn't be eager to remove any entries contained, as they will be reappearing
and overwriting constantly. Ideally should disable any overwriting
If performance issues occur, then it should be dropped in favor of manual in-code optimization (at least partially)
"""
DJANGO_OPTIMIZER_CODE_REGISTRY = {
'BACKEND': 'django_optimizer.cache.PersistentFileBasedCache',
'LOCATION': os.path.join(django.conf.settings.BASE_DIR, '.django_optimizer_code_registry')
}
"""
Cache to be used in code registry (which contains code annotations for source code)
Defaults to PersistentFileBasedCache (FileBasedCache, but with no-ops for functions clearing any keys in cache)
Its' default path is equal to ``os.path.join(django.conf.settings.BASE_DIR, '.django_optimizer_code_registry')``
"""
DJANGO_OPTIMIZER_MODEL_REGISTRY_LOCATION = '__django_optimizer_model_registry'
"""
Name of a PersistentLocMemCache holding objects to be created after deferred_atomic block
"""
DJANGO_OPTIMIZER_LOGGING = True
"""
Whether model logging should be enabled
Might be turned off to disable this app completely or in a state where all fields have been gathered in a cache
and overhead related with enabling object logging is unwanted
"""
DJANGO_OPTIMIZER_LIVE_OPTIMIZATION = True
"""
Whether dynamic queryset optimization should be enabled
If logging is enabled and/or data is already gathered in cache it allows live manipulation on optimized
querysets and executes optimization functions before data is gathered
"""
DJANGO_OPTIMIZER_OFFSITE_OPTIMIZATION = True
"""
Whether offsite queryset optimization should be enabled, works only with 'DJANGO_OPTIMIZER_LIVE_OPTIMIZATION' on
Offsite optimization gathers data on which optimization function executions are being actually used and
then allows to use this data to add annotations within source code manually
"""
settings = DjangoOptimizerSettings()
| 39.935897 | 119 | 0.739647 |
import os
import django
class DjangoOptimizerSettings(object):
    """App settings container; project django settings override class attributes."""
    def __getattribute__(self, item):
        # project settings win; class-level defaults are the fallback
        try:
            return getattr(django.conf.settings, item)
        except AttributeError:
            return super(DjangoOptimizerSettings, self).__getattribute__(item)
DJANGO_OPTIMIZER_FIELD_REGISTRY = {
'BACKEND': 'django_optimizer.cache.PersistentFileBasedCache',
'LOCATION': os.path.join(django.conf.settings.BASE_DIR, '.django_optimizer_field_registry')
}
DJANGO_OPTIMIZER_CODE_REGISTRY = {
'BACKEND': 'django_optimizer.cache.PersistentFileBasedCache',
'LOCATION': os.path.join(django.conf.settings.BASE_DIR, '.django_optimizer_code_registry')
}
DJANGO_OPTIMIZER_MODEL_REGISTRY_LOCATION = '__django_optimizer_model_registry'
DJANGO_OPTIMIZER_LOGGING = True
DJANGO_OPTIMIZER_LIVE_OPTIMIZATION = True
DJANGO_OPTIMIZER_OFFSITE_OPTIMIZATION = True
settings = DjangoOptimizerSettings()
| true | true |
1c353b5ae4cc11c3f8e4c3d0a077e474e7b0bf43 | 2,385 | py | Python | assets/fonts/font.py | MilianoJunior/app_relatorios_EngeSEP | 9efb77a2e93f418f061c88f988a2c87971183708 | [
"MIT"
] | null | null | null | assets/fonts/font.py | MilianoJunior/app_relatorios_EngeSEP | 9efb77a2e93f418f061c88f988a2c87971183708 | [
"MIT"
] | null | null | null | assets/fonts/font.py | MilianoJunior/app_relatorios_EngeSEP | 9efb77a2e93f418f061c88f988a2c87971183708 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from controllers.excpetions.RootException import InterfaceException
def font_choice(name_font):
    """Return the Material-Design typography scale for a font family.

    Parameters
    ----------
    name_font : str or None
        Font family name. ``None`` selects the default 'Roboto' family
        (plain font names such as 'RobotoLight'); otherwise entries are
        built as file-path prefixes under the ``FONTS`` environment
        directory (e.g. ``<FONTS>/<name>/<name>-Light``).

    Returns
    -------
    dict
        Maps a style name ('H1'..'H6', 'Subtitle1', ..., 'Icon') to
        ``[font, size, uppercase_flag, letter_spacing]``.

    Raises
    ------
    InterfaceException
        Wraps any error raised while building the table (e.g. a missing
        ``FONTS`` environment variable).
    """
    try:
        const = -3  # global size offset applied to every style
        if name_font is None:
            # default family: bare font names; regular weight has no suffix
            name_font = 'Roboto'
            regular = name_font
        else:
            # explicit family: '<FONTS>/<name>/<name>-' prefix, so weights
            # become '...-Light', '...-Regular', '...-Medium'
            name_font = os.path.join(os.environ['FONTS'], name_font, name_font + '-')
            regular = name_font + 'Regular'
        light = name_font + 'Light'
        medium = name_font + 'Medium'
        # single table instead of the two near-identical copies kept before
        fonts = {'H1': [light, 96 + const, False, -1.5],
                 'H2': [light, 60 + const, False, -0.5],
                 'H3': [regular, 48 + const, False, 0],
                 'H4': [regular, 34 + const, False, 0.25],
                 'H5': [regular, 24 + const, False, 0],
                 'H6': [medium, 20 + const, False, 0.15],
                 'Subtitle1': [regular, 16 + const, False, 0.15],
                 'Subtitle2': [medium, 14 + const, False, 0.1],
                 'Body1': [regular, 16 + const, False, 0.5],
                 'Body2': [regular, 14 + const, False, 0.25],
                 'Button': [medium, 14 + const, True, 1.25],
                 'Caption': [regular, 12 + const, False, 0.4],
                 'Overline': [regular, 10 + const, True, 1.5],
                 'Icon': ['Icons', 24 + const, False, 0]}
        return fonts
    except Exception as e:
        # NOTE(review): InterfaceException(e)() calls the created instance;
        # verify that this really produces an exception to raise
        raise InterfaceException(e)()
| 54.204545 | 83 | 0.491405 |
import os
from controllers.excpetions.RootException import InterfaceException
def font_choice(name_font):
try:
const = -3
if name_font == None:
name_font = 'Roboto'
fonts = {'H1': [f'{name_font}Light', 96 + const, False, -1.5],
'H2': [f'{name_font}Light', 60 + const, False, -0.5],
'H3': [name_font, 48 + const, False, 0],
'H4': [name_font, 34 + const, False, 0.25],
'H5': [name_font, 24 + const, False, 0],
'H6': [f'{name_font}Medium', 20 + const, False, 0.15],
'Subtitle1': [name_font, 16 + const, False, 0.15],
'Subtitle2': [f'{name_font}Medium', 14 + const, False, 0.1],
'Body1': [name_font, 16 + const, False, 0.5],
'Body2': [name_font, 14 + const, False, 0.25],
'Button': [f'{name_font}Medium', 14 + const, True, 1.25],
'Caption': [name_font, 12 + const, False, 0.4],
'Overline': [name_font, 10 + const, True, 1.5],
'Icon': ['Icons', 24 + const, False, 0]}
else:
name_font = os.path.join(os.environ['FONTS'],name_font,name_font+'-')
fonts = {'H1': [f'{name_font}Light', 96 + const, False, -1.5],
'H2': [f'{name_font}Light', 60 + const, False, -0.5],
'H3': [f'{name_font}Regular', 48 + const, False, 0],
'H4': [f'{name_font}Regular', 34 + const, False, 0.25],
'H5': [f'{name_font}Regular', 24 + const, False, 0],
'H6': [f'{name_font}Medium', 20 + const, False, 0.15],
'Subtitle1': [f'{name_font}Regular', 16 + const, False, 0.15],
'Subtitle2': [f'{name_font}Medium', 14 + const, False, 0.1],
'Body1': [f'{name_font}Regular', 16 + const, False, 0.5],
'Body2': [f'{name_font}Regular', 14 + const, False, 0.25],
'Button': [f'{name_font}Medium', 14 + const, True, 1.25],
'Caption': [f'{name_font}Regular', 12 + const, False, 0.4],
'Overline': [f'{name_font}Regular', 10 + const, True, 1.5],
'Icon': ['Icons', 24 + const, False, 0]}
return fonts
except Exception as e:
raise InterfaceException(e)()
| true | true |
1c353d1e9561f6b6ea401312a6a5f248bfe40514 | 4,229 | py | Python | desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Util/Padding.py | e11it/hue-1 | 436704c40b5fa6ffd30bd972bf50ffeec738d091 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Util/Padding.py | e11it/hue-1 | 436704c40b5fa6ffd30bd972bf50ffeec738d091 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Util/Padding.py | e11it/hue-1 | 436704c40b5fa6ffd30bd972bf50ffeec738d091 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #
# Util/Padding.py : Functions to manage padding
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
__all__ = [ 'pad', 'unpad' ]
from Cryptodome.Util.py3compat import *
def pad(data_to_pad, block_size, style='pkcs7'):
    """Apply standard padding.

    Args:
      data_to_pad (byte string):
        The data that needs to be padded.
      block_size (integer):
        The block boundary to use for padding. The output length is guaranteed
        to be a multiple of :data:`block_size`.
      style (string):
        Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*.

    Return:
      byte string : the original data with the appropriate padding added at the end.
    """
    # always pad, even when the data is already block-aligned (full extra block)
    padding_len = block_size - len(data_to_pad) % block_size
    if style == 'pkcs7':
        # every padding byte carries the padding length
        return data_to_pad + bchr(padding_len) * padding_len
    if style == 'x923':
        # zero fill, length in the final byte
        return data_to_pad + bchr(0) * (padding_len - 1) + bchr(padding_len)
    if style == 'iso7816':
        # 0x80 marker followed by zero fill
        return data_to_pad + bchr(128) + bchr(0) * (padding_len - 1)
    raise ValueError("Unknown padding style")
def unpad(padded_data, block_size, style='pkcs7'):
    """Remove standard padding.

    Args:
      padded_data (byte string):
        A piece of data with padding that needs to be stripped.
      block_size (integer):
        The block boundary to use for padding. The input length
        must be a multiple of :data:`block_size`.
      style (string):
        Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*.
    Return:
        byte string : data without padding.
    Raises:
        ValueError: if the padding is incorrect.
    """
    pdata_len = len(padded_data)
    if pdata_len % block_size:
        raise ValueError("Input data is not padded")
    if style in ('pkcs7', 'x923'):
        # in both schemes the final byte encodes the padding length
        padding_len = bord(padded_data[-1])
        if padding_len<1 or padding_len>min(block_size, pdata_len):
            raise ValueError("Padding is incorrect.")
        if style == 'pkcs7':
            # PKCS#7: every padding byte must equal the padding length
            if padded_data[-padding_len:]!=bchr(padding_len)*padding_len:
                raise ValueError("PKCS#7 padding is incorrect.")
        else:
            # ANSI X.923: zero bytes followed by the length byte
            if padded_data[-padding_len:-1]!=bchr(0)*(padding_len-1):
                raise ValueError("ANSI X.923 padding is incorrect.")
    elif style == 'iso7816':
        # ISO 7816-4: padding starts at the last 0x80 marker byte
        padding_len = pdata_len - padded_data.rfind(bchr(128))
        if padding_len<1 or padding_len>min(block_size, pdata_len):
            raise ValueError("Padding is incorrect.")
        if padding_len>1 and padded_data[1-padding_len:]!=bchr(0)*(padding_len-1):
            raise ValueError("ISO 7816-4 padding is incorrect.")
    else:
        raise ValueError("Unknown padding style")
    return padded_data[:-padding_len]
| 39.523364 | 84 | 0.661386 |
__all__ = [ 'pad', 'unpad' ]
from Cryptodome.Util.py3compat import *
def pad(data_to_pad, block_size, style='pkcs7'):
padding_len = block_size-len(data_to_pad)%block_size
if style == 'pkcs7':
padding = bchr(padding_len)*padding_len
elif style == 'x923':
padding = bchr(0)*(padding_len-1) + bchr(padding_len)
elif style == 'iso7816':
padding = bchr(128) + bchr(0)*(padding_len-1)
else:
raise ValueError("Unknown padding style")
return data_to_pad + padding
def unpad(padded_data, block_size, style='pkcs7'):
pdata_len = len(padded_data)
if pdata_len % block_size:
raise ValueError("Input data is not padded")
if style in ('pkcs7', 'x923'):
padding_len = bord(padded_data[-1])
if padding_len<1 or padding_len>min(block_size, pdata_len):
raise ValueError("Padding is incorrect.")
if style == 'pkcs7':
if padded_data[-padding_len:]!=bchr(padding_len)*padding_len:
raise ValueError("PKCS#7 padding is incorrect.")
else:
if padded_data[-padding_len:-1]!=bchr(0)*(padding_len-1):
raise ValueError("ANSI X.923 padding is incorrect.")
elif style == 'iso7816':
padding_len = pdata_len - padded_data.rfind(bchr(128))
if padding_len<1 or padding_len>min(block_size, pdata_len):
raise ValueError("Padding is incorrect.")
if padding_len>1 and padded_data[1-padding_len:]!=bchr(0)*(padding_len-1):
raise ValueError("ISO 7816-4 padding is incorrect.")
else:
raise ValueError("Unknown padding style")
return padded_data[:-padding_len]
| true | true |
1c353d331a3c9af7cf0ecc5d525a68b4c13af975 | 1,945 | py | Python | cornac/models/global_avg/recom_global_avg.py | carmanzhang/cornac | 215efd0ffa7b8ee1afe1ac6b5cc650ee6303ace3 | [
"Apache-2.0"
] | 597 | 2018-07-17T10:59:56.000Z | 2022-03-31T07:59:36.000Z | cornac/models/global_avg/recom_global_avg.py | carmanzhang/cornac | 215efd0ffa7b8ee1afe1ac6b5cc650ee6303ace3 | [
"Apache-2.0"
] | 137 | 2018-10-12T10:52:11.000Z | 2022-03-04T15:26:49.000Z | cornac/models/global_avg/recom_global_avg.py | carmanzhang/cornac | 215efd0ffa7b8ee1afe1ac6b5cc650ee6303ace3 | [
"Apache-2.0"
] | 112 | 2018-07-26T04:36:34.000Z | 2022-03-31T02:29:34.000Z | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class GlobalAvg(Recommender):
    """Global Average baseline for rating prediction.

    Rating predictions equal the average rating of the training data
    (not personalized).

    Parameters
    ----------
    name: string, default: 'GlobalAvg'
        The name of the recommender model.
    """
    def __init__(self, name="GlobalAvg"):
        super().__init__(name=name, trainable=False)
    def score(self, user_idx, item_idx=None):
        """Predict the scores/ratings of a user for an item.

        Parameters
        ----------
        user_idx: int, required
            The index of the user for whom to perform score prediction.
        item_idx: int, optional, default: None
            The index of the item for which to perform score prediction.
            If None, scores for all known items will be returned.

        Returns
        -------
        res : A scalar or a Numpy array
            Relative scores that the user gives to the item or to all known items
        """
        if item_idx is not None:
            return self.train_set.global_mean
        return np.full(self.train_set.num_items, self.train_set.global_mean)
| 34.122807 | 96 | 0.649357 |
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class GlobalAvg(Recommender):
    """Non-personalized baseline: every prediction is the training-set mean rating."""
    def __init__(self, name="GlobalAvg"):
        super().__init__(name=name, trainable=False)
    def score(self, user_idx, item_idx=None):
        """Return the global mean, broadcast over all items when item_idx is None."""
        if item_idx is None:
            return np.full(self.train_set.num_items, self.train_set.global_mean)
        else:
            return self.train_set.global_mean
| true | true |
1c353ed3b527f763ce3fa8788d6e55789d200524 | 712 | py | Python | rocknext/compliance/doctype/quality_feedback/quality_feedback.py | mohsinalimat/rocknext | ff04c00e9ea7d9089921f7b41447b83dc9d78501 | [
"MIT"
] | 8 | 2021-09-26T08:22:57.000Z | 2021-11-30T09:35:55.000Z | rocknext/compliance/doctype/quality_feedback/quality_feedback.py | yrestom/rocknext | 551b2443a3eafade07f7e254f14e336d0f54bd70 | [
"MIT"
] | null | null | null | rocknext/compliance/doctype/quality_feedback/quality_feedback.py | yrestom/rocknext | 551b2443a3eafade07f7e254f14e336d0f54bd70 | [
"MIT"
] | 9 | 2021-09-26T08:23:05.000Z | 2022-01-15T15:12:27.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ComplianceFeedback(Document):
	"""Feedback document rated against parameters copied from a template."""
	@frappe.whitelist()
	def set_parameters(self):
		"""Copy rating parameters from the linked template, once, with rating 1."""
		# only populate when a template is chosen and no parameters exist yet
		if self.template and not getattr(self, 'parameters', []):
			for d in frappe.get_doc('Compliance Feedback Template', self.template).parameters:
				self.append('parameters', dict(
					parameter = d.parameter,
					rating = 1
				))
	def validate(self):
		"""Default the target to the current user and ensure parameters are set."""
		if not self.document_name:
			self.document_type ='User'
			self.document_name = frappe.session.user
		self.set_parameters()
| 27.384615 | 85 | 0.734551 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ComplianceFeedback(Document):
@frappe.whitelist()
def set_parameters(self):
if self.template and not getattr(self, 'parameters', []):
for d in frappe.get_doc('Compliance Feedback Template', self.template).parameters:
self.append('parameters', dict(
parameter = d.parameter,
rating = 1
))
def validate(self):
if not self.document_name:
self.document_type ='User'
self.document_name = frappe.session.user
self.set_parameters()
| true | true |
1c353f11b22a3afa0036fa7d5f43c1dd1bc8a9df | 7,451 | py | Python | model/vgg19/model19_val1.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | 3 | 2018-05-06T15:15:21.000Z | 2018-05-13T12:31:42.000Z | model/vgg19/model19_val1.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | null | null | null | model/vgg19/model19_val1.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | null | null | null | """
以model 4为基础,新增real crop
"""
import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
label_color_augment=[0, 1, 3, 5, 6, 7, 9, 10, 11, 12],
train_batch_size=[16, 16, 16],
label_up_sampling=[10, 0, 0, 10, 0, 0, 10, 0, 0, 0, 0, 0, 10],
data_visualization=True,
downsampling=0.6,
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 3, 6],
lr=[0.001, 0.0001, 0.00001],
freeze_layers=[-1, 0.6, 0],
tta_crop=True,
tta_flip=True,
input_norm=False)
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build and compile a DenseNet169-based multi-label classifier.

    Args:
        freeze_layers: -1 freezes the entire base model; a float in (0, 1)
            freezes that fraction of the base layers; an int >= 1 freezes
            that many leading base layers (0 freezes none).
        lr: learning rate for the Nadam optimizer.
        output_dim: number of independent sigmoid outputs (one per label).
        weights: initial weights for the base model ("imagenet" or None).

    Returns:
        A compiled ``keras.Model`` with binary cross-entropy loss.
    """
    base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            # A fractional value means "freeze this fraction of base layers".
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for idx in range(freeze_layers):
            # Bug fix: the Keras attribute is `trainable`; the original assigned
            # a nonexistent `train_layer`, so partial freezing froze nothing.
            base_model.layers[idx].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Nadam(lr=lr))
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
    """Run the staged fine-tuning schedule defined by ``model_config``.

    Each stage ``i`` trains with its own learning rate, freeze setting and
    batch size; weights saved at the end of one stage are reloaded at the
    start of the next, and a previously interrupted run can resume from
    ``model_config.get_init_stage()`` / ``model_config.initial_epoch``.
    """
    # Background evaluator: the checkpoint callback pushes work into the
    # queue; the daemon thread presumably evaluates saved checkpoints
    # asynchronously so training is not blocked (see keras_util).
    evaluate_queue = queue.Queue()
    evaluate_task = keras_util.EvaluateTask(evaluate_queue)
    evaluate_task.setDaemon(True)
    evaluate_task.start()
    checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
    start = time.time()
    model_config.save_log("####### start train model")
    # Resume support: skip stages already completed by an earlier run.
    init_stage = model_config.get_init_stage()
    model_config.save_log("####### init stage is %d" % init_stage)
    for i in range(init_stage, len(model_config.epoch)):
        model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
            model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
        # Cyclic LR between base_lr and 5x base_lr; step_size of half the
        # steps-per-epoch gives one full up/down cycle per epoch.
        clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
                                          step_size=model_config.get_steps_per_epoch(i) / 2)
        # Augmented training stream, rebuilt per stage for its batch size.
        train_flow = data_loader.KerasGenerator(model_config=model_config,
                                                featurewise_center=True,
                                                featurewise_std_normalization=True,
                                                width_shift_range=0.15,
                                                height_shift_range=0.1,
                                                horizontal_flip=True,
                                                real_transform=True,
                                                rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
                                                                                  target_size=model_config.image_size,
                                                                                  batch_size=
                                                                                  model_config.train_batch_size[i],
                                                                                  shuffle=True,
                                                                                  label_position=model_config.label_position)
        if i == 0:
            # First stage: fresh ImageNet-initialised model, nothing to load.
            model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
            model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
                              output_dim=len(model_config.label_position))
            model.fit_generator(generator=train_flow,
                                steps_per_epoch=model_config.get_steps_per_epoch(i),
                                epochs=model_config.epoch[i],
                                workers=16,
                                verbose=1,
                                callbacks=[checkpoint, clr])
        else:
            # Later stages: build the architecture uninitialised (weights=None)
            # and load weights from the previous stage or the resume point.
            model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
                              lr=model_config.lr[i], weights=None)
            if i == init_stage:
                # Resuming an interrupted stage: continue from initial_epoch.
                model_config.save_log(
                    "####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
                model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
                model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
                    model_config.initial_epoch, model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.initial_epoch,
                                    workers=16,
                                    verbose=1,
                                    callbacks=[checkpoint, clr])
            else:
                # Normal transition: pick up the previous stage's final weights.
                model_config.save_log(
                    "####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
                model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
                model_config.save_log(
                    "####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.epoch[i - 1],
                                    workers=16,
                                    verbose=1,
                                    callbacks=[checkpoint, clr])
    model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
    model_config.save_log(
        "####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| 51.034247 | 125 | 0.504899 | import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
# Experiment configuration: DenseNet169 on 224x224 inputs with a three-stage
# fine-tuning schedule (freeze_layers: all -> 60% -> none, matching lr/epoch
# lists), per-label up-sampling and color augmentation, and test-time
# augmentation via crop + flip.
model_config = KerasModelConfig(k_fold_file="1.txt",
                                model_path=os.path.abspath(__file__),
                                image_resolution=224,
                                data_type=[config.DATA_TYPE_ORIGINAL],
                                label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
                                label_color_augment=[0, 1, 3, 5, 6, 7, 9, 10, 11, 12],
                                train_batch_size=[16, 16, 16],
                                label_up_sampling=[10, 0, 0, 10, 0, 0, 10, 0, 0, 0, 0, 0, 10],
                                data_visualization=True,
                                downsampling=0.6,
                                val_batch_size=256,
                                predict_batch_size=256,
                                epoch=[1, 3, 6],
                                lr=[0.001, 0.0001, 0.00001],
                                freeze_layers=[-1, 0.6, 0],
                                tta_crop=True,
                                tta_flip=True,
                                input_norm=False)
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build and compile a DenseNet169-based multi-label classifier.

    Args:
        freeze_layers: -1 freezes the entire base model; a float in (0, 1)
            freezes that fraction of the base layers; an int >= 1 freezes
            that many leading base layers (0 freezes none).
        lr: learning rate for the Nadam optimizer.
        output_dim: number of independent sigmoid outputs (one per label).
        weights: initial weights for the base model ("imagenet" or None).

    Returns:
        A compiled ``keras.Model`` with binary cross-entropy loss.
    """
    base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            # A fractional value means "freeze this fraction of base layers".
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for idx in range(freeze_layers):
            # Bug fix: the Keras attribute is `trainable`; the original assigned
            # a nonexistent `train_layer`, so partial freezing froze nothing.
            base_model.layers[idx].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Nadam(lr=lr))
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
    """Run the staged fine-tuning schedule defined by ``model_config``.

    Each stage ``i`` trains with its own learning rate, freeze setting and
    batch size; weights saved at the end of one stage are reloaded at the
    start of the next, and a previously interrupted run can resume from
    ``model_config.get_init_stage()`` / ``model_config.initial_epoch``.
    """
    # Background evaluator: the checkpoint callback pushes work into the
    # queue; the daemon thread presumably evaluates saved checkpoints
    # asynchronously so training is not blocked (see keras_util).
    evaluate_queue = queue.Queue()
    evaluate_task = keras_util.EvaluateTask(evaluate_queue)
    evaluate_task.setDaemon(True)
    evaluate_task.start()
    checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
    start = time.time()
    model_config.save_log("####### start train model")
    # Resume support: skip stages already completed by an earlier run.
    init_stage = model_config.get_init_stage()
    model_config.save_log("####### init stage is %d" % init_stage)
    for i in range(init_stage, len(model_config.epoch)):
        model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
            model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
        # Cyclic LR between base_lr and 5x base_lr; step_size of half the
        # steps-per-epoch gives one full up/down cycle per epoch.
        clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
                                          step_size=model_config.get_steps_per_epoch(i) / 2)
        # Augmented training stream, rebuilt per stage for its batch size.
        train_flow = data_loader.KerasGenerator(model_config=model_config,
                                                featurewise_center=True,
                                                featurewise_std_normalization=True,
                                                width_shift_range=0.15,
                                                height_shift_range=0.1,
                                                horizontal_flip=True,
                                                real_transform=True,
                                                rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
                                                                                  target_size=model_config.image_size,
                                                                                  batch_size=
                                                                                  model_config.train_batch_size[i],
                                                                                  shuffle=True,
                                                                                  label_position=model_config.label_position)
        if i == 0:
            # First stage: fresh ImageNet-initialised model, nothing to load.
            model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
            model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
                              output_dim=len(model_config.label_position))
            model.fit_generator(generator=train_flow,
                                steps_per_epoch=model_config.get_steps_per_epoch(i),
                                epochs=model_config.epoch[i],
                                workers=16,
                                verbose=1,
                                callbacks=[checkpoint, clr])
        else:
            # Later stages: build the architecture uninitialised (weights=None)
            # and load weights from the previous stage or the resume point.
            model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
                              lr=model_config.lr[i], weights=None)
            if i == init_stage:
                # Resuming an interrupted stage: continue from initial_epoch.
                model_config.save_log(
                    "####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
                model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
                model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
                    model_config.initial_epoch, model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.initial_epoch,
                                    workers=16,
                                    verbose=1,
                                    callbacks=[checkpoint, clr])
            else:
                # Normal transition: pick up the previous stage's final weights.
                model_config.save_log(
                    "####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
                model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
                model_config.save_log(
                    "####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.epoch[i - 1],
                                    workers=16,
                                    verbose=1,
                                    callbacks=[checkpoint, clr])
    model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
    model_config.save_log(
        "####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.