| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
import collections.abc
import datetime
import logging
try:
import json
except ImportError:
import simplejson as json
import re
def get_log():
return logging.getLogger(__name__.split('.')[0])
class MarathonJsonEncoder(json.JSONEncoder):
"""Custom JSON encoder for Marathon object serialization."""
def default(self, obj):
if hasattr(obj, 'json_repr'):
return self.default(obj.json_repr())
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        if isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str):
try:
return {k: self.default(v) for k, v in obj.items()}
except AttributeError:
return [self.default(e) for e in obj]
return obj
class MarathonMinimalJsonEncoder(json.JSONEncoder):
"""Custom JSON encoder for Marathon object serialization."""
def default(self, obj):
if hasattr(obj, 'json_repr'):
return self.default(obj.json_repr(minimal=True))
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        if isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str):
try:
return {k: self.default(v) for k, v in obj.items() if (v or v in (False, 0))}
except AttributeError:
return [self.default(e) for e in obj if (e or e in (False, 0))]
return obj
def to_camel_case(snake_str):
words = snake_str.split('_')
return words[0] + ''.join(w.capitalize() for w in words[1:])
def to_snake_case(camel_str):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
DATETIME_FORMATS = [
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%SZ', # Marathon omits milliseconds when they would be .000
]
def to_datetime(timestamp):
if (timestamp is None or isinstance(timestamp, datetime.datetime)):
return timestamp
else:
for fmt in DATETIME_FORMATS:
try:
return datetime.datetime.strptime(timestamp, fmt).replace(tzinfo=datetime.timezone.utc)
except ValueError:
pass
raise ValueError(f'Unrecognized datetime format: {timestamp}')
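# Illustrative usage sketch (not part of the original module): serializing an object
# through MarathonJsonEncoder and round-tripping a field name through the case helpers.
# The ``Task`` class below is a hypothetical stand-in for a Marathon model object.
if __name__ == '__main__':
    class Task:
        def json_repr(self, minimal=False):
            return {'task_id': 'app.1', 'staged_at': datetime.datetime(2020, 1, 1)}

    print(json.dumps(Task(), cls=MarathonJsonEncoder))
    print(to_camel_case('task_kill_grace_period_seconds'))  # -> taskKillGracePeriodSeconds
    print(to_snake_case('taskKillGracePeriodSeconds'))      # -> task_kill_grace_period_seconds
    print(to_datetime('2020-01-01T00:00:00Z'))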
|
[
"datetime.datetime.strptime",
"re.sub"
] |
[((1660, 1708), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'camel_str'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', camel_str)\n", (1666, 1708), False, 'import re\n'), ((1719, 1761), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (1725, 1761), False, 'import re\n'), ((2118, 2160), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timestamp', 'fmt'], {}), '(timestamp, fmt)\n', (2144, 2160), False, 'import datetime\n')]
|
import os
import os.path as osp
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import Hook, obj_from_dict
from torch.utils.data import Dataset
from ... import datasets
from ..parallel import collate, scatter
from .accuracy import top_k_accuracy
class DistEvalHook(Hook):
def __init__(self, dataset, interval=1, distributed=True):
if isinstance(dataset, Dataset):
self.dataset = dataset
elif isinstance(dataset, dict):
self.dataset = obj_from_dict(dataset, datasets,
{'test_mode': True})
else:
raise TypeError(
'dataset must be a Dataset object or a dict, not {}'.format(
type(dataset)))
self.interval = interval
self.dist = distributed
# def after_train_epoch(self, runner):
# if not self.every_n_epochs(runner, self.interval):
# return
# runner.model.eval()
# # average allocation for gpus
# ws = runner.world_size
# append_size = (len(self.dataset) + ws - 1) // ws * ws
# results = [None for _ in range(append_size)]
# if runner.rank == 0:
# prog_bar = mmcv.ProgressBar(append_size)
# for idx in range(runner.rank, append_size, runner.world_size):
# idx = idx % len(self.dataset)
# data = self.dataset[idx]
# data_gpu = scatter(
# collate([data], samples_per_gpu=1),
# [torch.cuda.current_device()])[0]
# # compute output
# with torch.no_grad():
# result = runner.model(return_loss=False, **data_gpu)
# results[idx] = result
# batch_size = runner.world_size
# if runner.rank == 0:
# for _ in range(batch_size):
# prog_bar.update()
# tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank))
# mmcv.dump(results, tmp_file)
# dist.barrier()
# if runner.rank == 0:
# print('\n')
# for i in range(1, runner.world_size):
# tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
# tmp_results = mmcv.load(tmp_file)
# for idx in range(i, len(self.dataset), runner.world_size):
# results[idx] = tmp_results[idx]
# os.remove(tmp_file)
# self.evaluate(runner, results[:len(self.dataset)])
# os.remove(osp.join(runner.work_dir, 'temp_0.pkl'))
# return
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
if runner.rank == 0:
prog_bar = mmcv.ProgressBar(len(self.dataset))
        # round-robin split: each rank evaluates every world_size-th sample
        for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data_gpu = scatter(
collate([data], samples_per_gpu=1),
[torch.cuda.current_device()])[0]
# compute output
with torch.no_grad():
result = runner.model(return_loss=False, **data_gpu)
results[idx] = result
batch_size = runner.world_size
if runner.rank == 0:
for _ in range(batch_size):
prog_bar.update()
        # every rank dumps its partial results to a temp file, then rank 0 gathers them
        tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank))
        mmcv.dump(results, tmp_file)
        dist.barrier()
if runner.rank == 0:
print('\n')
for i in range(1, runner.world_size):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
self.evaluate(runner, results)
os.remove(osp.join(runner.work_dir, 'temp_0.pkl'))
return
    def evaluate(self, runner, results):
        raise NotImplementedError
class DistEvalTopKAccuracyHook(DistEvalHook):
    def __init__(self, dataset, interval=1, k=(1, 5), dist=True):  # evaluate() logs top-1 and top-5
super(DistEvalTopKAccuracyHook, self).__init__(dataset, interval, dist)
self.k = k
def evaluate(self, runner, results):
gt_labels = []
for i in range(len(self.dataset)):
ann = self.dataset.video_infos[i]
gt_labels.append(ann['label'])
results = [res.squeeze() for res in results]
top1, top5 = top_k_accuracy(results, gt_labels, k=self.k)
runner.mode = 'val'
runner.log_buffer.output['top1 acc'] = top1
runner.log_buffer.output['top5 acc'] = top5
runner.log_buffer.ready = True
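# Illustrative sketch (not part of the original module): attaching the hook to an
# mmcv runner in a training script.  ``runner`` and ``val_dataset`` are assumed to be
# created elsewhere and to follow the standard mmcv Runner / torch Dataset APIs; the
# helper name is hypothetical.
def register_topk_eval_hook(runner, val_dataset, interval=1, k=(1, 5)):
    """Register a distributed top-k accuracy evaluation hook on ``runner``."""
    runner.register_hook(DistEvalTopKAccuracyHook(val_dataset, interval=interval, k=k))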
|
[
"os.remove",
"mmcv.load",
"mmcv.runner.obj_from_dict",
"torch.distributed.barrier",
"mmcv.dump",
"torch.cuda.current_device",
"torch.no_grad",
"os.path.join"
] |
[((3562, 3590), 'mmcv.dump', 'mmcv.dump', (['results', 'tmp_file'], {}), '(results, tmp_file)\n', (3571, 3590), False, 'import mmcv\n'), ((3599, 3613), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (3611, 3613), True, 'import torch.distributed as dist\n'), ((510, 563), 'mmcv.runner.obj_from_dict', 'obj_from_dict', (['dataset', 'datasets', "{'test_mode': True}"], {}), "(dataset, datasets, {'test_mode': True})\n", (523, 563), False, 'from mmcv.runner import Hook, obj_from_dict\n'), ((3194, 3209), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3207, 3209), False, 'import torch\n'), ((3826, 3845), 'mmcv.load', 'mmcv.load', (['tmp_file'], {}), '(tmp_file)\n', (3835, 3845), False, 'import mmcv\n'), ((3984, 4003), 'os.remove', 'os.remove', (['tmp_file'], {}), '(tmp_file)\n', (3993, 4003), False, 'import os\n'), ((4069, 4108), 'os.path.join', 'osp.join', (['runner.work_dir', '"""temp_0.pkl"""'], {}), "(runner.work_dir, 'temp_0.pkl')\n", (4077, 4108), True, 'import os.path as osp\n'), ((3114, 3141), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3139, 3141), False, 'import torch\n')]
|
from pathlib import Path
CAMVIEWER_CFG = '/reg/g/pcds/pyps/config/{}/camviewer.cfg'
CONDA_BASE = Path('/reg/g/pcds/pyps/conda/py36')
CUR_EXP_SCRIPT = '/reg/g/pcds/engineering_tools/{0}/scripts/get_curr_exp {0}'
CLASS_SEARCH_PATH = ['pcdsdevices.device_types']
DIR_MODULE = Path(__file__).resolve().parent
FILE_YAML = DIR_MODULE / 'logging.yml'
HUTCH_COLORS = dict(
amo='38;5;27',
sxr='38;5;250',
xpp='38;5;40',
xcs='38;5;93',
mfx='38;5;202',
cxi='38;5;196',
mec='38;5;214')
INPUT_LEVEL = 5
SUCCESS_LEVEL = 35
VALID_KEYS = ('hutch', 'db', 'load', 'experiment', 'daq_platform')
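# Illustrative sketch (not part of the original module): expanding the per-hutch
# format-string constants above for a given hutch name.  The function name and the
# 'xpp' default are hypothetical.
def example_paths(hutch='xpp'):
    """Expand the per-hutch template constants for ``hutch``."""
    return {
        'camviewer_cfg': CAMVIEWER_CFG.format(hutch),
        'cur_exp_script': CUR_EXP_SCRIPT.format(hutch),
        'hutch_color': HUTCH_COLORS.get(hutch),
        'logging_yaml': FILE_YAML,
    }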
|
[
"pathlib.Path"
] |
[((99, 134), 'pathlib.Path', 'Path', (['"""/reg/g/pcds/pyps/conda/py36"""'], {}), "('/reg/g/pcds/pyps/conda/py36')\n", (103, 134), False, 'from pathlib import Path\n'), ((278, 292), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (282, 292), False, 'from pathlib import Path\n')]
|
"""Provides an easy way of generating several geometric objects.
CONTAINS
--------
vtkArrowSource
vtkCylinderSource
vtkSphereSource
vtkPlaneSource
vtkLineSource
vtkCubeSource
vtkConeSource
vtkDiskSource
vtkRegularPolygonSource
vtkPyramid
vtkPlatonicSolidSource
vtkSuperquadricSource
as well as some pure-python helpers.
"""
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import check_valid_vector
NORMALS = {
'x': [1, 0, 0],
'y': [0, 1, 0],
'z': [0, 0, 1],
'-x': [-1, 0, 0],
'-y': [0, -1, 0],
'-z': [0, 0, -1],
}
def translate(surf, center=[0., 0., 0.], direction=[1., 0., 0.]):
"""Translate and orient a mesh to a new center and direction.
By default, the input mesh is considered centered at the origin
and facing in the x direction.
"""
normx = np.array(direction)/np.linalg.norm(direction)
normz = np.cross(normx, [0, 1.0, 0.0000001])
normz /= np.linalg.norm(normz)
normy = np.cross(normz, normx)
trans = np.zeros((4, 4))
trans[:3, 0] = normx
trans[:3, 1] = normy
trans[:3, 2] = normz
trans[3, 3] = 1
surf.transform(trans)
if not np.allclose(center, [0., 0., 0.]):
surf.points += np.array(center)
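# Illustrative sketch (not part of this module's public API): ``translate`` re-centers
# and re-orients a mesh that was generated at the origin facing +x, e.g. moving a
# sphere to (1, 2, 3) and pointing it along +z.  The helper name is hypothetical;
# ``Sphere`` is defined later in this module.
def _translate_example():
    mesh = Sphere()
    translate(mesh, center=[1.0, 2.0, 3.0], direction=[0.0, 0.0, 1.0])
    return mesh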
def Cylinder(center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0),
radius=0.5, height=1.0, resolution=100, capping=True):
"""Create the surface of a cylinder.
See also :func:`pyvista.CylinderStructured`.
Parameters
----------
center : sequence, optional
Location of the centroid in ``[x, y, z]``.
direction : sequence, optional
Direction cylinder points to in ``[x, y, z]``.
radius : float, optional
Radius of the cylinder.
height : float, optional
Height of the cylinder.
resolution : int, optional
Number of points on the circular face of the cylinder.
capping : bool, optional
Cap cylinder ends with polygons. Default ``True``.
Returns
-------
pyvista.PolyData
Cylinder surface.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> cylinder = pyvista.Cylinder(center=[1, 2, 3], direction=[1, 1, 1],
... radius=1, height=2)
>>> cylinder.plot(show_edges=True, line_width=5, cpos='xy')
"""
cylinderSource = _vtk.vtkCylinderSource()
cylinderSource.SetRadius(radius)
cylinderSource.SetHeight(height)
cylinderSource.SetCapping(capping)
cylinderSource.SetResolution(resolution)
cylinderSource.Update()
surf = pyvista.wrap(cylinderSource.GetOutput())
surf.rotate_z(-90, inplace=True)
translate(surf, center, direction)
return surf
def CylinderStructured(radius=0.5, height=1.0,
center=(0.,0.,0.), direction=(1.,0.,0.),
theta_resolution=32, z_resolution=10):
"""Create a cylinder mesh as a :class:`pyvista.StructuredGrid`.
The end caps are left open. This can create a surface mesh if a single
value for the ``radius`` is given or a 3D mesh if multiple radii are given
as a list/array in the ``radius`` argument.
Parameters
----------
radius : float, sequence, optional
Radius of the cylinder. If a sequence, then describes the
radial coordinates of the cells as a range of values as
specified by the ``radius``.
height : float, optional
Height of the cylinder along its Z-axis.
center : sequence
Location of the centroid in ``[x, y, z]``.
direction : sequence
Direction cylinder Z-axis in ``[x, y, z]``.
theta_resolution : int, optional
Number of points on the circular face of the cylinder.
Ignored if ``radius`` is an iterable.
z_resolution : int, optional
Number of points along the height (Z-axis) of the cylinder.
Returns
-------
pyvista.StructuredGrid
Structured cylinder.
Examples
--------
Default structured cylinder
>>> import pyvista
>>> mesh = pyvista.CylinderStructured()
>>> mesh.plot(show_edges=True)
Structured cylinder with an inner radius of 1, outer of 2, with 5
segments.
>>> import numpy as np
>>> mesh = pyvista.CylinderStructured(radius=np.linspace(1, 2, 5))
>>> mesh.plot(show_edges=True)
"""
# Define grid in polar coordinates
r = np.array([radius]).ravel()
nr = len(r)
theta = np.linspace(0, 2*np.pi, num=theta_resolution)
radius_matrix, theta_matrix = np.meshgrid(r, theta)
# Transform to cartesian space
X = radius_matrix * np.cos(theta_matrix)
Y = radius_matrix * np.sin(theta_matrix)
# Make all the nodes in the grid
xx = np.array([X] * z_resolution).ravel()
yy = np.array([Y] * z_resolution).ravel()
dz = height / (z_resolution - 1)
    zz = np.full((X.size, z_resolution), dz)
zz *= np.arange(z_resolution)
zz = zz.ravel(order='f')
# Create the grid
grid = pyvista.StructuredGrid()
grid.points = np.c_[xx, yy, zz]
grid.dimensions = [nr, theta_resolution, z_resolution]
# Orient properly in user direction
vx = np.array([0., 0., 1.])
    if not np.allclose(vx, direction):
        direction = np.array(direction, dtype=float)
        direction /= np.linalg.norm(direction)
vx -= vx.dot(direction) * direction
vx /= np.linalg.norm(vx)
vy = np.cross(direction, vx)
rmtx = np.array([vx, vy, direction])
grid.points = grid.points.dot(rmtx)
# Translate to given center
grid.points -= np.array(grid.center)
grid.points += np.array(center)
return grid
def Arrow(start=(0., 0., 0.), direction=(1., 0., 0.), tip_length=0.25,
tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,
shaft_resolution=20, scale=None):
"""Create an arrow.
Parameters
----------
start : iterable, optional
Start location in ``[x, y, z]``.
direction : iterable, optional
Direction the arrow points to in ``[x, y, z]``.
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
tip_resolution : int, optional
Number of faces around the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft.
scale : float or str, optional
Scale factor of the entire object, default is ``None``
(i.e. scale of 1). ``'auto'`` scales to length of direction
array.
Returns
-------
pyvista.PolyData
Arrow mesh.
Examples
--------
Plot a default arrow.
>>> import pyvista
>>> mesh = pyvista.Arrow()
>>> mesh.plot(show_edges=True)
"""
# Create arrow object
arrow = _vtk.vtkArrowSource()
arrow.SetTipLength(tip_length)
arrow.SetTipRadius(tip_radius)
arrow.SetTipResolution(tip_resolution)
arrow.SetShaftRadius(shaft_radius)
arrow.SetShaftResolution(shaft_resolution)
arrow.Update()
surf = pyvista.wrap(arrow.GetOutput())
if scale == 'auto':
scale = float(np.linalg.norm(direction))
if isinstance(scale, float) or isinstance(scale, int):
surf.points *= scale
elif scale is not None:
raise TypeError("Scale must be either float, int or 'auto'.")
translate(surf, start, direction)
return surf
def Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30,
phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180):
"""Create a vtk Sphere.
Parameters
----------
radius : float, optional
Sphere radius.
center : np.ndarray or list, optional
Center in ``[x, y, z]``.
direction : list or tuple or np.ndarray, optional
Direction the top of the sphere points to in ``[x, y, z]``.
theta_resolution : int , optional
Set the number of points in the longitude direction (ranging
from ``start_theta`` to ``end_theta``).
phi_resolution : int, optional
Set the number of points in the latitude direction (ranging from
``start_phi`` to ``end_phi``).
start_theta : float, optional
Starting longitude angle.
end_theta : float, optional
Ending longitude angle.
start_phi : float, optional
Starting latitude angle.
end_phi : float, optional
Ending latitude angle.
Returns
-------
pyvista.PolyData
Sphere mesh.
Examples
--------
Create a sphere using default parameters.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.plot(show_edges=True)
Create a quarter sphere by setting ``end_theta``.
>>> sphere = pyvista.Sphere(end_theta=90)
>>> out = sphere.plot(show_edges=True)
"""
sphere = _vtk.vtkSphereSource()
sphere.SetRadius(radius)
sphere.SetThetaResolution(theta_resolution)
sphere.SetPhiResolution(phi_resolution)
sphere.SetStartTheta(start_theta)
sphere.SetEndTheta(end_theta)
sphere.SetStartPhi(start_phi)
sphere.SetEndPhi(end_phi)
sphere.Update()
surf = pyvista.wrap(sphere.GetOutput())
surf.rotate_y(-90, inplace=True)
translate(surf, center, direction)
return surf
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
i_resolution=10, j_resolution=10):
"""Create a plane.
Parameters
----------
center : list or tuple or np.ndarray
Location of the centroid in ``[x, y, z]``.
direction : list or tuple or np.ndarray
Direction of the plane's normal in ``[x, y, z]``.
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
pyvista.PolyData
Plane mesh.
Examples
--------
Create a default plane.
>>> import pyvista
>>> mesh = pyvista.Plane()
>>> mesh.point_data.clear()
>>> mesh.plot(show_edges=True)
"""
planeSource = _vtk.vtkPlaneSource()
planeSource.SetXResolution(i_resolution)
planeSource.SetYResolution(j_resolution)
planeSource.Update()
surf = pyvista.wrap(planeSource.GetOutput())
surf.points[:, 0] *= i_size
surf.points[:, 1] *= j_size
surf.rotate_y(-90, inplace=True)
translate(surf, center, direction)
return surf
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):
"""Create a line.
Parameters
----------
pointa : np.ndarray or list, optional
Location in ``[x, y, z]``.
pointb : np.ndarray or list, optional
Location in ``[x, y, z]``.
resolution : int, optional
Number of pieces to divide line into.
Returns
-------
pyvista.PolyData
Line mesh.
Examples
--------
Create a line between ``(0, 0, 0)`` and ``(0, 0, 1)``.
>>> import pyvista
>>> mesh = pyvista.Line((0, 0, 0), (0, 0, 1))
>>> mesh.plot(color='k', line_width=10)
"""
if resolution <= 0:
raise ValueError('Resolution must be positive')
if np.array(pointa).size != 3:
raise TypeError('Point A must be a length three tuple of floats.')
if np.array(pointb).size != 3:
raise TypeError('Point B must be a length three tuple of floats.')
src = _vtk.vtkLineSource()
src.SetPoint1(*pointa)
src.SetPoint2(*pointb)
src.SetResolution(resolution)
src.Update()
line = pyvista.wrap(src.GetOutput())
# Compute distance of every point along line
compute = lambda p0, p1: np.sqrt(np.sum((p1 - p0)**2, axis=1))
distance = compute(np.array(pointa), line.points)
line['Distance'] = distance
return line
def Tube(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1, radius=1.0, n_sides=15):
"""Create a tube.
Parameters
----------
pointa : np.ndarray or list, optional
Location in ``[x, y, z]``.
pointb : np.ndarray or list, optional
Location in ``[x, y, z]``.
resolution : int, optional
Number of pieces to divide tube into.
radius : float, optional
Minimum tube radius (minimum because the tube radius may vary).
n_sides : int, optional
Number of sides for the tube.
Returns
-------
pyvista.PolyData
Tube mesh.
Examples
--------
Create a tube between ``(0, 0, 0)`` and ``(0, 0, 1)``.
>>> import pyvista
>>> mesh = pyvista.Tube((0, 0, 0), (0, 0, 1))
>>> mesh.plot()
"""
if resolution <= 0:
raise ValueError('Resolution must be positive.')
if np.array(pointa).size != 3:
raise TypeError('Point A must be a length three tuple of floats.')
if np.array(pointb).size != 3:
raise TypeError('Point B must be a length three tuple of floats.')
line_src = _vtk.vtkLineSource()
line_src.SetPoint1(*pointa)
line_src.SetPoint2(*pointb)
line_src.SetResolution(resolution)
line_src.Update()
if n_sides < 3:
raise ValueError('Number of sides `n_sides` must be >= 3')
tube_filter = _vtk.vtkTubeFilter()
tube_filter.SetInputConnection(line_src.GetOutputPort())
tube_filter.SetRadius(radius)
tube_filter.SetNumberOfSides(n_sides)
tube_filter.Update()
return pyvista.wrap(tube_filter.GetOutput())
def Cube(center=(0.0, 0.0, 0.0), x_length=1.0, y_length=1.0,
z_length=1.0, bounds=None, clean=True):
"""Create a cube.
It's possible to specify either the center and side lengths or
just the bounds of the cube. If ``bounds`` are given, all other
arguments are ignored.
.. versionchanged:: 0.33.0
The cube is created using ``vtk.vtkCubeSource``. For
compatibility with :func:`pyvista.PlatonicSolid`, face indices
are also added as cell data. For full compatibility with
:func:`PlatonicSolid() <pyvista.PlatonicSolid>`, one has to
use ``x_length = y_length = z_length = 2 * radius / 3**0.5``.
The cube points are also cleaned by default now, leaving only
the 8 corners and a watertight (manifold) mesh.
Parameters
----------
center : sequence, optional
Center in ``[x, y, z]``.
x_length : float, optional
Length of the cube in the x-direction.
y_length : float, optional
Length of the cube in the y-direction.
z_length : float, optional
Length of the cube in the z-direction.
bounds : sequence, optional
Specify the bounding box of the cube. If given, all other size
arguments are ignored. ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
clean : bool, optional
Whether to clean the raw points of the mesh, making the cube
manifold. Note that this will degrade the texture coordinates
that come with the mesh, so if you plan to map a texture on
the cube, consider setting this to ``False``.
.. versionadded:: 0.33.0
Returns
-------
pyvista.PolyData
Mesh of the cube.
Examples
--------
Create a default cube.
>>> import pyvista
>>> mesh = pyvista.Cube()
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkCubeSource()
if bounds is not None:
if np.array(bounds).size != 6:
raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
src.SetBounds(bounds)
else:
src.SetCenter(center)
src.SetXLength(x_length)
src.SetYLength(y_length)
src.SetZLength(z_length)
src.Update()
cube = pyvista.wrap(src.GetOutput())
# add face index data for compatibility with PlatonicSolid
# but make it inactive for backwards compatibility
    cube.cell_data.set_array([1, 4, 0, 3, 5, 2], 'FaceIndex')
# clean duplicate points
if clean:
cube.clean(inplace=True)
return cube
def Box(bounds=(-1., 1., -1., 1., -1., 1.), level=0, quads=True):
"""Create a box with solid faces for the given bounds.
Parameters
----------
bounds : iterable, optional
Specify the bounding box of the cube.
``(xMin, xMax, yMin, yMax, zMin, zMax)``.
level : int, optional
Level of subdivision of the faces.
quads : bool, optional
Flag to tell the source to generate either a quad or two
triangle for a set of four points. Default ``True``.
Returns
-------
pyvista.PolyData
Mesh of the box.
Examples
--------
Create a box with subdivision ``level=2``.
>>> import pyvista
>>> mesh = pyvista.Box(level=2)
>>> mesh.plot(show_edges=True)
"""
if np.array(bounds).size != 6:
raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
src = _vtk.vtkTessellatedBoxSource()
src.SetLevel(level)
if quads:
src.QuadsOn()
else:
src.QuadsOff()
src.SetBounds(bounds)
src.Update()
return pyvista.wrap(src.GetOutput())
def Cone(center=(0., 0., 0.), direction=(1., 0., 0.), height=1.0, radius=None,
capping=True, angle=None, resolution=6):
"""Create a cone.
Parameters
----------
center : iterable, optional
Center in ``[x, y, z]``. Axis of the cone passes through this
point.
direction : iterable, optional
Direction vector in ``[x, y, z]``. Orientation vector of the
cone.
height : float, optional
Height along the cone in its specified direction.
radius : float, optional
Base radius of the cone.
capping : bool, optional
Enable or disable the capping the base of the cone with a
polygon.
angle : float, optional
The angle in degrees between the axis of the cone and a
generatrix.
resolution : int, optional
Number of facets used to represent the cone.
Returns
-------
pyvista.PolyData
Cone mesh.
Examples
--------
Create a default Cone.
>>> import pyvista
>>> mesh = pyvista.Cone()
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkConeSource()
src.SetCapping(capping)
src.SetDirection(direction)
src.SetCenter(center)
src.SetHeight(height)
if angle and radius:
raise ValueError("Both radius and angle specified. They are mutually exclusive.")
elif angle and not radius:
src.SetAngle(angle)
elif not angle and radius:
src.SetRadius(radius)
elif not angle and not radius:
src.SetRadius(0.5)
src.SetResolution(resolution)
src.Update()
return pyvista.wrap(src.GetOutput())
def Polygon(center=(0., 0., 0.), radius=1, normal=(0, 0, 1), n_sides=6):
"""Create a polygon.
Parameters
----------
center : iterable, optional
Center in ``[x, y, z]``. Central axis of the polygon passes
through this point.
radius : float, optional
The radius of the polygon.
normal : iterable, optional
Direction vector in ``[x, y, z]``. Orientation vector of the polygon.
n_sides : int, optional
Number of sides of the polygon.
Returns
-------
pyvista.PolyData
Mesh of the polygon.
Examples
--------
Create an 8 sided polygon.
>>> import pyvista
>>> mesh = pyvista.Polygon(n_sides=8)
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkRegularPolygonSource()
src.SetCenter(center)
src.SetNumberOfSides(n_sides)
src.SetRadius(radius)
src.SetNormal(normal)
src.Update()
return pyvista.wrap(src.GetOutput())
def Disc(center=(0., 0., 0.), inner=0.25, outer=0.5, normal=(0, 0, 1), r_res=1,
c_res=6):
"""Create a polygonal disk with a hole in the center.
The disk has zero height. The user can specify the inner and outer
radius of the disk, and the radial and circumferential resolution
of the polygonal representation.
Parameters
----------
center : iterable
Center in ``[x, y, z]``. Middle of the axis of the disc.
inner : float, optional
The inner radius.
outer : float, optional
The outer radius.
normal : iterable
Direction vector in ``[x, y, z]``. Orientation vector of the disc.
r_res : int, optional
Number of points in radial direction.
c_res : int, optional
Number of points in circumferential direction.
Returns
-------
pyvista.PolyData
Disk mesh.
Examples
--------
Create a disc with 50 points in the circumferential direction.
>>> import pyvista
>>> mesh = pyvista.Disc(c_res=50)
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkDiskSource()
src.SetInnerRadius(inner)
src.SetOuterRadius(outer)
src.SetRadialResolution(r_res)
src.SetCircumferentialResolution(c_res)
src.Update()
normal = np.array(normal)
center = np.array(center)
surf = pyvista.wrap(src.GetOutput())
surf.rotate_y(90, inplace=True)
translate(surf, center, normal)
return surf
def Text3D(string, depth=0.5):
"""Create 3D text from a string.
Parameters
----------
string : str
String to generate 3D text from.
depth : float, optional
Depth of the text. Defaults to ``0.5``.
Returns
-------
pyvista.PolyData
3D text mesh.
Examples
--------
>>> import pyvista
>>> text_mesh = pyvista.Text3D('PyVista')
>>> text_mesh.plot(cpos='xy')
"""
vec_text = _vtk.vtkVectorText()
vec_text.SetText(string)
extrude = _vtk.vtkLinearExtrusionFilter()
extrude.SetInputConnection(vec_text.GetOutputPort())
extrude.SetExtrusionTypeToNormalExtrusion()
extrude.SetVector(0, 0, 1)
extrude.SetScaleFactor(depth)
tri_filter = _vtk.vtkTriangleFilter()
tri_filter.SetInputConnection(extrude.GetOutputPort())
tri_filter.Update()
return pyvista.wrap(tri_filter.GetOutput())
def Wavelet(extent=(-10, 10, -10, 10, -10, 10), center=(0, 0, 0), maximum=255,
x_freq=60, y_freq=30, z_freq=40, x_mag=10, y_mag=18, z_mag=5,
std=0.5, subsample_rate=1):
"""Create a wavelet.
Produces images with pixel values determined by
``Maximum*Gaussian*x_mag*sin(x_freq*x)*sin(y_freq*y)*cos(z_freq*z)``
Values are float scalars on point data with name ``"RTData"``.
Parameters
----------
extent : sequence, optional
Set/Get the extent of the whole output image. Default
``(-10, 10, -10, 10, -10, 10)``.
center : list, optional
Center of the wavelet.
maximum : float, optional
Maximum of the wavelet function.
x_freq : float, optional
Natural frequency in the x direction.
y_freq : float, optional
Natural frequency in the y direction.
z_freq : float, optional
Natural frequency in the z direction.
x_mag : float, optional
Magnitude in the x direction.
y_mag : float, optional
Magnitude in the y direction.
z_mag : float, optional
Magnitude in the z direction.
std : float, optional
Standard deviation.
subsample_rate : int, optional
The sub-sample rate.
Returns
-------
pyvista.PolyData
Wavelet mesh.
Examples
--------
>>> import pyvista
>>> wavelet = pyvista.Wavelet(extent=(0, 50, 0, 50, 0, 10), x_freq=20,
... y_freq=10, z_freq=1, x_mag=100, y_mag=100,
... z_mag=1000)
>>> wavelet.plot(show_scalar_bar=False)
Extract lower valued cells of the wavelet and create a surface from it.
>>> thresh = wavelet.threshold(800).extract_surface()
>>> thresh.plot(show_scalar_bar=False)
Smooth it to create "waves"
>>> waves = thresh.smooth(n_iter=100, relaxation_factor=0.1)
>>> waves.plot(color='white', smooth_shading=True, show_edges=True)
"""
wavelet_source = _vtk.vtkRTAnalyticSource()
wavelet_source.SetWholeExtent(*extent)
wavelet_source.SetCenter(center)
wavelet_source.SetMaximum(maximum)
wavelet_source.SetXFreq(x_freq)
wavelet_source.SetYFreq(y_freq)
wavelet_source.SetZFreq(z_freq)
wavelet_source.SetXMag(x_mag)
wavelet_source.SetYMag(y_mag)
wavelet_source.SetZMag(z_mag)
wavelet_source.SetStandardDeviation(std)
wavelet_source.SetSubsampleRate(subsample_rate)
wavelet_source.Update()
return pyvista.wrap(wavelet_source.GetOutput())
def CircularArc(pointa, pointb, center, resolution=100, negative=False):
"""Create a circular arc defined by two endpoints and a center.
The number of segments composing the polyline is controlled by
setting the object resolution.
Parameters
----------
pointa : sequence
Position of the first end point.
pointb : sequence
Position of the other end point.
center : sequence
Center of the circle that defines the arc.
resolution : int, optional
The number of segments of the polyline that draws the arc.
Resolution of 1 will just create a line.
negative : bool, optional
By default the arc spans the shortest angular sector between
``pointa`` and ``pointb``.
By setting this to ``True``, the longest angular sector is
used instead (i.e. the negative coterminal angle to the
shortest one).
Returns
-------
pyvista.PolyData
Circular arc mesh.
Examples
--------
Create a quarter arc centered at the origin in the xy plane.
>>> import pyvista
>>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0])
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(arc, color='k', line_width=10)
>>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
>>> _ = pl.view_xy()
>>> pl.show()
"""
check_valid_vector(pointa, 'pointa')
check_valid_vector(pointb, 'pointb')
check_valid_vector(center, 'center')
if not np.isclose(
np.linalg.norm(np.array(pointa) - np.array(center)),
np.linalg.norm(np.array(pointb) - np.array(center)),
):
raise ValueError("pointa and pointb are not equidistant from center")
# fix half-arc bug: if a half arc travels directly through the
# center point, it becomes a line
pointb = list(pointb)
pointb[0] -= 1E-10
pointb[1] -= 1E-10
arc = _vtk.vtkArcSource()
arc.SetPoint1(*pointa)
arc.SetPoint2(*pointb)
arc.SetCenter(*center)
arc.SetResolution(resolution)
arc.SetNegative(negative)
arc.Update()
angle = np.deg2rad(arc.GetAngle())
arc = pyvista.wrap(arc.GetOutput())
# Compute distance of every point along circular arc
center = np.array(center).ravel()
radius = np.sqrt(np.sum((arc.points[0]-center)**2, axis=0))
angles = np.arange(0.0, 1.0 + 1.0/resolution, 1.0/resolution) * angle
arc['Distance'] = radius * angles
return arc
def CircularArcFromNormal(center, resolution=100, normal=None,
polar=None, angle=None):
"""Create a circular arc defined by normal to the plane of the arc, and an angle.
The number of segments composing the polyline is controlled by
setting the object resolution.
Parameters
----------
center : sequence
Center of the circle that defines the arc.
resolution : int, optional
The number of segments of the polyline that draws the arc.
Resolution of 1 will just create a line.
normal : sequence, optional
The normal vector to the plane of the arc. By default it
points in the positive Z direction.
polar : sequence, optional
Starting point of the arc in polar coordinates. By default it
is the unit vector in the positive x direction.
angle : float, optional
Arc length (in degrees) beginning at the polar vector. The
direction is counterclockwise. By default it is 90.
Returns
-------
pyvista.PolyData
Circular arc mesh.
Examples
--------
Quarter arc centered at the origin in the xy plane.
>>> import pyvista
>>> normal = [0, 0, 1]
>>> polar = [-1, 0, 0]
>>> arc = pyvista.CircularArcFromNormal([0, 0, 0], normal=normal, polar=polar)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(arc, color='k', line_width=10)
>>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
>>> _ = pl.view_xy()
>>> pl.show()
"""
check_valid_vector(center, 'center')
if normal is None:
normal = [0, 0, 1]
if polar is None:
polar = [1, 0, 0]
if angle is None:
angle = 90.0
arc = _vtk.vtkArcSource()
arc.SetCenter(*center)
arc.SetResolution(resolution)
arc.UseNormalAndAngleOn()
check_valid_vector(normal, 'normal')
arc.SetNormal(*normal)
check_valid_vector(polar, 'polar')
arc.SetPolarVector(*polar)
arc.SetAngle(angle)
arc.Update()
angle = np.deg2rad(arc.GetAngle())
arc = pyvista.wrap(arc.GetOutput())
# Compute distance of every point along circular arc
center = np.array(center)
radius = np.sqrt(np.sum((arc.points[0] - center)**2, axis=0))
angles = np.linspace(0.0, angle, resolution+1)
arc['Distance'] = radius * angles
return arc
def Pyramid(points=None):
"""Create a pyramid defined by 5 points.
Parameters
----------
points : sequence, optional
Points of the pyramid. Points are ordered such that the first
four points are the four counterclockwise points on the
quadrilateral face, and the last point is the apex.
Defaults to pyramid in example.
Returns
-------
pyvista.UnstructuredGrid
Unstructured grid containing a single pyramid cell.
Examples
--------
>>> import pyvista
>>> pointa = [1.0, 1.0, 0.0]
>>> pointb = [-1.0, 1.0, 0.0]
>>> pointc = [-1.0, -1.0, 0.0]
>>> pointd = [1.0, -1.0, 0.0]
>>> pointe = [0.0, 0.0, 1.608]
>>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe])
>>> pyramid.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[1.0, 1.0, 0.0],
[-1.0, 1.0, 0.0],
[-1.0, -1.0, 0.0],
[1.0, -1.0, 0.0],
[0.0, 0.0, (4 - 2**0.5)**0.5]]
if len(points) != 5:
raise TypeError('Points must be given as length 5 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
check_valid_vector(points[3], 'points[3]')
check_valid_vector(points[4], 'points[4]')
pyramid = _vtk.vtkPyramid()
pyramid.GetPointIds().SetId(0, 0)
pyramid.GetPointIds().SetId(1, 1)
pyramid.GetPointIds().SetId(2, 2)
pyramid.GetPointIds().SetId(3, 3)
pyramid.GetPointIds().SetId(4, 4)
ug = _vtk.vtkUnstructuredGrid()
ug.SetPoints(pyvista.vtk_points(np.array(points), False))
ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())
return pyvista.wrap(ug)
def Triangle(points=None):
"""Create a triangle defined by 3 points.
Parameters
----------
points : sequence, optional
Points of the triangle. Defaults to a right isosceles
triangle (see example).
Returns
-------
pyvista.PolyData
Triangle mesh.
Examples
--------
>>> import pyvista
>>> pointa = [0, 0, 0]
>>> pointb = [1, 0, 0]
>>> pointc = [0.5, 0.707, 0]
>>> triangle = pyvista.Triangle([pointa, pointb, pointc])
>>> triangle.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[0, 0, 0], [1, 0, 0], [0.5, 0.5**0.5, 0]]
if len(points) != 3:
raise TypeError('Points must be given as length 3 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
cells = np.array([[3, 0, 1, 2]])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Rectangle(points=None):
"""Create a rectangle defined by 4 points.
Parameters
----------
points : sequence, optional
Points of the rectangle. Defaults to a simple example.
Returns
-------
pyvista.PolyData
Rectangle mesh.
Examples
--------
>>> import pyvista
>>> pointa = [1.0, 0.0, 0.0]
>>> pointb = [1.0, 1.0, 0.0]
>>> pointc = [0.0, 1.0, 0.0]
>>> pointd = [0.0, 0.0, 0.0]
>>> rectangle = pyvista.Rectangle([pointa, pointb, pointc, pointd])
>>> rectangle.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]
if len(points) != 4:
raise TypeError('Points must be given as length 4 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
check_valid_vector(points[3], 'points[3]')
cells = np.array([[4, 0, 1, 2, 3]])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Circle(radius=0.5, resolution=100):
"""Create a single PolyData circle defined by radius in the XY plane.
Parameters
----------
radius : float, optional
Radius of circle.
resolution : int, optional
Number of points on the circle.
Returns
-------
pyvista.PolyData
Circle mesh.
Examples
--------
>>> import pyvista
>>> radius = 0.5
>>> circle = pyvista.Circle(radius)
>>> circle.plot(show_edges=True, line_width=5)
"""
points = np.zeros((resolution, 3))
theta = np.linspace(0.0, 2.0*np.pi, resolution)
points[:, 0] = radius * np.cos(theta)
points[:, 1] = radius * np.sin(theta)
cells = np.array([np.append(np.array([resolution]), np.arange(resolution))])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Superquadric(center=(0., 0., 0.), scale=(1., 1., 1.), size=0.5,
theta_roundness=1., phi_roundness=1.,
theta_resolution=16, phi_resolution=16,
toroidal=False, thickness=1/3):
"""Create a superquadric.
Parameters
----------
center : iterable, optional
Center of the superquadric in ``[x, y, z]``.
scale : iterable, optional
Scale factors of the superquadric in ``[x, y, z]``.
size : float, optional
Superquadric isotropic size.
theta_roundness : float, optional
Superquadric east/west roundness.
Values range from 0 (rectangular) to 1 (circular) to higher orders.
phi_roundness : float, optional
Superquadric north/south roundness.
Values range from 0 (rectangular) to 1 (circular) to higher orders.
theta_resolution : int, optional
Number of points in the longitude direction.
Values are rounded to nearest multiple of 4.
phi_resolution : int, optional
Number of points in the latitude direction.
Values are rounded to nearest multiple of 8.
toroidal : bool, optional
Whether or not the superquadric is toroidal (``True``)
or ellipsoidal (``False``).
thickness : float, optional
Superquadric ring thickness.
Only applies if toroidal is set to ``True``.
Returns
-------
pyvista.PolyData
Superquadric mesh.
See Also
--------
pyvista.ParametricSuperEllipsoid :
Parametric superquadric if toroidal is ``False``.
pyvista.ParametricSuperToroid :
Parametric superquadric if toroidal is ``True``.
Examples
--------
>>> import pyvista
>>> superquadric = pyvista.Superquadric(scale=(3., 1., 0.5),
... phi_roundness=0.1,
... theta_roundness=0.5)
>>> superquadric.plot(show_edges=True)
"""
superquadricSource = _vtk.vtkSuperquadricSource()
superquadricSource.SetCenter(center)
superquadricSource.SetScale(scale)
superquadricSource.SetSize(size)
superquadricSource.SetThetaRoundness(theta_roundness)
superquadricSource.SetPhiRoundness(phi_roundness)
superquadricSource.SetThetaResolution(round(theta_resolution/4)*4)
superquadricSource.SetPhiResolution(round(phi_resolution/8)*8)
superquadricSource.SetToroidal(toroidal)
superquadricSource.SetThickness(thickness)
superquadricSource.Update()
return pyvista.wrap(superquadricSource.GetOutput())
def PlatonicSolid(kind='tetrahedron', radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a Platonic solid of a given size.
Parameters
----------
kind : str or int, optional
The kind of Platonic solid to create. Either the name of the
polyhedron or an integer index:
* ``'tetrahedron'`` or ``0``
* ``'cube'`` or ``1``
* ``'octahedron'`` or ``2``
* ``'icosahedron'`` or ``3``
* ``'dodecahedron'`` or ``4``
radius : float, optional
The radius of the circumscribed sphere for the solid to create.
center : sequence, optional
Three-length sequence defining the center of the solid to create.
Returns
-------
pyvista.PolyData
One of the five Platonic solids. Cell scalars are defined that
assign integer labels to each face (with array name
``"FaceIndex"``).
Examples
--------
Create and plot a dodecahedron.
>>> import pyvista
>>> dodeca = pyvista.PlatonicSolid('dodecahedron')
>>> dodeca.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
kinds = {
'tetrahedron': 0,
'cube': 1,
'octahedron': 2,
'icosahedron': 3,
'dodecahedron': 4,
}
if isinstance(kind, str):
if kind not in kinds:
raise ValueError(f'Invalid Platonic solid kind "{kind}".')
kind = kinds[kind]
elif isinstance(kind, int) and kind not in range(5):
raise ValueError(f'Invalid Platonic solid index "{kind}".')
elif not isinstance(kind, int):
raise ValueError('Invalid Platonic solid index type '
f'"{type(kind).__name__}".')
check_valid_vector(center, 'center')
solid = _vtk.vtkPlatonicSolidSource()
solid.SetSolidType(kind)
solid.Update()
solid = pyvista.wrap(solid.GetOutput())
solid.scale(radius, inplace=True)
solid.points += np.asanyarray(center) - solid.center
# rename and activate cell scalars
cell_data = solid.get_array(0)
solid.clear_data()
solid.cell_data['FaceIndex'] = cell_data
return solid
def Tetrahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a tetrahedron of a given size.
A tetrahedron is composed of four congruent equilateral triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the tetrahedron.
center : sequence, optional
Three-length sequence defining the center of the tetrahedron.
Returns
-------
pyvista.PolyData
Mesh for the tetrahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot a tetrahedron.
>>> import pyvista
>>> tetra = pyvista.Tetrahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='tetrahedron', radius=radius, center=center)
def Octahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create an octahedron of a given size.
An octahedron is composed of eight congruent equilateral
triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the octahedron.
center : sequence, optional
Three-length sequence defining the center of the octahedron.
Returns
-------
pyvista.PolyData
Mesh for the octahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot an octahedron.
>>> import pyvista
>>> tetra = pyvista.Octahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='octahedron', radius=radius, center=center)
def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a dodecahedron of a given size.
A dodecahedron is composed of twelve congruent regular pentagons.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the dodecahedron.
center : sequence, optional
Three-length sequence defining the center of the dodecahedron.
Returns
-------
pyvista.PolyData
Mesh for the dodecahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot a dodecahedron.
>>> import pyvista
>>> tetra = pyvista.Dodecahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)
def Icosahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create an icosahedron of a given size.
An icosahedron is composed of twenty congruent equilateral
triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the icosahedron.
center : sequence, optional
Three-length sequence defining the center of the icosahedron.
Returns
-------
pyvista.PolyData
Mesh for the icosahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot an icosahedron.
>>> import pyvista
>>> tetra = pyvista.Icosahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='icosahedron', radius=radius, center=center)
|
[
"numpy.sum",
"pyvista.StructuredGrid",
"numpy.empty",
"numpy.allclose",
"pyvista._vtk.vtkUnstructuredGrid",
"numpy.sin",
"numpy.linalg.norm",
"numpy.arange",
"numpy.full",
"pyvista._vtk.vtkArrowSource",
"numpy.meshgrid",
"pyvista._vtk.vtkTriangleFilter",
"pyvista._vtk.vtkPlaneSource",
"pyvista._vtk.vtkArcSource",
"pyvista._vtk.vtkSphereSource",
"numpy.linspace",
"pyvista._vtk.vtkCubeSource",
"pyvista.PolyData",
"pyvista._vtk.vtkDiskSource",
"pyvista._vtk.vtkPyramid",
"numpy.cross",
"pyvista._vtk.vtkConeSource",
"pyvista._vtk.vtkRegularPolygonSource",
"pyvista._vtk.vtkLinearExtrusionFilter",
"numpy.cos",
"pyvista._vtk.vtkPlatonicSolidSource",
"pyvista.utilities.check_valid_vector",
"pyvista._vtk.vtkTubeFilter",
"pyvista._vtk.vtkSuperquadricSource",
"pyvista._vtk.vtkCylinderSource",
"pyvista._vtk.vtkRTAnalyticSource",
"pyvista._vtk.vtkVectorText",
"numpy.asanyarray",
"numpy.zeros",
"pyvista._vtk.vtkTessellatedBoxSource",
"pyvista._vtk.vtkLineSource",
"numpy.array",
"pyvista.wrap"
] |
[((894, 926), 'numpy.cross', 'np.cross', (['normx', '[0, 1.0, 1e-07]'], {}), '(normx, [0, 1.0, 1e-07])\n', (902, 926), True, 'import numpy as np\n'), ((944, 965), 'numpy.linalg.norm', 'np.linalg.norm', (['normz'], {}), '(normz)\n', (958, 965), True, 'import numpy as np\n'), ((978, 1000), 'numpy.cross', 'np.cross', (['normz', 'normx'], {}), '(normz, normx)\n', (986, 1000), True, 'import numpy as np\n'), ((1014, 1030), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1022, 1030), True, 'import numpy as np\n'), ((2352, 2376), 'pyvista._vtk.vtkCylinderSource', '_vtk.vtkCylinderSource', ([], {}), '()\n', (2374, 2376), False, 'from pyvista import _vtk\n'), ((4443, 4490), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {'num': 'theta_resolution'}), '(0, 2 * np.pi, num=theta_resolution)\n', (4454, 4490), True, 'import numpy as np\n'), ((4523, 4544), 'numpy.meshgrid', 'np.meshgrid', (['r', 'theta'], {}), '(r, theta)\n', (4534, 4544), True, 'import numpy as np\n'), ((4847, 4864), 'numpy.empty', 'np.empty', (['yy.size'], {}), '(yy.size)\n', (4855, 4864), True, 'import numpy as np\n'), ((4874, 4909), 'numpy.full', 'np.full', (['(X.size, z_resolution)', 'dz'], {}), '((X.size, z_resolution), dz)\n', (4881, 4909), True, 'import numpy as np\n'), ((4920, 4943), 'numpy.arange', 'np.arange', (['z_resolution'], {}), '(z_resolution)\n', (4929, 4943), True, 'import numpy as np\n'), ((5007, 5031), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', ([], {}), '()\n', (5029, 5031), False, 'import pyvista\n'), ((5177, 5202), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (5185, 5202), True, 'import numpy as np\n'), ((5541, 5562), 'numpy.array', 'np.array', (['grid.center'], {}), '(grid.center)\n', (5549, 5562), True, 'import numpy as np\n'), ((5582, 5598), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (5590, 5598), True, 'import numpy as np\n'), ((6797, 6818), 'pyvista._vtk.vtkArrowSource', '_vtk.vtkArrowSource', ([], {}), '()\n', (6816, 6818), False, 'from pyvista import _vtk\n'), ((8845, 8867), 'pyvista._vtk.vtkSphereSource', '_vtk.vtkSphereSource', ([], {}), '()\n', (8865, 8867), False, 'from pyvista import _vtk\n'), ((10211, 10232), 'pyvista._vtk.vtkPlaneSource', '_vtk.vtkPlaneSource', ([], {}), '()\n', (10230, 10232), False, 'from pyvista import _vtk\n'), ((11500, 11520), 'pyvista._vtk.vtkLineSource', '_vtk.vtkLineSource', ([], {}), '()\n', (11518, 11520), False, 'from pyvista import _vtk\n'), ((13005, 13025), 'pyvista._vtk.vtkLineSource', '_vtk.vtkLineSource', ([], {}), '()\n', (13023, 13025), False, 'from pyvista import _vtk\n'), ((13257, 13277), 'pyvista._vtk.vtkTubeFilter', '_vtk.vtkTubeFilter', ([], {}), '()\n', (13275, 13277), False, 'from pyvista import _vtk\n'), ((15362, 15382), 'pyvista._vtk.vtkCubeSource', '_vtk.vtkCubeSource', ([], {}), '()\n', (15380, 15382), False, 'from pyvista import _vtk\n'), ((16969, 16999), 'pyvista._vtk.vtkTessellatedBoxSource', '_vtk.vtkTessellatedBoxSource', ([], {}), '()\n', (16997, 16999), False, 'from pyvista import _vtk\n'), ((18297, 18317), 'pyvista._vtk.vtkConeSource', '_vtk.vtkConeSource', ([], {}), '()\n', (18315, 18317), False, 'from pyvista import _vtk\n'), ((19590, 19620), 'pyvista._vtk.vtkRegularPolygonSource', '_vtk.vtkRegularPolygonSource', ([], {}), '()\n', (19618, 19620), False, 'from pyvista import _vtk\n'), ((20899, 20919), 'pyvista._vtk.vtkDiskSource', '_vtk.vtkDiskSource', ([], {}), '()\n', (20917, 20919), False, 'from pyvista import _vtk\n'), ((21089, 21105), 'numpy.array', 'np.array', 
(['normal'], {}), '(normal)\n', (21097, 21105), True, 'import numpy as np\n'), ((21119, 21135), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (21127, 21135), True, 'import numpy as np\n'), ((21723, 21743), 'pyvista._vtk.vtkVectorText', '_vtk.vtkVectorText', ([], {}), '()\n', (21741, 21743), False, 'from pyvista import _vtk\n'), ((21788, 21819), 'pyvista._vtk.vtkLinearExtrusionFilter', '_vtk.vtkLinearExtrusionFilter', ([], {}), '()\n', (21817, 21819), False, 'from pyvista import _vtk\n'), ((22008, 22032), 'pyvista._vtk.vtkTriangleFilter', '_vtk.vtkTriangleFilter', ([], {}), '()\n', (22030, 22032), False, 'from pyvista import _vtk\n'), ((24166, 24192), 'pyvista._vtk.vtkRTAnalyticSource', '_vtk.vtkRTAnalyticSource', ([], {}), '()\n', (24190, 24192), False, 'from pyvista import _vtk\n'), ((26083, 26119), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['pointa', '"""pointa"""'], {}), "(pointa, 'pointa')\n", (26101, 26119), False, 'from pyvista.utilities import check_valid_vector\n'), ((26124, 26160), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['pointb', '"""pointb"""'], {}), "(pointb, 'pointb')\n", (26142, 26160), False, 'from pyvista.utilities import check_valid_vector\n'), ((26165, 26201), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['center', '"""center"""'], {}), "(center, 'center')\n", (26183, 26201), False, 'from pyvista.utilities import check_valid_vector\n'), ((26621, 26640), 'pyvista._vtk.vtkArcSource', '_vtk.vtkArcSource', ([], {}), '()\n', (26638, 26640), False, 'from pyvista import _vtk\n'), ((28714, 28750), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['center', '"""center"""'], {}), "(center, 'center')\n", (28732, 28750), False, 'from pyvista.utilities import check_valid_vector\n'), ((28903, 28922), 'pyvista._vtk.vtkArcSource', '_vtk.vtkArcSource', ([], {}), '()\n', (28920, 28922), False, 'from pyvista import _vtk\n'), ((29018, 29054), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['normal', '"""normal"""'], {}), "(normal, 'normal')\n", (29036, 29054), False, 'from pyvista.utilities import check_valid_vector\n'), ((29086, 29120), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['polar', '"""polar"""'], {}), "(polar, 'polar')\n", (29104, 29120), False, 'from pyvista.utilities import check_valid_vector\n'), ((29342, 29358), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (29350, 29358), True, 'import numpy as np\n'), ((29438, 29477), 'numpy.linspace', 'np.linspace', (['(0.0)', 'angle', '(resolution + 1)'], {}), '(0.0, angle, resolution + 1)\n', (29449, 29477), True, 'import numpy as np\n'), ((30698, 30740), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[0]', '"""points[0]"""'], {}), "(points[0], 'points[0]')\n", (30716, 30740), False, 'from pyvista.utilities import check_valid_vector\n'), ((30745, 30787), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[1]', '"""points[1]"""'], {}), "(points[1], 'points[1]')\n", (30763, 30787), False, 'from pyvista.utilities import check_valid_vector\n'), ((30792, 30834), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[2]', '"""points[2]"""'], {}), "(points[2], 'points[2]')\n", (30810, 30834), False, 'from pyvista.utilities import check_valid_vector\n'), ((30839, 30881), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[3]', '"""points[3]"""'], {}), "(points[3], 'points[3]')\n", (30857, 30881), False, 'from pyvista.utilities 
import check_valid_vector\n'), ((30886, 30928), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[4]', '"""points[4]"""'], {}), "(points[4], 'points[4]')\n", (30904, 30928), False, 'from pyvista.utilities import check_valid_vector\n'), ((30944, 30961), 'pyvista._vtk.vtkPyramid', '_vtk.vtkPyramid', ([], {}), '()\n', (30959, 30961), False, 'from pyvista import _vtk\n'), ((31162, 31188), 'pyvista._vtk.vtkUnstructuredGrid', '_vtk.vtkUnstructuredGrid', ([], {}), '()\n', (31186, 31188), False, 'from pyvista import _vtk\n'), ((31331, 31347), 'pyvista.wrap', 'pyvista.wrap', (['ug'], {}), '(ug)\n', (31343, 31347), False, 'import pyvista\n'), ((32103, 32145), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[0]', '"""points[0]"""'], {}), "(points[0], 'points[0]')\n", (32121, 32145), False, 'from pyvista.utilities import check_valid_vector\n'), ((32150, 32192), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[1]', '"""points[1]"""'], {}), "(points[1], 'points[1]')\n", (32168, 32192), False, 'from pyvista.utilities import check_valid_vector\n'), ((32197, 32239), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[2]', '"""points[2]"""'], {}), "(points[2], 'points[2]')\n", (32215, 32239), False, 'from pyvista.utilities import check_valid_vector\n'), ((32253, 32277), 'numpy.array', 'np.array', (['[[3, 0, 1, 2]]'], {}), '([[3, 0, 1, 2]])\n', (32261, 32277), True, 'import numpy as np\n'), ((33144, 33186), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[0]', '"""points[0]"""'], {}), "(points[0], 'points[0]')\n", (33162, 33186), False, 'from pyvista.utilities import check_valid_vector\n'), ((33191, 33233), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[1]', '"""points[1]"""'], {}), "(points[1], 'points[1]')\n", (33209, 33233), False, 'from pyvista.utilities import check_valid_vector\n'), ((33238, 33280), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[2]', '"""points[2]"""'], {}), "(points[2], 'points[2]')\n", (33256, 33280), False, 'from pyvista.utilities import check_valid_vector\n'), ((33285, 33327), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['points[3]', '"""points[3]"""'], {}), "(points[3], 'points[3]')\n", (33303, 33327), False, 'from pyvista.utilities import check_valid_vector\n'), ((33341, 33368), 'numpy.array', 'np.array', (['[[4, 0, 1, 2, 3]]'], {}), '([[4, 0, 1, 2, 3]])\n', (33349, 33368), True, 'import numpy as np\n'), ((33950, 33975), 'numpy.zeros', 'np.zeros', (['(resolution, 3)'], {}), '((resolution, 3))\n', (33958, 33975), True, 'import numpy as np\n'), ((33988, 34029), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0 * np.pi)', 'resolution'], {}), '(0.0, 2.0 * np.pi, resolution)\n', (33999, 34029), True, 'import numpy as np\n'), ((36246, 36274), 'pyvista._vtk.vtkSuperquadricSource', '_vtk.vtkSuperquadricSource', ([], {}), '()\n', (36272, 36274), False, 'from pyvista import _vtk\n'), ((38570, 38606), 'pyvista.utilities.check_valid_vector', 'check_valid_vector', (['center', '"""center"""'], {}), "(center, 'center')\n", (38588, 38606), False, 'from pyvista.utilities import check_valid_vector\n'), ((38620, 38649), 'pyvista._vtk.vtkPlatonicSolidSource', '_vtk.vtkPlatonicSolidSource', ([], {}), '()\n', (38647, 38649), False, 'from pyvista import _vtk\n'), ((836, 855), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (844, 855), True, 'import numpy as np\n'), ((856, 881), 'numpy.linalg.norm', 
'np.linalg.norm', (['direction'], {}), '(direction)\n', (870, 881), True, 'import numpy as np\n'), ((1164, 1200), 'numpy.allclose', 'np.allclose', (['center', '[0.0, 0.0, 0.0]'], {}), '(center, [0.0, 0.0, 0.0])\n', (1175, 1200), True, 'import numpy as np\n'), ((1222, 1238), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (1230, 1238), True, 'import numpy as np\n'), ((4605, 4625), 'numpy.cos', 'np.cos', (['theta_matrix'], {}), '(theta_matrix)\n', (4611, 4625), True, 'import numpy as np\n'), ((4650, 4670), 'numpy.sin', 'np.sin', (['theta_matrix'], {}), '(theta_matrix)\n', (4656, 4670), True, 'import numpy as np\n'), ((5211, 5237), 'numpy.allclose', 'np.allclose', (['vx', 'direction'], {}), '(vx, direction)\n', (5222, 5237), True, 'import numpy as np\n'), ((5260, 5285), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (5274, 5285), True, 'import numpy as np\n'), ((5344, 5362), 'numpy.linalg.norm', 'np.linalg.norm', (['vx'], {}), '(vx)\n', (5358, 5362), True, 'import numpy as np\n'), ((5376, 5399), 'numpy.cross', 'np.cross', (['direction', 'vx'], {}), '(direction, vx)\n', (5384, 5399), True, 'import numpy as np\n'), ((5415, 5444), 'numpy.array', 'np.array', (['[vx, vy, direction]'], {}), '([vx, vy, direction])\n', (5423, 5444), True, 'import numpy as np\n'), ((11806, 11822), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (11814, 11822), True, 'import numpy as np\n'), ((26999, 27044), 'numpy.sum', 'np.sum', (['((arc.points[0] - center) ** 2)'], {'axis': '(0)'}), '((arc.points[0] - center) ** 2, axis=0)\n', (27005, 27044), True, 'import numpy as np\n'), ((27055, 27111), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0 + 1.0 / resolution)', '(1.0 / resolution)'], {}), '(0.0, 1.0 + 1.0 / resolution, 1.0 / resolution)\n', (27064, 27111), True, 'import numpy as np\n'), ((29380, 29425), 'numpy.sum', 'np.sum', (['((arc.points[0] - center) ** 2)'], {'axis': '(0)'}), '((arc.points[0] - center) ** 2, axis=0)\n', (29386, 29425), True, 'import numpy as np\n'), ((32302, 32333), 'pyvista.PolyData', 'pyvista.PolyData', (['points', 'cells'], {}), '(points, cells)\n', (32318, 32333), False, 'import pyvista\n'), ((33393, 33424), 'pyvista.PolyData', 'pyvista.PolyData', (['points', 'cells'], {}), '(points, cells)\n', (33409, 33424), False, 'import pyvista\n'), ((34056, 34069), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (34062, 34069), True, 'import numpy as np\n'), ((34098, 34111), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (34104, 34111), True, 'import numpy as np\n'), ((34217, 34248), 'pyvista.PolyData', 'pyvista.PolyData', (['points', 'cells'], {}), '(points, cells)\n', (34233, 34248), False, 'import pyvista\n'), ((38800, 38821), 'numpy.asanyarray', 'np.asanyarray', (['center'], {}), '(center)\n', (38813, 38821), True, 'import numpy as np\n'), ((4388, 4406), 'numpy.array', 'np.array', (['[radius]'], {}), '([radius])\n', (4396, 4406), True, 'import numpy as np\n'), ((4718, 4746), 'numpy.array', 'np.array', (['([X] * z_resolution)'], {}), '([X] * z_resolution)\n', (4726, 4746), True, 'import numpy as np\n'), ((4764, 4792), 'numpy.array', 'np.array', (['([Y] * z_resolution)'], {}), '([Y] * z_resolution)\n', (4772, 4792), True, 'import numpy as np\n'), ((7127, 7152), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (7141, 7152), True, 'import numpy as np\n'), ((11277, 11293), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (11285, 11293), True, 'import numpy as np\n'), ((11387, 11403), 'numpy.array', 
'np.array', (['pointb'], {}), '(pointb)\n', (11395, 11403), True, 'import numpy as np\n'), ((11753, 11783), 'numpy.sum', 'np.sum', (['((p1 - p0) ** 2)'], {'axis': '(1)'}), '((p1 - p0) ** 2, axis=1)\n', (11759, 11783), True, 'import numpy as np\n'), ((12777, 12793), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (12785, 12793), True, 'import numpy as np\n'), ((12887, 12903), 'numpy.array', 'np.array', (['pointb'], {}), '(pointb)\n', (12895, 12903), True, 'import numpy as np\n'), ((16827, 16843), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (16835, 16843), True, 'import numpy as np\n'), ((26953, 26969), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (26961, 26969), True, 'import numpy as np\n'), ((31225, 31241), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (31233, 31241), True, 'import numpy as np\n'), ((15421, 15437), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (15429, 15437), True, 'import numpy as np\n'), ((34144, 34166), 'numpy.array', 'np.array', (['[resolution]'], {}), '([resolution])\n', (34152, 34166), True, 'import numpy as np\n'), ((34168, 34189), 'numpy.arange', 'np.arange', (['resolution'], {}), '(resolution)\n', (34177, 34189), True, 'import numpy as np\n'), ((26248, 26264), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (26256, 26264), True, 'import numpy as np\n'), ((26267, 26283), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (26275, 26283), True, 'import numpy as np\n'), ((26309, 26325), 'numpy.array', 'np.array', (['pointb'], {}), '(pointb)\n', (26317, 26325), True, 'import numpy as np\n'), ((26328, 26344), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (26336, 26344), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
'''
Manage Grafana v4.0 users
.. versionadded:: 2017.7.0
:configuration: This state requires a configuration profile to be configured
in the minion config, minion pillar, or master config. The module will use
the 'grafana' key by default, if defined.
Example configuration using basic authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_user: admin
grafana_password: <PASSWORD>
grafana_timeout: 3
Example configuration using token based authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_token: token
grafana_timeout: 3
.. code-block:: yaml
Ensure foobar user is present:
grafana4_user.present:
- name: foobar
- password: <PASSWORD>
- email: "foobar@localhost"
- fullname: Foo Bar
- is_admin: true
'''
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate as dictupdate
from salt.utils.dictdiffer import deep_diff
# Import 3rd-party libs
from salt.ext.six import string_types
from requests.exceptions import HTTPError
def __virtual__():
'''Only load if grafana4 module is available'''
return 'grafana4.get_user' in __salt__
def present(name,
password,
email=None,
is_admin=False,
fullname=None,
theme=None,
default_organization=None,
organizations=None,
profile='grafana'):
'''
Ensure that a user is present.
name
Name of the user.
password
<PASSWORD>.
email
Optional - Email of the user.
is_admin
Optional - Set user as admin user. Default: False
fullname
Optional - Full name of the user.
theme
Optional - Selected theme of the user.
default_organization
Optional - Set user's default organization
organizations
        Optional - List of organizations the user belongs to. Each entry is either an organization name (added with the Viewer role) or a mapping of organization name to role.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
Here is an example for using default_organization and organizations
parameters. The user will be added as a viewer to ReadonlyOrg, as an editor
    to TestOrg and as an admin to Staging. When she logs on, TestOrg will be
    the default. The state will fail if any organization is unknown or invalid
    roles are defined.
.. code-block:: yaml
add_grafana_test_user:
grafana4_user.present:
- name: test
- password: <PASSWORD>
- fullname: 'Test User'
- default_organization: TestOrg
- organizations:
- ReadonlyOrg
- TestOrg: Editor
- Staging: Admin
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
user = __salt__['grafana4.get_user'](name, profile)
create = not user
if create:
if __opts__['test']:
ret['comment'] = 'User {0} will be created'.format(name)
return ret
__salt__['grafana4.create_user'](
login=name,
password=password,
email=email,
name=fullname,
profile=profile)
user = __salt__['grafana4.get_user'](name, profile)
ret['changes']['new'] = user
user_data = __salt__['grafana4.get_user_data'](user['id'], profile=profile)
if default_organization:
try:
org_id = __salt__['grafana4.get_org'](default_organization, profile)['id']
except HTTPError as e:
ret['comment'] = 'Error while looking up user {}\'s default grafana org {}: {}'.format(
name, default_organization, e)
ret['result'] = False
return ret
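    # build the desired attribute set and the current one; grafana4.update_user
    # is only called further down when the two differ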
new_data = _get_json_data(login=name, email=email, name=fullname, theme=theme,
orgId=org_id if default_organization else None,
defaults=user_data)
old_data = _get_json_data(login=None, email=None, name=None, theme=None,
orgId=None,
defaults=user_data)
if organizations:
ret = _update_user_organizations(name, user['id'], organizations, ret, profile)
if 'result' in ret and ret['result'] is False:
return ret
if new_data != old_data:
if __opts__['test']:
ret['comment'] = 'User {0} will be updated'.format(name)
dictupdate.update(ret['changes'], deep_diff(old_data, new_data))
return ret
        __salt__['grafana4.update_user'](user['id'], profile=profile, orgid=org_id if default_organization else None, **new_data)
dictupdate.update(
ret['changes'], deep_diff(
user_data, __salt__['grafana4.get_user_data'](user['id'])))
if user['isAdmin'] != is_admin:
if __opts__['test']:
ret['comment'] = 'User {0} isAdmin status will be updated'.format(
name)
return ret
__salt__['grafana4.update_user_permissions'](
user['id'], isGrafanaAdmin=is_admin, profile=profile)
dictupdate.update(ret['changes'], deep_diff(
user, __salt__['grafana4.get_user'](name, profile)))
ret['result'] = True
if create:
ret['changes'] = ret['changes']['new']
ret['comment'] = 'New user {0} added'.format(name)
else:
if ret['changes']:
ret['comment'] = 'User {0} updated'.format(name)
else:
ret['changes'] = {}
ret['comment'] = 'User {0} already up-to-date'.format(name)
return ret
def absent(name, profile='grafana'):
'''
    Ensure that a user is absent.
name
Name of the user to remove.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
user = __salt__['grafana4.get_user'](name, profile)
if user:
if __opts__['test']:
ret['comment'] = 'User {0} will be deleted'.format(name)
return ret
orgs = __salt__['grafana4.get_user_orgs'](user['id'], profile=profile)
__salt__['grafana4.delete_user'](user['id'], profile=profile)
for org in orgs:
if org['name'] == user['email']:
# Remove entire Org in the case where auto_assign_org=false:
# When set to false, new users will automatically cause a new
# organization to be created for that new user (the org name
# will be the email)
__salt__['grafana4.delete_org'](org['orgId'], profile=profile)
else:
__salt__['grafana4.delete_user_org'](
user['id'], org['orgId'], profile=profile)
else:
ret['result'] = True
ret['comment'] = 'User {0} already absent'.format(name)
return ret
ret['result'] = True
ret['changes'][name] = 'Absent'
ret['comment'] = 'User {0} was deleted'.format(name)
return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs
def _update_user_organizations(user_name, user_id, organizations, ret, profile):
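    # make sure the user is a member of every listed organization with the
    # requested role, creating or updating the membership as needed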
    for org in organizations:
        # each entry is either a plain organization name (added with the Viewer
        # role) or a single-item mapping of organization name to role
        if isinstance(org, dict):
            org_name, org_role = list(org.items())[0]
        else:
            org_name, org_role = org, 'Viewer'
try:
org_users = __salt__['grafana4.get_org_users'](org_name, profile)
except HTTPError as e:
ret['comment'] = 'Error while looking up user {}\'s grafana org {}: {}'.format(
user_name, org_name, e)
ret['result'] = False
return ret
user_found = False
for org_user in org_users:
if org_user['userId'] == user_id:
if org_user['role'] != org_role:
try:
__salt__['grafana4.update_org_user'](user_id,
orgname=org_name, profile=profile, role=org_role)
except HTTPError as e:
ret['comment'] = 'Error while setting role {} for user {} in grafana org {}: {}'.format(
org_role, user_name, org_name, e)
ret['result'] = False
return ret
ret['changes'][org_name] = org_role
user_found = True
break
if not user_found:
ret['changes'][org_name] = org_role
__salt__['grafana4.create_org_user'](orgname=org_name,
profile=profile, role=org_role, loginOrEmail=user_name)
return ret
|
[
"salt.utils.dictdiffer.deep_diff"
] |
[((4816, 4845), 'salt.utils.dictdiffer.deep_diff', 'deep_diff', (['old_data', 'new_data'], {}), '(old_data, new_data)\n', (4825, 4845), False, 'from salt.utils.dictdiffer import deep_diff\n')]
|
"""Turtle invaders"""
import turtle
import time
import random
class CircleBoundary(object):
def __init__(self, pos, size):
self.pos = pos
self.size = size
def goto(self, pos, y=None):
if y:
pos = turtle.Vec2D(pos, y)
self.pos = pos
def intersects(self, other):
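        # two circles overlap when the distance between their centres is less
        # than the sum of their radii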
dist_vec = self.pos - other.pos
return abs(dist_vec) < (self.size + other.size)
class CollisionSprite():
def __init__(self, pos, size):
self.t = turtle.Turtle()
self.t.speed(0)
self.t.penup()
self.t.goto(pos)
        self.boundary = CircleBoundary(pos, size)
def goto(self, pos, y=None):
self.t.goto(pos, y)
self.boundary.goto(pos, y)
class Bullet(CollisionSprite):
def __init__(self, pos, heading=90, colour='black'):
super().__init__(pos, 5)
self.t.hideturtle()
self.t.left(heading)
self.t.pencolor(colour)
self.live = True
def frame(self):
self.t.clear()
self.t.forward(5)
self.boundary.goto(self.t.position())
if self.t.position()[1] > 400 or self.t.position()[1] < -400:
self.die()
else:
self.t.dot()
def die(self):
self.t.clear()
self.live = False
self.t = None
class Alien(CollisionSprite):
"""An alien sprite is a turtle with an alien image"""
def __init__(self, pos, scene, start_frame=0):
super().__init__(pos, 10)
self.t.shape("triangle")
self.frame_count = start_frame
self.speed = turtle.Vec2D(100/120.0, 0)
self.scene = scene
def frame(self):
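        # patrol cycle: drift one way for 120 frames, drift back for the next
        # 120, then reset the counter and repeat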
if self.frame_count < 120:
self.goto(self.t.position() + self.speed)
elif self.frame_count < 240:
            self.goto(self.t.position() - self.speed)
else:
self.frame_count = 0
self.frame_count += 1
if random.randint(0, 400) == 1:
self.scene.add_alien_bullet(Bullet(self.t.position(), heading=-90, colour='red'))
def die(self):
self.t.clear()
self.t.hideturtle()
self.t = None
class Player(CollisionSprite):
def __init__(self, pos, scene):
super().__init__(pos, 10)
self.t.shape("turtle")
self.t.color("green")
self.t.left(90)
self.scene = scene
self.moving = False
self.dying = 0
def die(self):
self.t.color('red')
self.dying = 10
def frame(self):
if self.moving:
self.goto(self.t.position() + self.moving)
if self.dying:
self.dying -= 1
if self.dying == 0:
self.t.color('green')
def left_press(self):
self.moving = turtle.Vec2D(-5, 0)
def right_press(self):
self.moving = turtle.Vec2D(5, 0)
def leftright_release(self):
self.moving = False
def fire(self):
self.scene.add_player_bullet(Bullet(self.t.position()))
class GameScene():
def __init__(self, game):
self.game = game
self.t = turtle.Turtle()
self.t.speed(0)
self.t.hideturtle()
self.angle = 0
self.wave = [Alien(turtle.Vec2D(n, 100), self) for n in range(-350, 350, 50)]
self.wave += [Alien(turtle.Vec2D(n, 200), self, start_frame=120) for n in range(-350, 350, 50)]
self.player = Player(turtle.Vec2D(0, -380), self)
self.player_bullets = []
self.alien_bullets = []
def frame(self):
self.player.frame()
for alien in self.wave:
alien.frame()
        # iterate over copies of the lists so removing dead bullets or aliens
        # while looping does not skip elements
        for bullet in self.player_bullets[:]:
            if bullet.live:
                bullet.frame()
                # Bullet alien collisions
                for alien in self.wave[:]:
                    if alien.boundary.intersects(bullet.boundary):
                        self.wave.remove(alien)
                        alien.die()
                        bullet.die()
                        break  # the bullet is spent, stop checking other aliens
            else:
                self.player_bullets.remove(bullet)
        for bullet in self.alien_bullets[:]:
            if bullet.live:
                bullet.frame()
                if self.player.boundary.intersects(bullet.boundary):
                    self.player.die()
                    bullet.die()
            else:
                self.alien_bullets.remove(bullet)
def add_player_bullet(self, bullet):
self.player_bullets.append(bullet)
def add_alien_bullet(self, bullet):
self.alien_bullets.append(bullet)
def enter_scene(self):
turtle.onkeypress(self.player.left_press, "Left")
turtle.onkeypress(self.player.right_press, "Right")
turtle.onkeyrelease(self.player.leftright_release, "Left")
turtle.onkeyrelease(self.player.leftright_release, "Right")
turtle.onkeyrelease(self.player.fire, "space")
def exit_scene(self):
turtle.onkeypress(None, "Left")
turtle.onkeypress(None, "Right")
turtle.onkeyrelease(None, "Left")
turtle.onkeyrelease(None, "Right")
class Game():
def __init__(self):
turtle.tracer(0, 0)
turtle.listen()
self.scene = None
self.set_scene(GameScene(self))
def set_scene(self, scene):
if self.scene:
self.scene.exit_scene()
self.scene = None
scene.enter_scene()
self.scene = scene
def run(self):
# main loop
while True:
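            # with turtle.tracer(0, 0) set in __init__, nothing is drawn until
            # turtle.update() is called, so each pass renders one frame capped
            # at roughly 60 FPS by the sleep below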
if self.scene:
self.scene.frame()
turtle.update()
time.sleep(1/60)
game = Game()
game.run()
|
[
"turtle.listen",
"random.randint",
"turtle.Turtle",
"turtle.Vec2D",
"time.sleep",
"turtle.tracer",
"turtle.update",
"turtle.onkeypress",
"turtle.onkeyrelease"
] |
[((519, 534), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (532, 534), False, 'import turtle\n'), ((1636, 1664), 'turtle.Vec2D', 'turtle.Vec2D', (['(100 / 120.0)', '(0)'], {}), '(100 / 120.0, 0)\n', (1648, 1664), False, 'import turtle\n'), ((2855, 2874), 'turtle.Vec2D', 'turtle.Vec2D', (['(-5)', '(0)'], {}), '(-5, 0)\n', (2867, 2874), False, 'import turtle\n'), ((2936, 2954), 'turtle.Vec2D', 'turtle.Vec2D', (['(5)', '(0)'], {}), '(5, 0)\n', (2948, 2954), False, 'import turtle\n'), ((3211, 3226), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (3224, 3226), False, 'import turtle\n'), ((4801, 4850), 'turtle.onkeypress', 'turtle.onkeypress', (['self.player.left_press', '"""Left"""'], {}), "(self.player.left_press, 'Left')\n", (4818, 4850), False, 'import turtle\n'), ((4864, 4915), 'turtle.onkeypress', 'turtle.onkeypress', (['self.player.right_press', '"""Right"""'], {}), "(self.player.right_press, 'Right')\n", (4881, 4915), False, 'import turtle\n'), ((4938, 4996), 'turtle.onkeyrelease', 'turtle.onkeyrelease', (['self.player.leftright_release', '"""Left"""'], {}), "(self.player.leftright_release, 'Left')\n", (4957, 4996), False, 'import turtle\n'), ((5006, 5065), 'turtle.onkeyrelease', 'turtle.onkeyrelease', (['self.player.leftright_release', '"""Right"""'], {}), "(self.player.leftright_release, 'Right')\n", (5025, 5065), False, 'import turtle\n'), ((5075, 5121), 'turtle.onkeyrelease', 'turtle.onkeyrelease', (['self.player.fire', '"""space"""'], {}), "(self.player.fire, 'space')\n", (5094, 5121), False, 'import turtle\n'), ((5160, 5191), 'turtle.onkeypress', 'turtle.onkeypress', (['None', '"""Left"""'], {}), "(None, 'Left')\n", (5177, 5191), False, 'import turtle\n'), ((5205, 5237), 'turtle.onkeypress', 'turtle.onkeypress', (['None', '"""Right"""'], {}), "(None, 'Right')\n", (5222, 5237), False, 'import turtle\n'), ((5260, 5293), 'turtle.onkeyrelease', 'turtle.onkeyrelease', (['None', '"""Left"""'], {}), "(None, 'Left')\n", (5279, 5293), False, 'import turtle\n'), ((5303, 5337), 'turtle.onkeyrelease', 'turtle.onkeyrelease', (['None', '"""Right"""'], {}), "(None, 'Right')\n", (5322, 5337), False, 'import turtle\n'), ((5397, 5416), 'turtle.tracer', 'turtle.tracer', (['(0)', '(0)'], {}), '(0, 0)\n', (5410, 5416), False, 'import turtle\n'), ((5426, 5441), 'turtle.listen', 'turtle.listen', ([], {}), '()\n', (5439, 5441), False, 'import turtle\n'), ((256, 276), 'turtle.Vec2D', 'turtle.Vec2D', (['pos', 'y'], {}), '(pos, y)\n', (268, 276), False, 'import turtle\n'), ((1990, 2012), 'random.randint', 'random.randint', (['(0)', '(400)'], {}), '(0, 400)\n', (2004, 2012), False, 'import random\n'), ((3527, 3548), 'turtle.Vec2D', 'turtle.Vec2D', (['(0)', '(-380)'], {}), '(0, -380)\n', (3539, 3548), False, 'import turtle\n'), ((5876, 5894), 'time.sleep', 'time.sleep', (['(1 / 60)'], {}), '(1 / 60)\n', (5886, 5894), False, 'import time\n'), ((3333, 3353), 'turtle.Vec2D', 'turtle.Vec2D', (['n', '(100)'], {}), '(n, 100)\n', (3345, 3353), False, 'import turtle\n'), ((3421, 3441), 'turtle.Vec2D', 'turtle.Vec2D', (['n', '(200)'], {}), '(n, 200)\n', (3433, 3441), False, 'import turtle\n'), ((5847, 5862), 'turtle.update', 'turtle.update', ([], {}), '()\n', (5860, 5862), False, 'import turtle\n')]
|
"""
Sample script using EEGNet to classify Event-Related Potential (ERP) EEG data
from a four-class classification task, using the sample dataset provided in
the MNE [1, 2] package:
https://martinos.org/mne/stable/manual/sample_dataset.html#ch-sample-data
The four classes used from this dataset are:
LA: Left-ear auditory stimulation
RA: Right-ear auditory stimulation
LV: Left visual field stimulation
RV: Right visual field stimulation
The code to process, filter and epoch the data are originally from Alexandre
Barachant's PyRiemann [3] package, released under the BSD 3-clause. A copy of
the BSD 3-clause license has been provided together with this software to
comply with software licensing requirements.
When you first run this script, MNE will download the dataset and prompt you
to confirm the download location (defaults to ~/mne_data). Follow the prompts
to continue. The dataset size is approx. 1.5GB download.
For comparative purposes you can also compare EEGNet performance to using
Riemannian geometric approaches with xDAWN spatial filtering [4-8] using
PyRiemann (code provided below).
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, MNE software for processing MEG and EEG data,
NeuroImage, Volume 86, 1 February 2014, Pages 446-460, ISSN 1053-8119.
[2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, MEG and EEG data
analysis with MNE-Python, Frontiers in Neuroscience, Volume 7, 2013.
[3] https://github.com/alexandrebarachant/pyRiemann.
[4] <NAME>, <NAME> ,"A Plug&Play P300 BCI Using Information Geometry"
arXiv:1409.0107. link
[5] <NAME>, <NAME>, <NAME> ,"A New generation of Brain-Computer
Interface Based on Riemannian Geometry", arXiv: 1310.8115.
[6] <NAME> and <NAME>, "Channel selection procedure using riemannian
distance for BCI applications," in 2011 5th International IEEE/EMBS
Conference on Neural Engineering (NER), 2011, 348-351.
[7] <NAME>, <NAME>, <NAME> and <NAME>, “Multiclass
Brain-Computer Interface Classification by Riemannian Geometry,” in IEEE
Transactions on Biomedical Engineering, vol. 59, no. 4, p. 920-928, 2012.
[8] <NAME>, <NAME>, <NAME> and <NAME>, “Classification of
covariance matrices using a Riemannian-based kernel for BCI applications“,
in NeuroComputing, vol. 112, p. 172-178, 2013.
Portions of this project are works of the United States Government and are not
subject to domestic copyright protection under 17 USC Sec. 105. Those
portions are released world-wide under the terms of the Creative Commons Zero
1.0 (CC0) license.
Other portions of this project are subject to domestic copyright protection
under 17 USC Sec. 105. Those portions are licensed under the Apache 2.0
license. The complete text of the license governing this material is in
the file labeled LICENSE.TXT that is a part of this project's official
distribution.
"""
import numpy as np
# mne imports
import mne
from mne import io
from mne.datasets import sample
# EEGNet-specific imports
from EEGModels import EEGNet
from tensorflow.keras import utils as np_utils
from tensorflow.keras.callbacks import ModelCheckpoint
# PyRiemann imports
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from pyriemann.utils.viz import plot_confusion_matrix
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
# tools for plotting confusion matrices
from matplotlib import pyplot as plt
plt.switch_backend('agg')
from sklearn.metrics import confusion_matrix
##################### Process, filter and epoch the data ######################
data_path = sample.data_path()
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0., 1
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True, verbose=False)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1]
# extract raw data. scale by 1000 due to scaling sensitivity in deep learning
X = epochs.get_data()*1000 # format is in (trials, channels, samples)
y = labels
kernels, chans, samples = 1, 60, 151
# take 50/25/25 percent of the data to train/validate/test
X_train = X[0:144,]
Y_train = y[0:144]
X_validate = X[144:216,]
Y_validate = y[144:216]
X_test = X[216:,]
Y_test = y[216:]
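# keep a copy of the integer class labels (1-4); Y_test is one-hot encoded
# below and the sklearn confusion matrix at the end needs plain labels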
oldYTest = Y_test
############################# EEGNet portion ##################################
# convert labels to one-hot encodings.
Y_train = np_utils.to_categorical(Y_train-1)
Y_validate = np_utils.to_categorical(Y_validate-1)
Y_test = np_utils.to_categorical(Y_test-1)
# convert data to NCHW (trials, kernels, channels, samples) format. Data
# contains 60 channels and 151 time-points. Set the number of kernels to 1.
X_train = X_train.reshape(X_train.shape[0], kernels, chans, samples)
X_validate = X_validate.reshape(X_validate.shape[0], kernels, chans, samples)
X_test = X_test.reshape(X_test.shape[0], kernels, chans, samples)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
print("chans:", chans, "samples:", samples)
# configure the EEGNet-8,2,16 model with kernel length of 32 samples (other
# model configurations may do better, but this is a good starting point)
model = EEGNet(nb_classes = 4, Chans = chans, Samples = samples,
dropoutRate = 0.5, kernLength = 32, F1 = 8, D = 2, F2 = 16,
dropoutType = 'Dropout')
# compile the model and set the optimizers
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics = ['accuracy'])
# count number of parameters in the model
numParams = model.count_params()
# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(filepath='/tmp/checkpoint.h5', verbose=1,
save_best_only=True)
###############################################################################
# if the classification task was imbalanced (significantly more trials in one
# class versus the others) you can assign a weight to each class during
# optimization to balance it out. This data is approximately balanced so we
# don't need to do this, but is shown here for illustration/completeness.
###############################################################################
# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
class_weights = {0:1, 1:1, 2:1, 3:1}
################################################################################
# fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run, but most runs should be comparable to xDAWN +
# Riemannian geometry classification (below)
################################################################################
fittedModel = model.fit(X_train, Y_train, batch_size = 16, epochs = 300,
verbose = 2, validation_data=(X_validate, Y_validate),
callbacks=[checkpointer], class_weight = class_weights)
# load optimal weights
model.load_weights('/tmp/checkpoint.h5')
###############################################################################
# can alternatively used the weights provided in the repo. If so it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################
# WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
# model.load_weights(WEIGHTS_PATH)
###############################################################################
# make prediction on test set.
###############################################################################
probs = model.predict(X_test)
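# the predicted class for each trial is the one with the highest probability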
preds = probs.argmax(axis = -1)
acc = np.mean(preds == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc))
# plot the confusion matrices for both classifiers
names = ['audio left', 'audio right', 'vis left', 'vis right']
plt.figure(0)
plot_confusion_matrix(preds, Y_test.argmax(axis = -1), names, title = 'EEGNet-8,2')
plt.savefig('plot-EEG')
print('confusion_matrix')
print(confusion_matrix(oldYTest - 1, preds))  # shift the original 1-4 labels to 0-3 to match preds
|
[
"matplotlib.pyplot.switch_backend",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.utils.to_categorical",
"mne.pick_types",
"mne.io.Raw",
"EEGModels.EEGNet",
"tensorflow.keras.callbacks.ModelCheckpoint",
"mne.Epochs",
"matplotlib.pyplot.figure",
"mne.read_events",
"mne.datasets.sample.data_path",
"matplotlib.pyplot.savefig"
] |
[((3628, 3653), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (3646, 3653), True, 'from matplotlib import pyplot as plt\n'), ((3793, 3811), 'mne.datasets.sample.data_path', 'sample.data_path', ([], {}), '()\n', (3809, 3811), False, 'from mne.datasets import sample\n'), ((4102, 4148), 'mne.io.Raw', 'io.Raw', (['raw_fname'], {'preload': '(True)', 'verbose': '(False)'}), '(raw_fname, preload=True, verbose=False)\n', (4108, 4148), False, 'from mne import io\n'), ((4229, 4257), 'mne.read_events', 'mne.read_events', (['event_fname'], {}), '(event_fname)\n', (4244, 4257), False, 'import mne\n'), ((4319, 4407), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(False)', 'eeg': '(True)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg=False, eeg=True, stim=False, eog=False,\n exclude='bads')\n", (4333, 4407), False, 'import mne\n'), ((4451, 4569), 'mne.Epochs', 'mne.Epochs', (['raw', 'events', 'event_id', 'tmin', 'tmax'], {'proj': '(False)', 'picks': 'picks', 'baseline': 'None', 'preload': '(True)', 'verbose': '(False)'}), '(raw, events, event_id, tmin, tmax, proj=False, picks=picks,\n baseline=None, preload=True, verbose=False)\n', (4461, 4569), False, 'import mne\n'), ((5178, 5214), 'tensorflow.keras.utils.to_categorical', 'np_utils.to_categorical', (['(Y_train - 1)'], {}), '(Y_train - 1)\n', (5201, 5214), True, 'from tensorflow.keras import utils as np_utils\n'), ((5228, 5267), 'tensorflow.keras.utils.to_categorical', 'np_utils.to_categorical', (['(Y_validate - 1)'], {}), '(Y_validate - 1)\n', (5251, 5267), True, 'from tensorflow.keras import utils as np_utils\n'), ((5281, 5316), 'tensorflow.keras.utils.to_categorical', 'np_utils.to_categorical', (['(Y_test - 1)'], {}), '(Y_test - 1)\n', (5304, 5316), True, 'from tensorflow.keras import utils as np_utils\n'), ((6055, 6182), 'EEGModels.EEGNet', 'EEGNet', ([], {'nb_classes': '(4)', 'Chans': 'chans', 'Samples': 'samples', 'dropoutRate': '(0.5)', 'kernLength': '(32)', 'F1': '(8)', 'D': '(2)', 'F2': '(16)', 'dropoutType': '"""Dropout"""'}), "(nb_classes=4, Chans=chans, Samples=samples, dropoutRate=0.5,\n kernLength=32, F1=8, D=2, F2=16, dropoutType='Dropout')\n", (6061, 6182), False, 'from EEGModels import EEGNet\n'), ((6539, 6617), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""/tmp/checkpoint.h5"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath='/tmp/checkpoint.h5', verbose=1, save_best_only=True)\n", (6554, 6617), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((8801, 8814), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (8811, 8814), True, 'from matplotlib import pyplot as plt\n'), ((8899, 8922), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot-EEG"""'], {}), "('plot-EEG')\n", (8910, 8922), True, 'from matplotlib import pyplot as plt\n'), ((8956, 8989), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['oldYTest', 'preds'], {}), '(oldYTest, preds)\n', (8972, 8989), False, 'from sklearn.metrics import confusion_matrix\n')]
|
from twisted.internet import defer
from twisted.trial import unittest
from txpostgres import reconnection
class ArbitraryException(Exception):
pass
class Reconnectable(object):
def __init__(self):
self.calls = []
self.connects = []
def call(self):
self.calls.append(defer.Deferred())
return self.calls[-1]
def connect(self):
self.connects.append(defer.Deferred())
return self.connects[-1]
def close(self):
pass
class BrokenReconnectable(Reconnectable):
def close(self):
raise RuntimeError()
class TestDeadConnectionDetector(unittest.TestCase):
def setUp(self):
self.recoveries = 0
self.reconnectable = Reconnectable()
self.detector = reconnection.DeadConnectionDetector(self.deathChecker)
self.detector.setReconnectable(self.reconnectable)
self.detector.addRecoveryHandler(self.recovery)
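        # the detector asks deathChecker whether a failure means the connection
        # died, and fires the recovery handlers once the reconnect completes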
def deathChecker(self, f):
return f.check(ArbitraryException)
def recovery(self):
self.recoveries += 1
def brokenRecovery(self):
self.recoveries += 1
raise RuntimeError()
def test_basic(self):
"""
Only the failure recognized by the death checker causes reconnection to
trigger. Until the connection recovers, all calls through the detector
are immediately failed.
"""
# the first call is successful
d1 = self.detector.callChecking(self.reconnectable.call)
self.reconnectable.calls.pop().callback(None)
self.assertEquals(len(self.reconnectable.connects), 0)
# the second call has an error, but the death checker does not
# recognize it
d2 = self.detector.callChecking(self.reconnectable.call)
self.reconnectable.calls.pop().errback(RuntimeError())
self.assertFailure(d2, RuntimeError)
self.assertEquals(len(self.reconnectable.connects), 0)
# the third and the fourth call discover that the connection is dead,
# but only one reconnection is triggered
d3 = self.detector.callChecking(self.reconnectable.call)
d4 = self.detector.callChecking(self.reconnectable.call)
self.reconnectable.calls.pop().errback(ArbitraryException())
self.reconnectable.calls.pop().errback(ArbitraryException())
self.assertFailure(d3, ArbitraryException)
self.assertFailure(d4, ArbitraryException)
# only one reconnection
self.assertEquals(len(self.reconnectable.connects), 1)
# the fifth call finds the connection dead
d5 = self.detector.callChecking(self.reconnectable.call)
self.assertEquals(len(self.reconnectable.calls), 0)
self.assertFailure(d5, reconnection.ConnectionDead)
rd = self.reconnectable.connects.pop()
self.assertEquals(self.recoveries, 0)
rd.callback(None)
self.assertEquals(self.recoveries, 1)
d6 = self.detector.callChecking(self.reconnectable.call)
self.reconnectable.calls.pop().callback(None)
d = defer.gatherResults([d1, d2, d3, d4, d5, d6])
return d.addCallback(lambda ret: self.assertEquals(ret[5], None))
def test_brokenRecovery(self):
"""
Errors in recovery handlers are logged and discarded.
"""
self.detector.removeRecoveryHandler(self.recovery)
self.detector.addRecoveryHandler(self.brokenRecovery)
d = self.detector.callChecking(self.reconnectable.call)
self.reconnectable.calls.pop().errback(ArbitraryException())
self.assertFailure(d, ArbitraryException)
self.reconnectable.connects.pop().callback(None)
# the error gets logged and discarded
self.assertEquals(len(self.flushLoggedErrors(RuntimeError)), 1)
d = self.detector.callChecking(self.reconnectable.call)
self.reconnectable.calls.pop().callback(None)
return d.addCallback(self.assertEquals, None)
def test_brokenReconnectable(self):
"""
        Errors when closing the reconnectable are ignored.
"""
reconnectable = BrokenReconnectable()
self.detector.setReconnectable(reconnectable)
d = self.detector.callChecking(reconnectable.call)
reconnectable.calls.pop().errback(ArbitraryException())
self.assertFailure(d, ArbitraryException)
reconnectable.connects.pop().callback(None)
        # the error in BrokenReconnectable.close got ignored
self.assertEquals(len(self.flushLoggedErrors()), 0)
d = self.detector.callChecking(reconnectable.call)
reconnectable.calls.pop().callback(None)
return d.addCallback(self.assertEquals, None)
|
[
"twisted.internet.defer.gatherResults",
"txpostgres.reconnection.DeadConnectionDetector",
"twisted.internet.defer.Deferred"
] |
[((764, 818), 'txpostgres.reconnection.DeadConnectionDetector', 'reconnection.DeadConnectionDetector', (['self.deathChecker'], {}), '(self.deathChecker)\n', (799, 818), False, 'from txpostgres import reconnection\n'), ((3083, 3128), 'twisted.internet.defer.gatherResults', 'defer.gatherResults', (['[d1, d2, d3, d4, d5, d6]'], {}), '([d1, d2, d3, d4, d5, d6])\n', (3102, 3128), False, 'from twisted.internet import defer\n'), ((308, 324), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (322, 324), False, 'from twisted.internet import defer\n'), ((409, 425), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (423, 425), False, 'from twisted.internet import defer\n')]
|
'''
A class that performs tracking and drift scans
with parameters acquired from the scan queue.
Author: <NAME>
Date: June 2018
'''
from CommandStation import CommandStation
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.time import Time
from astropy.table import Table
from astropy import units as u
from numpy import linspace
from datetime import date
from srtutility.NTPTime import NTPTime
import io
import re
import sqlite3
import _thread
class Scan:
def __init__(self):
self.station = CommandStation()
self.ntp = NTPTime()
self.database_location = '../srtdatabase/srtdata.db'
# Method to take a single data point at a single frequency for a single source.
#
# :param azal: tuple containing azimuth and altitude of scan position
# :param freq: frequency in MHz at which to measure
# :return scan: tuple containing a single power measurement and boolean indicating successful movement
def singlescan(self, azal, freq):
movesuccess = self.station.movebyazal(azal[0], azal[1]) # move station to scan position
if movesuccess:
scan = self.station.readpower(freq) # read power at frequency freq
else:
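			# a failed move records zero power so the spectrum keeps its length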
scan = 0
return (scan, movesuccess)
# Method to take data points across a spectrum for a single source.
#
# :param azal: tuple containing azimuth and altitude of scan position
# :param flimit: tuple containing lower and upper frequency limits in MHz
# :param stepnum: number of steps to take over the frequency range
# :return data: dictionary containing a single spectrum with start and end times and a time correction value
def singlespectrum(self, azal, flimit, stepnum):
spectrum = []
starttime = self.ntp.getcurrenttime() # get start time of spectrum scan
spectrumsuccess = True
for freq in linspace(flimit[0], flimit[1], stepnum): # sweep through frequencies in range, taking stepnum steps
if spectrumsuccess:
scan = self.singlescan(azal, freq) # do single scan at current frequency
if scan[1] == False:
spectrumsuccess = False
else:
scan = (0, False)
spectrum.append(scan[0]) # append scan result to spectrum
endtime = self.ntp.getcurrenttime() # get end time of spectrum scan
data = {'spectrum': spectrum, 'starttime': starttime, 'endtime': endtime, 'spectrumsuccess': spectrumsuccess} # package spectrum and time data
return data
# Method to track a position and take data for a specific duration.
#
# :param scanid: the id of the current scan
# :param pos: tuple containing galactic latitude and longitude of the position to track
# :param flimit: tuple containing lower and upper frequency limits in MHz
# :param stepnum: number of steps to take over the frequency range
# :param time: unix time at which to stop scanning
# :return trackdata: tuple containing a list of scan data and a string indicating the status of the scan
def track(self, scanid, pos, flimit, stepnum, time):
print('running a track scan')
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
curtime = self.ntp.getcurrenttime() # get start time of scan
trackdata = []
while curtime < time: # continue scanning until current time is past the end time
status = cur.execute("SELECT * FROM SCANIDS WHERE ID = ?", (scanid,)).fetchone() # check current status to see if scan was cancelled
if status['status'] == 'cancelled': # if scan was cancelled, return data collected so far
print('scan was cancelled')
srtdb.close()
return (trackdata, 'cancelled')
azal = self.getazal(pos) # get current azimuth and altitude of tracked position
if azal == 'positionerror' or azal == 'moveboundserror': # check for invalid position or movement, return if found
srtdb.close()
return (trackdata, azal)
spectrumdata = self.singlespectrum(azal, flimit, stepnum) # take a spectrum measurement
trackdata.append(spectrumdata) # append spectrum data to the scan
if spectrumdata['spectrumsuccess'] == False:
print('scan timed out')
srtdb.close()
return (trackdata, 'timeout')
curtime = self.ntp.getcurrenttime() # update current time
print('scan complete')
srtdb.close()
return (trackdata, 'complete')
# Method to take data at a single drift position for a specific duration.
#
# :param scanid: the id of the current scan
# :param pos: tuple containing galactic latitude and longitude of drift position
# :param flimit: tuple containing lower and upper frequency limits in MHz
# :param stepnum: number of steps to take over the frequency range
# :param time: unix time at which to stop scanning
# :return driftdata: tuple containing a list of scan data and a string indicating the status of the scan
def drift(self, scanid, pos, flimit, stepnum, time):
print('running a drift scan')
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
curtime = self.ntp.getcurrenttime() # get start time of scan
driftdata = []
azal = self.getazal(pos) # get azimuth and altitude of the drift position
		if azal == 'positionerror' or azal == 'moveboundserror': # check for invalid position or movement error, return if found
srtdb.close()
return (driftdata, azal)
while curtime < time: # continue scanning until the current time is past the end time
status = cur.execute("SELECT * FROM SCANID WHERE ID = ?", (scanid,)).fetchone() # check current status to see if scan was cancelled
if status['status'] == 'cancelled': # if scan was cancelled, return data collected so far
print('scan was cancelled')
srtdb.close()
return (driftdata, 'cancelled')
spectrumdata = self.singlespectrum(azal, flimit, stepnum) # take a spectrum measurement
driftdata.append(spectrumdata) # append spectrum data to the scan
if spectrumdata['spectrumsuccess'] == False:
print('scan timed out')
srtdb.close()
return (driftdata, 'timeout')
curtime = self.ntp.getcurrenttime() # update current time
print('scan complete')
srtdb.close()
return (driftdata, 'complete')
# Method that performs an entire scan and stores the collected data in the database.
#
# :param nextscan: a dict object containing the parameters of a scan
def donextscan(self, nextscan):
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
pos = (nextscan['ras'], nextscan['dec']) # get position of scan
flower = nextscan['freqlower'] # get spectrum parameters
fupper = nextscan['frequpper']
stepnum = nextscan['stepnum']
duration = re.split('[hms]', nextscan['duration']) # get duration values of scan
seconds = int(duration[0]) * 60 * 60 + int(duration[1]) * 60 + int(duration[2])
curtime = self.ntp.getcurrenttime()
endtime = curtime + seconds # calculate the ending time of the scan in unix time
cur.execute("UPDATE STATUS SET ID = ?, CODE = ?", (nextscan['id'], 'ok')) # update the STATUS table
srtdb.commit()
if nextscan['type'] == 'track':
scandata = self.track(nextscan['id'], pos, (flower, fupper), stepnum, endtime) # do a track scan
else:
scandata = self.drift(nextscan['id'], pos, (flower, fupper), stepnum, endtime) # do a drift scan
if len(scandata[0]) != 0:
print('saving scan data')
starttime = Time(scandata[0][0]['starttime'], format = 'unix') # package scan time info into astropy Time objects for format conversion
			endtime = Time(scandata[0][-1]['endtime'], format = 'unix')
nextscan['starttime'] = starttime.iso # store start and end times with scan params in iso format
nextscan['endtime'] = endtime.iso
tablerows = []
for scan in scandata[0]:
tablerows.append(scan['spectrum'])
t = Table(rows = tablerows, meta = nextscan); # initialize astropy Table object to store scan data with scan params as table metadata
# for scan in scandata[0]: # add scan data to the Table
# t.add_row(scan['spectrum'])
b = io.BytesIO() # initialize byte stream for FITS file writing
t.write(b, format='fits') # write the Table to the byte stream in FITS format
d = date.today() # get today's date
with open('testfits.fits', 'w') as f:
f.write(b.getvalue().decode('ascii'))
cur.execute("INSERT INTO SCANRESULTS VALUES (?,?)", (nextscan['id'], b.getvalue())) # store scan name, date, type, and data in the db
srtdb.commit()
cur.execute("UPDATE SCANIDS SET STATUS = ? WHERE ID = ?", (scandata[1], nextscan['id']))
scanname = cur.execute("SELECT * FROM SCANIDS WHERE ID = ?", (nextscan['id'],)).fetchone()['name']
cur.execute("INSERT INTO SCANHISTORY VALUES (?,?,?,?,?,?)", (nextscan['id'], scanname, nextscan['type'], d.day, d.month, d.year))
srtdb.commit()
srtdb.close()
# Helper method to get the azimuth and altitude of a position.
#
# :param pos: tuple containing right ascension and declination
# :return azal: tuple containing azimuth and altitude, or a string containing an error code
def getazal(self, pos):
print('calculating azal')
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
configdata = cur.execute("SELECT * FROM CONFIG").fetchone() # retrieve config data from the database
position = SkyCoord(pos[0], pos[1], frame = 'icrs') # convert position into astropy SkyCoord object for coord transformation
location = EarthLocation(lat = configdata['lat'], lon = configdata['lon'], height = configdata['height']) # convert location into astropy EarthLocation
srtdb.close()
unixtime = self.ntp.getcurrenttime() # get curent time to establish AltAz reference frame
observingtime = Time(unixtime, format = 'unix') # create astropy Time object using converted ntp time
azalframe = AltAz(location = location, obstime = observingtime) # create AltAz reference frame
try:
position = position.transform_to(azalframe) # transform position from galactic coords to az/alt coords
except ValueError as e: # if transformation is impossible, return position error
print('positionerror')
return 'positionerror'
azal = (float(position.az.to_string(unit=u.deg, decimal=True)), float(position.alt.to_string(unit=u.deg, decimal=True))) # create azal tuple
if azal[1] < 0 or azal[1] > 180: # if position is not in the sky, return position error
print('positionerror')
return 'positionerror'
if azal[0] < configdata['azlower'] or azal[0] > configdata['azupper']: # if motion would violate movement bounds, return movebounds error
print('moveboundserror')
return 'moveboundserror'
if azal[1] < configdata['allower'] or azal[1] > configdata['alupper']:
print('moveboundserror')
return 'moveboundserror'
print(str(azal[0]) + ', ' + str(azal[1]))
return azal
def main():
srtdb = sqlite3.connect('../srtdatabase/srtdata.db') # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
# cur.execute("INSERT INTO SCANIDS VALUES (?,?,?)", (-50, 'scantest', 'scheduled'))
# cur.execute("INSERT INTO SCANPARAMS VALUES (?,?,?,?,?,?,?,?,?)", (-50, 'track', 'sun', '9h46m58s', '13d22m20s', '0h0m30s', 1500, 1510, 10))
# srtdb.commit()
scan = cur.execute("SELECT * FROM SCANPARAMS WHERE ID = ?", (-50,)).fetchone()
nextscan = {}
for key in scan.keys():
nextscan[key.lower()] = scan[key]
station = Scan()
# _thread.start_new_thread(station.donextscan, (nextscan,))
station.donextscan(nextscan)
# main()
|
[
"io.BytesIO",
"re.split",
"astropy.table.Table",
"astropy.time.Time",
"astropy.coordinates.AltAz",
"CommandStation.CommandStation",
"datetime.date.today",
"sqlite3.connect",
"astropy.coordinates.EarthLocation",
"numpy.linspace",
"srtutility.NTPTime.NTPTime",
"astropy.coordinates.SkyCoord"
] |
[((11124, 11168), 'sqlite3.connect', 'sqlite3.connect', (['"""../srtdatabase/srtdata.db"""'], {}), "('../srtdatabase/srtdata.db')\n", (11139, 11168), False, 'import sqlite3\n'), ((527, 543), 'CommandStation.CommandStation', 'CommandStation', ([], {}), '()\n', (541, 543), False, 'from CommandStation import CommandStation\n'), ((560, 569), 'srtutility.NTPTime.NTPTime', 'NTPTime', ([], {}), '()\n', (567, 569), False, 'from srtutility.NTPTime import NTPTime\n'), ((1794, 1833), 'numpy.linspace', 'linspace', (['flimit[0]', 'flimit[1]', 'stepnum'], {}), '(flimit[0], flimit[1], stepnum)\n', (1802, 1833), False, 'from numpy import linspace\n'), ((2989, 3028), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (3004, 3028), False, 'import sqlite3\n'), ((4925, 4964), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (4940, 4964), False, 'import sqlite3\n'), ((6440, 6479), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (6455, 6479), False, 'import sqlite3\n'), ((6803, 6842), 're.split', 're.split', (['"""[hms]"""', "nextscan['duration']"], {}), "('[hms]', nextscan['duration'])\n", (6811, 6842), False, 'import re\n'), ((9296, 9335), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (9311, 9335), False, 'import sqlite3\n'), ((9566, 9604), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['pos[0]', 'pos[1]'], {'frame': '"""icrs"""'}), "(pos[0], pos[1], frame='icrs')\n", (9574, 9604), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((9697, 9790), 'astropy.coordinates.EarthLocation', 'EarthLocation', ([], {'lat': "configdata['lat']", 'lon': "configdata['lon']", 'height': "configdata['height']"}), "(lat=configdata['lat'], lon=configdata['lon'], height=\n configdata['height'])\n", (9710, 9790), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((9969, 9998), 'astropy.time.Time', 'Time', (['unixtime'], {'format': '"""unix"""'}), "(unixtime, format='unix')\n", (9973, 9998), False, 'from astropy.time import Time\n'), ((10071, 10118), 'astropy.coordinates.AltAz', 'AltAz', ([], {'location': 'location', 'obstime': 'observingtime'}), '(location=location, obstime=observingtime)\n', (10076, 10118), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((7526, 7574), 'astropy.time.Time', 'Time', (["scandata[0][0]['starttime']"], {'format': '"""unix"""'}), "(scandata[0][0]['starttime'], format='unix')\n", (7530, 7574), False, 'from astropy.time import Time\n'), ((7977, 8013), 'astropy.table.Table', 'Table', ([], {'rows': 'tablerows', 'meta': 'nextscan'}), '(rows=tablerows, meta=nextscan)\n', (7982, 8013), False, 'from astropy.table import Table\n'), ((8212, 8224), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (8222, 8224), False, 'import io\n'), ((8364, 8376), 'datetime.date.today', 'date.today', ([], {}), '()\n', (8374, 8376), False, 'from datetime import date\n')]
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, random
from frappe.utils.make_random import how_many, get_random
from frappe.desk import query_report
from erpnext.setup.utils import get_exchange_rate
from erpnext.accounts.party import get_party_account_currency
from erpnext.exceptions import InvalidCurrency
from erpnext.stock.doctype.material_request.material_request import make_request_for_quotation
from erpnext.buying.doctype.request_for_quotation.request_for_quotation import \
make_supplier_quotation as make_quotation_from_rfq
def work():
frappe.set_user(frappe.db.get_global('demo_purchase_user'))
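	# each block below only runs with some probability, so repeated demo runs
	# produce a varied mix of purchase documents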
if random.random() < 0.3:
report = "Items To Be Requested"
for row in query_report.run(report)["result"][:random.randint(1, 5)]:
item_code, qty = row[0], abs(row[-1])
mr = make_material_request(item_code, qty)
if random.random() < 0.3:
for mr in frappe.get_all('Material Request',
filters={'material_request_type': 'Purchase', 'status': 'Open'},
limit=random.randint(1,6)):
if not frappe.get_all('Request for Quotation',
filters={'material_request': mr.name}, limit=1):
rfq = make_request_for_quotation(mr.name)
rfq.transaction_date = frappe.flags.current_date
add_suppliers(rfq)
rfq.save()
rfq.submit()
	# Make supplier quotation from RFQ against each supplier.
if random.random() < 0.3:
for rfq in frappe.get_all('Request for Quotation',
filters={'status': 'Open'}, limit=random.randint(1, 6)):
if not frappe.get_all('Supplier Quotation',
filters={'request_for_quotation': rfq.name}, limit=1):
rfq = frappe.get_doc('Request for Quotation', rfq.name)
for supplier in rfq.suppliers:
supplier_quotation = make_quotation_from_rfq(rfq.name, supplier.supplier)
supplier_quotation.save()
supplier_quotation.submit()
# get supplier details
supplier = get_random("Supplier")
company_currency = frappe.db.get_value("Company", "Wind Power LLC", "default_currency")
party_account_currency = get_party_account_currency("Supplier", supplier, "Wind Power LLC")
if company_currency == party_account_currency:
exchange_rate = 1
else:
exchange_rate = get_exchange_rate(party_account_currency, company_currency)
# make supplier quotations
if random.random() < 0.2:
from erpnext.stock.doctype.material_request.material_request import make_supplier_quotation
report = "Material Requests for which Supplier Quotations are not created"
for row in query_report.run(report)["result"][:random.randint(1, 3)]:
if row[0] != "'Total'":
sq = frappe.get_doc(make_supplier_quotation(row[0]))
sq.transaction_date = frappe.flags.current_date
sq.supplier = supplier
sq.currency = party_account_currency or company_currency
sq.conversion_rate = exchange_rate
sq.insert()
sq.submit()
frappe.db.commit()
# make purchase orders
if random.random() < 0.5:
from erpnext.stock.doctype.material_request.material_request import make_purchase_order
report = "Requested Items To Be Ordered"
for row in query_report.run(report)["result"][:how_many("Purchase Order")]:
if row[0] != "'Total'":
po = frappe.get_doc(make_purchase_order(row[0]))
po.supplier = supplier
po.currency = party_account_currency or company_currency
po.conversion_rate = exchange_rate
po.transaction_date = frappe.flags.current_date
po.insert()
po.submit()
frappe.db.commit()
if random.random() < 0.2:
make_subcontract()
def make_material_request(item_code, qty):
mr = frappe.new_doc("Material Request")
variant_of = frappe.db.get_value('Item', item_code, 'variant_of') or item_code
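	# items (or their template) with an active default BOM are requested for
	# in-house manufacture, everything else is purchased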
if frappe.db.get_value('BOM', {'item': variant_of, 'is_default': 1, 'is_active': 1}):
mr.material_request_type = 'Manufacture'
else:
mr.material_request_type = "Purchase"
mr.transaction_date = frappe.flags.current_date
mr.schedule_date = frappe.utils.add_days(mr.transaction_date, 7)
mr.append("items", {
"doctype": "Material Request Item",
"schedule_date": frappe.utils.add_days(mr.transaction_date, 7),
"item_code": item_code,
"qty": qty
})
mr.insert()
mr.submit()
return mr
def add_suppliers(rfq):
for i in range(2):
supplier = get_random("Supplier")
if supplier not in [d.supplier for d in rfq.get('suppliers')]:
rfq.append("suppliers", { "supplier": supplier })
def make_subcontract():
from erpnext.buying.doctype.purchase_order.purchase_order import make_stock_entry
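	# pick a random item flagged as sub-contracted; skip this demo step if none exists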
item_code = get_random("Item", {"is_sub_contracted_item": 1})
if item_code:
# make sub-contract PO
po = frappe.new_doc("Purchase Order")
po.is_subcontracted = "Yes"
po.supplier = get_random("Supplier")
po.schedule_date = frappe.utils.add_days(frappe.flags.current_date, 7)
item_code = get_random("Item", {"is_sub_contracted_item": 1})
po.append("items", {
"item_code": item_code,
"schedule_date": frappe.utils.add_days(frappe.flags.current_date, 7),
"qty": random.randint(10, 30)
})
po.set_missing_values()
try:
po.insert()
except InvalidCurrency:
return
po.submit()
		# make material request for the sub-contracted item
make_material_request(po.items[0].item_code, po.items[0].qty)
# transfer material for sub-contract
stock_entry = frappe.get_doc(make_stock_entry(po.name, po.items[0].item_code))
stock_entry.from_warehouse = "Stores - WPL"
stock_entry.to_warehouse = "Supplier - WPL"
stock_entry.insert()
|
[
"erpnext.accounts.party.get_party_account_currency",
"frappe.utils.make_random.how_many",
"frappe.desk.query_report.run",
"frappe.utils.make_random.get_random",
"frappe.db.get_global",
"random.randint",
"frappe.new_doc",
"frappe.get_doc",
"random.random",
"erpnext.stock.doctype.material_request.material_request.make_purchase_order",
"frappe.get_all",
"erpnext.stock.doctype.material_request.material_request.make_request_for_quotation",
"frappe.utils.add_days",
"frappe.db.get_value",
"erpnext.buying.doctype.request_for_quotation.request_for_quotation.make_supplier_quotation",
"erpnext.setup.utils.get_exchange_rate",
"erpnext.stock.doctype.material_request.material_request.make_supplier_quotation",
"erpnext.buying.doctype.purchase_order.purchase_order.make_stock_entry",
"frappe.db.commit"
] |
[((1985, 2007), 'frappe.utils.make_random.get_random', 'get_random', (['"""Supplier"""'], {}), "('Supplier')\n", (1995, 2007), False, 'from frappe.utils.make_random import how_many, get_random\n'), ((2029, 2097), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Company"""', '"""Wind Power LLC"""', '"""default_currency"""'], {}), "('Company', 'Wind Power LLC', 'default_currency')\n", (2048, 2097), False, 'import frappe, random\n'), ((2124, 2190), 'erpnext.accounts.party.get_party_account_currency', 'get_party_account_currency', (['"""Supplier"""', 'supplier', '"""Wind Power LLC"""'], {}), "('Supplier', supplier, 'Wind Power LLC')\n", (2150, 2190), False, 'from erpnext.accounts.party import get_party_account_currency\n'), ((3638, 3672), 'frappe.new_doc', 'frappe.new_doc', (['"""Material Request"""'], {}), "('Material Request')\n", (3652, 3672), False, 'import frappe, random\n'), ((3759, 3844), 'frappe.db.get_value', 'frappe.db.get_value', (['"""BOM"""', "{'item': variant_of, 'is_default': 1, 'is_active': 1}"], {}), "('BOM', {'item': variant_of, 'is_default': 1,\n 'is_active': 1})\n", (3778, 3844), False, 'import frappe, random\n'), ((4002, 4047), 'frappe.utils.add_days', 'frappe.utils.add_days', (['mr.transaction_date', '(7)'], {}), '(mr.transaction_date, 7)\n', (4023, 4047), False, 'import frappe, random\n'), ((4575, 4624), 'frappe.utils.make_random.get_random', 'get_random', (['"""Item"""', "{'is_sub_contracted_item': 1}"], {}), "('Item', {'is_sub_contracted_item': 1})\n", (4585, 4624), False, 'from frappe.utils.make_random import how_many, get_random\n'), ((706, 748), 'frappe.db.get_global', 'frappe.db.get_global', (['"""demo_purchase_user"""'], {}), "('demo_purchase_user')\n", (726, 748), False, 'import frappe, random\n'), ((755, 770), 'random.random', 'random.random', ([], {}), '()\n', (768, 770), False, 'import frappe, random\n'), ((978, 993), 'random.random', 'random.random', ([], {}), '()\n', (991, 993), False, 'import frappe, random\n'), ((1467, 1482), 'random.random', 'random.random', ([], {}), '()\n', (1480, 1482), False, 'import frappe, random\n'), ((2284, 2343), 'erpnext.setup.utils.get_exchange_rate', 'get_exchange_rate', (['party_account_currency', 'company_currency'], {}), '(party_account_currency, company_currency)\n', (2301, 2343), False, 'from erpnext.setup.utils import get_exchange_rate\n'), ((2377, 2392), 'random.random', 'random.random', ([], {}), '()\n', (2390, 2392), False, 'import frappe, random\n'), ((2991, 3006), 'random.random', 'random.random', ([], {}), '()\n', (3004, 3006), False, 'import frappe, random\n'), ((3544, 3559), 'random.random', 'random.random', ([], {}), '()\n', (3557, 3559), False, 'import frappe, random\n'), ((3688, 3740), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item"""', 'item_code', '"""variant_of"""'], {}), "('Item', item_code, 'variant_of')\n", (3707, 3740), False, 'import frappe, random\n'), ((4313, 4335), 'frappe.utils.make_random.get_random', 'get_random', (['"""Supplier"""'], {}), "('Supplier')\n", (4323, 4335), False, 'from frappe.utils.make_random import how_many, get_random\n'), ((4672, 4704), 'frappe.new_doc', 'frappe.new_doc', (['"""Purchase Order"""'], {}), "('Purchase Order')\n", (4686, 4704), False, 'import frappe, random\n'), ((4751, 4773), 'frappe.utils.make_random.get_random', 'get_random', (['"""Supplier"""'], {}), "('Supplier')\n", (4761, 4773), False, 'from frappe.utils.make_random import how_many, get_random\n'), ((4795, 4846), 'frappe.utils.add_days', 'frappe.utils.add_days', (['frappe.flags.current_date', 
'(7)'], {}), '(frappe.flags.current_date, 7)\n', (4816, 4846), False, 'import frappe, random\n'), ((4862, 4911), 'frappe.utils.make_random.get_random', 'get_random', (['"""Item"""', "{'is_sub_contracted_item': 1}"], {}), "('Item', {'is_sub_contracted_item': 1})\n", (4872, 4911), False, 'from frappe.utils.make_random import how_many, get_random\n'), ((4128, 4173), 'frappe.utils.add_days', 'frappe.utils.add_days', (['mr.transaction_date', '(7)'], {}), '(mr.transaction_date, 7)\n', (4149, 4173), False, 'import frappe, random\n'), ((5339, 5387), 'erpnext.buying.doctype.purchase_order.purchase_order.make_stock_entry', 'make_stock_entry', (['po.name', 'po.items[0].item_code'], {}), '(po.name, po.items[0].item_code)\n', (5355, 5387), False, 'from erpnext.buying.doctype.purchase_order.purchase_order import make_stock_entry\n'), ((826, 850), 'frappe.desk.query_report.run', 'query_report.run', (['report'], {}), '(report)\n', (842, 850), False, 'from frappe.desk import query_report\n'), ((862, 882), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (876, 882), False, 'import frappe, random\n'), ((1125, 1145), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (1139, 1145), False, 'import frappe, random\n'), ((1157, 1249), 'frappe.get_all', 'frappe.get_all', (['"""Request for Quotation"""'], {'filters': "{'material_request': mr.name}", 'limit': '(1)'}), "('Request for Quotation', filters={'material_request': mr.\n name}, limit=1)\n", (1171, 1249), False, 'import frappe, random\n'), ((1260, 1295), 'erpnext.stock.doctype.material_request.material_request.make_request_for_quotation', 'make_request_for_quotation', (['mr.name'], {}), '(mr.name)\n', (1286, 1295), False, 'from erpnext.stock.doctype.material_request.material_request import make_request_for_quotation\n'), ((1580, 1600), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (1594, 1600), False, 'import frappe, random\n'), ((1613, 1708), 'frappe.get_all', 'frappe.get_all', (['"""Supplier Quotation"""'], {'filters': "{'request_for_quotation': rfq.name}", 'limit': '(1)'}), "('Supplier Quotation', filters={'request_for_quotation': rfq.\n name}, limit=1)\n", (1627, 1708), False, 'import frappe, random\n'), ((1719, 1768), 'frappe.get_doc', 'frappe.get_doc', (['"""Request for Quotation"""', 'rfq.name'], {}), "('Request for Quotation', rfq.name)\n", (1733, 1768), False, 'import frappe, random\n'), ((2585, 2609), 'frappe.desk.query_report.run', 'query_report.run', (['report'], {}), '(report)\n', (2601, 2609), False, 'from frappe.desk import query_report\n'), ((2621, 2641), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2635, 2641), False, 'import frappe, random\n'), ((2943, 2961), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (2959, 2961), False, 'import frappe, random\n'), ((3160, 3184), 'frappe.desk.query_report.run', 'query_report.run', (['report'], {}), '(report)\n', (3176, 3184), False, 'from frappe.desk import query_report\n'), ((3196, 3222), 'frappe.utils.make_random.how_many', 'how_many', (['"""Purchase Order"""'], {}), "('Purchase Order')\n", (3204, 3222), False, 'from frappe.utils.make_random import how_many, get_random\n'), ((3520, 3538), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (3536, 3538), False, 'import frappe, random\n'), ((4983, 5034), 'frappe.utils.add_days', 'frappe.utils.add_days', (['frappe.flags.current_date', '(7)'], {}), '(frappe.flags.current_date, 7)\n', (5004, 5034), False, 'import frappe, random\n'), ((5046, 5068), 
'random.randint', 'random.randint', (['(10)', '(30)'], {}), '(10, 30)\n', (5060, 5068), False, 'import frappe, random\n'), ((1831, 1883), 'erpnext.buying.doctype.request_for_quotation.request_for_quotation.make_supplier_quotation', 'make_quotation_from_rfq', (['rfq.name', 'supplier.supplier'], {}), '(rfq.name, supplier.supplier)\n', (1854, 1883), True, 'from erpnext.buying.doctype.request_for_quotation.request_for_quotation import make_supplier_quotation as make_quotation_from_rfq\n'), ((2695, 2726), 'erpnext.stock.doctype.material_request.material_request.make_supplier_quotation', 'make_supplier_quotation', (['row[0]'], {}), '(row[0])\n', (2718, 2726), False, 'from erpnext.stock.doctype.material_request.material_request import make_supplier_quotation\n'), ((3276, 3303), 'erpnext.stock.doctype.material_request.material_request.make_purchase_order', 'make_purchase_order', (['row[0]'], {}), '(row[0])\n', (3295, 3303), False, 'from erpnext.stock.doctype.material_request.material_request import make_purchase_order\n')]
|
# -*- coding: utf-8 -*-
from operator import attrgetter
import requests
from argh.decorators import arg
from lain_cli.auth import SSOAccess, get_auth_header
from lain_cli.utils import check_phase, get_app_state, get_domain
from lain_sdk.util import error, info
class AppInfo(object):
"""App info to show"""
def __init__(self, app_info):
self.appname = app_info.get("appname")
self.apptype = app_info.get("apptype")
self.metaversion = app_info.get("metaversion")
self.state = get_app_state(app_info)
@classmethod
def new(cls, app_info):
return AppInfo(app_info)
SORT_CHOICES = ['appname', 'apptype', 'metaversion', 'state']
@arg('phase', help="lain cluster phase id, can be added by lain config save")
@arg('-s', '--sort', choices=SORT_CHOICES, help="sort type when displaying available apps")
def dashboard(phase, sort='appname'):
"""
Basic dashboard of Lain
"""
check_phase(phase)
print_welecome()
print_workflows()
console = "console.%s" % (get_domain(phase))
access_token = SSOAccess.get_token(phase)
auth_header = get_auth_header(access_token)
print_available_repos(console, auth_header)
print_available_apps(console, auth_header, sort)
def print_welecome():
info('##############################')
info('# Welcome to Lain! #')
info('##############################')
def print_workflows():
info('Below is the recommended workflows :')
info(' lain reposit => lain prepare => lain build => lain tag => lain push => lain deploy')
def render_repos(repos):
repos.sort()
for repo in repos:
print("{} ".format(repo)),
def render_apps(apps, sort_type):
apps.sort(key=attrgetter(sort_type))
for app in apps:
print("{:<30} {:<20} {:<60} {:<10}".format(
app.appname, app.apptype, app.metaversion, app.state))
def print_available_repos(console, auth_header):
repos_url = "http://%s/api/v1/repos/" % console
repos_res = requests.get(repos_url, headers=auth_header)
info('Available repos are :')
if repos_res.status_code == 200:
repos = repos_res.json()["repos"]
render_repos([repo["appname"] for repo in repos])
print('')
else:
error("shit happened : %s" % repos_res.content)
def print_available_apps(console, auth_header, sort_type):
apps_url = "http://%s/api/v1/apps/" % console
apps_res = requests.get(apps_url, headers=auth_header)
info('Available apps are :')
print("{:<30} {:<20} {:<60} {:<10}".format(
"Appname", "AppType", "MetaVersion", "State"))
if apps_res.status_code == 200:
apps = apps_res.json()["apps"]
render_apps([AppInfo.new(app) for app in apps], sort_type)
else:
error("shit happened: %s" % apps_res.content)
|
[
"lain_sdk.util.error",
"argh.decorators.arg",
"lain_cli.utils.get_app_state",
"operator.attrgetter",
"lain_cli.utils.get_domain",
"lain_cli.auth.SSOAccess.get_token",
"requests.get",
"lain_cli.auth.get_auth_header",
"lain_cli.utils.check_phase",
"lain_sdk.util.info"
] |
[((690, 766), 'argh.decorators.arg', 'arg', (['"""phase"""'], {'help': '"""lain cluster phase id, can be added by lain config save"""'}), "('phase', help='lain cluster phase id, can be added by lain config save')\n", (693, 766), False, 'from argh.decorators import arg\n'), ((768, 863), 'argh.decorators.arg', 'arg', (['"""-s"""', '"""--sort"""'], {'choices': 'SORT_CHOICES', 'help': '"""sort type when displaying available apps"""'}), "('-s', '--sort', choices=SORT_CHOICES, help=\n 'sort type when displaying available apps')\n", (771, 863), False, 'from argh.decorators import arg\n'), ((946, 964), 'lain_cli.utils.check_phase', 'check_phase', (['phase'], {}), '(phase)\n', (957, 964), False, 'from lain_cli.utils import check_phase, get_app_state, get_domain\n'), ((1076, 1102), 'lain_cli.auth.SSOAccess.get_token', 'SSOAccess.get_token', (['phase'], {}), '(phase)\n', (1095, 1102), False, 'from lain_cli.auth import SSOAccess, get_auth_header\n'), ((1121, 1150), 'lain_cli.auth.get_auth_header', 'get_auth_header', (['access_token'], {}), '(access_token)\n', (1136, 1150), False, 'from lain_cli.auth import SSOAccess, get_auth_header\n'), ((1281, 1319), 'lain_sdk.util.info', 'info', (['"""##############################"""'], {}), "('##############################')\n", (1285, 1319), False, 'from lain_sdk.util import error, info\n'), ((1324, 1362), 'lain_sdk.util.info', 'info', (['"""# Welcome to Lain! #"""'], {}), "('# Welcome to Lain! #')\n", (1328, 1362), False, 'from lain_sdk.util import error, info\n'), ((1367, 1405), 'lain_sdk.util.info', 'info', (['"""##############################"""'], {}), "('##############################')\n", (1371, 1405), False, 'from lain_sdk.util import error, info\n'), ((1435, 1479), 'lain_sdk.util.info', 'info', (['"""Below is the recommended workflows :"""'], {}), "('Below is the recommended workflows :')\n", (1439, 1479), False, 'from lain_sdk.util import error, info\n'), ((1484, 1586), 'lain_sdk.util.info', 'info', (['""" lain reposit => lain prepare => lain build => lain tag => lain push => lain deploy"""'], {}), "(\n ' lain reposit => lain prepare => lain build => lain tag => lain push => lain deploy'\n )\n", (1488, 1586), False, 'from lain_sdk.util import error, info\n'), ((2019, 2063), 'requests.get', 'requests.get', (['repos_url'], {'headers': 'auth_header'}), '(repos_url, headers=auth_header)\n', (2031, 2063), False, 'import requests\n'), ((2068, 2097), 'lain_sdk.util.info', 'info', (['"""Available repos are :"""'], {}), "('Available repos are :')\n", (2072, 2097), False, 'from lain_sdk.util import error, info\n'), ((2445, 2488), 'requests.get', 'requests.get', (['apps_url'], {'headers': 'auth_header'}), '(apps_url, headers=auth_header)\n', (2457, 2488), False, 'import requests\n'), ((2493, 2521), 'lain_sdk.util.info', 'info', (['"""Available apps are :"""'], {}), "('Available apps are :')\n", (2497, 2521), False, 'from lain_sdk.util import error, info\n'), ((520, 543), 'lain_cli.utils.get_app_state', 'get_app_state', (['app_info'], {}), '(app_info)\n', (533, 543), False, 'from lain_cli.utils import check_phase, get_app_state, get_domain\n'), ((1038, 1055), 'lain_cli.utils.get_domain', 'get_domain', (['phase'], {}), '(phase)\n', (1048, 1055), False, 'from lain_cli.utils import check_phase, get_app_state, get_domain\n'), ((2271, 2318), 'lain_sdk.util.error', 'error', (["('shit happened : %s' % repos_res.content)"], {}), "('shit happened : %s' % repos_res.content)\n", (2276, 2318), False, 'from lain_sdk.util import error, info\n'), ((2788, 2833), 
'lain_sdk.util.error', 'error', (["('shit happened: %s' % apps_res.content)"], {}), "('shit happened: %s' % apps_res.content)\n", (2793, 2833), False, 'from lain_sdk.util import error, info\n'), ((1734, 1755), 'operator.attrgetter', 'attrgetter', (['sort_type'], {}), '(sort_type)\n', (1744, 1755), False, 'from operator import attrgetter\n')]
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
_check_same_shape(preds, target)
sum_abs_error = torch.sum(torch.abs(preds - target))
n_obs = target.numel()
return sum_abs_error, n_obs
def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:
return sum_abs_error / n_obs
def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:
"""
Computes mean absolute error
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with MAE
Example:
>>> from torchmetrics.functional import mean_absolute_error
>>> x = torch.tensor([0., 1, 2, 3])
>>> y = torch.tensor([0., 1, 2, 2])
>>> mean_absolute_error(x, y)
tensor(0.2500)
"""
sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
return _mean_absolute_error_compute(sum_abs_error, n_obs)
|
[
"torch.abs",
"torchmetrics.utilities.checks._check_same_shape"
] |
[((803, 835), 'torchmetrics.utilities.checks._check_same_shape', '_check_same_shape', (['preds', 'target'], {}), '(preds, target)\n', (820, 835), False, 'from torchmetrics.utilities.checks import _check_same_shape\n'), ((866, 891), 'torch.abs', 'torch.abs', (['(preds - target)'], {}), '(preds - target)\n', (875, 891), False, 'import torch\n')]
|
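The mean-absolute-error module above exposes a two-stage update/compute split; a minimal sketch of driving those helpers directly (assuming torch is installed and the private functions defined above are in scope) is:

import torch

preds = torch.tensor([0.0, 1.0, 2.0, 3.0])
target = torch.tensor([0.0, 1.0, 2.0, 2.0])

# accumulate the sufficient statistics, then reduce them to the metric value
sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
mae = _mean_absolute_error_compute(sum_abs_error, n_obs)  # tensor(0.2500)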
# -*- coding: utf-8 -*-
#https://packaging.python.org/tutorials/distributing-packages/
import os
#https://pythonhosted.org/versiontools/usage.html
import setuptools
from pip import download
from pip import req
HERE = os.path.dirname(os.path.abspath(__file__))
def get_requirements(file):
path = os.path.join(HERE, file)
deps = list()
for dep in req.parse_requirements(path, session=download.PipSession()):
try:
# Pip 8.1.2 Compatible
specs = ','.join(''.join(str(spec)) for spec in dep.req.specifier)
except AttributeError:
# Pip 1.5.4 Compatible
specs = ','.join(''.join(spec) for spec in dep.req.specs)
requirement = '{name}{extras}{specs}'.format(
name=dep.name,
extras=(
'[{extras}]'.format(extras=','.join(dep.extras))
if dep.extras else ''
),
specs=specs,
)
deps.append(requirement)
return deps
setuptools.setup(
name='trackingsim',
description='Tracking device simulator.',
version=':versiontools:trackingsim:',
packages=setuptools.find_packages(exclude=['docs', 'tests']),
include_package_data=True,
install_requires=get_requirements('requirements.txt'),
setup_requires='versiontools',
author='<NAME>',
author_email='<EMAIL>',
url='dojot.com.br',
)
|
[
"pip.download.PipSession",
"os.path.abspath",
"os.path.join",
"setuptools.find_packages"
] |
[((237, 262), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (252, 262), False, 'import os\n'), ((305, 329), 'os.path.join', 'os.path.join', (['HERE', 'file'], {}), '(HERE, file)\n', (317, 329), False, 'import os\n'), ((1138, 1189), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['docs', 'tests']"}), "(exclude=['docs', 'tests'])\n", (1162, 1189), False, 'import setuptools\n'), ((400, 421), 'pip.download.PipSession', 'download.PipSession', ([], {}), '()\n', (419, 421), False, 'from pip import download\n')]
|
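The requirement strings built by get_requirements above follow a name[extras]specs pattern; a small standalone sketch of that formatting step, using illustrative values in place of pip's parsed requirement objects, is:

name, extras, specs = 'requests', ['security'], '>=2.18'
requirement = '{name}{extras}{specs}'.format(
    name=name,
    extras='[{0}]'.format(','.join(extras)) if extras else '',
    specs=specs,
)
# requirement == 'requests[security]>=2.18'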
# Generated by Django 3.2 on 2021-04-28 14:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('file', models.FileField(blank=True, upload_to='')),
('date_created', models.DateField()),
('date_expired', models.DateField()),
('status', models.CharField(choices=[('active', 'active'), ('dead', 'dead')], max_length=10)),
('document_root', models.CharField(choices=[('public', 'public'), ('private', 'private'), ('secret', 'secret'), ('top-secret', 'top-secret')], max_length=100)),
],
),
]
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.DateField"
] |
[((302, 398), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (321, 398), False, 'from django.db import migrations, models\n'), ((423, 455), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (439, 455), False, 'from django.db import migrations, models\n'), ((483, 501), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (499, 501), False, 'from django.db import migrations, models\n'), ((529, 571), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'upload_to': '""""""'}), "(blank=True, upload_to='')\n", (545, 571), False, 'from django.db import migrations, models\n'), ((607, 625), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (623, 625), False, 'from django.db import migrations, models\n'), ((661, 679), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (677, 679), False, 'from django.db import migrations, models\n'), ((709, 794), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('active', 'active'), ('dead', 'dead')]", 'max_length': '(10)'}), "(choices=[('active', 'active'), ('dead', 'dead')],\n max_length=10)\n", (725, 794), False, 'from django.db import migrations, models\n'), ((827, 972), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('public', 'public'), ('private', 'private'), ('secret', 'secret'), (\n 'top-secret', 'top-secret')]", 'max_length': '(100)'}), "(choices=[('public', 'public'), ('private', 'private'), (\n 'secret', 'secret'), ('top-secret', 'top-secret')], max_length=100)\n", (843, 972), False, 'from django.db import migrations, models\n')]
|
"""
An experimental protocol is handled as a pandas DataFrame
that includes an 'onset' field.
This yields the onset time of the events in the experimental paradigm.
It can also contain:
* a 'trial_type' field that yields the condition identifier.
* a 'duration' field that yields event duration (for so-called block
paradigms).
* a 'modulation' field that associated a scalar value to each event.
Author: <NAME>, 2015
"""
from __future__ import with_statement
import warnings
import numpy as np
def check_events(events):
"""Test that the events data describes a valid experimental paradigm
It is valid if the events data has an 'onset' key.
Parameters
----------
events : pandas DataFrame
Events data that describes a functional experimental paradigm.
Returns
-------
trial_type : array of shape (n_events,), dtype='s'
Per-event experimental conditions identifier.
Defaults to np.repeat('dummy', len(onsets)).
onset : array of shape (n_events,), dtype='f'
Per-event onset time (in seconds)
duration : array of shape (n_events,), dtype='f'
Per-event durantion, (in seconds)
defaults to zeros(n_events) when no duration is provided
modulation : array of shape (n_events,), dtype='f'
Per-event modulation, (in seconds)
defaults to ones(n_events) when no duration is provided
"""
if 'onset' not in events.keys():
raise ValueError('The provided events data has no onset column.')
if 'duration' not in events.keys():
raise ValueError('The provided events data has no duration column.')
onset = np.array(events['onset'])
duration = np.array(events['duration']).astype(np.float)
n_events = len(onset)
trial_type = np.array(events['trial_type'])
modulation = np.ones(n_events)
if 'trial_type' not in events.keys():
warnings.warn("'trial_type' column not found "
"in the given events data.")
trial_type = np.repeat('dummy', n_events)
if 'modulation' in events.keys():
warnings.warn("'modulation' column found in the given events data.")
modulation = np.array(events['modulation']).astype(np.float)
return trial_type, onset, duration, modulation
|
[
"warnings.warn",
"numpy.array",
"numpy.ones",
"numpy.repeat"
] |
[((1664, 1689), 'numpy.array', 'np.array', (["events['onset']"], {}), "(events['onset'])\n", (1672, 1689), True, 'import numpy as np\n'), ((1794, 1824), 'numpy.array', 'np.array', (["events['trial_type']"], {}), "(events['trial_type'])\n", (1802, 1824), True, 'import numpy as np\n'), ((1842, 1859), 'numpy.ones', 'np.ones', (['n_events'], {}), '(n_events)\n', (1849, 1859), True, 'import numpy as np\n'), ((1910, 1982), 'warnings.warn', 'warnings.warn', (['"""\'trial_type\' column not found in the given events data."""'], {}), '("\'trial_type\' column not found in the given events data.")\n', (1923, 1982), False, 'import warnings\n'), ((2029, 2057), 'numpy.repeat', 'np.repeat', (['"""dummy"""', 'n_events'], {}), "('dummy', n_events)\n", (2038, 2057), True, 'import numpy as np\n'), ((2104, 2172), 'warnings.warn', 'warnings.warn', (['"""\'modulation\' column found in the given events data."""'], {}), '("\'modulation\' column found in the given events data.")\n', (2117, 2172), False, 'import warnings\n'), ((1705, 1733), 'numpy.array', 'np.array', (["events['duration']"], {}), "(events['duration'])\n", (1713, 1733), True, 'import numpy as np\n'), ((2194, 2224), 'numpy.array', 'np.array', (["events['modulation']"], {}), "(events['modulation'])\n", (2202, 2224), True, 'import numpy as np\n')]
|
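check_events above requires 'onset' and 'duration' columns and, as written, also indexes 'trial_type' before warning about its absence, so a table that passes cleanly carries all three; a minimal sketch (assuming pandas is available and a NumPy version that still provides np.float) is:

import pandas as pd

events = pd.DataFrame({
    'onset':      [0.0, 10.0, 20.0],  # required
    'duration':   [1.0, 1.0, 1.0],    # required
    'trial_type': ['a', 'b', 'a'],    # read unconditionally by the code above
})

trial_type, onset, duration, modulation = check_events(events)
# modulation defaults to np.ones(3) because no 'modulation' column is given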
import numpy as np
from BMA_support import *
from BMA_agent import *
try:
from scipy.special import lambertw
except:
print("could not import lambertw (bounded priors won't work)")
class Node(object):
def __init__(self,name='',dims=[],inds=[],num=1,cp=False):
self.name = name
self.ag = [Agent() for i in range(0,count(num))]
# properties that are calculated by the node:
self.marg = Dist()
if cp:
self.prior = Dist()
else:
self.prior = self.marg
self.post = Dist()
self.DKL = 0
self.DKLpr = 0
# properties that have to be defined in the system:
self.p_in = Dist() # used in marginal
self.p0 = Dist()
self.inds = inds
self.beta_r = []
self.dims = dims
self.cp = cp
def initialize(self):
if len(self.inds) < 3: # if no index for beta given, pick [0] as default (->len(beta)=1)
self.inds.append([0])
self.DKL = 0
self.DKLpr = 0
self.beta_r = self.inds[2]
self.post.r = self.inds[1]
self.p_in.r = self.post.r[:-1]
self.post.initialize(self.dims)
self.marg.r = self.inds[0]
self.marg.initialize(self.dims)
self.prior.r = self.inds[0]
self.prior.initialize(self.dims)
self.p0.r = self.inds[0]
self.p0.val = normalize(np.ones(np.shape(self.prior.val)))
for agent in self.ag: agent.reset()
def update_input(self,joint):
Z = np.einsum(joint.val,joint.r,self.prior.r[:-1])
self.p_in.val = np.einsum(1.0/(Z+1e-55),self.prior.r[:-1],joint.val,joint.r,self.post.r[:-1])
def update_posterior(self,U,beta):
if np.shape(U) != np.shape(self.post.val):
print("The utility must have the same shape as the posterior!")
betatimesU = np.einsum(beta,self.beta_r,U,self.post.r,self.post.r)
post = np.einsum(self.prior.val,self.prior.r,np.exp(betatimesU),self.post.r,self.post.r)
self.post.val = normalize(post)
def update_prior(self,alpha,beta):
self.update_marginal()
if self.cp: self.update_bounded_prior(alpha,beta)
def update_marginal(self):
self.marg.val = np.einsum(self.p_in.val,self.p_in.r,self.post.val,self.post.r,self.prior.r)
def update_bounded_prior(self,alpha,beta):
pr = np.copy(self.prior.val)
if len(self.ag) > 1:
for k in range(0,len(self.ag)):
index = np.unravel_index(k,[self.dims[i] for i in self.beta_r])
if alpha[index]/beta[index] > 500:
pr[index] = self.marg.val[index]/beta[index] - self.prior.val[index]*np.log(self.prior.val[index]/self.p0.val[index])/alpha[index]
else:
DKL_pr = np.log(self.prior.val[index]/self.p0.val[index]).dot(self.prior.val[index])
cnst = alpha[index]/beta[index] - DKL_pr
denom = np.real(lambertw(np.exp(cnst)*(alpha[index]/beta[index])*self.marg.val[index]/self.p0.val[index]))
pr[index] = (alpha[index]/beta[index])*self.marg.val[index]/denom + 1e-55
elif len(self.ag) == 1:
if alpha[0]/beta[0] > 500:
pr = self.marg.val/beta[0]-self.prior.val*np.log(self.prior.val/self.p0.val)/alpha[0]
else:
DKL_pr = np.log(self.prior.val/self.p0.val).dot(self.prior.val)
cnst = alpha[0]/beta[0] - DKL_pr
denom = np.real(lambertw(np.exp(cnst)*(alpha[0]/beta[0])*self.marg.val/self.p0.val)) + 1e-55
pr = (alpha[0]/beta[0])*self.marg.val/denom + 1e-55
self.prior.val = normalize(pr)
def process(self,U,beta,alpha,joint):
self.update_input(joint)
self.update_posterior(U,beta)
self.update_prior(alpha,beta)
def calc_DKL(self):
self.DKL = get_DKL(self.post,self.prior)
def calc_DKLpr(self):
self.DKLpr = get_DKL(self.prior,self.p0)
def extract_agents(self):
num = len(self.ag)
if num > 1:
ind = self.prior.r[:-1] # indices of the dimensions that count this nodes agents
dimind = [self.dims[ndx] for ndx in ind]
rgoal = self.post.r[:]
for i in ind:
rgoal.remove(i)
for k in range(0,num):
delta = np.zeros(dimind)
index = np.unravel_index(k,dimind)
delta[index] = 1
self.ag[k].post = np.einsum(delta,ind,self.post.val,self.post.r,rgoal)
prior_r_ag = self.prior.r[:]
for i in ind:
prior_r_ag.remove(i)
self.ag[k].prior = np.einsum(delta,ind,self.prior.val,self.prior.r,prior_r_ag)
pin_r_ag = self.p_in.r[:]
for i in ind:
pin_r_ag.remove(i)
self.ag[k].p_in = np.einsum(delta,ind,self.p_in.val,self.p_in.r,pin_r_ag)
self.ag[k].calc_DKL()
else:
self.ag[0].post = self.post.val
self.ag[0].prior = self.prior.val
self.ag[0].p_in = self.p_in.val
self.ag[0].calc_DKL()
## %%
|
[
"numpy.log",
"numpy.copy",
"numpy.einsum",
"numpy.unravel_index",
"numpy.zeros",
"numpy.shape",
"numpy.exp"
] |
[((1531, 1579), 'numpy.einsum', 'np.einsum', (['joint.val', 'joint.r', 'self.prior.r[:-1]'], {}), '(joint.val, joint.r, self.prior.r[:-1])\n', (1540, 1579), True, 'import numpy as np\n'), ((1602, 1692), 'numpy.einsum', 'np.einsum', (['(1.0 / (Z + 1e-55))', 'self.prior.r[:-1]', 'joint.val', 'joint.r', 'self.post.r[:-1]'], {}), '(1.0 / (Z + 1e-55), self.prior.r[:-1], joint.val, joint.r, self.\n post.r[:-1])\n', (1611, 1692), True, 'import numpy as np\n'), ((1868, 1925), 'numpy.einsum', 'np.einsum', (['beta', 'self.beta_r', 'U', 'self.post.r', 'self.post.r'], {}), '(beta, self.beta_r, U, self.post.r, self.post.r)\n', (1877, 1925), True, 'import numpy as np\n'), ((2244, 2323), 'numpy.einsum', 'np.einsum', (['self.p_in.val', 'self.p_in.r', 'self.post.val', 'self.post.r', 'self.prior.r'], {}), '(self.p_in.val, self.p_in.r, self.post.val, self.post.r, self.prior.r)\n', (2253, 2323), True, 'import numpy as np\n'), ((2381, 2404), 'numpy.copy', 'np.copy', (['self.prior.val'], {}), '(self.prior.val)\n', (2388, 2404), True, 'import numpy as np\n'), ((1731, 1742), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (1739, 1742), True, 'import numpy as np\n'), ((1746, 1769), 'numpy.shape', 'np.shape', (['self.post.val'], {}), '(self.post.val)\n', (1754, 1769), True, 'import numpy as np\n'), ((1975, 1993), 'numpy.exp', 'np.exp', (['betatimesU'], {}), '(betatimesU)\n', (1981, 1993), True, 'import numpy as np\n'), ((1413, 1437), 'numpy.shape', 'np.shape', (['self.prior.val'], {}), '(self.prior.val)\n', (1421, 1437), True, 'import numpy as np\n'), ((2502, 2558), 'numpy.unravel_index', 'np.unravel_index', (['k', '[self.dims[i] for i in self.beta_r]'], {}), '(k, [self.dims[i] for i in self.beta_r])\n', (2518, 2558), True, 'import numpy as np\n'), ((4387, 4403), 'numpy.zeros', 'np.zeros', (['dimind'], {}), '(dimind)\n', (4395, 4403), True, 'import numpy as np\n'), ((4428, 4455), 'numpy.unravel_index', 'np.unravel_index', (['k', 'dimind'], {}), '(k, dimind)\n', (4444, 4455), True, 'import numpy as np\n'), ((4522, 4578), 'numpy.einsum', 'np.einsum', (['delta', 'ind', 'self.post.val', 'self.post.r', 'rgoal'], {}), '(delta, ind, self.post.val, self.post.r, rgoal)\n', (4531, 4578), True, 'import numpy as np\n'), ((4726, 4789), 'numpy.einsum', 'np.einsum', (['delta', 'ind', 'self.prior.val', 'self.prior.r', 'prior_r_ag'], {}), '(delta, ind, self.prior.val, self.prior.r, prior_r_ag)\n', (4735, 4789), True, 'import numpy as np\n'), ((4931, 4990), 'numpy.einsum', 'np.einsum', (['delta', 'ind', 'self.p_in.val', 'self.p_in.r', 'pin_r_ag'], {}), '(delta, ind, self.p_in.val, self.p_in.r, pin_r_ag)\n', (4940, 4990), True, 'import numpy as np\n'), ((2811, 2861), 'numpy.log', 'np.log', (['(self.prior.val[index] / self.p0.val[index])'], {}), '(self.prior.val[index] / self.p0.val[index])\n', (2817, 2861), True, 'import numpy as np\n'), ((3385, 3421), 'numpy.log', 'np.log', (['(self.prior.val / self.p0.val)'], {}), '(self.prior.val / self.p0.val)\n', (3391, 3421), True, 'import numpy as np\n'), ((2698, 2748), 'numpy.log', 'np.log', (['(self.prior.val[index] / self.p0.val[index])'], {}), '(self.prior.val[index] / self.p0.val[index])\n', (2704, 2748), True, 'import numpy as np\n'), ((3298, 3334), 'numpy.log', 'np.log', (['(self.prior.val / self.p0.val)'], {}), '(self.prior.val / self.p0.val)\n', (3304, 3334), True, 'import numpy as np\n'), ((2993, 3005), 'numpy.exp', 'np.exp', (['cnst'], {}), '(cnst)\n', (2999, 3005), True, 'import numpy as np\n'), ((3530, 3542), 'numpy.exp', 'np.exp', (['cnst'], {}), '(cnst)\n', (3536, 3542), True, 
'import numpy as np\n')]
|
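Stripped of the Dist and einsum index machinery, update_posterior above applies a prior-weighted softmax, posterior proportional to prior * exp(beta * U), followed by the same normalize step; for a single state with illustrative numbers:

import numpy as np

prior = np.array([0.25, 0.25, 0.25, 0.25])  # p(a)
U = np.array([1.0, 0.0, 0.5, 0.2])          # utilities U(s, a) for one state
beta = 2.0                                   # inverse temperature

post = prior * np.exp(beta * U)
post /= post.sum()                           # normalized posterior over actions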
"""
tests.components.device_tracker.test_mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the MQTT device tracker component.
"""
import unittest
import os
from homeassistant.components import device_tracker
from homeassistant.const import CONF_PLATFORM
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
class TestComponentsDeviceTrackerMQTT(unittest.TestCase):
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
try:
os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
except FileNotFoundError:
pass
def test_new_message(self):
dev_id = 'paulus'
enttiy_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
topic = '/location/paulus'
location = 'work'
self.assertTrue(device_tracker.setup(self.hass, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'mqtt',
'devices': {dev_id: topic}
}}))
fire_mqtt_message(self.hass, topic, location)
self.hass.pool.block_till_done()
self.assertEqual(location, self.hass.states.get(enttiy_id).state)
|
[
"homeassistant.components.device_tracker.ENTITY_ID_FORMAT.format",
"tests.common.fire_mqtt_message",
"tests.common.get_test_home_assistant",
"tests.common.mock_mqtt_component",
"homeassistant.components.device_tracker.setup"
] |
[((526, 551), 'tests.common.get_test_home_assistant', 'get_test_home_assistant', ([], {}), '()\n', (549, 551), False, 'from tests.common import get_test_home_assistant, mock_mqtt_component, fire_mqtt_message\n'), ((560, 590), 'tests.common.mock_mqtt_component', 'mock_mqtt_component', (['self.hass'], {}), '(self.hass)\n', (579, 590), False, 'from tests.common import get_test_home_assistant, mock_mqtt_component, fire_mqtt_message\n'), ((909, 955), 'homeassistant.components.device_tracker.ENTITY_ID_FORMAT.format', 'device_tracker.ENTITY_ID_FORMAT.format', (['dev_id'], {}), '(dev_id)\n', (947, 955), False, 'from homeassistant.components import device_tracker\n'), ((1220, 1265), 'tests.common.fire_mqtt_message', 'fire_mqtt_message', (['self.hass', 'topic', 'location'], {}), '(self.hass, topic, location)\n', (1237, 1265), False, 'from tests.common import get_test_home_assistant, mock_mqtt_component, fire_mqtt_message\n'), ((1042, 1155), 'homeassistant.components.device_tracker.setup', 'device_tracker.setup', (['self.hass', "{device_tracker.DOMAIN: {CONF_PLATFORM: 'mqtt', 'devices': {dev_id: topic}}}"], {}), "(self.hass, {device_tracker.DOMAIN: {CONF_PLATFORM:\n 'mqtt', 'devices': {dev_id: topic}}})\n", (1062, 1155), False, 'from homeassistant.components import device_tracker\n')]
|
from boa3.builtin import public
from boa3.builtin.interop.json import json_serialize
@public
def main() -> bytes:
return json_serialize(b'unit test')
|
[
"boa3.builtin.interop.json.json_serialize"
] |
[((127, 155), 'boa3.builtin.interop.json.json_serialize', 'json_serialize', (["b'unit test'"], {}), "(b'unit test')\n", (141, 155), False, 'from boa3.builtin.interop.json import json_serialize\n')]
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2020 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import re
from zope.testing import renormalizing
import zc.buildout.testing
from zc.buildout.tests import easy_install_SetUp
from zc.buildout.tests import normalize_bang
def default_cfg():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [debug]
... dec = 1
... 2
... inc = 1
... ''')
>>> write('buildout.cfg', '''
... [buildout]
...
... [debug]
... dec -= 2
... inc += 2
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate debug', env=env), end='')
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[debug]
dec= 1
/home/.buildout/default.cfg
-= buildout.cfg
inc= 1
2
/home/.buildout/default.cfg
+= buildout.cfg
"""
def default_cfg_extensions():
r"""
Add two extensions as develop eggs
>>> mkdir('demo')
>>> write('demo', 'demo.py', '''
... import sys
... def ext(buildout):
... sys.stdout.write('demo %s %s\\n' % ('ext', sorted(buildout)))
... def unload(buildout):
... sys.stdout.write('demo %s %s\\n' % ('unload', sorted(buildout)))
... ''')
>>> write('demo', 'setup.py', '''
... from setuptools import setup
...
... setup(
... name = "demo",
... entry_points = {
... 'zc.buildout.extension': ['ext = demo:ext'],
... 'zc.buildout.unloadextension': ['ext = demo:unload'],
... },
... )
... ''')
>>> mkdir('demo2')
>>> write('demo2', 'demo2.py', '''
... import sys
... def ext(buildout):
... sys.stdout.write('demo2 %s %s\\n' % ('ext', sorted(buildout)))
... def unload(buildout):
... sys.stdout.write('demo2 %s %s\\n' % ('unload', sorted(buildout)))
... ''')
>>> write('demo2', 'setup.py', '''
... from setuptools import setup
...
... setup(
... name = "demo2",
... entry_points = {
... 'zc.buildout.extension': ['ext = demo2:ext'],
... 'zc.buildout.unloadextension': ['ext = demo2:unload'],
... },
... )
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... develop = demo demo2
... parts =
... ''')
Run buildout once without extensions to actually develop the eggs.
(Develop happens after loading extensions.)
>>> print_(system(buildout), end='')
Develop: '/sample-buildout/demo'
Develop: '/sample-buildout/demo2'
>>> ls("develop-eggs")
- demo.egg-link
- demo2.egg-link
- zc.recipe.egg.egg-link
extensions in .buildout/default.cfg
incremented in buildout.cfg
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... develop = demo demo2
... extensions += demo2
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= buildout.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_base():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= base.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_base2():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('base2.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... base2.cfg
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= base2.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_base2_and_base3():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('base2.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('base3.cfg', '''
... [buildout]
... extensions += demo3
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... base2.cfg
... base3.cfg
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
demo3
/home/.buildout/default.cfg
+= base2.cfg
+= base3.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_buildout():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... extensions += demo2
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= buildout.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_buildout_with_base_and_root():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('root.cfg', '''
... [buildout]
... ''')
>>> write('base.cfg', '''
... [buildout]
... extends = root.cfg
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... extensions += demo2
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= buildout.cfg
...
versions= versions
DEFAULT_VALUE
"""
def no_default_with_extends_increment_in_base2_and_base3():
r"""
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('base2.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('base3.cfg', '''
... [buildout]
... extensions += demo3
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... base2.cfg
... base3.cfg
... parts =
... ''')
>>> print_(system(buildout+' annotate buildout'), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions=
demo2
demo3
IMPLICIT_VALUE
+= base2.cfg
+= base3.cfg
...
versions= versions
DEFAULT_VALUE
"""
def test_suite():
return doctest.DocTestSuite(
setUp=easy_install_SetUp,
tearDown=zc.buildout.testing.buildoutTearDown,
checker=renormalizing.RENormalizing([
zc.buildout.testing.normalize_path,
zc.buildout.testing.normalize_endings,
zc.buildout.testing.normalize_script,
zc.buildout.testing.normalize_egg_py,
zc.buildout.testing.normalize___pycache__,
zc.buildout.testing.not_found,
zc.buildout.testing.normalize_exception_type_for_python_2_and_3,
zc.buildout.testing.adding_find_link,
zc.buildout.testing.python27_warning,
zc.buildout.testing.python27_warning_2,
zc.buildout.testing.easyinstall_deprecated,
zc.buildout.testing.setuptools_deprecated,
zc.buildout.testing.pkg_resources_deprecated,
zc.buildout.testing.warnings_warn,
normalize_bang,
(re.compile(r'^(\w+\.)*(Missing\w+: )'), '\2'),
(re.compile(r"buildout: Running \S*setup.py"),
'buildout: Running setup.py'),
(re.compile(r'pip-\S+-'),
'pip.egg'),
(re.compile(r'setuptools-\S+-'),
'setuptools.egg'),
(re.compile(r'zc.buildout-\S+-'),
'zc.buildout.egg'),
(re.compile(r'pip = \S+'), 'pip = 20.0.0'),
(re.compile(r'setuptools = \S+'), 'setuptools = 0.7.99'),
(re.compile(r'File "\S+one.py"'),
'File "one.py"'),
(re.compile(r'We have a develop egg: (\S+) (\S+)'),
r'We have a develop egg: \1 V'),
(re.compile(r'Picked: setuptools = \S+'),
'Picked: setuptools = V'),
(re.compile('[-d] pip'), '- pip'),
(re.compile('[-d] setuptools'), '- setuptools'),
(re.compile(r'\\[\\]?'), '/'),
(re.compile(
'-q develop -mxN -d "/sample-buildout/develop-eggs'),
'-q develop -mxN -d /sample-buildout/develop-eggs'
),
(re.compile(r'^[*]...'), '...'),
# for
# bug_92891
# bootstrap_crashes_with_egg_recipe_in_buildout_section
(re.compile(r"Unused options for buildout: 'eggs' 'scripts'\."),
"Unused options for buildout: 'scripts' 'eggs'."),
# Python 3.4 changed the wording of NameErrors
(re.compile('NameError: global name'), 'NameError: name'),
# fix for test_distutils_scripts_using_import_are_properly_parsed
# and test_distutils_scripts_using_from_are_properly_parsed
# win32 apparently adds a " around sys.executable
(re.compile('#!"python"'), '#!python'),
]),
)
|
[
"re.compile"
] |
[((10745, 10785), 're.compile', 're.compile', (['"""^(\\\\w+\\\\.)*(Missing\\\\w+: )"""'], {}), "('^(\\\\w+\\\\.)*(Missing\\\\w+: )')\n", (10755, 10785), False, 'import re\n'), ((10805, 10849), 're.compile', 're.compile', (['"""buildout: Running \\\\S*setup.py"""'], {}), "('buildout: Running \\\\S*setup.py')\n", (10815, 10849), False, 'import re\n'), ((10908, 10931), 're.compile', 're.compile', (['"""pip-\\\\S+-"""'], {}), "('pip-\\\\S+-')\n", (10918, 10931), False, 'import re\n'), ((10971, 11001), 're.compile', 're.compile', (['"""setuptools-\\\\S+-"""'], {}), "('setuptools-\\\\S+-')\n", (10981, 11001), False, 'import re\n'), ((11048, 11079), 're.compile', 're.compile', (['"""zc.buildout-\\\\S+-"""'], {}), "('zc.buildout-\\\\S+-')\n", (11058, 11079), False, 'import re\n'), ((11127, 11151), 're.compile', 're.compile', (['"""pip = \\\\S+"""'], {}), "('pip = \\\\S+')\n", (11137, 11151), False, 'import re\n'), ((11183, 11214), 're.compile', 're.compile', (['"""setuptools = \\\\S+"""'], {}), "('setuptools = \\\\S+')\n", (11193, 11214), False, 'import re\n'), ((11253, 11284), 're.compile', 're.compile', (['"""File "\\\\S+one.py\\""""'], {}), '(\'File "\\\\S+one.py"\')\n', (11263, 11284), False, 'import re\n'), ((11330, 11380), 're.compile', 're.compile', (['"""We have a develop egg: (\\\\S+) (\\\\S+)"""'], {}), "('We have a develop egg: (\\\\S+) (\\\\S+)')\n", (11340, 11380), False, 'import re\n'), ((11440, 11479), 're.compile', 're.compile', (['"""Picked: setuptools = \\\\S+"""'], {}), "('Picked: setuptools = \\\\S+')\n", (11450, 11479), False, 'import re\n'), ((11534, 11557), 're.compile', 're.compile', (['"""[-d] pip"""'], {}), "('[-d] pip')\n", (11544, 11557), False, 'import re\n'), ((11583, 11613), 're.compile', 're.compile', (['"""[-d] setuptools"""'], {}), "('[-d] setuptools')\n", (11593, 11613), False, 'import re\n'), ((11646, 11671), 're.compile', 're.compile', (['"""\\\\\\\\[\\\\\\\\]?"""'], {}), "('\\\\\\\\[\\\\\\\\]?')\n", (11656, 11671), False, 'import re\n'), ((11689, 11752), 're.compile', 're.compile', (['"""-q develop -mxN -d "/sample-buildout/develop-eggs"""'], {}), '(\'-q develop -mxN -d "/sample-buildout/develop-eggs\')\n', (11699, 11752), False, 'import re\n'), ((11864, 11885), 're.compile', 're.compile', (['"""^[*]..."""'], {}), "('^[*]...')\n", (11874, 11885), False, 'import re\n'), ((12019, 12081), 're.compile', 're.compile', (['"""Unused options for buildout: \'eggs\' \'scripts\'\\\\."""'], {}), '("Unused options for buildout: \'eggs\' \'scripts\'\\\\.")\n', (12029, 12081), False, 'import re\n'), ((12219, 12255), 're.compile', 're.compile', (['"""NameError: global name"""'], {}), "('NameError: global name')\n", (12229, 12255), False, 'import re\n'), ((12502, 12526), 're.compile', 're.compile', (['"""#!"python\\""""'], {}), '(\'#!"python"\')\n', (12512, 12526), False, 'import re\n')]
|
""" Classes for interacting with Salesforce Bulk API """
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
import json
import requests
from time import sleep
from simple_salesforce.util import call_salesforce
class SFBulkHandler(object):
""" Bulk API request handler
Intermediate class which allows us to use commands,
such as 'sf.bulk.Contacts.create(...)'
This is really just a middle layer, whose sole purpose is
to allow the above syntax
"""
def __init__(self, session_id, bulk_url, proxies=None, session=None):
"""Initialize the instance with the given parameters.
Arguments:
* session_id -- the session ID for authenticating to Salesforce
* bulk_url -- API endpoint set in Salesforce instance
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
self.session_id = session_id
self.session = session or requests.Session()
self.bulk_url = bulk_url
# don't wipe out original proxies with None
if not session and proxies is not None:
self.session.proxies = proxies
# Define these headers separate from Salesforce class,
# as bulk uses a slightly different format
self.headers = {
'Content-Type': 'application/json',
'X-SFDC-Session': self.session_id,
'X-PrettyPrint': '1'
}
def __getattr__(self, name):
return SFBulkType(object_name=name, bulk_url=self.bulk_url,
headers=self.headers, session=self.session)
class SFBulkType(object):
""" Interface to Bulk/Async API functions"""
def __init__(self, object_name, bulk_url, headers, session):
"""Initialize the instance with the given parameters.
Arguments:
* object_name -- the name of the type of SObject this represents,
e.g. `Lead` or `Contact`
* bulk_url -- API endpoint set in Salesforce instance
* headers -- bulk API headers
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
self.object_name = object_name
self.bulk_url = bulk_url
self.session = session
self.headers = headers
def _create_job(self, operation, object_name, external_id_field=None):
""" Create a bulk job
Arguments:
* operation -- Bulk operation to be performed by job
* object_name -- SF object
* external_id_field -- unique identifier field for upsert operations
"""
payload = {
'operation': operation,
'object': object_name,
'contentType': 'JSON'
}
if operation == 'upsert':
payload['externalIdFieldName'] = external_id_field
url = "{}{}".format(self.bulk_url, 'job')
result = call_salesforce(url=url, method='POST', session=self.session,
headers=self.headers,
data=json.dumps(payload))
return result.json(object_pairs_hook=OrderedDict)
def _close_job(self, job_id):
""" Close a bulk job """
payload = {
'state': 'Closed'
}
url = "{}{}{}".format(self.bulk_url, 'job/', job_id)
result = call_salesforce(url=url, method='POST', session=self.session,
headers=self.headers,
data=json.dumps(payload))
return result.json(object_pairs_hook=OrderedDict)
def _get_job(self, job_id):
""" Get an existing job to check the status """
url = "{}{}{}".format(self.bulk_url, 'job/', job_id)
result = call_salesforce(url=url, method='GET', session=self.session,
headers=self.headers)
return result.json(object_pairs_hook=OrderedDict)
def _add_batch(self, job_id, data, operation):
""" Add a set of data as a batch to an existing job
Separating this out in case of later
implementations involving multiple batches
"""
url = "{}{}{}{}".format(self.bulk_url, 'job/', job_id, '/batch')
if operation != 'query':
data = json.dumps(data)
result = call_salesforce(url=url, method='POST', session=self.session,
headers=self.headers, data=data)
return result.json(object_pairs_hook=OrderedDict)
def _get_batch(self, job_id, batch_id):
""" Get an existing batch to check the status """
url = "{}{}{}{}{}".format(self.bulk_url, 'job/',
job_id, '/batch/', batch_id)
result = call_salesforce(url=url, method='GET', session=self.session,
headers=self.headers)
return result.json(object_pairs_hook=OrderedDict)
def _get_batch_results(self, job_id, batch_id, operation):
""" retrieve a set of results from a completed job """
url = "{}{}{}{}{}{}".format(self.bulk_url, 'job/', job_id, '/batch/',
batch_id, '/result')
result = call_salesforce(url=url, method='GET', session=self.session,
headers=self.headers)
if operation == 'query':
url_query_results = "{}{}{}".format(url, '/', result.json()[0])
query_result = call_salesforce(url=url_query_results, method='GET',
session=self.session,
headers=self.headers)
return query_result.json()
return result.json()
#pylint: disable=R0913
def _bulk_operation(self, object_name, operation, data,
external_id_field=None, wait=5):
""" String together helper functions to create a complete
end-to-end bulk API request
Arguments:
* object_name -- SF object
* operation -- Bulk operation to be performed by job
* data -- list of dict to be passed as a batch
* external_id_field -- unique identifier field for upsert operations
* wait -- seconds to sleep between checking batch status
"""
job = self._create_job(object_name=object_name, operation=operation,
external_id_field=external_id_field)
batch = self._add_batch(job_id=job['id'], data=data,
operation=operation)
self._close_job(job_id=job['id'])
batch_status = self._get_batch(job_id=batch['jobId'],
batch_id=batch['id'])['state']
while batch_status not in ['Completed', 'Failed', 'Not Processed']:
sleep(wait)
batch_status = self._get_batch(job_id=batch['jobId'],
batch_id=batch['id'])['state']
results = self._get_batch_results(job_id=batch['jobId'],
batch_id=batch['id'],
operation=operation)
return results
# _bulk_operation wrappers to expose supported Salesforce bulk operations
def delete(self, data):
""" soft delete records """
results = self._bulk_operation(object_name=self.object_name,
operation='delete', data=data)
return results
def insert(self, data):
""" insert records """
results = self._bulk_operation(object_name=self.object_name,
operation='insert', data=data)
return results
def upsert(self, data, external_id_field):
""" upsert records based on a unique identifier """
results = self._bulk_operation(object_name=self.object_name,
operation='upsert',
external_id_field=external_id_field,
data=data)
return results
def update(self, data):
""" update records """
results = self._bulk_operation(object_name=self.object_name,
operation='update', data=data)
return results
def hard_delete(self, data):
""" hard delete records """
results = self._bulk_operation(object_name=self.object_name,
operation='hardDelete', data=data)
return results
def query(self, data):
""" bulk query """
results = self._bulk_operation(object_name=self.object_name,
operation='query', data=data)
return results
|
[
"requests.Session",
"simple_salesforce.util.call_salesforce",
"json.dumps",
"time.sleep"
] |
[((4105, 4192), 'simple_salesforce.util.call_salesforce', 'call_salesforce', ([], {'url': 'url', 'method': '"""GET"""', 'session': 'self.session', 'headers': 'self.headers'}), "(url=url, method='GET', session=self.session, headers=self.\n headers)\n", (4120, 4192), False, 'from simple_salesforce.util import call_salesforce\n'), ((4662, 4761), 'simple_salesforce.util.call_salesforce', 'call_salesforce', ([], {'url': 'url', 'method': '"""POST"""', 'session': 'self.session', 'headers': 'self.headers', 'data': 'data'}), "(url=url, method='POST', session=self.session, headers=self.\n headers, data=data)\n", (4677, 4761), False, 'from simple_salesforce.util import call_salesforce\n'), ((5091, 5178), 'simple_salesforce.util.call_salesforce', 'call_salesforce', ([], {'url': 'url', 'method': '"""GET"""', 'session': 'self.session', 'headers': 'self.headers'}), "(url=url, method='GET', session=self.session, headers=self.\n headers)\n", (5106, 5178), False, 'from simple_salesforce.util import call_salesforce\n'), ((5547, 5634), 'simple_salesforce.util.call_salesforce', 'call_salesforce', ([], {'url': 'url', 'method': '"""GET"""', 'session': 'self.session', 'headers': 'self.headers'}), "(url=url, method='GET', session=self.session, headers=self.\n headers)\n", (5562, 5634), False, 'from simple_salesforce.util import call_salesforce\n'), ((1195, 1213), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1211, 1213), False, 'import requests\n'), ((4627, 4643), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4637, 4643), False, 'import json\n'), ((5801, 5901), 'simple_salesforce.util.call_salesforce', 'call_salesforce', ([], {'url': 'url_query_results', 'method': '"""GET"""', 'session': 'self.session', 'headers': 'self.headers'}), "(url=url_query_results, method='GET', session=self.session,\n headers=self.headers)\n", (5816, 5901), False, 'from simple_salesforce.util import call_salesforce\n'), ((7154, 7165), 'time.sleep', 'sleep', (['wait'], {}), '(wait)\n', (7159, 7165), False, 'from time import sleep\n'), ((3414, 3433), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3424, 3433), False, 'import json\n'), ((3858, 3877), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3868, 3877), False, 'import json\n')]
|
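SFBulkHandler above turns attribute access into an SFBulkType, whose public methods drive the create-job / add-batch / close-job / poll sequence documented in _bulk_operation; a hedged usage sketch, where the session id, instance URL, and external-id field name are placeholders rather than values from the original module, is:

bulk = SFBulkHandler(
    session_id='SESSION_ID',                                      # placeholder
    bulk_url='https://na1.salesforce.com/services/async/38.0/',   # placeholder endpoint
)

records = [{'LastName': 'Example', 'Email': 'user@example.com'}]
created = bulk.Contact.insert(records)                     # __getattr__ yields SFBulkType('Contact')
upserted = bulk.Contact.upsert(records, 'External_Id__c')   # assumes such an external id field exists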
from collections import OrderedDict
from sqlalchemy.exc import (DataError, IntegrityError,
InternalError, ProgrammingError)
from qwc_services_core.database import DatabaseEngine
from qwc_services_core.permissions_reader import PermissionsReader
from qwc_services_core.runtime_config import RuntimeConfig
from dataset_features_provider import DatasetFeaturesProvider
class DataService():
"""DataService class
Manage reading and writing of dataset features.
"""
def __init__(self, tenant, logger):
"""Constructor
:param str tenant: Tenant ID
:param Logger logger: Application logger
"""
self.tenant = tenant
self.logger = logger
self.resources = self.load_resources()
self.permissions_handler = PermissionsReader(tenant, logger)
self.db_engine = DatabaseEngine()
def index(self, identity, dataset, bbox, crs, filterexpr):
"""Find dataset features inside bounding box.
:param str identity: User identity
:param str dataset: Dataset ID
:param str bbox: Bounding box as '<minx>,<miny>,<maxx>,<maxy>' or None
:param str crs: Client CRS as 'EPSG:<srid>' or None
:param str filterexpr: JSON serialized array of filter expressions:
[["<attr>", "<op>", "<value>"], "and|or", ["<attr>", "<op>", "<value>"]]
"""
dataset_features_provider = self.dataset_features_provider(
identity, dataset
)
if dataset_features_provider is not None:
# check read permission
if not dataset_features_provider.readable():
return {
'error': "Dataset not readable",
'error_code': 405
}
if bbox is not None:
# parse and validate input bbox
bbox = dataset_features_provider.parse_bbox(bbox)
if bbox is None:
return {
'error': "Invalid bounding box",
'error_code': 400
}
srid = None
if crs is not None:
# parse and validate unput CRS
srid = dataset_features_provider.parse_crs(crs)
if srid is None:
return {
'error': "Invalid CRS",
'error_code': 400
}
if filterexpr is not None:
# parse and validate input filter
filterexpr = dataset_features_provider.parse_filter(filterexpr)
if filterexpr[0] is None:
return {
'error': (
"Invalid filter expression: %s" % filterexpr[1]
),
'error_code': 400
}
try:
feature_collection = dataset_features_provider.index(
bbox, srid, filterexpr
)
except (DataError, ProgrammingError) as e:
self.logger.error(e)
return {
'error': (
"Feature query failed. Please check filter expression "
"values and operators."
),
'error_code': 400
}
return {'feature_collection': feature_collection}
else:
return {'error': "Dataset not found or permission error"}
def show(self, identity, dataset, id, crs):
"""Get a dataset feature.
:param str identity: User identity
:param str dataset: Dataset ID
:param int id: Dataset feature ID
:param str crs: Client CRS as 'EPSG:<srid>' or None
"""
dataset_features_provider = self.dataset_features_provider(
identity, dataset
)
        if dataset_features_provider is not None:
            # check read permission
            if not dataset_features_provider.readable():
                return {
                    'error': "Dataset not readable",
                    'error_code': 405
                }
            srid = None
            if crs is not None:
                # parse and validate input CRS
                srid = dataset_features_provider.parse_crs(crs)
                if srid is None:
                    return {
                        'error': "Invalid CRS",
                        'error_code': 400
                    }
            feature = dataset_features_provider.show(id, srid)
if feature is not None:
return {'feature': feature}
else:
return {'error': "Feature not found"}
else:
return {'error': "Dataset not found or permission error"}
def create(self, identity, dataset, feature, internal_fields={}):
"""Create a new dataset feature.
:param str identity: User identity
:param str dataset: Dataset ID
:param object feature: GeoJSON Feature
:param object internal_fields: Internal fields to inject into permissions
"""
dataset_features_provider = self.dataset_features_provider(
identity, dataset, internal_fields
)
if dataset_features_provider is not None:
# check create permission
if not dataset_features_provider.creatable():
return {
'error': "Dataset not creatable",
'error_code': 405
}
# validate input feature
validation_errors = dataset_features_provider.validate(
feature, new_feature=True
)
if not validation_errors:
# create new feature
try:
feature = dataset_features_provider.create(feature)
except (DataError, IntegrityError,
InternalError, ProgrammingError) as e:
self.logger.error(e)
return {
'error': "Feature commit failed",
'error_details': {
'data_errors': ["Feature could not be created"],
},
'error_code': 422
}
return {'feature': feature}
else:
return {
'error': "Feature validation failed",
'error_details': validation_errors,
'error_code': 422
}
else:
return {'error': "Dataset not found or permission error"}
def update(self, identity, dataset, id, feature, internal_fields={}):
"""Update a dataset feature.
:param str identity: User identity
:param str dataset: Dataset ID
:param int id: Dataset feature ID
:param object feature: GeoJSON Feature
:param object internal_fields: Internal fields to inject into permissions
"""
dataset_features_provider = self.dataset_features_provider(
identity, dataset, internal_fields
)
if dataset_features_provider is not None:
# check update permission
if not dataset_features_provider.updatable():
return {
'error': "Dataset not updatable",
'error_code': 405
}
# validate input feature
validation_errors = dataset_features_provider.validate(feature)
if not validation_errors:
# update feature
try:
feature = dataset_features_provider.update(id, feature)
except (DataError, IntegrityError,
InternalError, ProgrammingError) as e:
self.logger.error(e)
return {
'error': "Feature commit failed",
'error_details': {
'data_errors': ["Feature could not be updated"],
},
'error_code': 422
}
if feature is not None:
return {'feature': feature}
else:
return {'error': "Feature not found"}
else:
return {
'error': "Feature validation failed",
'error_details': validation_errors,
'error_code': 422
}
else:
return {'error': "Dataset not found or permission error"}
def destroy(self, identity, dataset, id):
"""Delete a dataset feature.
:param str identity: User identity
:param str dataset: Dataset ID
:param int id: Dataset feature ID
"""
dataset_features_provider = self.dataset_features_provider(
identity, dataset
)
if dataset_features_provider is not None:
# check delete permission
if not dataset_features_provider.deletable():
return {
'error': "Dataset not deletable",
'error_code': 405
}
if dataset_features_provider.destroy(id):
return {}
else:
return {'error': "Feature not found"}
else:
return {'error': "Dataset not found or permission error"}
def is_editable(self, identity, dataset, id):
"""Returns whether a dataset is editable.
:param str identity: User identity
:param str dataset: Dataset ID
:param int id: Dataset feature ID
"""
dataset_features_provider = self.dataset_features_provider(
identity, dataset
)
if dataset_features_provider is not None:
# check update permission
if not dataset_features_provider.updatable():
return False
return dataset_features_provider.exists(id)
def dataset_features_provider(self, identity, dataset, internal_fields={}):
"""Return DatasetFeaturesProvider if available and permitted.
:param str identity: User identity
:param str dataset: Dataset ID
:param object internal_fields: Internal fields to inject into permissions
"""
dataset_features_provider = None
# check permissions
permissions = self.dataset_edit_permissions(
dataset, identity, internal_fields
)
if permissions:
# create DatasetFeaturesProvider
dataset_features_provider = DatasetFeaturesProvider(
permissions, self.db_engine
)
return dataset_features_provider
def load_resources(self):
"""Load service resources from config."""
# read config
config_handler = RuntimeConfig("data", self.logger)
config = config_handler.tenant_config(self.tenant)
# get service resources
datasets = {}
for resource in config.resources().get('datasets', []):
datasets[resource['name']] = resource
return {
'datasets': datasets
}
def dataset_edit_permissions(self, dataset, identity, internal_fields):
"""Return dataset edit permissions if available and permitted.
:param str dataset: Dataset ID
:param obj identity: User identity
:param object internal_fields: Internal fields to inject into permissions
"""
# find resource for requested dataset
resource = self.resources['datasets'].get(dataset)
if resource is None:
# dataset not found
return {}
# get permissions for dataset
resource_permissions = self.permissions_handler.resource_permissions(
'data_datasets', identity, dataset
)
if not resource_permissions:
# dataset not permitted
return {}
# combine permissions
permitted_attributes = set()
writable = False
creatable = False
readable = False
updatable = False
deletable = False
for permission in resource_permissions:
# collect permitted attributes
permitted_attributes.update(permission.get('attributes', []))
# allow writable and CRUD actions if any role permits them
writable |= permission.get('writable', False)
creatable |= permission.get('creatable', False)
readable |= permission.get('readable', False)
updatable |= permission.get('updatable', False)
deletable |= permission.get('deletable', False)
# make writable consistent with CRUD actions
writable |= creatable and readable and updatable and deletable
# make CRUD actions consistent with writable
creatable |= writable
readable |= writable
updatable |= writable
deletable |= writable
permitted = creatable or readable or updatable or deletable
if not permitted:
# no CRUD action permitted
return {}
# filter by permissions
attributes = [
field['name'] for field in resource['fields']
if field['name'] in permitted_attributes
]
fields = {}
for field in resource['fields']:
if field['name'] in permitted_attributes:
fields[field['name']] = field
        # NOTE: datasets without geometry may omit 'geometry' or set it to None
        geometry = resource.get('geometry') or {}
for key in internal_fields:
fields[key] = internal_fields[key]
attributes.append(key)
return {
"dataset": resource['name'],
"database_read": resource['db_url'],
"database_write": resource.get('db_write_url', resource['db_url']),
"schema": resource['schema'],
"table_name": resource['table_name'],
"primary_key": resource['primary_key'],
"attributes": attributes,
"fields": fields,
"geometry_column": geometry.get('geometry_column'),
"geometry_type": geometry.get('geometry_type'),
"srid": geometry.get('srid'),
"allow_null_geometry": geometry.get('allow_null', False),
"writable": writable,
"creatable": creatable,
"readable": readable,
"updatable": updatable,
"deletable": deletable
}
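# Illustrative sketch (not part of the service, added for clarity): shows how the
# role-permission merge in dataset_edit_permissions() behaves for two hypothetical
# roles; all values below are made up.
def _example_permission_merge():
    """Sketch only: OR-merge of role permissions and the consistency rules above."""
    role_a = {'attributes': ['name'], 'readable': True}
    role_b = {'attributes': ['geom'], 'writable': True}
    permitted_attributes = set()
    writable = creatable = readable = updatable = deletable = False
    for permission in (role_a, role_b):
        permitted_attributes.update(permission.get('attributes', []))
        writable |= permission.get('writable', False)
        creatable |= permission.get('creatable', False)
        readable |= permission.get('readable', False)
        updatable |= permission.get('updatable', False)
        deletable |= permission.get('deletable', False)
    # 'writable' from role_b implies that all four CRUD actions end up True
    writable |= creatable and readable and updatable and deletable
    creatable |= writable
    readable |= writable
    updatable |= writable
    deletable |= writable
    # permitted_attributes == {'name', 'geom'}
    return permitted_attributes, (creatable, readable, updatable, deletable)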
|
[
"qwc_services_core.permissions_reader.PermissionsReader",
"qwc_services_core.runtime_config.RuntimeConfig",
"dataset_features_provider.DatasetFeaturesProvider",
"qwc_services_core.database.DatabaseEngine"
] |
[((808, 841), 'qwc_services_core.permissions_reader.PermissionsReader', 'PermissionsReader', (['tenant', 'logger'], {}), '(tenant, logger)\n', (825, 841), False, 'from qwc_services_core.permissions_reader import PermissionsReader\n'), ((867, 883), 'qwc_services_core.database.DatabaseEngine', 'DatabaseEngine', ([], {}), '()\n', (881, 883), False, 'from qwc_services_core.database import DatabaseEngine\n'), ((10952, 10986), 'qwc_services_core.runtime_config.RuntimeConfig', 'RuntimeConfig', (['"""data"""', 'self.logger'], {}), "('data', self.logger)\n", (10965, 10986), False, 'from qwc_services_core.runtime_config import RuntimeConfig\n'), ((10699, 10751), 'dataset_features_provider.DatasetFeaturesProvider', 'DatasetFeaturesProvider', (['permissions', 'self.db_engine'], {}), '(permissions, self.db_engine)\n', (10722, 10751), False, 'from dataset_features_provider import DatasetFeaturesProvider\n')]
|
import os
import time
import torch
from dataset.music import MUSICMixDataset
from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr
from steps.common import build_model, get_underlying_nets, init_history, adjust_learning_rate
from steps.evaluate_base import _evaluate
def save_nets(ctx, suffix):
path = get_ctx(ctx, 'path')
nets = get_underlying_nets(get_ctx(ctx, 'net_wrapper'))
(net_sound, net_frame, net_synthesizer) = nets
torch.save(net_sound.state_dict(), os.path.join(path, f'sound_{suffix}'))
torch.save(net_frame.state_dict(), os.path.join(path, f'frame_{suffix}'))
torch.save(net_synthesizer.state_dict(), os.path.join(path, f'synthesizer_{suffix}'))
def checkpoint(ctx: dict):
epoch = get_ctx(ctx, 'epoch')
history = get_ctx(ctx, 'history')
path = get_ctx(ctx, 'path')
print('Saving checkpoints at {} epochs.'.format(epoch))
suffix_latest = 'latest.pth'
suffix_best = 'best.pth'
torch.save(epoch, os.path.join(path, f'epoch_{suffix_latest}'))
torch.save(history, os.path.join(path, f'history_{suffix_latest}'))
save_nets(ctx, suffix_latest)
cur_metrics = (history['val']['sdr'][-1] + history['val']['sir'][-1] + history['val']['sar'][-1]) / 3
if cur_metrics > get_ctx(ctx, 'best_metrics') and epoch % get_ctx(ctx, 'eval_epoch') == 0:
print(f'Best model, epoch = {epoch}, mean metrics = {cur_metrics}, prev best = {get_ctx(ctx, "best_metrics")}')
ctx['best_metrics'] = cur_metrics
save_nets(ctx, suffix_best)
if get_ctx(ctx, 'checkpoint_epoch') is not None and epoch % get_ctx(ctx, 'checkpoint_epoch') == 0:
save_nets(ctx, f'{epoch}.pth')
def synchronize(ctx: dict):
if get_ctx(ctx, 'device').type != 'cpu':
torch.cuda.synchronize()
def train_epoch(ctx: dict):
net_wrapper = get_ctx(ctx, 'net_wrapper')
optimizer = get_ctx(ctx, 'optimizer')
loader = get_ctx(ctx, 'loader_train')
history = get_ctx(ctx, 'history')
epoch = get_ctx(ctx, 'epoch')
batch_time = AverageMeter()
data_time = AverageMeter()
# switch to train mode
net_wrapper.train()
# main loop
synchronize(ctx)
tic = time.perf_counter()
for i, batch_data in enumerate(loader):
# measure data time
synchronize(ctx)
data_time.update(time.perf_counter() - tic)
# forward pass
net_wrapper.zero_grad()
err, _ = net_wrapper.forward(batch_data, ctx)
err = err.mean()
# backward
err.backward()
optimizer.step()
# measure total time
synchronize(ctx)
batch_time.update(time.perf_counter() - tic)
tic = time.perf_counter()
# display
if i % get_ctx(ctx, 'disp_iter') == 0:
print(f'{get_timestr()} Epoch: [{epoch}][{i}/{get_ctx(ctx, "epoch_iters")}],'
f' Time: {batch_time.average():.2f}, Data: {data_time.average():.2f}, '
f'lr_sound: {get_ctx(ctx, "lr_sound")}, lr_frame: {get_ctx(ctx, "lr_frame")}, '
f'lr_synthesizer: {get_ctx(ctx, "lr_synthesizer")}, '
f'loss: {err.item():.4f}')
fractional_epoch = epoch - 1 + 1. * i / get_ctx(ctx, 'epoch_iters')
history['train']['epoch'].append(fractional_epoch)
history['train']['err'].append(err.item())
def create_optimizer(nets, ctx):
(net_sound, net_frame, net_synthesizer) = nets
param_groups = [{'params': net_sound.parameters(), 'lr': get_ctx(ctx, 'lr_sound')},
{'params': net_synthesizer.parameters(), 'lr': get_ctx(ctx, 'lr_synthesizer')},
{'params': net_frame.features.parameters(), 'lr': get_ctx(ctx, 'lr_frame')},
{'params': net_frame.fc.parameters(), 'lr': get_ctx(ctx, 'lr_frame')}]
return torch.optim.SGD(param_groups, momentum=get_ctx(ctx, 'beta1'),
weight_decay=get_ctx(ctx, 'weight_decay'))
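# Illustrative sketch (added for clarity, not called anywhere): demonstrates the
# per-group learning-rate pattern used by create_optimizer above with small
# stand-in modules; the dimensions and hyperparameters are made up.
def _example_param_groups():
    """Sketch only: one SGD optimizer with a different lr per parameter group."""
    sound = torch.nn.Linear(8, 8)
    frame = torch.nn.Linear(8, 8)
    synthesizer = torch.nn.Linear(8, 8)
    param_groups = [{'params': sound.parameters(), 'lr': 1e-3},
                    {'params': synthesizer.parameters(), 'lr': 1e-3},
                    {'params': frame.parameters(), 'lr': 1e-4}]
    return torch.optim.SGD(param_groups, momentum=0.9, weight_decay=1e-4)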
def train(ctx: dict):
ctx['net_wrapper'] = build_model(ctx)
ctx['optimizer'] = create_optimizer(get_underlying_nets(get_ctx(ctx, 'net_wrapper')), ctx)
dataset_train = MUSICMixDataset(get_ctx(ctx, 'list_train'), ctx, split='train')
ctx['loader_train'] = torch.utils.data.DataLoader(
dataset_train,
batch_size=get_ctx(ctx, 'batch_size'),
shuffle=True,
num_workers=int(get_ctx(ctx, 'workers')),
drop_last=True)
ctx['epoch_iters'] = len(dataset_train) // get_ctx(ctx, 'batch_size')
print(f'1 Epoch = {get_ctx(ctx, "epoch_iters")} iters')
dataset_val = MUSICMixDataset(get_ctx(ctx, 'list_val'), ctx,
max_sample=get_ctx(ctx, 'num_val'), split='val')
ctx['loader_val'] = torch.utils.data.DataLoader(
dataset_val,
batch_size=get_ctx(ctx, 'batch_size'),
shuffle=False,
num_workers=2,
drop_last=False)
ctx['history'], from_epoch = init_history(ctx)
if get_ctx(ctx, 'continue_training') == '':
makedirs(get_ctx(ctx, 'path'), remove=True)
for epoch in range(from_epoch, get_ctx(ctx, 'num_epoch') + 1):
ctx['epoch'] = epoch
with torch.set_grad_enabled(True):
train_epoch(ctx)
with torch.set_grad_enabled(False):
if epoch % get_ctx(ctx, 'eval_epoch') == 0:
_evaluate(ctx)
checkpoint(ctx)
# drop learning rate
if epoch in get_ctx(ctx, 'lr_steps'):
adjust_learning_rate(ctx)
print('Training Done!')
|
[
"torch.cuda.synchronize",
"steps.common.adjust_learning_rate",
"steps.evaluate_base._evaluate",
"helpers.utils.AverageMeter",
"time.perf_counter",
"helpers.utils.get_timestr",
"steps.common.build_model",
"helpers.utils.get_ctx",
"steps.common.init_history",
"torch.set_grad_enabled",
"os.path.join"
] |
[((327, 347), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""path"""'], {}), "(ctx, 'path')\n", (334, 347), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((747, 768), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""epoch"""'], {}), "(ctx, 'epoch')\n", (754, 768), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((783, 806), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""history"""'], {}), "(ctx, 'history')\n", (790, 806), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((818, 838), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""path"""'], {}), "(ctx, 'path')\n", (825, 838), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1836, 1863), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""net_wrapper"""'], {}), "(ctx, 'net_wrapper')\n", (1843, 1863), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1880, 1905), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""optimizer"""'], {}), "(ctx, 'optimizer')\n", (1887, 1905), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1919, 1947), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""loader_train"""'], {}), "(ctx, 'loader_train')\n", (1926, 1947), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1962, 1985), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""history"""'], {}), "(ctx, 'history')\n", (1969, 1985), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1998, 2019), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""epoch"""'], {}), "(ctx, 'epoch')\n", (2005, 2019), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((2038, 2052), 'helpers.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2050, 2052), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((2069, 2083), 'helpers.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2081, 2083), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((2183, 2202), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2200, 2202), False, 'import time\n'), ((4010, 4026), 'steps.common.build_model', 'build_model', (['ctx'], {}), '(ctx)\n', (4021, 4026), False, 'from steps.common import build_model, get_underlying_nets, init_history, adjust_learning_rate\n'), ((4938, 4955), 'steps.common.init_history', 'init_history', (['ctx'], {}), '(ctx)\n', (4950, 4955), False, 'from steps.common import build_model, get_underlying_nets, init_history, adjust_learning_rate\n'), ((379, 406), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""net_wrapper"""'], {}), "(ctx, 'net_wrapper')\n", (386, 406), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((499, 536), 'os.path.join', 'os.path.join', (['path', 'f"""sound_{suffix}"""'], {}), "(path, f'sound_{suffix}')\n", (511, 536), False, 'import os\n'), ((577, 614), 'os.path.join', 'os.path.join', (['path', 'f"""frame_{suffix}"""'], {}), "(path, f'frame_{suffix}')\n", (589, 614), False, 'import os\n'), ((661, 704), 'os.path.join', 'os.path.join', (['path', 'f"""synthesizer_{suffix}"""'], {}), "(path, f'synthesizer_{suffix}')\n", (673, 704), False, 'import os\n'), ((985, 1029), 'os.path.join', 'os.path.join', (['path', 'f"""epoch_{suffix_latest}"""'], {}), "(path, f'epoch_{suffix_latest}')\n", (997, 1029), False, 'import os\n'), ((1055, 1101), 
'os.path.join', 'os.path.join', (['path', 'f"""history_{suffix_latest}"""'], {}), "(path, f'history_{suffix_latest}')\n", (1067, 1101), False, 'import os\n'), ((1763, 1787), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1785, 1787), False, 'import torch\n'), ((2677, 2696), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2694, 2696), False, 'import time\n'), ((4159, 4185), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""list_train"""'], {}), "(ctx, 'list_train')\n", (4166, 4185), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4476, 4502), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""batch_size"""'], {}), "(ctx, 'batch_size')\n", (4483, 4502), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4598, 4622), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""list_val"""'], {}), "(ctx, 'list_val')\n", (4605, 4622), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4963, 4996), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""continue_training"""'], {}), "(ctx, 'continue_training')\n", (4970, 4996), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1265, 1293), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""best_metrics"""'], {}), "(ctx, 'best_metrics')\n", (1272, 1293), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1545, 1577), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""checkpoint_epoch"""'], {}), "(ctx, 'checkpoint_epoch')\n", (1552, 1577), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1717, 1739), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""device"""'], {}), "(ctx, 'device')\n", (1724, 1739), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3503, 3527), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_sound"""'], {}), "(ctx, 'lr_sound')\n", (3510, 3527), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3597, 3627), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_synthesizer"""'], {}), "(ctx, 'lr_synthesizer')\n", (3604, 3627), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3700, 3724), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_frame"""'], {}), "(ctx, 'lr_frame')\n", (3707, 3724), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3791, 3815), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_frame"""'], {}), "(ctx, 'lr_frame')\n", (3798, 3815), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3868, 3889), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""beta1"""'], {}), "(ctx, 'beta1')\n", (3875, 3889), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3931, 3959), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""weight_decay"""'], {}), "(ctx, 'weight_decay')\n", (3938, 3959), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4087, 4114), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""net_wrapper"""'], {}), "(ctx, 'net_wrapper')\n", (4094, 4114), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4304, 4330), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""batch_size"""'], {}), "(ctx, 'batch_size')\n", (4311, 4330), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4674, 
4697), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""num_val"""'], {}), "(ctx, 'num_val')\n", (4681, 4697), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4805, 4831), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""batch_size"""'], {}), "(ctx, 'batch_size')\n", (4812, 4831), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((5021, 5041), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""path"""'], {}), "(ctx, 'path')\n", (5028, 5041), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((5092, 5117), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""num_epoch"""'], {}), "(ctx, 'num_epoch')\n", (5099, 5117), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((5167, 5195), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (5189, 5195), False, 'import torch\n'), ((5240, 5269), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (5262, 5269), False, 'import torch\n'), ((5436, 5460), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_steps"""'], {}), "(ctx, 'lr_steps')\n", (5443, 5460), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((5474, 5499), 'steps.common.adjust_learning_rate', 'adjust_learning_rate', (['ctx'], {}), '(ctx)\n', (5494, 5499), False, 'from steps.common import build_model, get_underlying_nets, init_history, adjust_learning_rate\n'), ((1306, 1332), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""eval_epoch"""'], {}), "(ctx, 'eval_epoch')\n", (1313, 1332), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((1602, 1634), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""checkpoint_epoch"""'], {}), "(ctx, 'checkpoint_epoch')\n", (1609, 1634), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((2325, 2344), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2342, 2344), False, 'import time\n'), ((2636, 2655), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2653, 2655), False, 'import time\n'), ((2731, 2756), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""disp_iter"""'], {}), "(ctx, 'disp_iter')\n", (2738, 2756), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4378, 4401), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""workers"""'], {}), "(ctx, 'workers')\n", (4385, 4401), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((4526, 4553), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""epoch_iters"""'], {}), "(ctx, 'epoch_iters')\n", (4533, 4553), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((5343, 5357), 'steps.evaluate_base._evaluate', '_evaluate', (['ctx'], {}), '(ctx)\n', (5352, 5357), False, 'from steps.evaluate_base import _evaluate\n'), ((1427, 1455), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""best_metrics"""'], {}), "(ctx, 'best_metrics')\n", (1434, 1455), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3210, 3237), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""epoch_iters"""'], {}), "(ctx, 'epoch_iters')\n", (3217, 3237), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((5294, 5320), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""eval_epoch"""'], {}), "(ctx, 'eval_epoch')\n", (5301, 5320), False, 'from helpers.utils import AverageMeter, makedirs, 
get_ctx, get_timestr\n'), ((2784, 2797), 'helpers.utils.get_timestr', 'get_timestr', ([], {}), '()\n', (2795, 2797), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((2821, 2848), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""epoch_iters"""'], {}), "(ctx, 'epoch_iters')\n", (2828, 2848), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((2974, 2998), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_sound"""'], {}), "(ctx, 'lr_sound')\n", (2981, 2998), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3012, 3036), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_frame"""'], {}), "(ctx, 'lr_frame')\n", (3019, 3036), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n'), ((3078, 3108), 'helpers.utils.get_ctx', 'get_ctx', (['ctx', '"""lr_synthesizer"""'], {}), "(ctx, 'lr_synthesizer')\n", (3085, 3108), False, 'from helpers.utils import AverageMeter, makedirs, get_ctx, get_timestr\n')]
|
import MultipleFiles as mf
import glob
import sys
import numpy as np
path = "/home/xjh0560/Supernova_Lightcurves/LC_Data/"
filelist = np.asarray(glob.glob("/home/xjh0560/Supernova_Lightcurves/LC_Data/sample_lc_v2/*.h5"))
args = sys.argv
print("Running")
mf.run_analysis_multi(filelist[int(args[1]):int(args[1])+2])
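# Usage sketch (assumption: launched from the command line, e.g. by a batch
# scheduler, with a start index as its first argument):
#   python <this script> 10
# processes filelist[10:12], i.e. two light-curve files per invocation.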
|
[
"glob.glob"
] |
[((146, 220), 'glob.glob', 'glob.glob', (['"""/home/xjh0560/Supernova_Lightcurves/LC_Data/sample_lc_v2/*.h5"""'], {}), "('/home/xjh0560/Supernova_Lightcurves/LC_Data/sample_lc_v2/*.h5')\n", (155, 220), False, 'import glob\n')]
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of SSL management operations.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from pycopia import tty
from pycopia.ssl import certs
def get_pass(verify):
"""Basic callback for getting passphrase."""
if verify:
retries = 3
while retries > 0:
pw = tty.getpass("Passphrase? ")
npw = tty.getpass("Passphrase again? ")
if pw == npw:
return pw
print("Phrases don't match. Please try again.")
retries -= 1
        # 'crypto' is never imported in this module, so raise a builtin error instead
        raise ValueError("Too many tries reading passphrase.")
else:
return tty.getpass("Passphrase? ")
def certificate_request(filename, country=None, state=None, locality=None, organization=None,
organization_unit=None, name=None, email=None, passphrase=get_pass):
"""Basic certificate request with no extensions."""
req = certs.CertificateRequest(country, state, locality, organization,
organization_unit, name, email)
pkey = certs.create_rsa_keypair()
req.pubkey = pkey
req.sign(pkey, "sha1")
with open(filename, "w+") as fo:
req.emit(fo)
print("Encrypt private key with secret.")
ektext = pkey.encrypt(passphrase)
with open(filename+".key", "w+") as fo:
fo.write(ektext)
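# Note (sketch only): certificate_request() above writes two files: <filename>
# with the emitted certificate request, and <filename>.key with the private key
# encrypted using the passphrase obtained from the callback.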
def certificate_sign():
pass
if __name__ == "__main__":
certificate_request("/tmp/certreqtest.pem",
country="US",
state="California",
locality="Santa Clara",
organization="Acme Inc.",
organization_unit="Slaves",
name="www.foo.com",
)
|
[
"pycopia.ssl.certs.CertificateRequest",
"pycopia.tty.getpass",
"pycopia.ssl.certs.create_rsa_keypair"
] |
[((1618, 1718), 'pycopia.ssl.certs.CertificateRequest', 'certs.CertificateRequest', (['country', 'state', 'locality', 'organization', 'organization_unit', 'name', 'email'], {}), '(country, state, locality, organization,\n organization_unit, name, email)\n', (1642, 1718), False, 'from pycopia.ssl import certs\n'), ((1738, 1764), 'pycopia.ssl.certs.create_rsa_keypair', 'certs.create_rsa_keypair', ([], {}), '()\n', (1762, 1764), False, 'from pycopia.ssl import certs\n'), ((1351, 1378), 'pycopia.tty.getpass', 'tty.getpass', (['"""Passphrase? """'], {}), "('Passphrase? ')\n", (1362, 1378), False, 'from pycopia import tty\n'), ((1044, 1071), 'pycopia.tty.getpass', 'tty.getpass', (['"""Passphrase? """'], {}), "('Passphrase? ')\n", (1055, 1071), False, 'from pycopia import tty\n'), ((1090, 1123), 'pycopia.tty.getpass', 'tty.getpass', (['"""Passphrase again? """'], {}), "('Passphrase again? ')\n", (1101, 1123), False, 'from pycopia import tty\n')]
|
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Authentication API Endpoint Test"""
import requests
def create_test_user(session):
"""Create a user and authenticate to use api endpoints during testing."""
create_user_input = {
"name": "<NAME>",
"username": "susan20",
"password": "<PASSWORD>",
"email": "<EMAIL>",
}
session.post("http://rbac-server:8000/api/users", json=create_user_input)
def test_search_api():
"""Tests the search api endpoint functions and returns a valid payload."""
with requests.Session() as session:
create_test_user(session)
search_query = {
"query": {
"search_input": "search input",
"search_object_types": ["role", "pack", "user"],
"page_size": "20",
"page": "2",
}
}
response = session.post("http://rbac-server:8000/api/search", json=search_query)
assert response.json()["data"] == {"roles": [], "packs": [], "users": []}
|
[
"requests.Session"
] |
[((1188, 1206), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1204, 1206), False, 'import requests\n')]
|
import io
import re
from setuptools import find_packages
from setuptools import setup
with io.open("README.rst", "rt", encoding="utf8") as f:
readme = f.read()
with io.open("src/flask/__init__.py", "rt", encoding="utf8") as f:
version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)
setup(
name="Flask",
version=version,
url="https://palletsprojects.com/p/flask/",
project_urls={
"Documentation": "https://flask.palletsprojects.com/",
"Code": "https://github.com/pallets/flask",
"Issue tracker": "https://github.com/pallets/flask/issues",
},
license="BSD-3-Clause",
author="<NAME>",
author_email="<EMAIL>",
maintainer="Pallets",
maintainer_email="<EMAIL>",
description="A simple framework for building complex web applications.",
long_description=readme,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Flask",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
install_requires=[
"Werkzeug>=0.15",
"Jinja2>=2.10.1",
"itsdangerous>=0.24",
"click>=5.1",
],
extras_require={
"dotenv": ["python-dotenv"],
"dev": [
"pytest",
"coverage",
"tox",
"sphinx",
"pallets-sphinx-themes",
"sphinxcontrib-log-cabinet",
"sphinx-issues",
],
"docs": [
"sphinx",
"pallets-sphinx-themes",
"sphinxcontrib-log-cabinet",
"sphinx-issues",
],
},
entry_points={"console_scripts": ["flask = flask.cli:main"]},
)
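# Usage sketch (assumption: run from a source checkout of this project):
#   pip install .            # base install with the minimum dependency versions above
#   pip install ".[dev]"     # additionally installs the "dev" extras listed above
#   pip install ".[docs]"    # additionally installs the documentation extras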
|
[
"setuptools.find_packages",
"io.open"
] |
[((93, 137), 'io.open', 'io.open', (['"""README.rst"""', '"""rt"""'], {'encoding': '"""utf8"""'}), "('README.rst', 'rt', encoding='utf8')\n", (100, 137), False, 'import io\n'), ((172, 227), 'io.open', 'io.open', (['"""src/flask/__init__.py"""', '"""rt"""'], {'encoding': '"""utf8"""'}), "('src/flask/__init__.py', 'rt', encoding='utf8')\n", (179, 227), False, 'import io\n'), ((1760, 1780), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (1773, 1780), False, 'from setuptools import find_packages\n')]
|
import logging
import numpy as np
from monai.transforms import LoadImage
from monailabel.interfaces.datastore import Datastore, DefaultLabelTag
from monailabel.interfaces.tasks import ScoringMethod
logger = logging.getLogger(__name__)
class Sum(ScoringMethod):
"""
    Compute a simple numpy sum for each label tag and record the valid slices that contain a label mask.
"""
def __init__(self, tags=(DefaultLabelTag.FINAL.value, DefaultLabelTag.ORIGINAL.value)):
super().__init__("Compute Numpy Sum for Final/Original Labels")
self.tags = tags
def __call__(self, request, datastore: Datastore):
loader = LoadImage(image_only=True)
result = {}
for image_id in datastore.list_images():
for tag in self.tags:
label_id: str = datastore.get_label_by_image_id(image_id, tag)
if label_id:
label = loader(datastore.get_label_uri(label_id))
slices = [sid for sid in range(label.shape[0]) if np.sum(label[sid] > 0)]
info = {"sum": int(np.sum(label)), "slices": slices}
logger.info(f"{label_id} => {info}")
datastore.update_label_info(label_id, info)
result[label_id] = info
return result
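# Illustrative sketch (added for clarity, not used by the scoring method): mirrors
# the per-label statistics computed in Sum.__call__ on a tiny fake 3-slice label
# volume; the values are made up.
def _example_slice_stats():
    """Sketch only: sum and foreground-slice indices for a fake label volume."""
    label = np.zeros((3, 2, 2))
    label[1, 0, 0] = 1  # only slice index 1 contains foreground
    slices = [sid for sid in range(label.shape[0]) if np.sum(label[sid] > 0)]
    return {"sum": int(np.sum(label)), "slices": slices}  # {'sum': 1, 'slices': [1]}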
|
[
"monai.transforms.LoadImage",
"numpy.sum",
"logging.getLogger"
] |
[((210, 237), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (227, 237), False, 'import logging\n'), ((650, 676), 'monai.transforms.LoadImage', 'LoadImage', ([], {'image_only': '(True)'}), '(image_only=True)\n', (659, 676), False, 'from monai.transforms import LoadImage\n'), ((1028, 1050), 'numpy.sum', 'np.sum', (['(label[sid] > 0)'], {}), '(label[sid] > 0)\n', (1034, 1050), True, 'import numpy as np\n'), ((1091, 1104), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (1097, 1104), True, 'import numpy as np\n')]
|
#
# pr8_1_1
from math import pi
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import ellipord, ellip, freqz, group_delay
def freqz_m(b, a):
"""
Modified version of freqz subroutine
:param b: numerator polynomial of H(z) (for FIR: b=h)
:param a: denominator polynomial of H(z) (for FIR: a=[1])
:return db: Relative magnitude in dB computed over 0 to pi radians
:return mag: absolute magnitude computed over 0 to pi radians
:return pha: Phase response in radians over 0 to pi radians
:return grd: Group delay over 0 to pi radians
:return w: 501 frequency samples between 0 to pi radians
"""
w, H = freqz(b, a, 1000, whole=True)
H = H[0:501]
w = w[0:501]
mag = np.abs(H)
eps = np.finfo(float).eps
db = 20 * np.log10((mag + eps) / np.max(mag))
pha = np.angle(H)
_, grd = group_delay((b, a), w)
return db, mag, pha, grd, w
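# Illustrative sketch (added for clarity, not part of the original script): calls
# freqz_m on a simple 3-tap moving-average FIR filter, where b = h and a = [1].
def _example_freqz_m():
    """Sketch only: magnitude response of a moving-average filter."""
    b = np.ones(3) / 3
    db, mag, pha, grd, w = freqz_m(b, [1])
    # 501 frequency samples from 0 to pi rad/sample; close to 0 dB at DC
    return w.shape, float(db[0])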
if __name__ == '__main__':
fs = 8000 # sampling frequency
fs2 = fs / 2
Wp = np.array([60, 500]) / fs2 # filter pass band
Ws = np.array([20, 2000]) / fs2 # filter stop band
Rp = 1 # passband ripple
Rs = 40 # stopband attenuation
n, Wn = ellipord(Wp,Ws,Rp,Rs) # filter order
b, a = ellip(n, Rp, Rs, Wn, 'bandpass') # filter coefficients
print('b = {} \na = {}'.format(b, a))
db, mag, pha, grd, w = freqz_m(b, a) # frequency response curve
# figure
plt.figure(figsize=(16, 9))
    plt.plot(w / pi * fs2, db, linewidth=2)
plt.grid()
plt.axis([0, 4000, -90, 10])
plt.title('Frequency Response of Elliptical 6th-order BPF')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.savefig('images/elliptical_6th_BPF.png', bbox_inches='tight', dpi=600)
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.abs",
"scipy.signal.ellip",
"scipy.signal.group_delay",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.angle",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axis",
"numpy.finfo",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.array",
"scipy.signal.ellipord",
"scipy.signal.freqz",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] |
[((636, 665), 'scipy.signal.freqz', 'freqz', (['b', 'a', '(1000)'], {'whole': '(True)'}), '(b, a, 1000, whole=True)\n', (641, 665), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((701, 710), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (707, 710), True, 'import numpy as np\n'), ((792, 803), 'numpy.angle', 'np.angle', (['H'], {}), '(H)\n', (800, 803), True, 'import numpy as np\n'), ((814, 836), 'scipy.signal.group_delay', 'group_delay', (['(b, a)', 'w'], {}), '((b, a), w)\n', (825, 836), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((1264, 1288), 'scipy.signal.ellipord', 'ellipord', (['Wp', 'Ws', 'Rp', 'Rs'], {}), '(Wp, Ws, Rp, Rs)\n', (1272, 1288), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((1327, 1359), 'scipy.signal.ellip', 'ellip', (['n', 'Rp', 'Rs', 'Wn', '"""bandpass"""'], {}), "(n, Rp, Rs, Wn, 'bandpass')\n", (1332, 1359), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((1530, 1557), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1540, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1598), 'matplotlib.pyplot.plot', 'plt.plot', (['(w / pi * fs2)', 'db'], {'linewidth': '(2)'}), '(w / pi * fs2, db, linewidth=2)\n', (1567, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1609), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1607, 1609), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1639), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 4000, -90, 10]'], {}), '([0, 4000, -90, 10])\n', (1619, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1700), 'matplotlib.pyplot.title', 'plt.title', (['"""Frequency Response of Elliptical 6th-order BPF"""'], {}), "('Frequency Response of Elliptical 6th-order BPF')\n", (1650, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (1712, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1760), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [dB]"""'], {}), "('Amplitude [dB]')\n", (1742, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/elliptical_6th_BPF.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "('images/elliptical_6th_BPF.png', bbox_inches='tight', dpi=600)\n", (1773, 1836), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1846, 1848), True, 'import matplotlib.pyplot as plt\n'), ((718, 733), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (726, 733), True, 'import numpy as np\n'), ((986, 1005), 'numpy.array', 'np.array', (['[60, 500]'], {}), '([60, 500])\n', (994, 1005), True, 'import numpy as np\n'), ((1054, 1074), 'numpy.array', 'np.array', (['[20, 2000]'], {}), '([20, 2000])\n', (1062, 1074), True, 'import numpy as np\n'), ((772, 783), 'numpy.max', 'np.max', (['mag'], {}), '(mag)\n', (778, 783), True, 'import numpy as np\n')]
|
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import TemplateView
from applications.models import Application
from reimbursement.models import Reimbursement
from baggage.models import Bag
from django.shortcuts import get_object_or_404
from urllib.parse import quote
from django.http import StreamingHttpResponse
import os
from app import utils, mixins
def root_view(request):
if not request.user.is_authenticated() and not utils.is_app_closed():
return HttpResponseRedirect(reverse('account_signup'))
if not request.user.is_authenticated() and utils.is_app_closed():
return HttpResponseRedirect(reverse('account_login'))
if not request.user.email_verified:
return HttpResponseRedirect(reverse('verify_email_required'))
if request.user.is_organizer:
return HttpResponseRedirect(reverse('review'))
elif request.user.is_volunteer:
return HttpResponseRedirect(reverse('check_in_list'))
return HttpResponseRedirect(reverse('dashboard'))
def code_conduct(request):
code_link = getattr(settings, 'CODE_CONDUCT_LINK', None)
if code_link:
return HttpResponseRedirect(code_link)
return render(request, 'code_conduct.html')
def legal_notice(request):
return render(request, 'legal_notice.html')
def privacy_and_cookies(request):
return render(request, 'privacy_and_cookies.html')
def terms_and_conditions(request):
return render(request, 'terms_and_conditions.html')
def protectedMedia(request, file_):
path, file_name = os.path.split(file_)
downloadable_path = None
if path == "resumes":
app = get_object_or_404(Application, resume=file_)
if request.user.is_authenticated() and (request.user.is_organizer or
(app and (app.user_id == request.user.id))):
downloadable_path = app.resume.path
elif path == "receipt":
app = get_object_or_404(Reimbursement, receipt=file_)
if request.user.is_authenticated() and (request.user.is_organizer or
(app and (app.hacker_id == request.user.id))):
downloadable_path = app.receipt.path
elif path == "baggage":
bag = get_object_or_404(Bag, image=file_)
if request.user.is_authenticated() and (request.user.is_organizer or request.user.is_volunteer):
downloadable_path = bag.image.path
if downloadable_path:
response = StreamingHttpResponse(open(downloadable_path, 'rb'))
response['Content-Type'] = ''
response['Content-Disposition'] = 'attachment; filename*=UTF-8\'\'%s' % quote(file_name)
response['Content-Transfer-Encoding'] = 'binary'
response['Expires'] = '0'
response['Cache-Control'] = 'must-revalidate'
response['Pragma'] = 'public'
return response
return HttpResponseRedirect(reverse('account_login'))
class TabsView(mixins.TabsViewMixin, TemplateView):
pass
|
[
"django.urls.reverse",
"django.shortcuts.get_object_or_404",
"urllib.parse.quote",
"app.utils.is_app_closed",
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"os.path.split"
] |
[((1291, 1327), 'django.shortcuts.render', 'render', (['request', '"""code_conduct.html"""'], {}), "(request, 'code_conduct.html')\n", (1297, 1327), False, 'from django.shortcuts import render\n'), ((1368, 1404), 'django.shortcuts.render', 'render', (['request', '"""legal_notice.html"""'], {}), "(request, 'legal_notice.html')\n", (1374, 1404), False, 'from django.shortcuts import render\n'), ((1452, 1495), 'django.shortcuts.render', 'render', (['request', '"""privacy_and_cookies.html"""'], {}), "(request, 'privacy_and_cookies.html')\n", (1458, 1495), False, 'from django.shortcuts import render\n'), ((1544, 1588), 'django.shortcuts.render', 'render', (['request', '"""terms_and_conditions.html"""'], {}), "(request, 'terms_and_conditions.html')\n", (1550, 1588), False, 'from django.shortcuts import render\n'), ((1649, 1669), 'os.path.split', 'os.path.split', (['file_'], {}), '(file_)\n', (1662, 1669), False, 'import os\n'), ((689, 710), 'app.utils.is_app_closed', 'utils.is_app_closed', ([], {}), '()\n', (708, 710), False, 'from app import utils, mixins\n'), ((1103, 1123), 'django.urls.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (1110, 1123), False, 'from django.urls import reverse\n'), ((1248, 1279), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['code_link'], {}), '(code_link)\n', (1268, 1279), False, 'from django.http import HttpResponseRedirect\n'), ((1739, 1783), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Application'], {'resume': 'file_'}), '(Application, resume=file_)\n', (1756, 1783), False, 'from django.shortcuts import get_object_or_404\n'), ((3015, 3039), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (3022, 3039), False, 'from django.urls import reverse\n'), ((556, 577), 'app.utils.is_app_closed', 'utils.is_app_closed', ([], {}), '()\n', (575, 577), False, 'from app import utils, mixins\n'), ((615, 640), 'django.urls.reverse', 'reverse', (['"""account_signup"""'], {}), "('account_signup')\n", (622, 640), False, 'from django.urls import reverse\n'), ((748, 772), 'django.urls.reverse', 'reverse', (['"""account_login"""'], {}), "('account_login')\n", (755, 772), False, 'from django.urls import reverse\n'), ((850, 882), 'django.urls.reverse', 'reverse', (['"""verify_email_required"""'], {}), "('verify_email_required')\n", (857, 882), False, 'from django.urls import reverse\n'), ((954, 971), 'django.urls.reverse', 'reverse', (['"""review"""'], {}), "('review')\n", (961, 971), False, 'from django.urls import reverse\n'), ((2044, 2091), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Reimbursement'], {'receipt': 'file_'}), '(Reimbursement, receipt=file_)\n', (2061, 2091), False, 'from django.shortcuts import get_object_or_404\n'), ((2759, 2775), 'urllib.parse.quote', 'quote', (['file_name'], {}), '(file_name)\n', (2764, 2775), False, 'from urllib.parse import quote\n'), ((1045, 1069), 'django.urls.reverse', 'reverse', (['"""check_in_list"""'], {}), "('check_in_list')\n", (1052, 1069), False, 'from django.urls import reverse\n'), ((2355, 2390), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Bag'], {'image': 'file_'}), '(Bag, image=file_)\n', (2372, 2390), False, 'from django.shortcuts import get_object_or_404\n')]
|
""" manager, for starters
"""
# pylint: disable=no-self-use,unsubscriptable-object,fixme,bad-continuation
import base64
from pathlib import Path
from typing import List, Text
from urllib.parse import unquote
import jinja2
import traitlets as T
from jupyter_core.paths import jupyter_config_path
from notebook import _tz as tz
from notebook.services.config import ConfigManager
from notebook.utils import maybe_future, url_path_join as ujoin
from traitlets.config import LoggingConfigurable
from .py_starters.cookiecutter import cookiecutter_starters
from .schema.v2 import STARTERS
from .trait_types import Schema
# default patterns to ignore
DEFAULT_IGNORE_PATTERNS = [
"__pycache__",
".git",
".ipynb_checkpoints",
"*.pyc",
"node_modules",
]
class StarterManager(LoggingConfigurable):
""" handlers starting starters
"""
starters = Schema(validator=STARTERS)
jinja_env = T.Instance(jinja2.Environment)
jinja_env_extensions = T.Dict()
config_dict = T.Dict()
extra_starters = Schema(default_value={}, validator=STARTERS).tag(config=True)
extra_jinja_env_extensions = T.Dict({}).tag(config=True)
@property
def contents_manager(self):
""" use the contents manager from parent
"""
return self.parent.contents_manager
@T.default("jinja_env_extensions")
def _default_env_extensions(self):
""" get env extensions from extras and config
"""
extensions = {}
extensions.update(self.config_dict.get("extra_jinja_env_extensions", {}))
extensions.update(self.extra_jinja_env_extensions)
return extensions
@T.default("jinja_env")
def _default_env(self):
return jinja2.Environment(
extensions=[
ext for ext, enabled in self.jinja_env_extensions.items() if enabled
]
)
@T.default("config_dict")
def _default_config_dict(self):
""" load merged config from more jupyter_notebook_config.d files
re-uses notebook loading machinery to look through more locations
"""
manager = ConfigManager(read_config_path=jupyter_config_path())
return manager.get("jupyter_notebook_config").get("StarterManager", {})
@T.default("starters")
def _default_starters(self):
""" default starters
"""
starters = {}
starters.update(cookiecutter_starters())
starters.update(self.config_dict.get("extra_starters", {}))
starters.update(self.extra_starters)
return starters
@property
def starter_names(self) -> List[Text]:
""" convenience method to get names of starters
"""
return sorted(dict(self.starters).keys())
async def start(self, name, path, body):
""" start a starter
"""
starter = self.starters[name]
starter_type = starter["type"]
if starter_type == "copy":
return await self.start_copy(name, starter, path, body)
if starter_type == "python":
return await self.start_python(name, starter, path, body)
raise NotImplementedError(starter["type"])
async def start_copy(self, name, starter, path, body):
""" start a copy starter
"""
root = Path(starter["src"]).resolve()
root_uri = root.as_uri()
dest_tmpl_str = starter.get("dest")
if dest_tmpl_str is not None:
dest_tmpl = self.jinja_env.from_string(dest_tmpl_str)
dest = ujoin(path, dest_tmpl.render(**(body or {})))
else:
dest = ujoin(path, root.name)
await self.save_one(root, dest)
for child in iter_not_ignored(
root, starter.get("ignore", DEFAULT_IGNORE_PATTERNS)
):
await self.save_one(
child, unquote(ujoin(dest, child.as_uri().replace(root_uri, ""))),
)
return {
"body": body,
"name": name,
"path": dest,
"starter": starter,
"status": "done",
}
async def start_python(self, name, starter, path, body):
""" start a python starter
"""
func = T.import_item(starter["callable"])
return await func(name, starter, path, body, self)
async def save_one(self, src, dest):
""" use the contents manager to write a single file/folder
"""
# pylint: disable=broad-except
stat = src.stat()
is_dir = src.is_dir()
model = dict(
name=src.name,
type="directory" if is_dir else "file",
path=dest,
last_modified=tz.utcfromtimestamp(stat.st_mtime),
created=tz.utcfromtimestamp(stat.st_ctime),
content=None
if is_dir
else base64.b64encode(src.read_bytes()).decode("utf-8"),
format=None if is_dir else "base64",
mimetype=None,
size=stat.st_size,
)
allow_hidden = None
if hasattr(self.contents_manager, "allow_hidden"):
allow_hidden = self.contents_manager.allow_hidden
self.contents_manager.allow_hidden = True
try:
await maybe_future(self.contents_manager.save(model, dest))
except Exception as err:
self.log.error(f"Couldn't save {dest}: {err}")
finally:
if allow_hidden is not None:
self.contents_manager.allow_hidden = allow_hidden
def iter_not_ignored(root, ignore_patterns):
""" yield all children under a root that do not match the ignore patterns
"""
if root.is_dir():
ignored = set()
for src in sorted(root.rglob("*")):
if ignored & set(src.parents):
continue
root_rel = src.relative_to(root)
if any(root_rel.match(pattern) for pattern in ignore_patterns):
ignored.add(src)
continue
yield src
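# Illustrative sketch (added for clarity, not used by the manager): shows which
# children iter_not_ignored() skips for the default patterns, using a throwaway
# temporary directory.
def _example_iter_not_ignored():
    """Sketch only: node_modules is skipped, regular sources are kept."""
    import tempfile
    root = Path(tempfile.mkdtemp())
    (root / "src").mkdir()
    (root / "src" / "index.py").touch()
    (root / "node_modules").mkdir()
    (root / "node_modules" / "dep.js").touch()
    kept = [child.relative_to(root).as_posix()
            for child in iter_not_ignored(root, DEFAULT_IGNORE_PATTERNS)]
    return kept  # ['src', 'src/index.py']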
|
[
"traitlets.default",
"notebook.utils.url_path_join",
"traitlets.Dict",
"jupyter_core.paths.jupyter_config_path",
"pathlib.Path",
"notebook._tz.utcfromtimestamp",
"traitlets.Instance",
"traitlets.import_item"
] |
[((913, 943), 'traitlets.Instance', 'T.Instance', (['jinja2.Environment'], {}), '(jinja2.Environment)\n', (923, 943), True, 'import traitlets as T\n'), ((971, 979), 'traitlets.Dict', 'T.Dict', ([], {}), '()\n', (977, 979), True, 'import traitlets as T\n'), ((998, 1006), 'traitlets.Dict', 'T.Dict', ([], {}), '()\n', (1004, 1006), True, 'import traitlets as T\n'), ((1310, 1343), 'traitlets.default', 'T.default', (['"""jinja_env_extensions"""'], {}), "('jinja_env_extensions')\n", (1319, 1343), True, 'import traitlets as T\n'), ((1646, 1668), 'traitlets.default', 'T.default', (['"""jinja_env"""'], {}), "('jinja_env')\n", (1655, 1668), True, 'import traitlets as T\n'), ((1872, 1896), 'traitlets.default', 'T.default', (['"""config_dict"""'], {}), "('config_dict')\n", (1881, 1896), True, 'import traitlets as T\n'), ((2255, 2276), 'traitlets.default', 'T.default', (['"""starters"""'], {}), "('starters')\n", (2264, 2276), True, 'import traitlets as T\n'), ((4197, 4231), 'traitlets.import_item', 'T.import_item', (["starter['callable']"], {}), "(starter['callable'])\n", (4210, 4231), True, 'import traitlets as T\n'), ((1124, 1134), 'traitlets.Dict', 'T.Dict', (['{}'], {}), '({})\n', (1130, 1134), True, 'import traitlets as T\n'), ((3595, 3617), 'notebook.utils.url_path_join', 'ujoin', (['path', 'root.name'], {}), '(path, root.name)\n', (3600, 3617), True, 'from notebook.utils import maybe_future, url_path_join as ujoin\n'), ((2146, 2167), 'jupyter_core.paths.jupyter_config_path', 'jupyter_config_path', ([], {}), '()\n', (2165, 2167), False, 'from jupyter_core.paths import jupyter_config_path\n'), ((3282, 3302), 'pathlib.Path', 'Path', (["starter['src']"], {}), "(starter['src'])\n", (3286, 3302), False, 'from pathlib import Path\n'), ((4659, 4693), 'notebook._tz.utcfromtimestamp', 'tz.utcfromtimestamp', (['stat.st_mtime'], {}), '(stat.st_mtime)\n', (4678, 4693), True, 'from notebook import _tz as tz\n'), ((4715, 4749), 'notebook._tz.utcfromtimestamp', 'tz.utcfromtimestamp', (['stat.st_ctime'], {}), '(stat.st_ctime)\n', (4734, 4749), True, 'from notebook import _tz as tz\n')]
|
"""Model class for sorting numbers."""
import torch.nn as nn
class Features(nn.Module):
def __init__(self, latent_dim, output_dim, dropout_prob):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
This Feature extractor class takes an input and constructs a feature vector. It can be applied independently to all elements of the input sequence
in_flattened_vector: input flattened vector
latent_dim: number of neurons in latent layer
output_dim: dimension of log alpha square matrix
"""
super().__init__()
# net: output of the first neural network that connects numbers to a
# 'latent' representation.
# activation_fn: ReLU is default hence it is specified here
# dropout p – probability of an element to be zeroed
self.linear1 = nn.Linear(1, latent_dim)
self.relu1 = nn.ReLU()
self.d1 = nn.Dropout(p = dropout_prob)
# now those latent representation are connected to rows of the matrix
# log_alpha.
self.linear2 = nn.Linear(latent_dim, output_dim)
self.d2 = nn.Dropout(p=dropout_prob)
def forward(self, x):
"""
In the forward function we accept a Variable of input data and we must
return a Variable of output data. We can use Modules defined in the
constructor as well as arbitrary operators on Variables.
x: Tensor of shape (batch_size, 1)
"""
# activation_fn: ReLU
x = self.d1(self.relu1(self.linear1(x)))
# no activation function is enabled
x = self.d2(self.linear2(x))
return x
class Sinkhorn_Net(nn.Module):
def __init__(self, latent_dim, output_dim, dropout_prob):
super().__init__()
self.output_dim = output_dim
self.features = Features(latent_dim, output_dim, dropout_prob)
def forward(self, x):
"""
        x: Tensor of shape (batch, sequence_length)
Note that output_dim should correspond to the intended sequence length
"""
# each number is processed with the same network, so data is reshaped
# so that numbers occupy the 'batch' position.
x = x.view(-1, 1)
x = self.features(x)
#reshape to cubic for sinkhorn operation
x = x.reshape(-1, self.output_dim, self.output_dim)
return x
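# Illustrative sketch (added for clarity, dimensions are made up): shows the
# expected input and output shapes of Sinkhorn_Net.
def _example_forward_shapes():
    """Sketch only: a batch of 4 sequences of length 5 yields a (4, 5, 5) log_alpha."""
    import torch
    net = Sinkhorn_Net(latent_dim=32, output_dim=5, dropout_prob=0.1)
    x = torch.rand(4, 5)  # (batch, sequence_length); sequence_length must equal output_dim
    log_alpha = net(x)
    return log_alpha.shape  # torch.Size([4, 5, 5])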
|
[
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Linear"
] |
[((899, 923), 'torch.nn.Linear', 'nn.Linear', (['(1)', 'latent_dim'], {}), '(1, latent_dim)\n', (908, 923), True, 'import torch.nn as nn\n'), ((945, 954), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (952, 954), True, 'import torch.nn as nn\n'), ((973, 999), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_prob'}), '(p=dropout_prob)\n', (983, 999), True, 'import torch.nn as nn\n'), ((1124, 1157), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'output_dim'], {}), '(latent_dim, output_dim)\n', (1133, 1157), True, 'import torch.nn as nn\n'), ((1176, 1202), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_prob'}), '(p=dropout_prob)\n', (1186, 1202), True, 'import torch.nn as nn\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-GPU tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import server_lib
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
def testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution)
def testReplicaId(self, distribution):
self._test_replica_id(distribution)
def testNumReplicasInSync(self, distribution):
self.assertEqual(2, distribution.num_replicas_in_sync)
def testCallAndMergeExceptions(self, distribution):
self._test_call_and_merge_exceptions(distribution)
def testRunRegroupError(self, distribution):
def run_fn():
replica_id = int(self.evaluate(_replica_id()))
# Generates a list with different lengths on different devices.
# Will fail in _regroup() (if more than one device).
return list(range(replica_id))
with distribution.scope(), self.assertRaises(AssertionError):
distribution.extended.call_for_each_replica(run_fn)
def testReduceToCpu(self, distribution):
with distribution.scope():
result = distribution.extended.call_for_each_replica(_replica_id)
reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result)
expected = sum(range(distribution.num_replicas_in_sync))
self.assertEqual(expected, self.evaluate(reduced))
def testMakeInputFnIteratorWithDataset(self, distribution):
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i, i+1] for i in range(0, 10, 2)]
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=2,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
expected_values)
# TODO(b/124344198): Re-enable after fixing this flaky test.
def DISABLED_testMakeInputFnIteratorWithCallable(self, distribution):
def fn():
dataset = dataset_ops.Dataset.range(2).interleave(
(lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)
it = dataset.make_one_shot_iterator()
return it.get_next
expected_values = [[i, i] for i in range(0, 10)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=2,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
expected_values, test_reinitialize=False)
def testNumpyIterator(self, distribution):
self._test_numpy_iterator(distribution)
def testGlobalStepUpdate(self, distribution):
self._test_global_step_update(distribution)
def testRun(self, distribution):
self._test_run(distribution)
def testAllReduceSum(self, distribution):
self._test_all_reduce_sum(distribution)
def testAllReduceSumGradients(self, distribution):
self._test_all_reduce_sum_gradients(distribution)
def testAllReduceSumGradientTape(self, distribution):
self._test_all_reduce_sum_gradient_tape(distribution)
def testAllReduceMean(self, distribution):
self._test_all_reduce_mean(distribution)
def testAllReduceMeanGradients(self, distribution):
self._test_all_reduce_mean_gradients(distribution)
def testAllReduceMeanGradientTape(self, distribution):
self._test_all_reduce_mean_gradient_tape(distribution)
def testSummaryForReplicaZeroOnly(self, distribution):
self._test_summary_for_replica_zero_only(distribution)
def one_device_combinations():
return combinations.combine(
distribution=[
combinations.mirrored_strategy_with_one_cpu,
combinations.mirrored_strategy_with_one_gpu,
combinations.core_mirrored_strategy_with_one_cpu,
combinations.core_mirrored_strategy_with_one_gpu],
mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
class MirroredOneDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.OneDeviceDistributionTestBase,
parameterized.TestCase):
def testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution)
def testReplicaId(self, distribution):
self._test_replica_id(distribution)
def testCallAndMergeExceptions(self, distribution):
self._test_call_and_merge_exceptions(distribution)
def testRun(self, distribution):
self._test_run(distribution)
def testAllReduceSum(self, distribution):
self._test_all_reduce_sum(distribution)
def testAllReduceSumGradients(self, distribution):
self._test_all_reduce_sum_gradients(distribution)
def testAllReduceSumGradientTape(self, distribution):
self._test_all_reduce_sum_gradient_tape(distribution)
def testAllReduceMean(self, distribution):
self._test_all_reduce_mean(distribution)
def testAllReduceMeanGradients(self, distribution):
self._test_all_reduce_mean_gradients(distribution)
def testAllReduceMeanGradientTape(self, distribution):
self._test_all_reduce_mean_gradient_tape(distribution)
class MirroredStrategyVariableCreatorStackTest(
test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph"]))
def testCreatorStacksAreThreadLocal(self, distribution):
def model_fn():
replica_id_str = str(self.evaluate(_replica_id()))
def thread_creator_fn(next_creator, *args, **kwargs):
return next_creator(*args, **kwargs) + ":thread_" + replica_id_str
with variable_scope.variable_creator_scope(thread_creator_fn):
# Create a variable in this scope.
v = variable_scope.variable(1.0)
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
return v
def main_thread_creator(next_creator, *args, **kwargs):
# We are not using the underlying next_creator for test purposes.
del next_creator, args, kwargs
return "main_thread"
with context.graph_mode(), \
distribution.scope(), \
variable_scope.variable_creator_scope(main_thread_creator):
result = distribution.extended.call_for_each_replica(model_fn)
result = distribution.experimental_local_results(result)
expected = ("main_thread:thread_0", "main_thread:thread_1")
self.assertEqual(expected, result)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
def testExecutingEagerlyOutsideFunction(self, distribution):
"""Verify we preserve the value of executing_eagerly_outside_functions()."""
def model_fn():
return ops.executing_eagerly_outside_functions()
originally = ops.executing_eagerly_outside_functions()
with distribution.scope():
in_scope = ops.executing_eagerly_outside_functions()
in_model_fn = distribution.extended.call_for_each_replica(model_fn)
unwrapped = distribution.experimental_local_results(in_model_fn)
self.assertEqual(in_scope, unwrapped[0])
self.assertEqual(in_scope, originally)
# Verify this all again, but this time in a FuncGraph.
with func_graph.FuncGraph("fg").as_default(), distribution.scope():
in_scope = ops.executing_eagerly_outside_functions()
in_model_fn = distribution.extended.call_for_each_replica(model_fn)
unwrapped = distribution.experimental_local_results(in_model_fn)
self.assertEqual(in_scope, unwrapped[0])
self.assertEqual(in_scope, originally)
def testFunctionInCallForEachReplicaNoMergeCall(self, distribution):
@def_function.function
def model_fn():
return 0.
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual((0., 0.), self.evaluate(result.values))
def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
def merge_fn(_):
pass
@def_function.function
def model_fn():
ds_context.get_replica_context().merge_call(merge_fn)
return 0.
with distribution.scope():
with self.assertRaisesRegexp(
RuntimeError, "`merge_call` called while defining a new graph."):
distribution.extended.call_for_each_replica(model_fn)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredStrategyVariableCreationTest(test.TestCase):
# TODO(priyag): Modify more tests to use this helper and check more
# properties.
def _test_mv_properties(self, var, name, strategy):
self.assertIsInstance(var, values.MirroredVariable)
self.assertEqual(name, var.name)
self.assertIs(strategy, var.distribute_strategy)
for d in var.devices:
self.assertEqual(d, var.get(d).device)
self.assertIs(strategy, var.get(d)._distribute_strategy) # pylint: disable=protected-access
def testVariableInFuncGraph(self, distribution):
def model_fn():
v = variable_scope.variable(2.0, name="bar")
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with func_graph.FuncGraph("fg").as_default(), distribution.scope():
v1 = variable_scope.variable(1.0, name="foo")
v2 = distribution.extended.call_for_each_replica(model_fn)
self._test_mv_properties(v1, "foo:0", distribution)
self._test_mv_properties(v2, "bar:0", distribution)
def testSingleVariable(self, distribution):
def model_fn():
# This variable should be created only once across the threads because of
# special variable_creator functions used by
# `distribution.extended.call_for_each_replica`.
v = variable_scope.variable(1.0, name="foo")
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self._test_mv_properties(result, "foo:0", distribution)
def testUnnamedVariable(self, distribution):
def model_fn():
v = variable_scope.variable(1.0)
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self._test_mv_properties(result, "Variable:0", distribution)
def testMultipleVariables(self, distribution):
def model_fn():
vs = []
for i in range(5):
vs.append(variable_scope.variable(1.0, name="foo" + str(i)))
ds_context.get_replica_context().merge_call(lambda _: _)
return vs
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
for i, v in enumerate(result):
self._test_mv_properties(v, "foo" + str(i) + ":0", distribution)
def testMultipleVariablesWithSameCanonicalName(self, distribution):
def model_fn():
vs = []
vs.append(variable_scope.variable(1.0, name="foo/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
ds_context.get_replica_context().merge_call(lambda _: _)
return vs
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
for v in result:
self.assertIsInstance(v, values.MirroredVariable)
self.assertEqual(4, len(result))
self.assertEqual("foo/bar:0", result[0].name)
self.assertEqual("foo_1/bar:0", result[1].name)
self.assertEqual("foo_1/bar_1:0", result[2].name)
self.assertEqual("foo/bar_1:0", result[3].name)
def testVariableWithSameCanonicalNameAcrossThreads(self, distribution):
def model_fn():
replica_id = self.evaluate(_replica_id())
v = variable_scope.variable(1.0, name="foo_" + str(replica_id))
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(result, values.MirroredVariable)
# The resulting mirrored variable will use the name from the first device.
self.assertEqual("foo_0:0", result.name)
def testWithLayers(self, distribution):
def model_fn(features):
with variable_scope.variable_scope("common"):
layer1 = core.Dense(1)
layer1(features)
layer2 = core.Dense(1)
layer2(features)
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
layer3 = core.Dense(1)
layer3(features)
return [(layer1.kernel, layer1.bias),
(layer2.kernel, layer2.bias),
(layer3.kernel, layer3.bias)]
iterator = distribution.make_input_fn_iterator(
lambda _: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10))
self.evaluate(iterator.initialize())
features = iterator.get_next()
with distribution.scope():
result = distribution.extended.call_for_each_replica(
model_fn, args=(features,))
suffixes = ["", "_1", "_2"]
for (kernel, bias), suffix in zip(result, suffixes):
self.assertIsInstance(kernel, values.MirroredVariable)
self.assertEqual("common/dense" + suffix + "/kernel:0", kernel.name)
self.assertIsInstance(bias, values.MirroredVariable)
self.assertEqual("common/dense" + suffix + "/bias:0", bias.name)
def testWithVariableAndVariableScope(self, distribution):
def model_fn():
v0 = variable_scope.variable(1.0, name="var0", aggregation=None)
with variable_scope.variable_scope("common"):
v1 = variable_scope.variable(1.0, name="var1")
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
v2 = variable_scope.variable(
1.0,
name="var2",
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v3 = variable_scope.variable(
1.0,
name="var3",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
return v0, v1, v2, v3
with distribution.scope():
v = variable_scope.variable(1.0, name="var-main0")
self.assertEqual("var-main0:0", v.name)
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(4, len(result))
v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEqual("var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEqual("common/var1:0", v1.name)
self.assertIsInstance(v2, values.SyncOnReadVariable)
self.assertEqual("common/var2:0", v2.name)
self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)
self.assertIsInstance(v3, values.MirroredVariable)
self.assertEqual("common/var3:0", v3.name)
self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)
def testWithGetVariableAndVariableScope(self, distribution):
def model_fn():
v0 = variable_scope.get_variable("var0", [1])
with variable_scope.variable_scope("common"):
v1 = variable_scope.get_variable("var1", [1])
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
v2 = variable_scope.get_variable(
"var2", [1],
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v3 = variable_scope.get_variable(
"var3", [1],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
return v0, v1, v2, v3
with distribution.scope():
with variable_scope.variable_scope("main"):
v = variable_scope.get_variable("var-main0", [1])
self.assertEqual("main/var-main0:0", v.name)
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(4, len(result))
v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEqual("main/var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEqual("main/common/var1:0", v1.name)
self.assertIsInstance(v2, values.SyncOnReadVariable)
self.assertEqual("main/common/var2:0", v2.name)
self.assertEqual(variable_scope.VariableAggregation.SUM,
v2.aggregation)
self.assertIsInstance(v3, values.MirroredVariable)
self.assertEqual("main/common/var3:0", v3.name)
self.assertEqual(variable_scope.VariableAggregation.MEAN,
v3.aggregation)
def testOnlyFirstReplicaUpdatesVariables(self, distribution):
def create_fn():
aggregation = variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
v0 = variable_scope.variable(
2.0,
name="on_read",
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=aggregation)
v1 = variable_scope.variable(
3.0,
name="on_write",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=aggregation)
return v0, v1
devices = ["/device:GPU:0", "/device:CPU:0"]
with distribution.scope():
v0, v1 = distribution.extended.call_for_each_replica(create_fn)
self.evaluate(v0.initializer)
self.assertEqual(2.0, self.evaluate(v0.get(devices[0])))
self.assertEqual(2.0, self.evaluate(v0.get(devices[1])))
self.assertEqual(2.0, self.evaluate(distribution.extended.read_var(v0)))
self.evaluate(v1.initializer)
self.assertEqual(3.0, self.evaluate(v1.get(devices[0])))
self.assertEqual(3.0, self.evaluate(v1.get(devices[1])))
self.assertEqual(3.0, self.evaluate(distribution.extended.read_var(v1)))
def replica_id_plus_one():
return math_ops.cast(_replica_id() + 1, dtype=dtypes.float32)
# Update using the assign_add member function.
def update_member_fn():
update0 = v0.assign_add(5.0 * replica_id_plus_one())
update1 = v1.assign_add(7.0 * replica_id_plus_one())
return update0, update1
update0a, update1a = distribution.extended.call_for_each_replica(
update_member_fn)
# Update "sync on read" variable.
self.evaluate(distribution.group(update0a))
self.assertEqual(2.0 + 5.0, self.evaluate(v0.get(devices[0])))
# Writes are not synchronized for "sync on read" variables,
# so device[1] can end up with a different value.
self.assertEqual(2.0 + 2*5.0, self.evaluate(v0.get(devices[1])))
# Always reads from device 0.
self.assertEqual(2.0 + 5.0, self.evaluate(
distribution.extended.read_var(v0)))
# Update "sync on write" variable.
self.evaluate(distribution.group(update1a))
self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[0])))
# Writes are synchronized for v1, only the argument to assign_add on
# device[0] is used.
self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[1])))
self.assertEqual(3.0 + 7.0, self.evaluate(
distribution.extended.read_var(v1)))
# Update using state_ops.assign_add global function.
def update_state_ops_fn():
update0 = state_ops.assign_add(v0, 11.0 * replica_id_plus_one())
update1 = state_ops.assign_add(v1, 13.0 * replica_id_plus_one())
return update0, update1
update0b, update1b = distribution.extended.call_for_each_replica(
update_state_ops_fn)
self.evaluate(distribution.group(update0b))
# Update "sync on read" variable.
self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(v0.get(devices[0])))
self.assertEqual(2.0 + 2*5.0 + 2*11.0, self.evaluate(v0.get(devices[1])))
self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(
distribution.extended.read_var(v0)))
# Update "sync on write" variable.
self.evaluate(distribution.group(update1b))
self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[0])))
self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[1])))
self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(
distribution.extended.read_var(v1)))
def testNoneSynchronizationWithGetVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please change "
"the `synchronization` for variable: v"):
variable_scope.get_variable(
"v", [1],
synchronization=variable_scope.VariableSynchronization.NONE)
def testNoneSynchronizationWithVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please change "
"the `synchronization` for variable: v"):
variable_scope.variable(
1.0,
name="v",
synchronization=variable_scope.VariableSynchronization.NONE)
def testInvalidSynchronizationWithVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable synchronization mode: Invalid for "
"variable: v"):
variable_scope.variable(1.0, name="v", synchronization="Invalid")
def testInvalidAggregationWithGetVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable aggregation mode: invalid for "
"variable: v"):
variable_scope.get_variable(
"v", [1],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation="invalid")
def testInvalidAggregationWithVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable aggregation mode: invalid for "
"variable: v"):
variable_scope.variable(
1.0,
name="v",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation="invalid")
def testNonMatchingVariableCreation(self, distribution):
self.skipTest("b/123075960")
def model_fn(name):
v = variable_scope.variable(1.0, name=name)
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
names = values.DistributedValues(device_map, ("foo", "bar"))
with self.assertRaises(RuntimeError):
_ = distribution.extended.call_for_each_replica(model_fn, args=(names,))
def testSyncOnReadVariable(self, distribution):
all_v_sum = {}
all_v_mean = {}
components_sum = {}
components_mean = {}
def model_fn():
replica_id = self.evaluate(_replica_id())
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v_mean = variable_scope.variable(
4.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertIsInstance(v_sum, values.SyncOnReadVariable)
self.assertIsInstance(v_mean, values.SyncOnReadVariable)
updates = [v_sum.assign_add(2.0 + replica_id),
v_mean.assign(6.0 * replica_id)]
all_v_sum[replica_id] = v_sum
all_v_mean[replica_id] = v_mean
c_sum = v_sum.get()
c_mean = v_mean.get()
components_sum[replica_id] = c_sum
components_mean[replica_id] = c_mean
self.assertIsNot(v_sum, c_sum)
self.assertIsNot(v_mean, c_mean)
return updates, v_sum, v_mean, c_sum, c_mean
with distribution.scope():
# Create "sum" and "mean" versions of SyncOnReadVariables.
ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (
distribution.extended.call_for_each_replica(model_fn))
# Should see the same wrapping instance in all replicas.
self.assertIs(all_v_sum[0], ret_v_sum)
self.assertIs(all_v_mean[0], ret_v_mean)
self.assertIs(all_v_sum[0], all_v_sum[1])
self.assertIs(all_v_mean[0], all_v_mean[1])
# Regroup should recover the same wrapper.
self.assertIs(ret_v_sum, regrouped_sum)
self.assertIs(ret_v_mean, regrouped_mean)
self.assertIsNot(components_sum[0], components_sum[1])
self.assertIsNot(components_mean[0], components_mean[1])
# Apply updates
self.evaluate(variables.global_variables_initializer())
self.evaluate([y for x in ret_ops # pylint: disable=g-complex-comprehension
for y in distribution.experimental_local_results(x)])
expected_sum = 0.0
expected_mean = 0.0
for i, d in enumerate(distribution.extended.worker_devices):
# Should see different values on different devices.
v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
expected = i + 3.0
self.assertEqual(expected, v_sum_value)
expected_sum += expected
expected = i * 6.0
self.assertEqual(expected, v_mean_value)
expected_mean += expected
expected_mean /= len(distribution.extended.worker_devices)
# Without get(device), should return the value you get by
# applying the reduction across all replicas (whether you use
# read_var(), get(), or nothing).
self.assertEqual(expected_sum, self.evaluate(
distribution.extended.read_var(ret_v_sum)))
self.assertEqual(expected_mean, self.evaluate(
distribution.extended.read_var(ret_v_mean)))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean))
# TODO(priyag): Update this test to work in eager mode as well.
def testDynamicRnnVariables(self, distribution):
def model_fn():
inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
cell_fw = rnn_cell_impl.LSTMCell(300)
cell_bw = rnn_cell_impl.LSTMCell(300)
(outputs, _) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32)
return outputs
with context.graph_mode(), distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
# Two variables are created by the RNN layer.
self.assertEqual(2, len(result))
for v in result:
self.assertIsInstance(v, values.DistributedValues)
_, v1 = distribution.experimental_local_results(v)
self.assertStartsWith(v1._op.name, "replica_1/")
def testSyncOnReadVariableUpdate(self, distribution):
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertIsInstance(v_sum, values.SyncOnReadVariable)
return v_sum
def update(var, value):
return var.assign(value)
with distribution.scope():
ret_v_sum = distribution.extended.call_for_each_replica(model_fn)
# Initialize variables.
self.evaluate(variables.global_variables_initializer())
# Assert that the aggregated value of the sync on read var is the sum
# of the individual values before running the update ops.
self.assertEqual(1.0, self.evaluate(ret_v_sum.get(
distribution.extended.worker_devices[0]).read_value()))
self.assertEqual(2.0, self.evaluate(ret_v_sum))
# Apply updates.
update_ops = distribution.extended.update(
ret_v_sum, update, args=(5.0,), group=False)
self.evaluate(update_ops)
# Assert that the aggregated value of the sync on read vars is the sum
# of the individual values after running the update ops.
self.assertEqual(5.0, self.evaluate(ret_v_sum.get(
distribution.extended.worker_devices[0]).read_value()))
self.assertEqual(10.0, self.evaluate(ret_v_sum))
def testVarDistributeStrategy(self, distribution):
with distribution.scope():
mirrored = variable_scope.variable(1.0)
sync_on_read = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertIs(distribution, mirrored.distribute_strategy)
self.assertIs(distribution, sync_on_read.distribute_strategy)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
# NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
# testing this in eager mode.
def testNameScope(self, distribution):
def model_fn():
with ops.name_scope("foo"):
a = constant_op.constant(1.0, name="a")
ds_context.get_replica_context().merge_call(lambda _: _)
b = constant_op.constant(1.0, name="b")
return a, b
with context.graph_mode(), distribution.scope():
with ops.name_scope("main"):
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = distribution.experimental_local_results(v)
self.assertEqual("main/foo/" + name + ":0", v0.name)
self.assertEqual("main/replica_1/foo/" + name + ":0", v1.name)
def testWithDefaultName(self, distribution):
def model_fn():
with ops.name_scope(None, "foo"):
a = constant_op.constant(1.0, name="a")
ds_context.get_replica_context().merge_call(lambda _: _)
b = constant_op.constant(2.0, name="b")
return a, b
with context.graph_mode(), distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = distribution.experimental_local_results(v)
self.assertEqual("foo/" + name + ":0", v0.name)
self.assertEqual("replica_1/foo/" + name + ":0", v1.name)
# variable_scope.variable() respects name scopes when creating
# variables. On the other hand variable_scope.get_variable() ignores name
# scopes when creating variables. We test both methods of creating variables
# to make sure that we have the same variable names in both cases.
def testNameScopeWithVariable(self, distribution):
def in_cross_replica(_):
c = variable_scope.variable(1.0, name="c")
return c
def model_fn():
b = variable_scope.variable(1.0, name="b")
with ops.name_scope("foo"):
c = ds_context.get_replica_context().merge_call(in_cross_replica)
return b, c
with context.graph_mode(), distribution.scope():
with ops.name_scope("main"):
a = variable_scope.variable(1.0, name="a")
result = distribution.extended.call_for_each_replica(model_fn)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = distribution.experimental_local_results(a)
b0, b1 = distribution.experimental_local_results(result_b)
c0, c1 = distribution.experimental_local_results(result_c)
self.assertEqual("main/a:0", a0.name)
self.assertEqual("main/a/replica_1:0", a1.name)
self.assertEqual("main/b:0", b0.name)
self.assertEqual("main/b/replica_1:0", b1.name)
self.assertEqual("main/foo/c:0", c0.name)
self.assertEqual("main/foo/c/replica_1:0", c1.name)
def testNameScopeWithGetVariable(self, distribution):
def in_cross_replica(_):
c = variable_scope.get_variable("c", [1])
return c
def model_fn():
b = variable_scope.get_variable("b", [1])
with ops.name_scope("foo"):
c = ds_context.get_replica_context().merge_call(in_cross_replica)
return b, c
with context.graph_mode(), distribution.scope():
with ops.name_scope("main"):
a = variable_scope.get_variable("a", [1])
result = distribution.extended.call_for_each_replica(model_fn)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = distribution.experimental_local_results(a)
b0, b1 = distribution.experimental_local_results(result_b)
c0, c1 = distribution.experimental_local_results(result_c)
self.assertEqual("a:0", a0.name)
self.assertEqual("a/replica_1:0", a1.name)
self.assertEqual("b:0", b0.name)
self.assertEqual("b/replica_1:0", b1.name)
self.assertEqual("c:0", c0.name)
self.assertEqual("c/replica_1:0", c1.name)
@combinations.generate(
combinations.combine(
distribution=[
combinations.NamedDistribution(
"Mirrored3Devices",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
required_gpus=2),
combinations.NamedDistribution(
"CoreMirrored3Devices",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.CoreMirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
required_gpus=2)
],
mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
parameterized.TestCase):
def testThreeDevices(self, distribution):
def model_fn():
v = variable_scope.variable(1.0, name="foo")
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEqual("foo:0", result.name)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
# The following tests check assign, assign_add and assign_sub on Mirrored
# variables in replica and cross replica context.
def testAssignMirroredVarReplicaContextWithoutAggregationType(self,
distribution):
# Test that we always have an aggregation type set on the mirrored variable
# if we assign to it in replica mode.
def var_fn():
v = variable_scope.variable(1.0, name="foo")
return v
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
def model_fn():
return mirrored_var.assign(5.0)
with self.assertRaisesRegexp(
ValueError, "You must specify an aggregation method to update a "
"MirroredVariable in Replica Context."):
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
def testAssignMirroredVarReplicaContextWithSum(self, distribution):
# Test that we don't reduce a non-per-replica value with the "sum"
# aggregation type.
def var_fn():
v = variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
return v
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
def model_fn():
return mirrored_var.assign(5.0)
with self.assertRaisesRegexp(
ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
"with the given reduce op ReduceOp.SUM."):
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
def testAssignMirroredVarCrossDeviceContext(self, distribution):
def var_fn():
return variable_scope.variable(1.0, name="foo")
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
self.assertEqual(6.0, mirrored_var_result)
def testAssignMirroredVarReplicaContext(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
ds_context.get_replica_context().replica_id_in_sync_group,
mirrored_var.dtype)
return mirrored_var.assign(value)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(0.5, self.evaluate(mirrored_var))
def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign(5.0)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(5.0, self.evaluate(mirrored_var))
def testAssignAddMirroredVarCrossDeviceContext(self, distribution):
def var_fn():
return variable_scope.variable(1.0, name="foo")
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
# read_value == True
mirrored_var_result = self.evaluate(
mirrored_var.assign_add(6.0, read_value=True))
self.assertEqual(7.0, mirrored_var_result)
self.assertEqual(7.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
self.assertEqual(7.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
# read_value == False
self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
self.assertEqual(9.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
self.assertEqual(9.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
def testAssignAddMirroredVarReplicaContext(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
ds_context.get_replica_context().replica_id_in_sync_group,
mirrored_var.dtype)
return mirrored_var.assign_add(value)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(1.5, self.evaluate(mirrored_var))
def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign_add(5.0)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(6.0, self.evaluate(mirrored_var))
def testAssignSubMirroredVarCrossDeviceContext(self, distribution):
def var_fn():
return variable_scope.variable(5.0, name="foo")
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
self.assertEqual(3.0, mirrored_var_result)
self.assertEqual(3.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
self.assertEqual(3.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
def testAssignSubMirroredVarReplicaContext(self, distribution):
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
ds_context.get_replica_context().replica_id_in_sync_group,
mirrored_var.dtype)
return mirrored_var.assign_sub(value)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(4.5, self.evaluate(mirrored_var))
def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign_sub(1.0)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
def testAssignMirroredVarInitializer(self, distribution):
# This test is not eager compatible since in eager variables are initialized
# upon construction instead of once the initialization op is run.
with context.graph_mode():
def var_fn():
v = variable_scope.variable(1.0, name="foo")
return v
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
self.evaluate(mirrored_var.initializer)
self.assertTrue(self.evaluate(mirrored_var.is_initialized()))
def testAssignReplicaLocalVarInitializer(self, distribution):
# This test is not eager compatible since in eager variables are initialized
# upon construction instead of once the initialization op is run.
with context.graph_mode():
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertIsInstance(v_sum, values.SyncOnReadVariable)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(
model_fn)
self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))
self.evaluate(sync_on_read_var.initializer)
self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
def testAssignReplicaLocalVarSumAggregation(self, distribution):
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
self.evaluate(variables.global_variables_initializer())
# Each replica has a value of 1.0 assigned to it in replica context.
# When we read the value using `read_var` we should see the SUM of each of
# values on each of the replicas.
self.assertEqual(2.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
# Assigning 6.0 in cross replica context will assign a value of
# 6.0/num_replicas to each replica.
tlv_ops = sync_on_read_var.assign(6.0)
self.evaluate(tlv_ops)
# On reading the sync on read var we should get the assigned value back.
# The value on all the replicas are added before being returned by
# `read_var`.
self.assertEqual(6.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
def testAssignReplicaLocalVarMeanAggregation(self, distribution):
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
self.evaluate(variables.global_variables_initializer())
# Each replica has a value of 1.0 assigned to it in replica context.
# When we read the value using `read_var` we should see the MEAN of values
# on all replicas which is the value assigned in replica context.
self.assertEqual(1.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
tlv_ops = sync_on_read_var.assign(6.0)
self.evaluate(tlv_ops)
# On reading the sync on read var we should get the MEAN of all values
# which is equal to the value assigned.
self.assertEqual(6.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
class MockModel(object):
def __init__(self, two_variables=False):
self.variables = []
self.variables.append(variable_scope.variable(1.25, name="dummy_var1"))
if two_variables:
self.variables.append(variable_scope.variable(2.0, name="dummy_var2"))
def __call__(self, factor=2):
x = factor * self.variables[0]
if len(self.variables) > 1:
x += self.variables[1]
return x
class MiniModel(keras_training.Model):
"""Minimal model for mnist.
Useful for testing and debugging on slow TPU simulators.
"""
def __init__(self):
super(MiniModel, self).__init__(name="")
self.fc = keras_core.Dense(1, name="fc", kernel_initializer="ones",
bias_initializer="ones")
def call(self, inputs, training=True):
inputs = array_ops.ones([1, 10])
return self.fc(inputs)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
def _call_and_check(self, distribution, model_fn, inputs, expected_result,
defuns, two_variables=False):
cpu_dev = device_util.canonicalize("CPU:0")
gpu_dev = device_util.canonicalize("GPU:0")
devices = [cpu_dev, gpu_dev]
with distribution.scope():
mock_model = MockModel(two_variables)
self.evaluate(variables.global_variables_initializer())
result = distribution.extended.call_for_each_replica(
model_fn, args=[mock_model] + inputs)
for r in range(len(devices)):
device_result = values.select_replica(r, result)
device_expected_result = values.select_replica(r, expected_result)
self.assertAllClose(device_expected_result,
self.evaluate(device_result))
for defun in defuns:
# `Function`s are specialized to the current device stack, so
# call_for_each has one trace per device. To check that the expected set
# of variables was accessed on each trace, we first retrieve each
# device-specific graph function.
per_replica_graph_functions = (
distribution.extended.call_for_each_replica(
defun.get_concrete_function, args=[mock_model] + inputs))
for device in devices:
graph_function = per_replica_graph_functions.get(device=device)
self.assertEqual(set(mock_model.variables),
set(graph_function.graph.variables))
def testVariableInDefun(self, distribution):
@function.defun
def times_two(mock_model):
return mock_model()
def model_fn(mock_model):
return times_two(mock_model)
self._call_and_check(distribution, model_fn, [], 2.5, [times_two])
def testVariableInNestedDefun(self, distribution):
@function.defun
def times_two(mock_model):
return mock_model()
@function.defun
def two_x_plus_one(mock_model):
return times_two(mock_model) + 1
def model_fn(mock_model):
return two_x_plus_one(mock_model)
self._call_and_check(distribution, model_fn, [], 3.5,
[times_two, two_x_plus_one])
def testTwoVariablesInNestedDefun(self, distribution):
@function.defun
def fn1(mock_model):
return mock_model()
@function.defun
def fn2(mock_model):
return fn1(mock_model) + 1
def model_fn(mock_model):
return fn2(mock_model)
self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],
two_variables=True)
def testGradientTapeOverNestedDefuns(self, distribution):
@function.defun
def fn1(mock_model):
return mock_model()
@function.defun
def fn2(mock_model):
return fn1(mock_model) + 1
def model_fn(mock_model):
with backprop.GradientTape(persistent=True) as gtape:
result = fn2(mock_model)
grads = gtape.gradient(result,
[v.get() for v in mock_model.variables])
return grads
self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],
two_variables=True)
def testPassPerReplica(self, distribution):
@function.defun
def fn1(mock_model, factor):
return mock_model(factor)
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
factors = values.PerReplica(device_map, (5.0, 3.0))
expected_result = values.PerReplica(device_map, (5.0 * 1.25, 3.0 * 1.25))
self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])
def testTrain(self, distribution):
with distribution.scope():
mock_model = MiniModel()
mock_model.call = function.defun(mock_model.call)
def loss_fn(ctx):
del ctx
return mock_model(array_ops.ones([1, 10]))
gradients_fn = backprop.implicit_grad(loss_fn)
gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)
grads_and_vars = distribution.extended.call_for_each_replica(
gradients_fn, args=(None,))
optimizer = gradient_descent.GradientDescentOptimizer(0.25)
update_ops = optimizer._distributed_apply(distribution, grads_and_vars) # pylint: disable=protected-access
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(update_ops)
updated_var_values = self.evaluate(mock_model.variables)
# All variables start at 1.0 and get two updates of 0.25.
self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])
self.assertAllEqual([0.5], updated_var_values[1])
@combinations.generate(
combinations.combine(
distribution=[
combinations.NamedDistribution(
"Mirrored",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.MirroredStrategy(num_gpus_per_worker=
context.num_gpus()),
required_gpus=1),
combinations.NamedDistribution(
"CoreMirrored",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.CoreMirroredStrategy(
mirrored_strategy.all_local_devices()),
required_gpus=1)
],
mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
multi_worker_test_base.MultiWorkerTestBase,
strategy_test_lib.DistributionTestBase):
def _configure_distribution_strategy(self, distribution):
cluster_spec = server_lib.ClusterSpec({
"worker": ["/job:worker/task:0", "/job:worker/task:1"]
})
distribution.configure(cluster_spec=cluster_spec)
def test_num_replicas_in_sync(self, distribution):
self._configure_distribution_strategy(distribution)
# We calculate the total number of gpus across the workers(2) specified in
# the cluster spec.
self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)
def testMinimizeLossGraph(self, distribution):
self._configure_distribution_strategy(distribution)
self._test_minimize_loss_graph(distribution, learning_rate=0.05)
def testDeviceScope(self, distribution):
"""Test the device scope of multi-worker MirroredStrategy."""
self._configure_distribution_strategy(distribution)
with distribution.scope():
a = constant_op.constant(1.)
with ops.device("/cpu:0"):
b = constant_op.constant(1.)
self.assertEqual(a.device, "/job:worker/task:0")
self.assertEqual(b.device, "/job:worker/task:0/device:CPU:0")
def testMakeInputFnIteratorWithDataset(self, distribution):
self._configure_distribution_strategy(distribution)
dataset_fn = lambda: dataset_ops.Dataset.range(100)
num_gpus = context.num_gpus()
num_workers = 2
expected_values = [[i+j for j in range(num_gpus)] * num_workers
for i in range(0, 100, num_gpus)]
with context.graph_mode(), self.cached_session() as sess:
# `expected_input_pipeline_id` is None because the input_fn will be called
# multiple times, each with a different input_pipeline_id.
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=num_workers*num_gpus,
expected_num_input_pipelines=num_workers,
expected_input_pipeline_id=None)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values, sess)
def DISABLED_testMakeInputFnIteratorWithCallable(self, distribution):
self._configure_distribution_strategy(distribution)
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset.make_one_shot_iterator()
return it.get_next
num_gpus = context.num_gpus()
num_workers = 2
expected_values = []
for i in range(0, 100, num_gpus):
expected_values.append([i+j for j in range(num_gpus)] * num_workers)
with context.graph_mode(), self.cached_session() as sess:
# `expected_input_pipeline_id` is None because the input_fn will be called
# multiple times, each with a different input_pipeline_id.
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=num_workers*num_gpus,
expected_num_input_pipelines=num_workers,
expected_input_pipeline_id=None)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values, sess,
test_reinitialize=False)
def testUpdateConfigProto(self, distribution):
distribution.configure(cluster_spec={"worker": ["fake1", "fake2"]})
config_proto = config_pb2.ConfigProto()
new_config = distribution.update_config_proto(config_proto)
# Verify isolate_session_state
self.assertTrue(new_config.isolate_session_state)
class MultiWorkerMirroredStrategyTestWithChief(
multi_worker_test_base.MultiWorkerTestBase,
strategy_test_lib.DistributionTestBase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers and 1 chief."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=2, num_ps=0, has_chief=True)
cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]
def testMinimizeLossGraph(self):
strategy = mirrored_strategy.MirroredStrategy(
num_gpus_per_worker=context.num_gpus())
strategy.configure(cluster_spec=self._cluster_spec)
self._test_minimize_loss_graph(strategy, learning_rate=0.05)
def testMinimizeLossGraphCoreMirroredStrategy(self):
strategy = mirrored_strategy.CoreMirroredStrategy(
mirrored_strategy.all_local_devices())
strategy.configure(cluster_spec=self._cluster_spec)
self._test_minimize_loss_graph(strategy, learning_rate=0.05)
def testMinimizeLossGraphCoreMirroredStrategyWithOneNode(self):
cluster_spec = {}
cluster_spec["chief"] = self._cluster_spec["chief"]
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
strategy = mirrored_strategy.CoreMirroredStrategy()
self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
cross_device_ops_lib.NcclAllReduce)
self._test_minimize_loss_graph(strategy, learning_rate=0.05)
def testInitializeFromTFConfig(self):
tf_config = {"cluster": self._cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
strategy = mirrored_strategy.CoreMirroredStrategy()
self.assertEqual(
max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)
def testSummaryForReplicaZeroOnly(self):
strategy = mirrored_strategy.CoreMirroredStrategy(
mirrored_strategy.all_local_devices())
strategy.configure(cluster_spec=self._cluster_spec)
self._test_summary_for_replica_zero_only(strategy)
def _replica_id():
replica_id = ds_context.get_replica_context().replica_id_in_sync_group
if not isinstance(replica_id, ops.Tensor):
replica_id = constant_op.constant(replica_id)
return replica_id
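
# A minimal sketch, assuming two local devices, of the pattern most tests above
# exercise: create a variable under the strategy scope via
# `call_for_each_replica` (`strategy` and `v` are illustrative names):
#
#   strategy = mirrored_strategy.MirroredStrategy(
#       ["/device:GPU:0", "/device:CPU:0"])
#   with strategy.scope():
#     v = strategy.extended.call_for_each_replica(
#         lambda: variable_scope.variable(1.0, name="v"))
#   # `v` is a values.MirroredVariable with one component per device.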
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.contrib.distribute.python.combinations.combine",
"tensorflow.python.framework.constant_op.constant",
"numpy.ones",
"tensorflow.python.distribute.values.select_replica",
"tensorflow.python.framework.ops.device",
"json.dumps",
"tensorflow.python.training.optimizer.get_filtered_grad_fn",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy",
"tensorflow.python.eager.test.main",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.contrib.distribute.python.multi_worker_test_base.create_in_process_cluster",
"tensorflow.python.eager.backprop.implicit_grad",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.ops.rnn.bidirectional_dynamic_rnn",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.distribute.values.DistributedValues",
"tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy",
"tensorflow.python.distribute.values.PerReplica",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.eager.function.defun",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices",
"tensorflow.python.ops.rnn_cell_impl.LSTMCell",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.distribute.values.ReplicaDeviceMap"
] |
[((2753, 3033), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n mirrored_strategy_with_two_gpus, combinations.\n core_mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_two_gpus]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n mirrored_strategy_with_two_gpus, combinations.\n core_mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_two_gpus], mode=['graph', 'eager'])\n", (2773, 3033), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((6871, 7141), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_one_cpu, combinations.\n mirrored_strategy_with_one_gpu, combinations.\n core_mirrored_strategy_with_one_cpu, combinations.\n core_mirrored_strategy_with_one_gpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_one_cpu, combinations.\n mirrored_strategy_with_one_gpu, combinations.\n core_mirrored_strategy_with_one_cpu, combinations.\n core_mirrored_strategy_with_one_gpu], mode=['graph', 'eager'])\n", (6891, 7141), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((9965, 10138), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph', 'eager'])\n", (9985, 10138), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((12011, 12184), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph', 'eager'])\n", (12031, 12184), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((33197, 33361), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph'])\n", (33217, 33361), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((39025, 39198), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph', 'eager'])\n", (39045, 39198), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((48480, 48653), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], 
{'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph', 'eager'])\n", (48500, 48653), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((50419, 50592), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph', 'eager'])\n", (50439, 50592), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((54020, 54193), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph', 'eager']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph', 'eager'])\n", (54040, 54193), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((65599, 65610), 'tensorflow.python.eager.test.main', 'test.main', ([], {}), '()\n', (65608, 65610), False, 'from tensorflow.python.eager import test\n'), ((3276, 3303), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (3301, 3303), False, 'from tensorflow.python.eager import context\n'), ((7447, 7474), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (7472, 7474), False, 'from tensorflow.python.eager import context\n'), ((8599, 8763), 'tensorflow.contrib.distribute.python.combinations.combine', 'combinations.combine', ([], {'distribution': '[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu]', 'mode': "['graph']"}), "(distribution=[combinations.\n mirrored_strategy_with_gpu_and_cpu, combinations.\n core_mirrored_strategy_with_gpu_and_cpu], mode=['graph'])\n", (8619, 8763), False, 'from tensorflow.contrib.distribute.python import combinations\n'), ((10455, 10496), 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), '()\n', (10494, 10496), False, 'from tensorflow.python.framework import ops\n'), ((53775, 53862), 'tensorflow.python.keras.layers.core.Dense', 'keras_core.Dense', (['(1)'], {'name': '"""fc"""', 'kernel_initializer': '"""ones"""', 'bias_initializer': '"""ones"""'}), "(1, name='fc', kernel_initializer='ones', bias_initializer=\n 'ones')\n", (53791, 53862), True, 'from tensorflow.python.keras.layers import core as keras_core\n'), ((53944, 53967), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 10]'], {}), '([1, 10])\n', (53958, 53967), False, 'from tensorflow.python.ops import array_ops\n'), ((54403, 54436), 'tensorflow.python.distribute.device_util.canonicalize', 'device_util.canonicalize', (['"""CPU:0"""'], {}), "('CPU:0')\n", (54427, 54436), False, 'from tensorflow.python.distribute import device_util\n'), ((54451, 54484), 'tensorflow.python.distribute.device_util.canonicalize', 'device_util.canonicalize', (['"""GPU:0"""'], {}), 
"('GPU:0')\n", (54475, 54484), False, 'from tensorflow.python.distribute import device_util\n'), ((57524, 57583), 'tensorflow.python.distribute.values.ReplicaDeviceMap', 'values.ReplicaDeviceMap', (["('/device:CPU:0', '/device:GPU:0')"], {}), "(('/device:CPU:0', '/device:GPU:0'))\n", (57547, 57583), False, 'from tensorflow.python.distribute import values\n'), ((57598, 57639), 'tensorflow.python.distribute.values.PerReplica', 'values.PerReplica', (['device_map', '(5.0, 3.0)'], {}), '(device_map, (5.0, 3.0))\n', (57615, 57639), False, 'from tensorflow.python.distribute import values\n'), ((57662, 57717), 'tensorflow.python.distribute.values.PerReplica', 'values.PerReplica', (['device_map', '(5.0 * 1.25, 3.0 * 1.25)'], {}), '(device_map, (5.0 * 1.25, 3.0 * 1.25))\n', (57679, 57717), False, 'from tensorflow.python.distribute import values\n'), ((59772, 59857), 'tensorflow.python.training.server_lib.ClusterSpec', 'server_lib.ClusterSpec', (["{'worker': ['/job:worker/task:0', '/job:worker/task:1']}"], {}), "({'worker': ['/job:worker/task:0', '/job:worker/task:1']}\n )\n", (59794, 59857), False, 'from tensorflow.python.training import server_lib\n'), ((61004, 61022), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (61020, 61022), False, 'from tensorflow.python.eager import context\n'), ((62066, 62084), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (62082, 62084), False, 'from tensorflow.python.eager import context\n'), ((63036, 63060), 'tensorflow.core.protobuf.config_pb2.ConfigProto', 'config_pb2.ConfigProto', ([], {}), '()\n', (63058, 63060), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((63482, 63575), 'tensorflow.contrib.distribute.python.multi_worker_test_base.create_in_process_cluster', 'multi_worker_test_base.create_in_process_cluster', ([], {'num_workers': '(2)', 'num_ps': '(0)', 'has_chief': '(True)'}), '(num_workers=2, num_ps=0,\n has_chief=True)\n', (63530, 63575), False, 'from tensorflow.contrib.distribute.python import multi_worker_test_base\n'), ((65395, 65427), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (65425, 65427), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((65515, 65547), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['replica_id'], {}), '(replica_id)\n', (65535, 65547), False, 'from tensorflow.python.framework import constant_op\n'), ((4553, 4582), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(10)'], {}), '(10)\n', (4578, 4582), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((9578, 9598), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (9596, 9598), False, 'from tensorflow.python.eager import context\n'), ((9642, 9700), 'tensorflow.python.ops.variable_scope.variable_creator_scope', 'variable_scope.variable_creator_scope', (['main_thread_creator'], {}), '(main_thread_creator)\n', (9679, 9700), False, 'from tensorflow.python.ops import variable_scope\n'), ((10395, 10436), 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), '()\n', (10434, 10436), False, 'from tensorflow.python.framework import ops\n'), ((10545, 10586), 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), '()\n', 
(10584, 10586), False, 'from tensorflow.python.framework import ops\n'), ((10973, 11014), 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), '()\n', (11012, 11014), False, 'from tensorflow.python.framework import ops\n'), ((12800, 12840), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(2.0)'], {'name': '"""bar"""'}), "(2.0, name='bar')\n", (12823, 12840), False, 'from tensorflow.python.ops import variable_scope\n'), ((13003, 13043), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (13026, 13043), False, 'from tensorflow.python.ops import variable_scope\n'), ((13485, 13525), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (13508, 13525), False, 'from tensorflow.python.ops import variable_scope\n'), ((13845, 13873), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {}), '(1.0)\n', (13868, 13873), False, 'from tensorflow.python.ops import variable_scope\n'), ((17405, 17464), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var0"""', 'aggregation': 'None'}), "(1.0, name='var0', aggregation=None)\n", (17428, 17464), False, 'from tensorflow.python.ops import variable_scope\n'), ((18226, 18272), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var-main0"""'}), "(1.0, name='var-main0')\n", (18249, 18272), False, 'from tensorflow.python.ops import variable_scope\n'), ((19131, 19171), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var0"""', '[1]'], {}), "('var0', [1])\n", (19158, 19171), False, 'from tensorflow.python.ops import variable_scope\n'), ((21041, 21179), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(2.0)'], {'name': '"""on_read"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'aggregation'}), "(2.0, name='on_read', synchronization=variable_scope\n .VariableSynchronization.ON_READ, aggregation=aggregation)\n", (21064, 21179), False, 'from tensorflow.python.ops import variable_scope\n'), ((21227, 21367), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(3.0)'], {'name': '"""on_write"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': 'aggregation'}), "(3.0, name='on_write', synchronization=\n variable_scope.VariableSynchronization.ON_WRITE, aggregation=aggregation)\n", (21250, 21367), False, 'from tensorflow.python.ops import variable_scope\n'), ((26672, 26711), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': 'name'}), '(1.0, name=name)\n', (26695, 26711), False, 'from tensorflow.python.ops import variable_scope\n'), ((26841, 26900), 'tensorflow.python.distribute.values.ReplicaDeviceMap', 'values.ReplicaDeviceMap', (["('/device:CPU:0', '/device:GPU:0')"], {}), "(('/device:CPU:0', '/device:GPU:0'))\n", (26864, 26900), False, 'from tensorflow.python.distribute import values\n'), ((26915, 26967), 'tensorflow.python.distribute.values.DistributedValues', 'values.DistributedValues', (['device_map', "('foo', 'bar')"], {}), "(device_map, ('foo', 'bar'))\n", (26939, 26967), False, 'from tensorflow.python.distribute import values\n'), ((27315, 27469), 
'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n', (27338, 27469), False, 'from tensorflow.python.ops import variable_scope\n'), ((27506, 27661), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(4.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), '(4.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.MEAN)\n', (27529, 27661), False, 'from tensorflow.python.ops import variable_scope\n'), ((30653, 30712), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])'], {}), '(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])\n', (30673, 30712), False, 'from tensorflow.python.framework import constant_op\n'), ((30729, 30756), 'tensorflow.python.ops.rnn_cell_impl.LSTMCell', 'rnn_cell_impl.LSTMCell', (['(300)'], {}), '(300)\n', (30751, 30756), False, 'from tensorflow.python.ops import rnn_cell_impl\n'), ((30773, 30800), 'tensorflow.python.ops.rnn_cell_impl.LSTMCell', 'rnn_cell_impl.LSTMCell', (['(300)'], {}), '(300)\n', (30795, 30800), False, 'from tensorflow.python.ops import rnn_cell_impl\n'), ((30822, 30899), 'tensorflow.python.ops.rnn.bidirectional_dynamic_rnn', 'rnn.bidirectional_dynamic_rnn', (['cell_fw', 'cell_bw', 'inputs'], {'dtype': 'dtypes.float32'}), '(cell_fw, cell_bw, inputs, dtype=dtypes.float32)\n', (30851, 30899), False, 'from tensorflow.python.ops import rnn\n'), ((30972, 30992), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (30990, 30992), False, 'from tensorflow.python.eager import context\n'), ((31465, 31619), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n', (31488, 31619), False, 'from tensorflow.python.ops import variable_scope\n'), ((32876, 32904), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {}), '(1.0)\n', (32899, 32904), False, 'from tensorflow.python.ops import variable_scope\n'), ((32926, 33023), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ)\n', (32949, 33023), False, 'from tensorflow.python.ops import variable_scope\n'), ((33827, 33847), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (33845, 33847), False, 'from tensorflow.python.eager import context\n'), ((34622, 34642), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (34640, 34642), False, 'from tensorflow.python.eager import context\n'), ((35443, 35481), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""c"""'}), "(1.0, name='c')\n", (35466, 35481), False, 'from tensorflow.python.ops import 
variable_scope\n'), ((35528, 35566), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""b"""'}), "(1.0, name='b')\n", (35551, 35566), False, 'from tensorflow.python.ops import variable_scope\n'), ((35703, 35723), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (35721, 35723), False, 'from tensorflow.python.eager import context\n'), ((36672, 36709), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""c"""', '[1]'], {}), "('c', [1])\n", (36699, 36709), False, 'from tensorflow.python.ops import variable_scope\n'), ((36756, 36793), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""b"""', '[1]'], {}), "('b', [1])\n", (36783, 36793), False, 'from tensorflow.python.ops import variable_scope\n'), ((36930, 36950), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (36948, 36950), False, 'from tensorflow.python.eager import context\n'), ((38674, 38714), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (38697, 38714), False, 'from tensorflow.python.ops import variable_scope\n'), ((39693, 39733), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (39716, 39733), False, 'from tensorflow.python.ops import variable_scope\n'), ((40547, 40644), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.SUM)\n", (40570, 40644), False, 'from tensorflow.python.ops import variable_scope\n'), ((41360, 41400), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (41383, 41400), False, 'from tensorflow.python.ops import variable_scope\n'), ((41904, 42002), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (41927, 42002), False, 'from tensorflow.python.ops import variable_scope\n'), ((42794, 42892), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (42817, 42892), False, 'from tensorflow.python.ops import variable_scope\n'), ((43540, 43580), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (43563, 43580), False, 'from tensorflow.python.ops import variable_scope\n'), ((44556, 44654), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (44579, 44654), False, 'from tensorflow.python.ops import variable_scope\n'), ((45453, 45551), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', 
aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (45476, 45551), False, 'from tensorflow.python.ops import variable_scope\n'), ((46203, 46243), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(5.0)'], {'name': '"""foo"""'}), "(5.0, name='foo')\n", (46226, 46243), False, 'from tensorflow.python.ops import variable_scope\n'), ((46910, 47008), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(5.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(5.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (46933, 47008), False, 'from tensorflow.python.ops import variable_scope\n'), ((47807, 47905), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(5.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(5.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (47830, 47905), False, 'from tensorflow.python.ops import variable_scope\n'), ((48959, 48979), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (48977, 48979), False, 'from tensorflow.python.eager import context\n'), ((49663, 49683), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (49681, 49683), False, 'from tensorflow.python.eager import context\n'), ((50763, 50917), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n', (50786, 50917), False, 'from tensorflow.python.ops import variable_scope\n'), ((52069, 52224), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.MEAN)\n', (52092, 52224), False, 'from tensorflow.python.ops import variable_scope\n'), ((53265, 53313), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.25)'], {'name': '"""dummy_var1"""'}), "(1.25, name='dummy_var1')\n", (53288, 53313), False, 'from tensorflow.python.ops import variable_scope\n'), ((57921, 57952), 'tensorflow.python.eager.function.defun', 'function.defun', (['mock_model.call'], {}), '(mock_model.call)\n', (57935, 57952), False, 'from tensorflow.python.eager import function\n'), ((58067, 58098), 'tensorflow.python.eager.backprop.implicit_grad', 'backprop.implicit_grad', (['loss_fn'], {}), '(loss_fn)\n', (58089, 58098), False, 'from tensorflow.python.eager import backprop\n'), ((58120, 58168), 'tensorflow.python.training.optimizer.get_filtered_grad_fn', 'optimizer_lib.get_filtered_grad_fn', (['gradients_fn'], {}), '(gradients_fn)\n', (58154, 58168), True, 'from tensorflow.python.training import optimizer as optimizer_lib\n'), ((58294, 58341), 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer', 'gradient_descent.GradientDescentOptimizer', (['(0.25)'], {}), '(0.25)\n', (58335, 58341), False, 'from tensorflow.python.training import gradient_descent\n'), ((60596, 60621), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', 
(['(1.0)'], {}), '(1.0)\n', (60616, 60621), False, 'from tensorflow.python.framework import constant_op\n'), ((60958, 60988), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(100)'], {}), '(100)\n', (60983, 60988), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((61179, 61199), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (61197, 61199), False, 'from tensorflow.python.eager import context\n'), ((61951, 61981), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(100)'], {}), '(100)\n', (61976, 61981), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((62254, 62274), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (62272, 62274), False, 'from tensorflow.python.eager import context\n'), ((64024, 64061), 'tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices', 'mirrored_strategy.all_local_devices', ([], {}), '()\n', (64059, 64061), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((64501, 64541), 'tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy', 'mirrored_strategy.CoreMirroredStrategy', ([], {}), '()\n', (64539, 64541), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((64964, 65004), 'tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy', 'mirrored_strategy.CoreMirroredStrategy', ([], {}), '()\n', (65002, 65004), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((65209, 65246), 'tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices', 'mirrored_strategy.all_local_devices', ([], {}), '()\n', (65244, 65246), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((9072, 9128), 'tensorflow.python.ops.variable_scope.variable_creator_scope', 'variable_scope.variable_creator_scope', (['thread_creator_fn'], {}), '(thread_creator_fn)\n', (9109, 9128), False, 'from tensorflow.python.ops import variable_scope\n'), ((9185, 9213), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {}), '(1.0)\n', (9208, 9213), False, 'from tensorflow.python.ops import variable_scope\n'), ((14709, 14753), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo/bar"""'}), "(1.0, name='foo/bar')\n", (14732, 14753), False, 'from tensorflow.python.ops import variable_scope\n'), ((14771, 14817), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo_1/bar"""'}), "(1.0, name='foo_1/bar')\n", (14794, 14817), False, 'from tensorflow.python.ops import variable_scope\n'), ((14835, 14883), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo_1/bar_1"""'}), "(1.0, name='foo_1/bar_1')\n", (14858, 14883), False, 'from tensorflow.python.ops import variable_scope\n'), ((14901, 14947), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo/bar_1"""'}), "(1.0, name='foo/bar_1')\n", (14924, 14947), False, 'from tensorflow.python.ops import variable_scope\n'), ((16128, 16167), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""common"""'], {}), "('common')\n", (16157, 16167), False, 'from tensorflow.python.ops import variable_scope\n'), ((16186, 16199), 
'tensorflow.python.layers.core.Dense', 'core.Dense', (['(1)'], {}), '(1)\n', (16196, 16199), False, 'from tensorflow.python.layers import core\n'), ((16242, 16255), 'tensorflow.python.layers.core.Dense', 'core.Dense', (['(1)'], {}), '(1)\n', (16252, 16255), False, 'from tensorflow.python.layers import core\n'), ((16439, 16452), 'tensorflow.python.layers.core.Dense', 'core.Dense', (['(1)'], {}), '(1)\n', (16449, 16452), False, 'from tensorflow.python.layers import core\n'), ((17476, 17515), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""common"""'], {}), "('common')\n", (17505, 17515), False, 'from tensorflow.python.ops import variable_scope\n'), ((17530, 17571), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var1"""'}), "(1.0, name='var1')\n", (17553, 17571), False, 'from tensorflow.python.ops import variable_scope\n'), ((17726, 17893), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var2"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), "(1.0, name='var2', synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n", (17749, 17893), False, 'from tensorflow.python.ops import variable_scope\n'), ((17946, 18115), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var3"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='var3', synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (17969, 18115), False, 'from tensorflow.python.ops import variable_scope\n'), ((19183, 19222), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""common"""'], {}), "('common')\n", (19212, 19222), False, 'from tensorflow.python.ops import variable_scope\n'), ((19237, 19277), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var1"""', '[1]'], {}), "('var1', [1])\n", (19264, 19277), False, 'from tensorflow.python.ops import variable_scope\n'), ((19432, 19598), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var2"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), "('var2', [1], synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n", (19459, 19598), False, 'from tensorflow.python.ops import variable_scope\n'), ((19639, 19807), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var3"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "('var3', [1], synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (19666, 19807), False, 'from tensorflow.python.ops import variable_scope\n'), ((19907, 19944), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""main"""'], {}), "('main')\n", (19936, 19944), False, 'from tensorflow.python.ops import variable_scope\n'), ((19958, 20003), 
'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var-main0"""', '[1]'], {}), "('var-main0', [1])\n", (19985, 20003), False, 'from tensorflow.python.ops import variable_scope\n'), ((24824, 24927), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""v"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.NONE'}), "('v', [1], synchronization=variable_scope.\n VariableSynchronization.NONE)\n", (24851, 24927), False, 'from tensorflow.python.ops import variable_scope\n'), ((25284, 25388), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""v"""', 'synchronization': 'variable_scope.VariableSynchronization.NONE'}), "(1.0, name='v', synchronization=variable_scope.\n VariableSynchronization.NONE)\n", (25307, 25388), False, 'from tensorflow.python.ops import variable_scope\n'), ((25665, 25730), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""v"""', 'synchronization': '"""Invalid"""'}), "(1.0, name='v', synchronization='Invalid')\n", (25688, 25730), False, 'from tensorflow.python.ops import variable_scope\n'), ((25970, 26100), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""v"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': '"""invalid"""'}), "('v', [1], synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation='invalid')\n", (25997, 26100), False, 'from tensorflow.python.ops import variable_scope\n'), ((26369, 26500), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""v"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': '"""invalid"""'}), "(1.0, name='v', synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation='invalid')\n", (26392, 26500), False, 'from tensorflow.python.ops import variable_scope\n'), ((29048, 29088), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (29086, 29088), False, 'from tensorflow.python.ops import variables\n'), ((31937, 31977), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (31975, 31977), False, 'from tensorflow.python.ops import variables\n'), ((33615, 33636), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""foo"""'], {}), "('foo')\n", (33629, 33636), False, 'from tensorflow.python.framework import ops\n'), ((33650, 33685), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""a"""'}), "(1.0, name='a')\n", (33670, 33685), False, 'from tensorflow.python.framework import constant_op\n'), ((33763, 33798), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""b"""'}), "(1.0, name='b')\n", (33783, 33798), False, 'from tensorflow.python.framework import constant_op\n'), ((33882, 33904), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""main"""'], {}), "('main')\n", (33896, 33904), False, 'from tensorflow.python.framework import ops\n'), ((34404, 34431), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['None', '"""foo"""'], {}), "(None, 'foo')\n", (34418, 34431), False, 'from tensorflow.python.framework import ops\n'), ((34445, 34480), 
'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""a"""'}), "(1.0, name='a')\n", (34465, 34480), False, 'from tensorflow.python.framework import constant_op\n'), ((34558, 34593), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(2.0)'], {'name': '"""b"""'}), "(2.0, name='b')\n", (34578, 34593), False, 'from tensorflow.python.framework import constant_op\n'), ((35578, 35599), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""foo"""'], {}), "('foo')\n", (35592, 35599), False, 'from tensorflow.python.framework import ops\n'), ((35758, 35780), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""main"""'], {}), "('main')\n", (35772, 35780), False, 'from tensorflow.python.framework import ops\n'), ((35794, 35832), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""a"""'}), "(1.0, name='a')\n", (35817, 35832), False, 'from tensorflow.python.ops import variable_scope\n'), ((36805, 36826), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""foo"""'], {}), "('foo')\n", (36819, 36826), False, 'from tensorflow.python.framework import ops\n'), ((36985, 37007), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""main"""'], {}), "('main')\n", (36999, 37007), False, 'from tensorflow.python.framework import ops\n'), ((37021, 37058), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""a"""', '[1]'], {}), "('a', [1])\n", (37048, 37058), False, 'from tensorflow.python.ops import variable_scope\n'), ((39941, 39981), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (39979, 39981), False, 'from tensorflow.python.ops import variables\n'), ((40858, 40898), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (40896, 40898), False, 'from tensorflow.python.ops import variables\n'), ((41593, 41633), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (41631, 41633), False, 'from tensorflow.python.ops import variables\n'), ((42201, 42241), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (42239, 42241), False, 'from tensorflow.python.ops import variables\n'), ((43091, 43131), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (43129, 43131), False, 'from tensorflow.python.ops import variables\n'), ((43773, 43813), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (43811, 43813), False, 'from tensorflow.python.ops import variables\n'), ((44853, 44893), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (44891, 44893), False, 'from tensorflow.python.ops import variables\n'), ((45750, 45790), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (45788, 45790), False, 'from tensorflow.python.ops import variables\n'), ((46436, 46476), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (46474, 46476), False, 'from tensorflow.python.ops import 
variables\n'), ((47207, 47247), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (47245, 47247), False, 'from tensorflow.python.ops import variables\n'), ((48104, 48144), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (48142, 48144), False, 'from tensorflow.python.ops import variables\n'), ((49013, 49053), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (49036, 49053), False, 'from tensorflow.python.ops import variable_scope\n'), ((49723, 49877), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n', (49746, 49877), False, 'from tensorflow.python.ops import variable_scope\n'), ((51162, 51202), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (51200, 51202), False, 'from tensorflow.python.ops import variables\n'), ((52469, 52509), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (52507, 52509), False, 'from tensorflow.python.ops import variables\n'), ((53365, 53412), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(2.0)'], {'name': '"""dummy_var2"""'}), "(2.0, name='dummy_var2')\n", (53388, 53412), False, 'from tensorflow.python.ops import variable_scope\n'), ((54614, 54654), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (54652, 54654), False, 'from tensorflow.python.ops import variables\n'), ((54825, 54857), 'tensorflow.python.distribute.values.select_replica', 'values.select_replica', (['r', 'result'], {}), '(r, result)\n', (54846, 54857), False, 'from tensorflow.python.distribute import values\n'), ((54891, 54932), 'tensorflow.python.distribute.values.select_replica', 'values.select_replica', (['r', 'expected_result'], {}), '(r, expected_result)\n', (54912, 54932), False, 'from tensorflow.python.distribute import values\n'), ((57043, 57081), 'tensorflow.python.eager.backprop.GradientTape', 'backprop.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (57064, 57081), False, 'from tensorflow.python.eager import backprop\n'), ((58470, 58497), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (58495, 58497), False, 'from tensorflow.python.eager import context\n'), ((60155, 60173), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (60171, 60173), False, 'from tensorflow.python.eager import context\n'), ((60632, 60652), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (60642, 60652), False, 'from tensorflow.python.framework import ops\n'), ((60666, 60691), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (60686, 60691), False, 'from tensorflow.python.framework import constant_op\n'), ((63764, 63782), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (63780, 63782), False, 'from 
tensorflow.python.eager import context\n'), ((5192, 5220), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(2)'], {}), '(2)\n', (5217, 5220), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((5254, 5283), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(10)'], {}), '(10)\n', (5279, 5283), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((10893, 10919), 'tensorflow.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', (['"""fg"""'], {}), "('fg')\n", (10913, 10919), False, 'from tensorflow.python.framework import func_graph\n'), ((11711, 11743), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (11741, 11743), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((12847, 12879), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (12877, 12879), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((12929, 12955), 'tensorflow.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', (['"""fg"""'], {}), "('fg')\n", (12949, 12955), False, 'from tensorflow.python.framework import func_graph\n'), ((13532, 13564), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (13562, 13564), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((13880, 13912), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (13910, 13912), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((14304, 14336), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (14334, 14336), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((14955, 14987), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (14985, 14987), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((15684, 15716), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (15714, 15716), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((26718, 26750), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (26748, 26750), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((38721, 38753), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (38751, 38753), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((58020, 58043), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 10]'], {}), '([1, 10])\n', (58034, 58043), False, 'from tensorflow.python.ops import array_ops\n'), ((58521, 58561), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), 
'()\n', (58559, 58561), False, 'from tensorflow.python.ops import variables\n'), ((58757, 58773), 'numpy.ones', 'np.ones', (['[10, 1]'], {}), '([10, 1])\n', (58764, 58773), True, 'import numpy as np\n'), ((64459, 64480), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (64469, 64480), False, 'import json\n'), ((64922, 64943), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (64932, 64943), False, 'import json\n'), ((9299, 9331), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (9329, 9331), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((16365, 16397), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (16395, 16397), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((16687, 16728), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors', 'dataset_ops.Dataset.from_tensors', (['[[1.0]]'], {}), '([[1.0]])\n', (16719, 16728), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((17656, 17688), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (17686, 17688), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((19362, 19394), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (19392, 19394), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((33694, 33726), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (33724, 33726), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((34489, 34521), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (34519, 34521), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((35613, 35645), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (35643, 35645), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((36840, 36872), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (36870, 36872), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((37991, 38082), 'tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy', 'mirrored_strategy.MirroredStrategy', (["['/device:GPU:0', '/device:GPU:1', '/device:CPU:0']"], {}), "(['/device:GPU:0', '/device:GPU:1',\n '/device:CPU:0'])\n", (38025, 38082), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((38291, 38386), 'tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy', 'mirrored_strategy.CoreMirroredStrategy', (["['/device:GPU:0', '/device:GPU:1', '/device:CPU:0']"], {}), "(['/device:GPU:0', '/device:GPU:1',\n '/device:CPU:0'])\n", (38329, 38386), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((42366, 42398), 
'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (42396, 42398), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((45018, 45050), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (45048, 45050), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((47372, 47404), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (47402, 47404), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((65043, 65061), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (65059, 65061), False, 'from tensorflow.python.eager import context\n'), ((59451, 59488), 'tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices', 'mirrored_strategy.all_local_devices', ([], {}), '()\n', (59486, 59488), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((59188, 59206), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (59204, 59206), False, 'from tensorflow.python.eager import context\n')]
|
import numpy
import argparse
from matplotlib import colors
from src.powerspectrum import from_frequency_to_eta
from src.powerspectrum import fiducial_eor_power_spectrum
from src.radiotelescope import RadioTelescope
from src.plottools import plot_2dpower_spectrum
from src.plottools import plot_power_contours
from src.generaltools import from_jansky_to_milikelvin
from src.covariance import calibrated_residual_error
from src.covariance import compute_weights
from src.util import redundant_baseline_finder


def main(labelfontsize=16, ticksize=11):
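    # Compare calibrated residual power spectra (sky-model errors, beam variations, and both
    # combined) against the fiducial EoR signal for the HERA-128 layout.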
output_path = "/home/ronniyjoseph/Sync/PhD/Thesis/ThesisTex/images/chapter_7/"
contour_levels = numpy.array([1e0, 1e1, 1e2])
# telescope_position_path = "./Data/MWA_Compact_Coordinates.txt"
# tile_diameter = 4
# fraction_broken = 0.3
# model_limit = 1e-1
telescope_position_path = "./Data/HERA_128.txt"
tile_diameter = 14
fraction_broken = 0.3
model_limit = 1e-1
k_perp_range = numpy.array([1e-4, 1.1e-1])
u_range = numpy.logspace(0, numpy.log10(500), 50)
frequency_range = numpy.linspace(135, 165, 251) * 1e6
eta = from_frequency_to_eta(frequency_range)
eor_power_spectrum = fiducial_eor_power_spectrum(u_range, eta)
telescope = RadioTelescope(load=True, path=telescope_position_path)
redundant_table = telescope.baseline_table
# redundant_table = redundant_baseline_finder(telescope.baseline_table)
weights = compute_weights(u_range, redundant_table.u_coordinates,
redundant_table.v_coordinates)
    sky_clocations = None  # [(6e-2, 0.21), (4e-2, 0.13), (3e-2, 0.07)]
beam_clocations = sky_clocations
total_clocations = sky_clocations
# print(numpy.max(numpy.sqrt(redundant_table.u_coordinates**2 + redundant_table.v_coordinates**2)))
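    # Residual power after calibration: sky-model incompleteness only, beam variations only, and both combined.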
    sky_calibrated = calibrated_residual_error(u=u_range, nu=frequency_range, residuals='sky',
                                               calibration_type='sky', weights=weights,
                                               tile_diameter=tile_diameter,
                                               broken_baselines_weight=fraction_broken, model_limit=model_limit)
    beam_calibrated = calibrated_residual_error(u=u_range, nu=frequency_range, residuals='beam',
                                                calibration_type='sky', weights=weights,
                                                tile_diameter=tile_diameter,
                                                broken_baselines_weight=fraction_broken, model_limit=model_limit)
    total_calibrated = calibrated_residual_error(u=u_range, nu=frequency_range, residuals='both',
                                                 calibration_type='sky', weights=weights,
                                                 tile_diameter=tile_diameter,
                                                 broken_baselines_weight=fraction_broken, model_limit=model_limit)
figure, axes = pyplot.subplots(1, 3, figsize=(15, 5))
ps_norm = colors.LogNorm(vmin=1e3, vmax=1e15)
plot_2dpower_spectrum(u_range, eta, frequency_range, sky_calibrated, title="Sky Error", axes=axes[0],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=False,
xlabel_show=True, norm=ps_norm, ylabel_show=True)
plot_2dpower_spectrum(u_range, eta, frequency_range, beam_calibrated, title="Beam Variations", axes=axes[1],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=False,
xlabel_show=True, norm=ps_norm, ylabel_show=False)
plot_2dpower_spectrum(u_range, eta, frequency_range, total_calibrated, title="Total Error", axes=axes[2],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=True,
xlabel_show=True, norm=ps_norm, ylabel_show=False, zlabel_show=True)
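    # Overlay contours where the residual power equals 1, 10, and 100 times the fiducial EoR power spectrum.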
plot_power_contours(u_range, eta, frequency_range, from_jansky_to_milikelvin(sky_calibrated,
frequency_range)/eor_power_spectrum,
axes=axes[0], ratio=True, axes_label_font=labelfontsize, tickfontsize=ticksize, xlabel_show=True,
norm=ps_norm, ylabel_show=True, contour_levels=contour_levels, contour_label_locs=sky_clocations)
plot_power_contours(u_range, eta, frequency_range, from_jansky_to_milikelvin(beam_calibrated, frequency_range)/eor_power_spectrum,
axes=axes[1], ratio=True, axes_label_font=labelfontsize, tickfontsize=ticksize, xlabel_show=True,
norm=ps_norm, ylabel_show=False, contour_levels=contour_levels, contour_label_locs=beam_clocations)
plot_power_contours(u_range, eta, frequency_range, from_jansky_to_milikelvin(total_calibrated, frequency_range)/eor_power_spectrum,
axes=axes[2], ratio=True, axes_label_font=labelfontsize, tickfontsize=ticksize, xlabel_show=True,
norm=ps_norm, ylabel_show=False, contour_levels=contour_levels, contour_label_locs=total_clocations)
pyplot.tight_layout()
# pyplot.savefig(output_path + "Calibrated_Residuals_Sky_MWA.pdf")
pyplot.show()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ssh", action="store_true", dest="ssh_key", default=False)
params = parser.parse_args()
import matplotlib
if params.ssh_key:
matplotlib.use("Agg")
from matplotlib import pyplot
main()
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"src.powerspectrum.fiducial_eor_power_spectrum",
"src.plottools.plot_2dpower_spectrum",
"src.covariance.calibrated_residual_error",
"matplotlib.colors.LogNorm",
"numpy.array",
"matplotlib.use",
"src.powerspectrum.from_frequency_to_eta",
"src.covariance.compute_weights",
"numpy.linspace",
"src.generaltools.from_jansky_to_milikelvin",
"numpy.log10",
"matplotlib.pyplot.subplots",
"src.radiotelescope.RadioTelescope"
] |
[((663, 694), 'numpy.array', 'numpy.array', (['[1.0, 10.0, 100.0]'], {}), '([1.0, 10.0, 100.0])\n', (674, 694), False, 'import numpy\n'), ((984, 1011), 'numpy.array', 'numpy.array', (['[0.0001, 0.11]'], {}), '([0.0001, 0.11])\n', (995, 1011), False, 'import numpy\n'), ((1136, 1174), 'src.powerspectrum.from_frequency_to_eta', 'from_frequency_to_eta', (['frequency_range'], {}), '(frequency_range)\n', (1157, 1174), False, 'from src.powerspectrum import from_frequency_to_eta\n'), ((1200, 1241), 'src.powerspectrum.fiducial_eor_power_spectrum', 'fiducial_eor_power_spectrum', (['u_range', 'eta'], {}), '(u_range, eta)\n', (1227, 1241), False, 'from src.powerspectrum import fiducial_eor_power_spectrum\n'), ((1260, 1315), 'src.radiotelescope.RadioTelescope', 'RadioTelescope', ([], {'load': '(True)', 'path': 'telescope_position_path'}), '(load=True, path=telescope_position_path)\n', (1274, 1315), False, 'from src.radiotelescope import RadioTelescope\n'), ((1453, 1544), 'src.covariance.compute_weights', 'compute_weights', (['u_range', 'redundant_table.u_coordinates', 'redundant_table.v_coordinates'], {}), '(u_range, redundant_table.u_coordinates, redundant_table.\n v_coordinates)\n', (1468, 1544), False, 'from src.covariance import compute_weights\n'), ((1843, 2060), 'src.covariance.calibrated_residual_error', 'calibrated_residual_error', ([], {'u': 'u_range', 'nu': 'frequency_range', 'residuals': '"""sky"""', 'calibration_type': '"""sky"""', 'weights': 'weights', 'tile_diameter': 'tile_diameter', 'broken_baselines_weight': 'fraction_broken', 'model_limit': 'model_limit'}), "(u=u_range, nu=frequency_range, residuals='sky',\n calibration_type='sky', weights=weights, tile_diameter=tile_diameter,\n broken_baselines_weight=fraction_broken, model_limit=model_limit)\n", (1868, 2060), False, 'from src.covariance import calibrated_residual_error\n'), ((2180, 2398), 'src.covariance.calibrated_residual_error', 'calibrated_residual_error', ([], {'u': 'u_range', 'nu': 'frequency_range', 'residuals': '"""beam"""', 'calibration_type': '"""sky"""', 'weights': 'weights', 'tile_diameter': 'tile_diameter', 'broken_baselines_weight': 'fraction_broken', 'model_limit': 'model_limit'}), "(u=u_range, nu=frequency_range, residuals='beam',\n calibration_type='sky', weights=weights, tile_diameter=tile_diameter,\n broken_baselines_weight=fraction_broken, model_limit=model_limit)\n", (2205, 2398), False, 'from src.covariance import calibrated_residual_error\n'), ((2592, 2810), 'src.covariance.calibrated_residual_error', 'calibrated_residual_error', ([], {'u': 'u_range', 'nu': 'frequency_range', 'residuals': '"""both"""', 'calibration_type': '"""sky"""', 'weights': 'weights', 'tile_diameter': 'tile_diameter', 'broken_baselines_weight': 'fraction_broken', 'model_limit': 'model_limit'}), "(u=u_range, nu=frequency_range, residuals='both',\n calibration_type='sky', weights=weights, tile_diameter=tile_diameter,\n broken_baselines_weight=fraction_broken, model_limit=model_limit)\n", (2617, 2810), False, 'from src.covariance import calibrated_residual_error\n'), ((3004, 3042), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (3019, 3042), False, 'from matplotlib import pyplot\n'), ((3058, 3110), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': '(1000.0)', 'vmax': '(1000000000000000.0)'}), '(vmin=1000.0, vmax=1000000000000000.0)\n', (3072, 3110), False, 'from matplotlib import colors\n'), ((3099, 3339), 'src.plottools.plot_2dpower_spectrum', 
'plot_2dpower_spectrum', (['u_range', 'eta', 'frequency_range', 'sky_calibrated'], {'title': '"""Sky Error"""', 'axes': 'axes[0]', 'axes_label_font': 'labelfontsize', 'tickfontsize': 'ticksize', 'colorbar_show': '(False)', 'xlabel_show': '(True)', 'norm': 'ps_norm', 'ylabel_show': '(True)'}), "(u_range, eta, frequency_range, sky_calibrated, title=\n 'Sky Error', axes=axes[0], axes_label_font=labelfontsize, tickfontsize=\n ticksize, colorbar_show=False, xlabel_show=True, norm=ps_norm,\n ylabel_show=True)\n", (3120, 3339), False, 'from src.plottools import plot_2dpower_spectrum\n'), ((3379, 3627), 'src.plottools.plot_2dpower_spectrum', 'plot_2dpower_spectrum', (['u_range', 'eta', 'frequency_range', 'beam_calibrated'], {'title': '"""Beam Variations"""', 'axes': 'axes[1]', 'axes_label_font': 'labelfontsize', 'tickfontsize': 'ticksize', 'colorbar_show': '(False)', 'xlabel_show': '(True)', 'norm': 'ps_norm', 'ylabel_show': '(False)'}), "(u_range, eta, frequency_range, beam_calibrated, title\n ='Beam Variations', axes=axes[1], axes_label_font=labelfontsize,\n tickfontsize=ticksize, colorbar_show=False, xlabel_show=True, norm=\n ps_norm, ylabel_show=False)\n", (3400, 3627), False, 'from src.plottools import plot_2dpower_spectrum\n'), ((3667, 3928), 'src.plottools.plot_2dpower_spectrum', 'plot_2dpower_spectrum', (['u_range', 'eta', 'frequency_range', 'total_calibrated'], {'title': '"""Total Error"""', 'axes': 'axes[2]', 'axes_label_font': 'labelfontsize', 'tickfontsize': 'ticksize', 'colorbar_show': '(True)', 'xlabel_show': '(True)', 'norm': 'ps_norm', 'ylabel_show': '(False)', 'zlabel_show': '(True)'}), "(u_range, eta, frequency_range, total_calibrated,\n title='Total Error', axes=axes[2], axes_label_font=labelfontsize,\n tickfontsize=ticksize, colorbar_show=True, xlabel_show=True, norm=\n ps_norm, ylabel_show=False, zlabel_show=True)\n", (3688, 3928), False, 'from src.plottools import plot_2dpower_spectrum\n'), ((5195, 5216), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (5214, 5216), False, 'from matplotlib import pyplot\n'), ((5292, 5305), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (5303, 5305), False, 'from matplotlib import pyplot\n'), ((5359, 5384), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5382, 5384), False, 'import argparse\n'), ((1045, 1061), 'numpy.log10', 'numpy.log10', (['(500)'], {}), '(500)\n', (1056, 1061), False, 'import numpy\n'), ((1090, 1119), 'numpy.linspace', 'numpy.linspace', (['(135)', '(165)', '(251)'], {}), '(135, 165, 251)\n', (1104, 1119), False, 'import numpy\n'), ((5558, 5579), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (5572, 5579), False, 'import matplotlib\n'), ((4020, 4078), 'src.generaltools.from_jansky_to_milikelvin', 'from_jansky_to_milikelvin', (['sky_calibrated', 'frequency_range'], {}), '(sky_calibrated, frequency_range)\n', (4045, 4078), False, 'from src.generaltools import from_jansky_to_milikelvin\n'), ((4480, 4539), 'src.generaltools.from_jansky_to_milikelvin', 'from_jansky_to_milikelvin', (['beam_calibrated', 'frequency_range'], {}), '(beam_calibrated, frequency_range)\n', (4505, 4539), False, 'from src.generaltools import from_jansky_to_milikelvin\n'), ((4862, 4922), 'src.generaltools.from_jansky_to_milikelvin', 'from_jansky_to_milikelvin', (['total_calibrated', 'frequency_range'], {}), '(total_calibrated, frequency_range)\n', (4887, 4922), False, 'from src.generaltools import from_jansky_to_milikelvin\n')]
|
import argparse, os, time, func_timeout
from shutil import copyfile
from .colorful import *
def secure_chained_vars(default_cfg, new_cfg, vb):
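    # Refresh config items that are chained to other items. A chained item has a
    # twin attribute named '<item>_cv' describing which variables it depends on
    # and how to recompute it. If any of those variables was overridden in
    # new_cfg (and the item itself was not), recompute the item and record its
    # name in altered_cv.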
default_cfg_dict = default_cfg.__dict__
altered_cv = []
for key in default_cfg_dict:
if not key.endswith('_cv'): continue
o_key = key.replace('_cv','')
if o_key in new_cfg: continue
assert hasattr(default_cfg, o_key), ('twin var does not have original')
# get twin
chain_var = getattr(default_cfg, key)
        need_refresh = False
        for chain_by_var in chain_var.chained_with:
            if chain_by_var in new_cfg: need_refresh = True
        if not need_refresh: continue
replace_item = chain_var.chain_func(*[getattr(default_cfg, v) for v in chain_var.chained_with])
original_item = getattr(default_cfg, o_key)
        if vb: print靛('[config] warning, %s is chained by %s, automatically modifying:'%(o_key,
            str(chain_var.chained_with)), original_item, '-->', replace_item)
setattr(default_cfg, o_key, replace_item)
altered_cv.append(o_key)
return altered_cv
def override_config_file(cfg_group, new_cfg, vb):
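    # cfg_group has the form '<module path>-><config class>'. Import the module,
    # apply every non-None override in new_cfg onto the config class, refresh any
    # chained variables, and print a colour-coded summary of the final settings.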
import importlib
assert '->' in cfg_group
str_pro = '------------- %s -------------'%cfg_group
if vb: print绿(str_pro)
file_, class_ = cfg_group.split('->')
if '.py' in file_: file_ = file_.replace('.py', '')
default_configs = getattr(importlib.import_module(file_), class_)
for key in new_cfg:
if new_cfg[key] is None: continue
my_setattr(conf_class=default_configs, key=key, new_value=new_cfg[key], vb=vb)
altered_cv = secure_chained_vars(default_configs, new_cfg, vb)
if vb:
print绿(''.join(['-']*len(str_pro)),)
arg_summary(default_configs, new_cfg, altered_cv)
print绿(''.join(['-']*len(str_pro)),'\n\n\n')
def check_config_relevence(json_data):
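    # Sanity check: every 'MISSIONS' config group in the json must refer to the
    # environment selected in config.py->GlobalConfig (env_path).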
env_name = json_data['config.py->GlobalConfig']['env_name']
env_path = json_data['config.py->GlobalConfig']['env_path']
for key in json_data.keys():
        if 'MISSIONS' in key: assert env_path in key, ('configuring wrong env!')
def load_config_via_json(json_data, vb):
for cfg_group in json_data:
override_config_file(cfg_group, json_data[cfg_group], vb)
check_config_relevence(json_data)
return None
def get_args(vb=True):
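    # Configuration entry point: load overrides either from a json experiment
    # file (-c/--cfg) or from command-line arguments, optionally let the user
    # rename the note when the log directory already exists, then back up the
    # experiment json and the listed source files into the log directory.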
parser = argparse.ArgumentParser(description='HMP')
parser.add_argument('-c', '--cfg', help='Path of the configuration file')
parser.add_argument('-s', '--skip', action='store_true', help='skip logdir check')
args, unknown = parser.parse_known_args()
load_via_json = (hasattr(args, 'cfg') and args.cfg is not None)
skip_logdir_check = (hasattr(args, 'skip') and (args.skip is not None) and args.skip)
if load_via_json:
if len(unknown) > 0 and vb:
print亮红('Warning! In json setting mode, %s is ignored'%str(unknown))
import json
with open(args.cfg) as f:
json_data = json.load(f)
new_args = load_config_via_json(json_data, vb)
else:
new_args = load_config_via_cmdline(vb)
from config import GlobalConfig as cfg
note_name_overide = None
if not skip_logdir_check:
note_name_overide = check_experiment_log_path(cfg.logdir)
if note_name_overide is not None:
override_config_file('config.py->GlobalConfig', {'note':note_name_overide}, vb)
if not os.path.exists(cfg.logdir): os.makedirs(cfg.logdir)
if load_via_json:
copyfile(args.cfg, '%s/experiment.json'%cfg.logdir)
backup_files(cfg.backup_files, cfg.logdir)
return cfg
def backup_files(files, logdir):
for file in files:
print绿('[config] Backup File:',file)
bkdir = '%s/backup_files/'%logdir
if not os.path.exists(bkdir): os.makedirs(bkdir)
copyfile(file, '%s/%s'%(bkdir, os.path.basename(file)))
return
def check_experiment_log_path(logdir):
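    # If the log directory already exists, warn about overwriting the old
    # experiment and give the user 60 seconds to type a new note name; returns
    # None on timeout or empty input, otherwise the replacement note name.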
res = None
if os.path.exists(logdir):
print亮红('Warning! you will overwrite old experiment if continue!')
print亮红("Pause for 60 seconds before continue (or enter NEW note name!)")
try:
res = askChoice()
if res == '': res = None
except func_timeout.exceptions.FunctionTimedOut as e:
res = None
return res
@func_timeout.func_set_timeout(60)
def askChoice():
return input('>>')
# def input_or_timeout(timeout):
# for i in range(30):
# time.sleep(1)
# return
def arg_summary(config_class, modify_dict = {}, altered_cv = []):
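    # Print every config item with colour coding: green for defaults, indigo for
    # values auto-adjusted through chained variables, red for user overrides.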
for key in config_class.__dict__:
if '__' in key: continue
if key.endswith('_cv'): continue
if (not key in modify_dict) or (modify_dict[key] is None):
if key not in altered_cv:
print绿(key.center(25), '-->', str(getattr(config_class,key)))
else:
print靛(key.center(25), '-->', str(getattr(config_class,key)))
else:
print红(key.center(25), '-->', str(getattr(config_class,key)))
def load_config_via_cmdline(vb):
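    # Expose every attribute of GlobalConfig as an optional '--<name>'
    # command-line argument and apply whatever the user actually supplied.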
parser = argparse.ArgumentParser(description='HMP')
# environment
from config import GlobalConfig as cfg
for setting_name in cfg.__dict__:
if '__' in setting_name: continue
if setting_name.endswith('_cv'): continue
try:
parser.add_argument('--' + setting_name)
except:
if vb: print红('[config] Repeated Arg! ->', setting_name)
args = vars(parser.parse_args())
args = {key: args[key] for key in args if args[key] is not None}
override_config_file('config.py->GlobalConfig', args, vb)
if vb: arg_summary(cfg, args)
return args
def my_setattr(conf_class, key, new_value, vb):
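    # Override one config attribute, casting the new value to the type of the
    # existing default (float, bool, int, str or list).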
assert hasattr(conf_class, key), (conf_class, 'has no such config item: **%s**'%key)
setting_name = key
replace_item = new_value
original_item = getattr(conf_class, setting_name)
if vb: print绿('[config] override %s:'%setting_name, original_item, '-->', replace_item)
if isinstance(original_item, float):
replace_item = float(replace_item)
elif isinstance(original_item, bool):
if replace_item == 'True':
replace_item = True
elif replace_item == 'False':
replace_item = False
else:
assert False, ('enter True or False, but have:', replace_item)
elif isinstance(original_item, int):
replace_item = int(replace_item)
elif isinstance(original_item, str):
replace_item = replace_item
elif isinstance(original_item, list):
assert isinstance(replace_item, list)
else:
        assert False, ('this config value type is not supported')
setattr(conf_class, setting_name, replace_item)
return
def find_all_conf():
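    # Scan all .py files in the project for config classes tagged with the
    # marker 'ADD_TO_CONF_SYSTEM' and import each of them, returning a list of
    # records with the source file, class name and class object.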
import glob
py_script_list = glob.glob('./**/*.py', recursive=True)
conf_class_gather = []
for python_file in py_script_list:
with open(python_file,encoding='UTF-8') as f:
lines = f.readlines()
for line in lines:
if 'ADD_TO_CONF_SYSTEM' not in line: continue
if 'class ' not in line: continue
conf_class_gather.append({'line':line, 'file':python_file})
def getBetween(str, str1, str2):
strOutput = str[str.find(str1)+len(str1):str.find(str2)]
return strOutput
for target in conf_class_gather:
class_name = getBetween(target['line'], 'class ', '(')
target['class_name'] = class_name
target['file'] = target['file'].replace('/', '.').replace('..', '')
import importlib
target['class'] = getattr(importlib.import_module(target['file'].replace('.py', '')), class_name)
return conf_class_gather
def make_json(conf_list):
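    # Serialise every discovered config class into all_conf.json; values that
    # are not json-serialisable are stored as strings prefixed with
    # '[cannot be json]'.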
import json
out = {}
for conf in conf_list:
local_conf = {}
config_class = conf['class']
for key in config_class.__dict__:
if '__' in key or '_cv' in key: continue
item_to_be_serialize = getattr(config_class, key)
try:
json.dumps(item_to_be_serialize)
except:
item_to_be_serialize = '[cannot be json]' + str(item_to_be_serialize)
local_conf[key] = item_to_be_serialize
out[conf['file']] = local_conf
# json_str = json.dumps(out)
with open('all_conf.json', 'w') as f:
json.dump(out, f, indent=4)
print亮紫('the conf summary is successfully saved to all_conf.json')
if __name__ == '__main__':
conf_list = find_all_conf()
res_json = make_json(conf_list)
|
[
"json.dump",
"json.load",
"argparse.ArgumentParser",
"importlib.import_module",
"os.makedirs",
"os.path.basename",
"os.path.exists",
"json.dumps",
"glob.glob",
"func_timeout.func_set_timeout",
"shutil.copyfile"
] |
[((4379, 4412), 'func_timeout.func_set_timeout', 'func_timeout.func_set_timeout', (['(60)'], {}), '(60)\n', (4408, 4412), False, 'import argparse, os, time, func_timeout\n'), ((2399, 2441), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""HMP"""'}), "(description='HMP')\n", (2422, 2441), False, 'import argparse, os, time, func_timeout\n'), ((4010, 4032), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (4024, 4032), False, 'import argparse, os, time, func_timeout\n'), ((5150, 5192), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""HMP"""'}), "(description='HMP')\n", (5173, 5192), False, 'import argparse, os, time, func_timeout\n'), ((6870, 6908), 'glob.glob', 'glob.glob', (['"""./**/*.py"""'], {'recursive': '(True)'}), "('./**/*.py', recursive=True)\n", (6879, 6908), False, 'import glob\n'), ((1450, 1480), 'importlib.import_module', 'importlib.import_module', (['file_'], {}), '(file_)\n', (1473, 1480), False, 'import importlib\n'), ((3470, 3496), 'os.path.exists', 'os.path.exists', (['cfg.logdir'], {}), '(cfg.logdir)\n', (3484, 3496), False, 'import argparse, os, time, func_timeout\n'), ((3498, 3521), 'os.makedirs', 'os.makedirs', (['cfg.logdir'], {}), '(cfg.logdir)\n', (3509, 3521), False, 'import argparse, os, time, func_timeout\n'), ((3553, 3606), 'shutil.copyfile', 'copyfile', (['args.cfg', "('%s/experiment.json' % cfg.logdir)"], {}), "(args.cfg, '%s/experiment.json' % cfg.logdir)\n", (3561, 3606), False, 'from shutil import copyfile\n'), ((8418, 8445), 'json.dump', 'json.dump', (['out', 'f'], {'indent': '(4)'}), '(out, f, indent=4)\n', (8427, 8445), False, 'import json\n'), ((3029, 3041), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3038, 3041), False, 'import json\n'), ((3830, 3851), 'os.path.exists', 'os.path.exists', (['bkdir'], {}), '(bkdir)\n', (3844, 3851), False, 'import argparse, os, time, func_timeout\n'), ((3853, 3871), 'os.makedirs', 'os.makedirs', (['bkdir'], {}), '(bkdir)\n', (3864, 3871), False, 'import argparse, os, time, func_timeout\n'), ((8106, 8138), 'json.dumps', 'json.dumps', (['item_to_be_serialize'], {}), '(item_to_be_serialize)\n', (8116, 8138), False, 'import json\n'), ((3911, 3933), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (3927, 3933), False, 'import argparse, os, time, func_timeout\n')]
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("gallery", "0004_responsiveimage_description")]
operations = [
migrations.AddField(
model_name="responsiveimage",
name="image_wide",
field=models.FileField(
default="",
upload_to=b"images/responsive/wide",
verbose_name="Bredformat",
),
preserve_default=False,
)
]
|
[
"django.db.models.FileField"
] |
[((318, 414), 'django.db.models.FileField', 'models.FileField', ([], {'default': '""""""', 'upload_to': "b'images/responsive/wide'", 'verbose_name': '"""Bredformat"""'}), "(default='', upload_to=b'images/responsive/wide',\n verbose_name='Bredformat')\n", (334, 414), False, 'from django.db import migrations, models\n')]
|
import unittest
from django.conf import settings
from django.core.checks import Error, Warning
from django.core.checks.model_checks import _check_lazy_references
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, connections, models
from django.db.models.functions import Lower
from django.db.models.signals import post_init
from django.test import SimpleTestCase
from django.test.utils import isolate_apps, override_settings, register_lookup
def get_max_column_name_length():
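    # Return the smallest max_name_length among the configured databases that do
    # not truncate names, together with the alias of the database imposing it.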
allowed_len = None
db_alias = None
for db in settings.DATABASES:
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is not None and not connection.features.truncates_names:
if allowed_len is None or max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
return (allowed_len, db_alias)
@isolate_apps('invalid_models_tests')
class IndexTogetherTests(SimpleTestCase):
def test_non_iterable(self):
class Model(models.Model):
class Meta:
index_together = 42
self.assertEqual(Model.check(), [
Error(
"'index_together' must be a list or tuple.",
obj=Model,
id='models.E008',
),
])
def test_non_list(self):
class Model(models.Model):
class Meta:
index_together = 'not-a-list'
self.assertEqual(Model.check(), [
Error(
"'index_together' must be a list or tuple.",
obj=Model,
id='models.E008',
),
])
def test_list_containing_non_iterable(self):
class Model(models.Model):
class Meta:
index_together = [('a', 'b'), 42]
self.assertEqual(Model.check(), [
Error(
"All 'index_together' elements must be lists or tuples.",
obj=Model,
id='models.E009',
),
])
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
index_together = [['missing_field']]
self.assertEqual(Model.check(), [
Error(
"'index_together' refers to the nonexistent field 'missing_field'.",
obj=Model,
id='models.E012',
),
])
def test_pointing_to_non_local_field(self):
class Foo(models.Model):
field1 = models.IntegerField()
class Bar(Foo):
field2 = models.IntegerField()
class Meta:
index_together = [['field2', 'field1']]
self.assertEqual(Bar.check(), [
Error(
"'index_together' refers to field 'field1' which is not "
"local to model 'Bar'.",
hint='This issue may be caused by multi-table inheritance.',
obj=Bar,
id='models.E016',
),
])
def test_pointing_to_m2m_field(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
index_together = [['m2m']]
self.assertEqual(Model.check(), [
Error(
"'index_together' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'index_together'.",
obj=Model,
id='models.E013',
),
])
def test_pointing_to_fk(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1')
foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2')
class Meta:
index_together = [['foo_1_id', 'foo_2']]
self.assertEqual(Bar.check(), [])
# unique_together tests are very similar to index_together tests.
@isolate_apps('invalid_models_tests')
class UniqueTogetherTests(SimpleTestCase):
def test_non_iterable(self):
class Model(models.Model):
class Meta:
unique_together = 42
self.assertEqual(Model.check(), [
Error(
"'unique_together' must be a list or tuple.",
obj=Model,
id='models.E010',
),
])
def test_list_containing_non_iterable(self):
class Model(models.Model):
one = models.IntegerField()
two = models.IntegerField()
class Meta:
unique_together = [('a', 'b'), 42]
self.assertEqual(Model.check(), [
Error(
"All 'unique_together' elements must be lists or tuples.",
obj=Model,
id='models.E011',
),
])
def test_non_list(self):
class Model(models.Model):
class Meta:
unique_together = 'not-a-list'
self.assertEqual(Model.check(), [
Error(
"'unique_together' must be a list or tuple.",
obj=Model,
id='models.E010',
),
])
def test_valid_model(self):
class Model(models.Model):
one = models.IntegerField()
two = models.IntegerField()
class Meta:
# unique_together can be a simple tuple
unique_together = ('one', 'two')
self.assertEqual(Model.check(), [])
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
unique_together = [['missing_field']]
self.assertEqual(Model.check(), [
Error(
"'unique_together' refers to the nonexistent field 'missing_field'.",
obj=Model,
id='models.E012',
),
])
def test_pointing_to_m2m(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
unique_together = [['m2m']]
self.assertEqual(Model.check(), [
Error(
"'unique_together' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'unique_together'.",
obj=Model,
id='models.E013',
),
])
def test_pointing_to_fk(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1')
foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2')
class Meta:
unique_together = [['foo_1_id', 'foo_2']]
self.assertEqual(Bar.check(), [])
@isolate_apps('invalid_models_tests')
class IndexesTests(SimpleTestCase):
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
indexes = [models.Index(fields=['missing_field'], name='name')]
self.assertEqual(Model.check(), [
Error(
"'indexes' refers to the nonexistent field 'missing_field'.",
obj=Model,
id='models.E012',
),
])
def test_pointing_to_m2m_field(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
indexes = [models.Index(fields=['m2m'], name='name')]
self.assertEqual(Model.check(), [
Error(
"'indexes' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'indexes'.",
obj=Model,
id='models.E013',
),
])
def test_pointing_to_non_local_field(self):
class Foo(models.Model):
field1 = models.IntegerField()
class Bar(Foo):
field2 = models.IntegerField()
class Meta:
indexes = [models.Index(fields=['field2', 'field1'], name='name')]
self.assertEqual(Bar.check(), [
Error(
"'indexes' refers to field 'field1' which is not local to "
"model 'Bar'.",
hint='This issue may be caused by multi-table inheritance.',
obj=Bar,
id='models.E016',
),
])
def test_pointing_to_fk(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1')
foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2')
class Meta:
indexes = [models.Index(fields=['foo_1_id', 'foo_2'], name='index_name')]
self.assertEqual(Bar.check(), [])
def test_name_constraints(self):
class Model(models.Model):
class Meta:
indexes = [
models.Index(fields=['id'], name='_index_name'),
models.Index(fields=['id'], name='5index_name'),
]
self.assertEqual(Model.check(), [
Error(
"The index name '%sindex_name' cannot start with an "
"underscore or a number." % prefix,
obj=Model,
id='models.E033',
) for prefix in ('_', '5')
])
def test_max_name_length(self):
index_name = 'x' * 31
class Model(models.Model):
class Meta:
indexes = [models.Index(fields=['id'], name=index_name)]
self.assertEqual(Model.check(), [
Error(
"The index name '%s' cannot be longer than 30 characters."
% index_name,
obj=Model,
id='models.E034',
),
])
@isolate_apps('invalid_models_tests')
class FieldNamesTests(SimpleTestCase):
def test_ending_with_underscore(self):
class Model(models.Model):
field_ = models.CharField(max_length=10)
m2m_ = models.ManyToManyField('self')
self.assertEqual(Model.check(), [
Error(
'Field names must not end with an underscore.',
obj=Model._meta.get_field('field_'),
id='fields.E001',
),
Error(
'Field names must not end with an underscore.',
obj=Model._meta.get_field('m2m_'),
id='fields.E001',
),
])
max_column_name_length, column_limit_db_alias = get_max_column_name_length()
@unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
def test_M2M_long_column_name(self):
"""
#13711 -- Model check for long M2M column names when database has
column name length limits.
"""
allowed_len, db_alias = get_max_column_name_length()
# A model with very long name which will be used to set relations to.
class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
title = models.CharField(max_length=11)
# Main model for which checks will be performed.
class ModelWithLongField(models.Model):
m2m_field = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name='rn1',
)
m2m_field2 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name='rn2', through='m2msimple',
)
m2m_field3 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name='rn3',
through='m2mcomplex',
)
fk = models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
related_name='rn4',
)
# Models used for setting `through` in M2M field.
class m2msimple(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
class m2mcomplex(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
long_field_name = 'a' * (self.max_column_name_length + 1)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
).contribute_to_class(m2msimple, long_field_name)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
db_column=long_field_name
).contribute_to_class(m2mcomplex, long_field_name)
errors = ModelWithLongField.check()
# First error because of M2M field set on the model with long name.
m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
if self.max_column_name_length > len(m2m_long_name):
# Some databases support names longer than the test name.
expected = []
else:
expected = [
Error(
'Autogenerated column name too long for M2M field "%s". '
'Maximum length is "%s" for database "%s".'
% (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'.",
obj=ModelWithLongField,
id='models.E019',
)
]
# Second error because the FK specified in the `through` model
# `m2msimple` has auto-generated name longer than allowed.
# There will be no check errors in the other M2M because it
# specifies db_column for the FK in `through` model even if the actual
# name is longer than the limits of the database.
expected.append(
Error(
'Autogenerated column name too long for M2M field "%s_id". '
'Maximum length is "%s" for database "%s".'
% (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'.",
obj=ModelWithLongField,
id='models.E019',
)
)
self.assertEqual(errors, expected)
@unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
def test_local_field_long_column_name(self):
"""
#13711 -- Model check for long column names
when database does not support long names.
"""
allowed_len, db_alias = get_max_column_name_length()
class ModelWithLongField(models.Model):
title = models.CharField(max_length=11)
long_field_name = 'a' * (self.max_column_name_length + 1)
long_field_name2 = 'b' * (self.max_column_name_length + 1)
models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
self.assertEqual(ModelWithLongField.check(), [
Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Set the column name manually using 'db_column'.",
obj=ModelWithLongField,
id='models.E018',
)
])
def test_including_separator(self):
class Model(models.Model):
some__field = models.IntegerField()
self.assertEqual(Model.check(), [
Error(
'Field names must not contain "__".',
obj=Model._meta.get_field('some__field'),
id='fields.E002',
)
])
def test_pk(self):
class Model(models.Model):
pk = models.IntegerField()
self.assertEqual(Model.check(), [
Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=Model._meta.get_field('pk'),
id='fields.E003',
)
])
def test_db_column_clash(self):
class Model(models.Model):
foo = models.IntegerField()
bar = models.IntegerField(db_column='foo')
self.assertEqual(Model.check(), [
Error(
"Field 'bar' has column name 'foo' that is used by "
"another field.",
hint="Specify a 'db_column' for the field.",
obj=Model,
id='models.E007',
)
])
@isolate_apps('invalid_models_tests')
class ShadowingFieldsTests(SimpleTestCase):
def test_field_name_clash_with_child_accessor(self):
class Parent(models.Model):
pass
class Child(Parent):
child = models.CharField(max_length=100)
self.assertEqual(Child.check(), [
Error(
"The field 'child' clashes with the field "
"'child' from model 'invalid_models_tests.parent'.",
obj=Child._meta.get_field('child'),
id='models.E006',
)
])
def test_multiinheritance_clash(self):
class Mother(models.Model):
clash = models.IntegerField()
class Father(models.Model):
clash = models.IntegerField()
class Child(Mother, Father):
# Here we have two clashed: id (automatic field) and clash, because
# both parents define these fields.
pass
self.assertEqual(Child.check(), [
Error(
"The field 'id' from parent model "
"'invalid_models_tests.mother' clashes with the field 'id' "
"from parent model 'invalid_models_tests.father'.",
obj=Child,
id='models.E005',
),
Error(
"The field 'clash' from parent model "
"'invalid_models_tests.mother' clashes with the field 'clash' "
"from parent model 'invalid_models_tests.father'.",
obj=Child,
id='models.E005',
)
])
def test_inheritance_clash(self):
class Parent(models.Model):
f_id = models.IntegerField()
class Target(models.Model):
# This field doesn't result in a clash.
f_id = models.IntegerField()
class Child(Parent):
# This field clashes with parent "f_id" field.
f = models.ForeignKey(Target, models.CASCADE)
self.assertEqual(Child.check(), [
Error(
"The field 'f' clashes with the field 'f_id' "
"from model 'invalid_models_tests.parent'.",
obj=Child._meta.get_field('f'),
id='models.E006',
)
])
def test_multigeneration_inheritance(self):
class GrandParent(models.Model):
clash = models.IntegerField()
class Parent(GrandParent):
pass
class Child(Parent):
pass
class GrandChild(Child):
clash = models.IntegerField()
self.assertEqual(GrandChild.check(), [
Error(
"The field 'clash' clashes with the field 'clash' "
"from model 'invalid_models_tests.grandparent'.",
obj=GrandChild._meta.get_field('clash'),
id='models.E006',
)
])
def test_id_clash(self):
class Target(models.Model):
pass
class Model(models.Model):
fk = models.ForeignKey(Target, models.CASCADE)
fk_id = models.IntegerField()
self.assertEqual(Model.check(), [
Error(
"The field 'fk_id' clashes with the field 'fk' from model "
"'invalid_models_tests.model'.",
obj=Model._meta.get_field('fk_id'),
id='models.E006',
)
])
@isolate_apps('invalid_models_tests')
class OtherModelTests(SimpleTestCase):
def test_unique_primary_key(self):
invalid_id = models.IntegerField(primary_key=False)
class Model(models.Model):
id = invalid_id
self.assertEqual(Model.check(), [
Error(
"'id' can only be used as a field name if the field also sets "
"'primary_key=True'.",
obj=Model,
id='models.E004',
),
])
def test_ordering_non_iterable(self):
class Model(models.Model):
class Meta:
ordering = 'missing_field'
self.assertEqual(Model.check(), [
Error(
"'ordering' must be a tuple or list "
"(even if you want to order by only one field).",
obj=Model,
id='models.E014',
),
])
def test_just_ordering_no_errors(self):
class Model(models.Model):
order = models.PositiveIntegerField()
class Meta:
ordering = ['order']
self.assertEqual(Model.check(), [])
def test_just_order_with_respect_to_no_errors(self):
class Question(models.Model):
pass
class Answer(models.Model):
question = models.ForeignKey(Question, models.CASCADE)
class Meta:
order_with_respect_to = 'question'
self.assertEqual(Answer.check(), [])
def test_ordering_with_order_with_respect_to(self):
class Question(models.Model):
pass
class Answer(models.Model):
question = models.ForeignKey(Question, models.CASCADE)
order = models.IntegerField()
class Meta:
order_with_respect_to = 'question'
ordering = ['order']
self.assertEqual(Answer.check(), [
Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=Answer,
id='models.E021',
),
])
def test_non_valid(self):
class RelationModel(models.Model):
pass
class Model(models.Model):
relation = models.ManyToManyField(RelationModel)
class Meta:
ordering = ['relation']
self.assertEqual(Model.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'relation'.",
obj=Model,
id='models.E015',
),
])
def test_ordering_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
ordering = ('missing_field',)
self.assertEqual(Model.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'missing_field'.",
obj=Model,
id='models.E015',
)
])
def test_ordering_pointing_to_missing_foreignkey_field(self):
class Model(models.Model):
missing_fk_field = models.IntegerField()
class Meta:
ordering = ('missing_fk_field_id',)
self.assertEqual(Model.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'missing_fk_field_id'.",
obj=Model,
id='models.E015',
)
])
def test_ordering_pointing_to_missing_related_field(self):
class Model(models.Model):
test = models.IntegerField()
class Meta:
ordering = ('missing_related__id',)
self.assertEqual(Model.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'missing_related__id'.",
obj=Model,
id='models.E015',
)
])
def test_ordering_pointing_to_missing_related_model_field(self):
class Parent(models.Model):
pass
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ('parent__missing_field',)
self.assertEqual(Child.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'parent__missing_field'.",
obj=Child,
id='models.E015',
)
])
def test_ordering_pointing_to_non_related_field(self):
class Child(models.Model):
parent = models.IntegerField()
class Meta:
ordering = ('parent__missing_field',)
self.assertEqual(Child.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'parent__missing_field'.",
obj=Child,
id='models.E015',
)
])
def test_ordering_pointing_to_two_related_model_field(self):
class Parent2(models.Model):
pass
class Parent1(models.Model):
parent2 = models.ForeignKey(Parent2, models.CASCADE)
class Child(models.Model):
parent1 = models.ForeignKey(Parent1, models.CASCADE)
class Meta:
ordering = ('parent1__parent2__missing_field',)
self.assertEqual(Child.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'parent1__parent2__missing_field'.",
obj=Child,
id='models.E015',
)
])
def test_ordering_pointing_multiple_times_to_model_fields(self):
class Parent(models.Model):
field1 = models.CharField(max_length=100)
field2 = models.CharField(max_length=100)
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ('parent__field1__field2',)
self.assertEqual(Child.check(), [
Error(
"'ordering' refers to the nonexistent field, related field, "
"or lookup 'parent__field1__field2'.",
obj=Child,
id='models.E015',
)
])
def test_ordering_allows_registered_lookups(self):
class Model(models.Model):
test = models.CharField(max_length=100)
class Meta:
ordering = ('test__lower',)
with register_lookup(models.CharField, Lower):
self.assertEqual(Model.check(), [])
def test_ordering_pointing_to_lookup_not_transform(self):
class Model(models.Model):
test = models.CharField(max_length=100)
class Meta:
ordering = ('test__isnull',)
self.assertEqual(Model.check(), [])
def test_ordering_pointing_to_related_model_pk(self):
class Parent(models.Model):
pass
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ('parent__pk',)
self.assertEqual(Child.check(), [])
def test_ordering_pointing_to_foreignkey_field(self):
class Parent(models.Model):
pass
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ('parent_id',)
self.assertFalse(Child.check())
def test_name_beginning_with_underscore(self):
class _Model(models.Model):
pass
self.assertEqual(_Model.check(), [
Error(
"The model name '_Model' cannot start or end with an underscore "
"as it collides with the query lookup syntax.",
obj=_Model,
id='models.E023',
)
])
def test_name_ending_with_underscore(self):
class Model_(models.Model):
pass
self.assertEqual(Model_.check(), [
Error(
"The model name 'Model_' cannot start or end with an underscore "
"as it collides with the query lookup syntax.",
obj=Model_,
id='models.E023',
)
])
def test_name_contains_double_underscores(self):
class Test__Model(models.Model):
pass
self.assertEqual(Test__Model.check(), [
Error(
"The model name 'Test__Model' cannot contain double underscores "
"as it collides with the query lookup syntax.",
obj=Test__Model,
id='models.E024',
)
])
def test_property_and_related_field_accessor_clash(self):
class Model(models.Model):
fk = models.ForeignKey('self', models.CASCADE)
@property
def fk_id(self):
pass
self.assertEqual(Model.check(), [
Error(
"The property 'fk_id' clashes with a related field accessor.",
obj=Model,
id='models.E025',
)
])
def test_single_primary_key(self):
class Model(models.Model):
foo = models.IntegerField(primary_key=True)
bar = models.IntegerField(primary_key=True)
self.assertEqual(Model.check(), [
Error(
"The model cannot have more than one field with 'primary_key=True'.",
obj=Model,
id='models.E026',
)
])
@override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
def test_swappable_missing_app_name(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
self.assertEqual(Model.check(), [
Error(
"'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
id='models.E001',
),
])
@override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
def test_swappable_missing_app(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
self.assertEqual(Model.check(), [
Error(
"'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
'which has not been installed, or is abstract.',
id='models.E002',
),
])
def test_two_m2m_through_same_relationship(self):
class Person(models.Model):
pass
class Group(models.Model):
primary = models.ManyToManyField(Person, through='Membership', related_name='primary')
secondary = models.ManyToManyField(Person, through='Membership', related_name='secondary')
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
self.assertEqual(Group.check(), [
Error(
"The model has two identical many-to-many relations through "
"the intermediate model 'invalid_models_tests.Membership'.",
obj=Group,
id='models.E003',
)
])
def test_two_m2m_through_same_model_with_different_through_fields(self):
class Country(models.Model):
pass
class ShippingMethod(models.Model):
to_countries = models.ManyToManyField(
Country, through='ShippingMethodPrice',
through_fields=('method', 'to_country'),
)
from_countries = models.ManyToManyField(
Country, through='ShippingMethodPrice',
through_fields=('method', 'from_country'),
related_name='+',
)
class ShippingMethodPrice(models.Model):
method = models.ForeignKey(ShippingMethod, models.CASCADE)
to_country = models.ForeignKey(Country, models.CASCADE)
from_country = models.ForeignKey(Country, models.CASCADE)
self.assertEqual(ShippingMethod.check(), [])
def test_missing_parent_link(self):
msg = 'Add parent_link=True to invalid_models_tests.ParkingLot.parent.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
class Place(models.Model):
pass
class ParkingLot(Place):
parent = models.OneToOneField(Place, models.CASCADE)
def test_m2m_table_name_clash(self):
class Foo(models.Model):
bar = models.ManyToManyField('Bar', db_table='myapp_bar')
class Meta:
db_table = 'myapp_foo'
class Bar(models.Model):
class Meta:
db_table = 'myapp_bar'
self.assertEqual(Foo.check(), [
Error(
"The field's intermediary table 'myapp_bar' clashes with the "
"table name of 'invalid_models_tests.Bar'.",
obj=Foo._meta.get_field('bar'),
id='fields.E340',
)
])
def test_m2m_field_table_name_clash(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foos = models.ManyToManyField(Foo, db_table='clash')
class Baz(models.Model):
foos = models.ManyToManyField(Foo, db_table='clash')
self.assertEqual(Bar.check() + Baz.check(), [
Error(
"The field's intermediary table 'clash' clashes with the "
"table name of 'invalid_models_tests.Baz.foos'.",
obj=Bar._meta.get_field('foos'),
id='fields.E340',
),
Error(
"The field's intermediary table 'clash' clashes with the "
"table name of 'invalid_models_tests.Bar.foos'.",
obj=Baz._meta.get_field('foos'),
id='fields.E340',
)
])
def test_m2m_autogenerated_table_name_clash(self):
class Foo(models.Model):
class Meta:
db_table = 'bar_foos'
class Bar(models.Model):
# The autogenerated `db_table` will be bar_foos.
foos = models.ManyToManyField(Foo)
class Meta:
db_table = 'bar'
self.assertEqual(Bar.check(), [
Error(
"The field's intermediary table 'bar_foos' clashes with the "
"table name of 'invalid_models_tests.Foo'.",
obj=Bar._meta.get_field('foos'),
id='fields.E340',
)
])
def test_m2m_unmanaged_shadow_models_not_checked(self):
class A1(models.Model):
pass
class C1(models.Model):
mm_a = models.ManyToManyField(A1, db_table='d1')
# Unmanaged models that shadow the above models. Reused table names
# shouldn't be flagged by any checks.
class A2(models.Model):
class Meta:
managed = False
class C2(models.Model):
mm_a = models.ManyToManyField(A2, through='Intermediate')
class Meta:
managed = False
class Intermediate(models.Model):
a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id')
c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id')
class Meta:
db_table = 'd1'
managed = False
self.assertEqual(C1.check(), [])
self.assertEqual(C2.check(), [])
def test_m2m_to_concrete_and_proxy_allowed(self):
class A(models.Model):
pass
class Through(models.Model):
a = models.ForeignKey('A', models.CASCADE)
c = models.ForeignKey('C', models.CASCADE)
class ThroughProxy(Through):
class Meta:
proxy = True
class C(models.Model):
mm_a = models.ManyToManyField(A, through=Through)
mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m')
self.assertEqual(C.check(), [])
@isolate_apps('django.contrib.auth', kwarg_name='apps')
def test_lazy_reference_checks(self, apps):
class DummyModel(models.Model):
author = models.ForeignKey('Author', models.CASCADE)
class Meta:
app_label = 'invalid_models_tests'
class DummyClass:
def __call__(self, **kwargs):
pass
def dummy_method(self):
pass
def dummy_function(*args, **kwargs):
pass
apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel'))
apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel'))
post_init.connect(dummy_function, sender='missing-app.Model', apps=apps)
post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps)
post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps)
self.assertEqual(_check_lazy_references(apps), [
Error(
"%r contains a lazy reference to auth.imaginarymodel, "
"but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function,
obj=dummy_function,
id='models.E022',
),
Error(
"%r contains a lazy reference to fanciful_app.imaginarymodel, "
"but app 'fanciful_app' isn't installed." % dummy_function,
obj=dummy_function,
id='models.E022',
),
Error(
"An instance of class 'DummyClass' was connected to "
"the 'post_init' signal with a lazy reference to the sender "
"'missing-app.model', but app 'missing-app' isn't installed.",
hint=None,
obj='invalid_models_tests.test_models',
id='signals.E001',
),
Error(
"Bound method 'DummyClass.dummy_method' was connected to the "
"'post_init' signal with a lazy reference to the sender "
"'missing-app.model', but app 'missing-app' isn't installed.",
hint=None,
obj='invalid_models_tests.test_models',
id='signals.E001',
),
Error(
"The field invalid_models_tests.DummyModel.author was declared "
"with a lazy reference to 'invalid_models_tests.author', but app "
"'invalid_models_tests' isn't installed.",
hint=None,
obj=DummyModel.author.field,
id='fields.E307',
),
Error(
"The function 'dummy_function' was connected to the 'post_init' "
"signal with a lazy reference to the sender "
"'missing-app.model', but app 'missing-app' isn't installed.",
hint=None,
obj='invalid_models_tests.test_models',
id='signals.E001',
),
])
@isolate_apps('invalid_models_tests')
class ConstraintsTests(SimpleTestCase):
def test_check_constraints(self):
class Model(models.Model):
age = models.IntegerField()
class Meta:
constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')]
errors = Model.check()
warn = Warning(
'%s does not support check constraints.' % connection.display_name,
hint=(
"A constraint won't be created. Silence this warning if you "
"don't care about it."
),
obj=Model,
id='models.W027',
)
expected = [] if connection.features.supports_table_check_constraints else [warn, warn]
self.assertCountEqual(errors, expected)
def test_check_constraints_required_db_features(self):
class Model(models.Model):
age = models.IntegerField()
class Meta:
required_db_features = {'supports_table_check_constraints'}
constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')]
self.assertEqual(Model.check(), [])
|
[
"django.db.connection.ops.max_name_length",
"unittest.skipIf",
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"django.test.utils.register_lookup",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.Index",
"django.core.checks.Warning",
"django.db.models.Q",
"django.db.models.IntegerField",
"django.test.utils.override_settings",
"django.test.utils.isolate_apps",
"django.core.checks.Error",
"django.db.models.signals.post_init.connect",
"django.core.checks.model_checks._check_lazy_references"
] |
[((961, 997), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (973, 997), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((4100, 4136), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (4112, 4136), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((6973, 7009), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (6985, 7009), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((10090, 10126), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (10102, 10126), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((17448, 17484), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (17460, 17484), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((20884, 20920), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (20896, 20920), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((40403, 40439), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""invalid_models_tests"""'], {}), "('invalid_models_tests')\n", (40415, 40439), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((10858, 10966), 'unittest.skipIf', 'unittest.skipIf', (['(max_column_name_length is None)', '"""The database doesn\'t have a column name length limit."""'], {}), '(max_column_name_length is None,\n "The database doesn\'t have a column name length limit.")\n', (10873, 10966), False, 'import unittest\n'), ((15002, 15110), 'unittest.skipIf', 'unittest.skipIf', (['(max_column_name_length is None)', '"""The database doesn\'t have a column name length limit."""'], {}), '(max_column_name_length is None,\n "The database doesn\'t have a column name length limit.")\n', (15017, 15110), False, 'import unittest\n'), ((30726, 30787), 'django.test.utils.override_settings', 'override_settings', ([], {'TEST_SWAPPED_MODEL_BAD_VALUE': '"""not-a-model"""'}), "(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')\n", (30743, 30787), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((31172, 31239), 'django.test.utils.override_settings', 'override_settings', ([], {'TEST_SWAPPED_MODEL_BAD_MODEL': '"""not_an_app.Target"""'}), "(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')\n", (31189, 31239), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((37390, 37444), 'django.test.utils.isolate_apps', 'isolate_apps', (['"""django.contrib.auth"""'], {'kwarg_name': '"""apps"""'}), "('django.contrib.auth', kwarg_name='apps')\n", (37402, 37444), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((660, 692), 'django.db.connection.ops.max_name_length', 'connection.ops.max_name_length', ([], {}), '()\n', (690, 692), False, 'from django.db import connection, connections, models\n'), ((21021, 21059), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(False)'}), '(primary_key=False)\n', (21040, 21059), False, 'from django.db import 
connection, connections, models\n'), ((38059, 38131), 'django.db.models.signals.post_init.connect', 'post_init.connect', (['dummy_function'], {'sender': '"""missing-app.Model"""', 'apps': 'apps'}), "(dummy_function, sender='missing-app.Model', apps=apps)\n", (38076, 38131), False, 'from django.db.models.signals import post_init\n'), ((40766, 40972), 'django.core.checks.Warning', 'Warning', (["('%s does not support check constraints.' % connection.display_name)"], {'hint': '"""A constraint won\'t be created. Silence this warning if you don\'t care about it."""', 'obj': 'Model', 'id': '"""models.W027"""'}), '(\'%s does not support check constraints.\' % connection.display_name,\n hint=\n "A constraint won\'t be created. Silence this warning if you don\'t care about it."\n , obj=Model, id=\'models.W027\')\n', (40773, 40972), False, 'from django.core.checks import Error, Warning\n'), ((2602, 2623), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2621, 2623), False, 'from django.db import connection, connections, models\n'), ((2670, 2691), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2689, 2691), False, 'from django.db import connection, connections, models\n'), ((3206, 3236), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {}), "('self')\n", (3228, 3236), False, 'from django.db import connection, connections, models\n'), ((3744, 3814), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Foo'], {'on_delete': 'models.CASCADE', 'related_name': '"""bar_1"""'}), "(Foo, on_delete=models.CASCADE, related_name='bar_1')\n", (3761, 3814), False, 'from django.db import connection, connections, models\n'), ((3835, 3905), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Foo'], {'on_delete': 'models.CASCADE', 'related_name': '"""bar_2"""'}), "(Foo, on_delete=models.CASCADE, related_name='bar_2')\n", (3852, 3905), False, 'from django.db import connection, connections, models\n'), ((4624, 4645), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (4643, 4645), False, 'from django.db import connection, connections, models\n'), ((4664, 4685), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (4683, 4685), False, 'from django.db import connection, connections, models\n'), ((5419, 5440), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5438, 5440), False, 'from django.db import connection, connections, models\n'), ((5459, 5480), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5478, 5480), False, 'from django.db import connection, connections, models\n'), ((6141, 6171), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {}), "('self')\n", (6163, 6171), False, 'from django.db import connection, connections, models\n'), ((6682, 6752), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Foo'], {'on_delete': 'models.CASCADE', 'related_name': '"""bar_1"""'}), "(Foo, on_delete=models.CASCADE, related_name='bar_1')\n", (6699, 6752), False, 'from django.db import connection, connections, models\n'), ((6773, 6843), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Foo'], {'on_delete': 'models.CASCADE', 'related_name': '"""bar_2"""'}), "(Foo, on_delete=models.CASCADE, related_name='bar_2')\n", (6790, 6843), False, 'from django.db import connection, connections, models\n'), ((7555, 7585), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {}), "('self')\n", (7577, 
7585), False, 'from django.db import connection, connections, models\n'), ((8069, 8090), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (8088, 8090), False, 'from django.db import connection, connections, models\n'), ((8137, 8158), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (8156, 8158), False, 'from django.db import connection, connections, models\n'), ((8737, 8807), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Foo'], {'on_delete': 'models.CASCADE', 'related_name': '"""bar_1"""'}), "(Foo, on_delete=models.CASCADE, related_name='bar_1')\n", (8754, 8807), False, 'from django.db import connection, connections, models\n'), ((8828, 8898), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Foo'], {'on_delete': 'models.CASCADE', 'related_name': '"""bar_2"""'}), "(Foo, on_delete=models.CASCADE, related_name='bar_2')\n", (8845, 8898), False, 'from django.db import connection, connections, models\n'), ((10266, 10297), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (10282, 10297), False, 'from django.db import connection, connections, models\n'), ((10317, 10347), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {}), "('self')\n", (10339, 10347), False, 'from django.db import connection, connections, models\n'), ((11398, 11429), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)'}), '(max_length=11)\n', (11414, 11429), False, 'from django.db import connection, connections, models\n'), ((11560, 11684), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'], {'related_name': '"""rn1"""'}), "(\n VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,\n related_name='rn1')\n", (11582, 11684), False, 'from django.db import connection, connections, models\n'), ((11748, 11893), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'], {'related_name': '"""rn2"""', 'through': '"""m2msimple"""'}), "(\n VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,\n related_name='rn2', through='m2msimple')\n", (11770, 11893), False, 'from django.db import connection, connections, models\n'), ((11957, 12103), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'], {'related_name': '"""rn3"""', 'through': '"""m2mcomplex"""'}), "(\n VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,\n related_name='rn3', through='m2mcomplex')\n", (11979, 12103), False, 'from django.db import connection, connections, models\n'), ((12175, 12310), 'django.db.models.ForeignKey', 'models.ForeignKey', (['VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz', 'models.CASCADE'], {'related_name': '"""rn4"""'}), "(\n VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,\n models.CASCADE, related_name='rn4')\n", (12192, 12310), False, 'from django.db import connection, connections, models\n'), ((12481, 12534), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ModelWithLongField', 'models.CASCADE'], {}), '(ModelWithLongField, models.CASCADE)\n', (12498, 12534), False, 'from django.db import connection, connections, models\n'), ((12594, 12647), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ModelWithLongField', 
'models.CASCADE'], {}), '(ModelWithLongField, models.CASCADE)\n', (12611, 12647), False, 'from django.db import connection, connections, models\n'), ((14475, 14828), 'django.core.checks.Error', 'Error', (['(\'Autogenerated column name too long for M2M field "%s_id". Maximum length is "%s" for database "%s".\'\n % (long_field_name, self.max_column_name_length, self.\n column_limit_db_alias))'], {'hint': '"""Use \'through\' to create a separate model for M2M and then set column_name using \'db_column\'."""', 'obj': 'ModelWithLongField', 'id': '"""models.E019"""'}), '(\n \'Autogenerated column name too long for M2M field "%s_id". Maximum length is "%s" for database "%s".\'\n % (long_field_name, self.max_column_name_length, self.\n column_limit_db_alias), hint=\n "Use \'through\' to create a separate model for M2M and then set column_name using \'db_column\'."\n , obj=ModelWithLongField, id=\'models.E019\')\n', (14480, 14828), False, 'from django.core.checks import Error, Warning\n'), ((15413, 15444), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)'}), '(max_length=11)\n', (15429, 15444), False, 'from django.db import connection, connections, models\n'), ((16363, 16384), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (16382, 16384), False, 'from django.db import connection, connections, models\n'), ((16694, 16715), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (16713, 16715), False, 'from django.db import connection, connections, models\n'), ((17056, 17077), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (17075, 17077), False, 'from django.db import connection, connections, models\n'), ((17096, 17132), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_column': '"""foo"""'}), "(db_column='foo')\n", (17115, 17132), False, 'from django.db import connection, connections, models\n'), ((17690, 17722), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (17706, 17722), False, 'from django.db import connection, connections, models\n'), ((18125, 18146), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (18144, 18146), False, 'from django.db import connection, connections, models\n'), ((18204, 18225), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (18223, 18225), False, 'from django.db import connection, connections, models\n'), ((19146, 19167), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (19165, 19167), False, 'from django.db import connection, connections, models\n'), ((19276, 19297), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (19295, 19297), False, 'from django.db import connection, connections, models\n'), ((19403, 19444), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Target', 'models.CASCADE'], {}), '(Target, models.CASCADE)\n', (19420, 19444), False, 'from django.db import connection, connections, models\n'), ((19848, 19869), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (19867, 19869), False, 'from django.db import connection, connections, models\n'), ((20024, 20045), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (20043, 20045), False, 'from django.db import connection, connections, models\n'), ((20499, 20540), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Target', 'models.CASCADE'], {}), '(Target, models.CASCADE)\n', (20516, 20540), False, 
'from django.db import connection, connections, models\n'), ((20561, 20582), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (20580, 20582), False, 'from django.db import connection, connections, models\n'), ((21906, 21935), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (21933, 21935), False, 'from django.db import connection, connections, models\n'), ((22216, 22259), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Question', 'models.CASCADE'], {}), '(Question, models.CASCADE)\n', (22233, 22259), False, 'from django.db import connection, connections, models\n'), ((22554, 22597), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Question', 'models.CASCADE'], {}), '(Question, models.CASCADE)\n', (22571, 22597), False, 'from django.db import connection, connections, models\n'), ((22618, 22639), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (22637, 22639), False, 'from django.db import connection, connections, models\n'), ((23137, 23174), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['RelationModel'], {}), '(RelationModel)\n', (23159, 23174), False, 'from django.db import connection, connections, models\n'), ((24074, 24095), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (24093, 24095), False, 'from django.db import connection, connections, models\n'), ((24569, 24590), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (24588, 24590), False, 'from django.db import connection, connections, models\n'), ((25126, 25167), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Parent', 'models.CASCADE'], {}), '(Parent, models.CASCADE)\n', (25143, 25167), False, 'from django.db import connection, connections, models\n'), ((25643, 25664), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (25662, 25664), False, 'from django.db import connection, connections, models\n'), ((26204, 26246), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Parent2', 'models.CASCADE'], {}), '(Parent2, models.CASCADE)\n', (26221, 26246), False, 'from django.db import connection, connections, models\n'), ((26305, 26347), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Parent1', 'models.CASCADE'], {}), '(Parent1, models.CASCADE)\n', (26322, 26347), False, 'from django.db import connection, connections, models\n'), ((26854, 26886), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (26870, 26886), False, 'from django.db import connection, connections, models\n'), ((26908, 26940), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (26924, 26940), False, 'from django.db import connection, connections, models\n'), ((26998, 27039), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Parent', 'models.CASCADE'], {}), '(Parent, models.CASCADE)\n', (27015, 27039), False, 'from django.db import connection, connections, models\n'), ((27511, 27543), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (27527, 27543), False, 'from django.db import connection, connections, models\n'), ((27627, 27667), 'django.test.utils.register_lookup', 'register_lookup', (['models.CharField', 'Lower'], {}), '(models.CharField, Lower)\n', (27642, 27667), False, 'from django.test.utils import isolate_apps, override_settings, register_lookup\n'), ((27834, 27866), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (27850, 27866), False, 'from django.db import connection, connections, models\n'), ((28151, 28192), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Parent', 'models.CASCADE'], {}), '(Parent, models.CASCADE)\n', (28168, 28192), False, 'from django.db import connection, connections, models\n'), ((28475, 28516), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Parent', 'models.CASCADE'], {}), '(Parent, models.CASCADE)\n', (28492, 28516), False, 'from django.db import connection, connections, models\n'), ((29957, 29998), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""', 'models.CASCADE'], {}), "('self', models.CASCADE)\n", (29974, 29998), False, 'from django.db import connection, connections, models\n'), ((30392, 30429), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (30411, 30429), False, 'from django.db import connection, connections, models\n'), ((30448, 30485), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (30467, 30485), False, 'from django.db import connection, connections, models\n'), ((31835, 31911), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Person'], {'through': '"""Membership"""', 'related_name': '"""primary"""'}), "(Person, through='Membership', related_name='primary')\n", (31857, 31911), False, 'from django.db import connection, connections, models\n'), ((31936, 32014), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Person'], {'through': '"""Membership"""', 'related_name': '"""secondary"""'}), "(Person, through='Membership', related_name='secondary')\n", (31958, 32014), False, 'from django.db import connection, connections, models\n'), ((32077, 32118), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person', 'models.CASCADE'], {}), '(Person, models.CASCADE)\n', (32094, 32118), False, 'from django.db import connection, connections, models\n'), ((32139, 32179), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Group', 'models.CASCADE'], {}), '(Group, models.CASCADE)\n', (32156, 32179), False, 'from django.db import connection, connections, models\n'), ((32687, 32794), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Country'], {'through': '"""ShippingMethodPrice"""', 'through_fields': "('method', 'to_country')"}), "(Country, through='ShippingMethodPrice',\n through_fields=('method', 'to_country'))\n", (32709, 32794), False, 'from django.db import connection, connections, models\n'), ((32867, 32994), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Country'], {'through': '"""ShippingMethodPrice"""', 'through_fields': "('method', 'from_country')", 'related_name': '"""+"""'}), "(Country, through='ShippingMethodPrice',\n through_fields=('method', 'from_country'), related_name='+')\n", (32889, 32994), False, 'from django.db import connection, connections, models\n'), ((33125, 33174), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ShippingMethod', 'models.CASCADE'], {}), '(ShippingMethod, models.CASCADE)\n', (33142, 33174), False, 'from django.db import connection, connections, models\n'), ((33200, 33242), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Country', 'models.CASCADE'], {}), '(Country, models.CASCADE)\n', (33217, 33242), False, 'from django.db import connection, connections, models\n'), ((33270, 33312), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['Country', 'models.CASCADE'], {}), '(Country, models.CASCADE)\n', (33287, 33312), False, 'from django.db import connection, connections, models\n'), ((33814, 33865), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Bar"""'], {'db_table': '"""myapp_bar"""'}), "('Bar', db_table='myapp_bar')\n", (33836, 33865), False, 'from django.db import connection, connections, models\n'), ((34485, 34530), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Foo'], {'db_table': '"""clash"""'}), "(Foo, db_table='clash')\n", (34507, 34530), False, 'from django.db import connection, connections, models\n'), ((34584, 34629), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Foo'], {'db_table': '"""clash"""'}), "(Foo, db_table='clash')\n", (34606, 34629), False, 'from django.db import connection, connections, models\n'), ((35476, 35503), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Foo'], {}), '(Foo)\n', (35498, 35503), False, 'from django.db import connection, connections, models\n'), ((36031, 36072), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['A1'], {'db_table': '"""d1"""'}), "(A1, db_table='d1')\n", (36053, 36072), False, 'from django.db import connection, connections, models\n'), ((36336, 36386), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['A2'], {'through': '"""Intermediate"""'}), "(A2, through='Intermediate')\n", (36358, 36386), False, 'from django.db import connection, connections, models\n'), ((36504, 36560), 'django.db.models.ForeignKey', 'models.ForeignKey', (['A2', 'models.CASCADE'], {'db_column': '"""a1_id"""'}), "(A2, models.CASCADE, db_column='a1_id')\n", (36521, 36560), False, 'from django.db import connection, connections, models\n'), ((36578, 36634), 'django.db.models.ForeignKey', 'models.ForeignKey', (['C2', 'models.CASCADE'], {'db_column': '"""c1_id"""'}), "(C2, models.CASCADE, db_column='c1_id')\n", (36595, 36634), False, 'from django.db import connection, connections, models\n'), ((36964, 37002), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""A"""', 'models.CASCADE'], {}), "('A', models.CASCADE)\n", (36981, 37002), False, 'from django.db import connection, connections, models\n'), ((37019, 37057), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""C"""', 'models.CASCADE'], {}), "('C', models.CASCADE)\n", (37036, 37057), False, 'from django.db import connection, connections, models\n'), ((37200, 37242), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['A'], {'through': 'Through'}), '(A, through=Through)\n', (37222, 37242), False, 'from django.db import connection, connections, models\n'), ((37267, 37342), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['A'], {'through': 'ThroughProxy', 'related_name': '"""proxied_m2m"""'}), "(A, through=ThroughProxy, related_name='proxied_m2m')\n", (37289, 37342), False, 'from django.db import connection, connections, models\n'), ((37554, 37597), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Author"""', 'models.CASCADE'], {}), "('Author', models.CASCADE)\n", (37571, 37597), False, 'from django.db import connection, connections, models\n'), ((38329, 38357), 'django.core.checks.model_checks._check_lazy_references', '_check_lazy_references', (['apps'], {}), '(apps)\n', (38351, 38357), False, 'from django.core.checks.model_checks import _check_lazy_references\n'), ((40571, 40592), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (40590, 40592), 
False, 'from django.db import connection, connections, models\n'), ((41326, 41347), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (41345, 41347), False, 'from django.db import connection, connections, models\n'), ((1224, 1303), 'django.core.checks.Error', 'Error', (['"""\'index_together\' must be a list or tuple."""'], {'obj': 'Model', 'id': '"""models.E008"""'}), '("\'index_together\' must be a list or tuple.", obj=Model, id=\'models.E008\')\n', (1229, 1303), False, 'from django.core.checks import Error, Warning\n'), ((1569, 1648), 'django.core.checks.Error', 'Error', (['"""\'index_together\' must be a list or tuple."""'], {'obj': 'Model', 'id': '"""models.E008"""'}), '("\'index_together\' must be a list or tuple.", obj=Model, id=\'models.E008\')\n', (1574, 1648), False, 'from django.core.checks import Error, Warning\n'), ((1938, 2034), 'django.core.checks.Error', 'Error', (['"""All \'index_together\' elements must be lists or tuples."""'], {'obj': 'Model', 'id': '"""models.E009"""'}), '("All \'index_together\' elements must be lists or tuples.", obj=Model,\n id=\'models.E009\')\n', (1943, 2034), False, 'from django.core.checks import Error, Warning\n'), ((2320, 2427), 'django.core.checks.Error', 'Error', (['"""\'index_together\' refers to the nonexistent field \'missing_field\'."""'], {'obj': 'Model', 'id': '"""models.E012"""'}), '("\'index_together\' refers to the nonexistent field \'missing_field\'.",\n obj=Model, id=\'models.E012\')\n', (2325, 2427), False, 'from django.core.checks import Error, Warning\n'), ((2826, 3013), 'django.core.checks.Error', 'Error', (['"""\'index_together\' refers to field \'field1\' which is not local to model \'Bar\'."""'], {'hint': '"""This issue may be caused by multi-table inheritance."""', 'obj': 'Bar', 'id': '"""models.E016"""'}), '(\n "\'index_together\' refers to field \'field1\' which is not local to model \'Bar\'."\n , hint=\'This issue may be caused by multi-table inheritance.\', obj=Bar,\n id=\'models.E016\')\n', (2831, 3013), False, 'from django.core.checks import Error, Warning\n'), ((3360, 3519), 'django.core.checks.Error', 'Error', (['"""\'index_together\' refers to a ManyToManyField \'m2m\', but ManyToManyFields are not permitted in \'index_together\'."""'], {'obj': 'Model', 'id': '"""models.E013"""'}), '(\n "\'index_together\' refers to a ManyToManyField \'m2m\', but ManyToManyFields are not permitted in \'index_together\'."\n , obj=Model, id=\'models.E013\')\n', (3365, 3519), False, 'from django.core.checks import Error, Warning\n'), ((4365, 4450), 'django.core.checks.Error', 'Error', (['"""\'unique_together\' must be a list or tuple."""'], {'obj': 'Model', 'id': '"""models.E010"""'}), '("\'unique_together\' must be a list or tuple.", obj=Model, id=\'models.E010\'\n )\n', (4370, 4450), False, 'from django.core.checks import Error, Warning\n'), ((4817, 4914), 'django.core.checks.Error', 'Error', (['"""All \'unique_together\' elements must be lists or tuples."""'], {'obj': 'Model', 'id': '"""models.E011"""'}), '("All \'unique_together\' elements must be lists or tuples.", obj=Model,\n id=\'models.E011\')\n', (4822, 4914), False, 'from django.core.checks import Error, Warning\n'), ((5177, 5262), 'django.core.checks.Error', 'Error', (['"""\'unique_together\' must be a list or tuple."""'], {'obj': 'Model', 'id': '"""models.E010"""'}), '("\'unique_together\' must be a list or tuple.", obj=Model, id=\'models.E010\'\n )\n', (5182, 5262), False, 'from django.core.checks import Error, Warning\n'), ((5871, 5979), 
'django.core.checks.Error', 'Error', (['"""\'unique_together\' refers to the nonexistent field \'missing_field\'."""'], {'obj': 'Model', 'id': '"""models.E012"""'}), '("\'unique_together\' refers to the nonexistent field \'missing_field\'.",\n obj=Model, id=\'models.E012\')\n', (5876, 5979), False, 'from django.core.checks import Error, Warning\n'), ((6296, 6457), 'django.core.checks.Error', 'Error', (['"""\'unique_together\' refers to a ManyToManyField \'m2m\', but ManyToManyFields are not permitted in \'unique_together\'."""'], {'obj': 'Model', 'id': '"""models.E013"""'}), '(\n "\'unique_together\' refers to a ManyToManyField \'m2m\', but ManyToManyFields are not permitted in \'unique_together\'."\n , obj=Model, id=\'models.E013\')\n', (6301, 6457), False, 'from django.core.checks import Error, Warning\n'), ((7287, 7388), 'django.core.checks.Error', 'Error', (['"""\'indexes\' refers to the nonexistent field \'missing_field\'."""'], {'obj': 'Model', 'id': '"""models.E012"""'}), '("\'indexes\' refers to the nonexistent field \'missing_field\'.", obj=\n Model, id=\'models.E012\')\n', (7292, 7388), False, 'from django.core.checks import Error, Warning\n'), ((7736, 7881), 'django.core.checks.Error', 'Error', (['"""\'indexes\' refers to a ManyToManyField \'m2m\', but ManyToManyFields are not permitted in \'indexes\'."""'], {'obj': 'Model', 'id': '"""models.E013"""'}), '(\n "\'indexes\' refers to a ManyToManyField \'m2m\', but ManyToManyFields are not permitted in \'indexes\'."\n , obj=Model, id=\'models.E013\')\n', (7741, 7881), False, 'from django.core.checks import Error, Warning\n'), ((8320, 8494), 'django.core.checks.Error', 'Error', (['"""\'indexes\' refers to field \'field1\' which is not local to model \'Bar\'."""'], {'hint': '"""This issue may be caused by multi-table inheritance."""', 'obj': 'Bar', 'id': '"""models.E016"""'}), '("\'indexes\' refers to field \'field1\' which is not local to model \'Bar\'.",\n hint=\'This issue may be caused by multi-table inheritance.\', obj=Bar,\n id=\'models.E016\')\n', (8325, 8494), False, 'from django.core.checks import Error, Warning\n'), ((9393, 9524), 'django.core.checks.Error', 'Error', (['("The index name \'%sindex_name\' cannot start with an underscore or a number." %\n prefix)'], {'obj': 'Model', 'id': '"""models.E033"""'}), '(\n "The index name \'%sindex_name\' cannot start with an underscore or a number."\n % prefix, obj=Model, id=\'models.E033\')\n', (9398, 9524), False, 'from django.core.checks import Error, Warning\n'), ((9888, 9999), 'django.core.checks.Error', 'Error', (['("The index name \'%s\' cannot be longer than 30 characters." % index_name)'], {'obj': 'Model', 'id': '"""models.E034"""'}), '("The index name \'%s\' cannot be longer than 30 characters." 
%\n index_name, obj=Model, id=\'models.E034\')\n', (9893, 9999), False, 'from django.core.checks import Error, Warning\n'), ((12723, 12838), 'django.db.models.ForeignKey', 'models.ForeignKey', (['VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz', 'models.CASCADE'], {}), '(\n VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,\n models.CASCADE)\n', (12740, 12838), False, 'from django.db import connection, connections, models\n'), ((12922, 13064), 'django.db.models.ForeignKey', 'models.ForeignKey', (['VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz', 'models.CASCADE'], {'db_column': 'long_field_name'}), '(\n VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,\n models.CASCADE, db_column=long_field_name)\n', (12939, 13064), False, 'from django.db import connection, connections, models\n'), ((13586, 13934), 'django.core.checks.Error', 'Error', (['(\'Autogenerated column name too long for M2M field "%s". Maximum length is "%s" for database "%s".\'\n % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias)\n )'], {'hint': '"""Use \'through\' to create a separate model for M2M and then set column_name using \'db_column\'."""', 'obj': 'ModelWithLongField', 'id': '"""models.E019"""'}), '(\n \'Autogenerated column name too long for M2M field "%s". Maximum length is "%s" for database "%s".\'\n % (m2m_long_name, self.max_column_name_length, self.\n column_limit_db_alias), hint=\n "Use \'through\' to create a separate model for M2M and then set column_name using \'db_column\'."\n , obj=ModelWithLongField, id=\'models.E019\')\n', (13591, 13934), False, 'from django.core.checks import Error, Warning\n'), ((15587, 15618), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)'}), '(max_length=11)\n', (15603, 15618), False, 'from django.db import connection, connections, models\n'), ((15684, 15733), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)', 'db_column': '"""vlmn"""'}), "(max_length=11, db_column='vlmn')\n", (15700, 15733), False, 'from django.db import connection, connections, models\n'), ((15859, 16160), 'django.core.checks.Error', 'Error', (['(\'Autogenerated column name too long for field "%s". Maximum length is "%s" for database "%s".\'\n % (long_field_name, self.max_column_name_length, self.\n column_limit_db_alias))'], {'hint': '"""Set the column name manually using \'db_column\'."""', 'obj': 'ModelWithLongField', 'id': '"""models.E018"""'}), '(\n \'Autogenerated column name too long for field "%s". 
Maximum length is "%s" for database "%s".\'\n % (long_field_name, self.max_column_name_length, self.\n column_limit_db_alias), hint=\n "Set the column name manually using \'db_column\'.", obj=\n ModelWithLongField, id=\'models.E018\')\n', (15864, 16160), False, 'from django.core.checks import Error, Warning\n'), ((17188, 17339), 'django.core.checks.Error', 'Error', (['"""Field \'bar\' has column name \'foo\' that is used by another field."""'], {'hint': '"""Specify a \'db_column\' for the field."""', 'obj': 'Model', 'id': '"""models.E007"""'}), '("Field \'bar\' has column name \'foo\' that is used by another field.",\n hint="Specify a \'db_column\' for the field.", obj=Model, id=\'models.E007\')\n', (17193, 17339), False, 'from django.core.checks import Error, Warning\n'), ((18464, 18651), 'django.core.checks.Error', 'Error', (['"""The field \'id\' from parent model \'invalid_models_tests.mother\' clashes with the field \'id\' from parent model \'invalid_models_tests.father\'."""'], {'obj': 'Child', 'id': '"""models.E005"""'}), '(\n "The field \'id\' from parent model \'invalid_models_tests.mother\' clashes with the field \'id\' from parent model \'invalid_models_tests.father\'."\n , obj=Child, id=\'models.E005\')\n', (18469, 18651), False, 'from django.core.checks import Error, Warning\n'), ((18756, 18949), 'django.core.checks.Error', 'Error', (['"""The field \'clash\' from parent model \'invalid_models_tests.mother\' clashes with the field \'clash\' from parent model \'invalid_models_tests.father\'."""'], {'obj': 'Child', 'id': '"""models.E005"""'}), '(\n "The field \'clash\' from parent model \'invalid_models_tests.mother\' clashes with the field \'clash\' from parent model \'invalid_models_tests.father\'."\n , obj=Child, id=\'models.E005\')\n', (18761, 18949), False, 'from django.core.checks import Error, Warning\n'), ((21179, 21307), 'django.core.checks.Error', 'Error', (['"""\'id\' can only be used as a field name if the field also sets \'primary_key=True\'."""'], {'obj': 'Model', 'id': '"""models.E004"""'}), '(\n "\'id\' can only be used as a field name if the field also sets \'primary_key=True\'."\n , obj=Model, id=\'models.E004\')\n', (21184, 21307), False, 'from django.core.checks import Error, Warning\n'), ((21592, 21721), 'django.core.checks.Error', 'Error', (['"""\'ordering\' must be a tuple or list (even if you want to order by only one field)."""'], {'obj': 'Model', 'id': '"""models.E014"""'}), '(\n "\'ordering\' must be a tuple or list (even if you want to order by only one field)."\n , obj=Model, id=\'models.E014\')\n', (21597, 21721), False, 'from django.core.checks import Error, Warning\n'), ((22809, 22915), 'django.core.checks.Error', 'Error', (['"""\'ordering\' and \'order_with_respect_to\' cannot be used together."""'], {'obj': 'Answer', 'id': '"""models.E021"""'}), '("\'ordering\' and \'order_with_respect_to\' cannot be used together.",\n obj=Answer, id=\'models.E021\')\n', (22814, 22915), False, 'from django.core.checks import Error, Warning\n'), ((23295, 23423), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'relation\'."""'], {'obj': 'Model', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'relation\'."\n , obj=Model, id=\'models.E015\')\n', (23300, 23423), False, 'from django.core.checks import Error, Warning\n'), ((23724, 23857), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or 
lookup \'missing_field\'."""'], {'obj': 'Model', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'missing_field\'."\n , obj=Model, id=\'models.E015\')\n', (23729, 23857), False, 'from django.core.checks import Error, Warning\n'), ((24228, 24367), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'missing_fk_field_id\'."""'], {'obj': 'Model', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'missing_fk_field_id\'."\n , obj=Model, id=\'models.E015\')\n', (24233, 24367), False, 'from django.core.checks import Error, Warning\n'), ((24723, 24862), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'missing_related__id\'."""'], {'obj': 'Model', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'missing_related__id\'."\n , obj=Model, id=\'models.E015\')\n', (24728, 24862), False, 'from django.core.checks import Error, Warning\n'), ((25302, 25443), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'parent__missing_field\'."""'], {'obj': 'Child', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'parent__missing_field\'."\n , obj=Child, id=\'models.E015\')\n', (25307, 25443), False, 'from django.core.checks import Error, Warning\n'), ((25799, 25940), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'parent__missing_field\'."""'], {'obj': 'Child', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'parent__missing_field\'."\n , obj=Child, id=\'models.E015\')\n', (25804, 25940), False, 'from django.core.checks import Error, Warning\n'), ((26492, 26643), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'parent1__parent2__missing_field\'."""'], {'obj': 'Child', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'parent1__parent2__missing_field\'."\n , obj=Child, id=\'models.E015\')\n', (26497, 26643), False, 'from django.core.checks import Error, Warning\n'), ((27175, 27317), 'django.core.checks.Error', 'Error', (['"""\'ordering\' refers to the nonexistent field, related field, or lookup \'parent__field1__field2\'."""'], {'obj': 'Child', 'id': '"""models.E015"""'}), '(\n "\'ordering\' refers to the nonexistent field, related field, or lookup \'parent__field1__field2\'."\n , obj=Child, id=\'models.E015\')\n', (27180, 27317), False, 'from django.core.checks import Error, Warning\n'), ((28786, 28942), 'django.core.checks.Error', 'Error', (['"""The model name \'_Model\' cannot start or end with an underscore as it collides with the query lookup syntax."""'], {'obj': '_Model', 'id': '"""models.E023"""'}), '(\n "The model name \'_Model\' cannot start or end with an underscore as it collides with the query lookup syntax."\n , obj=_Model, id=\'models.E023\')\n', (28791, 28942), False, 'from django.core.checks import Error, Warning\n'), ((29184, 29340), 'django.core.checks.Error', 'Error', (['"""The model name \'Model_\' cannot start or end with an underscore as it collides with the query lookup syntax."""'], {'obj': 'Model_', 'id': '"""models.E023"""'}), 
'(\n "The model name \'Model_\' cannot start or end with an underscore as it collides with the query lookup syntax."\n , obj=Model_, id=\'models.E023\')\n', (29189, 29340), False, 'from django.core.checks import Error, Warning\n'), ((29597, 29758), 'django.core.checks.Error', 'Error', (['"""The model name \'Test__Model\' cannot contain double underscores as it collides with the query lookup syntax."""'], {'obj': 'Test__Model', 'id': '"""models.E024"""'}), '(\n "The model name \'Test__Model\' cannot contain double underscores as it collides with the query lookup syntax."\n , obj=Test__Model, id=\'models.E024\')\n', (29602, 29758), False, 'from django.core.checks import Error, Warning\n'), ((30127, 30229), 'django.core.checks.Error', 'Error', (['"""The property \'fk_id\' clashes with a related field accessor."""'], {'obj': 'Model', 'id': '"""models.E025"""'}), '("The property \'fk_id\' clashes with a related field accessor.", obj=\n Model, id=\'models.E025\')\n', (30132, 30229), False, 'from django.core.checks import Error, Warning\n'), ((30541, 30649), 'django.core.checks.Error', 'Error', (['"""The model cannot have more than one field with \'primary_key=True\'."""'], {'obj': 'Model', 'id': '"""models.E026"""'}), '("The model cannot have more than one field with \'primary_key=True\'.",\n obj=Model, id=\'models.E026\')\n', (30546, 30649), False, 'from django.core.checks import Error, Warning\n'), ((31008, 31111), 'django.core.checks.Error', 'Error', (['"""\'TEST_SWAPPED_MODEL_BAD_VALUE\' is not of the form \'app_label.app_name\'."""'], {'id': '"""models.E001"""'}), '("\'TEST_SWAPPED_MODEL_BAD_VALUE\' is not of the form \'app_label.app_name\'."\n , id=\'models.E001\')\n', (31013, 31111), False, 'from django.core.checks import Error, Warning\n'), ((31455, 31600), 'django.core.checks.Error', 'Error', (['"""\'TEST_SWAPPED_MODEL_BAD_MODEL\' references \'not_an_app.Target\', which has not been installed, or is abstract."""'], {'id': '"""models.E002"""'}), '(\n "\'TEST_SWAPPED_MODEL_BAD_MODEL\' references \'not_an_app.Target\', which has not been installed, or is abstract."\n , id=\'models.E002\')\n', (31460, 31600), False, 'from django.core.checks import Error, Warning\n'), ((32235, 32399), 'django.core.checks.Error', 'Error', (['"""The model has two identical many-to-many relations through the intermediate model \'invalid_models_tests.Membership\'."""'], {'obj': 'Group', 'id': '"""models.E003"""'}), '(\n "The model has two identical many-to-many relations through the intermediate model \'invalid_models_tests.Membership\'."\n , obj=Group, id=\'models.E003\')\n', (32240, 32399), False, 'from django.core.checks import Error, Warning\n'), ((33677, 33720), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Place', 'models.CASCADE'], {}), '(Place, models.CASCADE)\n', (33697, 33720), False, 'from django.db import connection, connections, models\n'), ((38373, 38554), 'django.core.checks.Error', 'Error', (['("%r contains a lazy reference to auth.imaginarymodel, but app \'auth\' doesn\'t provide model \'imaginarymodel\'."\n % dummy_function)'], {'obj': 'dummy_function', 'id': '"""models.E022"""'}), '(\n "%r contains a lazy reference to auth.imaginarymodel, but app \'auth\' doesn\'t provide model \'imaginarymodel\'."\n % dummy_function, obj=dummy_function, id=\'models.E022\')\n', (38378, 38554), False, 'from django.core.checks import Error, Warning\n'), ((38640, 38814), 'django.core.checks.Error', 'Error', (['("%r contains a lazy reference to fanciful_app.imaginarymodel, but app \'fanciful_app\' 
isn\'t installed."\n % dummy_function)'], {'obj': 'dummy_function', 'id': '"""models.E022"""'}), '(\n "%r contains a lazy reference to fanciful_app.imaginarymodel, but app \'fanciful_app\' isn\'t installed."\n % dummy_function, obj=dummy_function, id=\'models.E022\')\n', (38645, 38814), False, 'from django.core.checks import Error, Warning\n'), ((38900, 39158), 'django.core.checks.Error', 'Error', (['"""An instance of class \'DummyClass\' was connected to the \'post_init\' signal with a lazy reference to the sender \'missing-app.model\', but app \'missing-app\' isn\'t installed."""'], {'hint': 'None', 'obj': '"""invalid_models_tests.test_models"""', 'id': '"""signals.E001"""'}), '(\n "An instance of class \'DummyClass\' was connected to the \'post_init\' signal with a lazy reference to the sender \'missing-app.model\', but app \'missing-app\' isn\'t installed."\n , hint=None, obj=\'invalid_models_tests.test_models\', id=\'signals.E001\')\n', (38905, 39158), False, 'from django.core.checks import Error, Warning\n'), ((39279, 39542), 'django.core.checks.Error', 'Error', (['"""Bound method \'DummyClass.dummy_method\' was connected to the \'post_init\' signal with a lazy reference to the sender \'missing-app.model\', but app \'missing-app\' isn\'t installed."""'], {'hint': 'None', 'obj': '"""invalid_models_tests.test_models"""', 'id': '"""signals.E001"""'}), '(\n "Bound method \'DummyClass.dummy_method\' was connected to the \'post_init\' signal with a lazy reference to the sender \'missing-app.model\', but app \'missing-app\' isn\'t installed."\n , hint=None, obj=\'invalid_models_tests.test_models\', id=\'signals.E001\')\n', (39284, 39542), False, 'from django.core.checks import Error, Warning\n'), ((39663, 39905), 'django.core.checks.Error', 'Error', (['"""The field invalid_models_tests.DummyModel.author was declared with a lazy reference to \'invalid_models_tests.author\', but app \'invalid_models_tests\' isn\'t installed."""'], {'hint': 'None', 'obj': 'DummyModel.author.field', 'id': '"""fields.E307"""'}), '(\n "The field invalid_models_tests.DummyModel.author was declared with a lazy reference to \'invalid_models_tests.author\', but app \'invalid_models_tests\' isn\'t installed."\n , hint=None, obj=DummyModel.author.field, id=\'fields.E307\')\n', (39668, 39905), False, 'from django.core.checks import Error, Warning\n'), ((40026, 40280), 'django.core.checks.Error', 'Error', (['"""The function \'dummy_function\' was connected to the \'post_init\' signal with a lazy reference to the sender \'missing-app.model\', but app \'missing-app\' isn\'t installed."""'], {'hint': 'None', 'obj': '"""invalid_models_tests.test_models"""', 'id': '"""signals.E001"""'}), '(\n "The function \'dummy_function\' was connected to the \'post_init\' signal with a lazy reference to the sender \'missing-app.model\', but app \'missing-app\' isn\'t installed."\n , hint=None, obj=\'invalid_models_tests.test_models\', id=\'signals.E001\')\n', (40031, 40280), False, 'from django.core.checks import Error, Warning\n'), ((7179, 7230), 'django.db.models.Index', 'models.Index', ([], {'fields': "['missing_field']", 'name': '"""name"""'}), "(fields=['missing_field'], name='name')\n", (7191, 7230), False, 'from django.db import connection, connections, models\n'), ((7638, 7679), 'django.db.models.Index', 'models.Index', ([], {'fields': "['m2m']", 'name': '"""name"""'}), "(fields=['m2m'], name='name')\n", (7650, 7679), False, 'from django.db import connection, connections, models\n'), ((8211, 8265), 'django.db.models.Index', 
'models.Index', ([], {'fields': "['field2', 'field1']", 'name': '"""name"""'}), "(fields=['field2', 'field1'], name='name')\n", (8223, 8265), False, 'from django.db import connection, connections, models\n'), ((8951, 9012), 'django.db.models.Index', 'models.Index', ([], {'fields': "['foo_1_id', 'foo_2']", 'name': '"""index_name"""'}), "(fields=['foo_1_id', 'foo_2'], name='index_name')\n", (8963, 9012), False, 'from django.db import connection, connections, models\n'), ((9202, 9249), 'django.db.models.Index', 'models.Index', ([], {'fields': "['id']", 'name': '"""_index_name"""'}), "(fields=['id'], name='_index_name')\n", (9214, 9249), False, 'from django.db import connection, connections, models\n'), ((9271, 9318), 'django.db.models.Index', 'models.Index', ([], {'fields': "['id']", 'name': '"""5index_name"""'}), "(fields=['id'], name='5index_name')\n", (9283, 9318), False, 'from django.db import connection, connections, models\n'), ((9787, 9831), 'django.db.models.Index', 'models.Index', ([], {'fields': "['id']", 'name': 'index_name'}), "(fields=['id'], name=index_name)\n", (9799, 9831), False, 'from django.db import connection, connections, models\n'), ((40678, 40699), 'django.db.models.Q', 'models.Q', ([], {'age__gte': '(18)'}), '(age__gte=18)\n', (40686, 40699), False, 'from django.db import connection, connections, models\n'), ((41509, 41530), 'django.db.models.Q', 'models.Q', ([], {'age__gte': '(18)'}), '(age__gte=18)\n', (41517, 41530), False, 'from django.db import connection, connections, models\n')]
|
import socket
class client:
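    # Simple TCP chat client: keeps one socket plus the user's alias and
    # prefixes every outgoing message with that alias.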
def __init__(self):
        # instantiate the helper class first so SocketSetup() can be used here and in close()
        self.setups = setups()
        self.clientSocket = self.setups.SocketSetup()
self.alias = ""
# Function for connecting to a server
def connect(self,alias,host,port):
try:
            self.clientSocket.connect((host, port))
self.alias = alias
return True
except:
return False
# Function for getting data from the server
def get(self):
return str(self.clientSocket.recv(1024),"ASCII")
# Function for sending data to the server, aka relaying the data to all other users
def send(self,data):
try:
self.clientSocket.send(bytes(self.alias + ": " + data,"ASCII"))
return 1
except:
return 0
# Function for closing the connection to the server
def close(self):
try:
self.clientSocket.close()
self.clientSocket = self.setups.SocketSetup()
return 1
except:
return 0
class setups:
# Function for setting up a socket
def SocketSetup(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock
|
[
"socket.socket"
] |
[((1115, 1164), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1128, 1164), False, 'import socket\n')]
|
"""Support for Vallox ventilation units."""
from __future__ import annotations
from dataclasses import dataclass, field
import ipaddress
import logging
from typing import Any, NamedTuple
from uuid import UUID
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.exceptions import ValloxApiException
from vallox_websocket_api.vallox import get_uuid as calculate_uuid
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, Platform
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType, StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
DEFAULT_FAN_SPEED_AWAY,
DEFAULT_FAN_SPEED_BOOST,
DEFAULT_FAN_SPEED_HOME,
DEFAULT_NAME,
DOMAIN,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
STATE_SCAN_INTERVAL,
STR_TO_VALLOX_PROFILE_SETTABLE,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
PLATFORMS: list[str] = [
Platform.SENSOR,
Platform.FAN,
Platform.BINARY_SENSOR,
]
ATTR_PROFILE = "profile"
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE = vol.Schema(
{
vol.Required(ATTR_PROFILE): vol.All(
cv.string, vol.In(STR_TO_VALLOX_PROFILE_SETTABLE)
)
}
)
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
{
vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
)
}
)
class ServiceMethodDetails(NamedTuple):
"""Details for SERVICE_TO_METHOD mapping."""
method: str
schema: vol.Schema
SERVICE_SET_PROFILE = "set_profile"
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
SERVICE_TO_METHOD = {
SERVICE_SET_PROFILE: ServiceMethodDetails(
method="async_set_profile",
schema=SERVICE_SCHEMA_SET_PROFILE,
),
SERVICE_SET_PROFILE_FAN_SPEED_HOME: ServiceMethodDetails(
method="async_set_profile_fan_speed_home",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_AWAY: ServiceMethodDetails(
method="async_set_profile_fan_speed_away",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_BOOST: ServiceMethodDetails(
method="async_set_profile_fan_speed_boost",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
}
@dataclass
class ValloxState:
"""Describes the current state of the unit."""
metric_cache: dict[str, Any] = field(default_factory=dict)
profile: VALLOX_PROFILE = VALLOX_PROFILE.NONE
def get_metric(self, metric_key: str) -> StateType:
"""Return cached state value."""
if (value := self.metric_cache.get(metric_key)) is None:
return None
if not isinstance(value, (str, int, float)):
return None
return value
def get_uuid(self) -> UUID | None:
"""Return cached UUID value."""
uuid = calculate_uuid(self.metric_cache)
if not isinstance(uuid, UUID):
raise ValueError
return uuid
class ValloxDataUpdateCoordinator(DataUpdateCoordinator):
"""The DataUpdateCoordinator for Vallox."""
data: ValloxState
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the integration from configuration.yaml (DEPRECATED)."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the client and boot the platforms."""
host = entry.data[CONF_HOST]
name = entry.data[CONF_NAME]
client = Vallox(host)
async def async_update_data() -> ValloxState:
"""Fetch state update."""
_LOGGER.debug("Updating Vallox state cache")
try:
metric_cache = await client.fetch_metrics()
profile = await client.get_profile()
except (OSError, ValloxApiException) as err:
raise UpdateFailed("Error during state cache update") from err
return ValloxState(metric_cache, profile)
coordinator = ValloxDataUpdateCoordinator(
hass,
_LOGGER,
name=f"{name} DataUpdateCoordinator",
update_interval=STATE_SCAN_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_config_entry_first_refresh()
service_handler = ValloxServiceHandler(client, coordinator)
for vallox_service, service_details in SERVICE_TO_METHOD.items():
hass.services.async_register(
DOMAIN,
vallox_service,
service_handler.async_handle,
schema=service_details.schema,
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"client": client,
"coordinator": coordinator,
"name": name,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
if hass.data[DOMAIN]:
return unload_ok
for service in SERVICE_TO_METHOD:
hass.services.async_remove(DOMAIN, service)
return unload_ok
class ValloxServiceHandler:
"""Services implementation."""
def __init__(
self, client: Vallox, coordinator: DataUpdateCoordinator[ValloxState]
) -> None:
"""Initialize the proxy."""
self._client = client
self._coordinator = coordinator
async def async_set_profile(self, profile: str = "Home") -> bool:
"""Set the ventilation profile."""
_LOGGER.debug("Setting ventilation profile to: %s", profile)
_LOGGER.warning(
"Attention: The service 'vallox.set_profile' is superseded by the "
"'fan.set_preset_mode' service. It will be removed in the future, please migrate to "
"'fan.set_preset_mode' to prevent breakage"
)
try:
await self._client.set_profile(STR_TO_VALLOX_PROFILE_SETTABLE[profile])
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting ventilation profile: %s", err)
return False
async def async_set_profile_fan_speed_home(
self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Home profile: %s", err)
return False
async def async_set_profile_fan_speed_away(
self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
) -> bool:
"""Set the fan speed in percent for the Away profile."""
_LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Away profile: %s", err)
return False
async def async_set_profile_fan_speed_boost(
self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
) -> bool:
"""Set the fan speed in percent for the Boost profile."""
_LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Boost profile: %s", err)
return False
async def async_handle(self, call: ServiceCall) -> None:
"""Dispatch a service call."""
service_details = SERVICE_TO_METHOD.get(call.service)
params = call.data.copy()
if service_details is None:
return
if not hasattr(self, service_details.method):
_LOGGER.error("Service not implemented: %s", service_details.method)
return
result = await getattr(self, service_details.method)(**params)
# This state change affects other entities like sensors. Force an immediate update that can
# be observed by all parties involved.
if result:
await self._coordinator.async_request_refresh()
|
[
"homeassistant.helpers.config_validation.deprecated",
"homeassistant.helpers.update_coordinator.UpdateFailed",
"voluptuous.Optional",
"vallox_websocket_api.Vallox",
"voluptuous.All",
"voluptuous.Required",
"voluptuous.Clamp",
"dataclasses.field",
"vallox_websocket_api.vallox.get_uuid",
"voluptuous.In",
"logging.getLogger",
"voluptuous.Coerce"
] |
[((1162, 1189), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1179, 1189), False, 'import logging\n'), ((3251, 3278), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (3256, 3278), False, 'from dataclasses import dataclass, field\n'), ((4578, 4590), 'vallox_websocket_api.Vallox', 'Vallox', (['host'], {}), '(host)\n', (4584, 4590), False, 'from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox\n'), ((1240, 1261), 'homeassistant.helpers.config_validation.deprecated', 'cv.deprecated', (['DOMAIN'], {}), '(DOMAIN)\n', (1253, 1261), True, 'from homeassistant.helpers import config_validation as cv\n'), ((1781, 1807), 'voluptuous.Required', 'vol.Required', (['ATTR_PROFILE'], {}), '(ATTR_PROFILE)\n', (1793, 1807), True, 'import voluptuous as vol\n'), ((1964, 2000), 'voluptuous.Required', 'vol.Required', (['ATTR_PROFILE_FAN_SPEED'], {}), '(ATTR_PROFILE_FAN_SPEED)\n', (1976, 2000), True, 'import voluptuous as vol\n'), ((3712, 3745), 'vallox_websocket_api.vallox.get_uuid', 'calculate_uuid', (['self.metric_cache'], {}), '(self.metric_cache)\n', (3726, 3745), True, 'from vallox_websocket_api.vallox import get_uuid as calculate_uuid\n'), ((1841, 1879), 'voluptuous.In', 'vol.In', (['STR_TO_VALLOX_PROFILE_SETTABLE'], {}), '(STR_TO_VALLOX_PROFILE_SETTABLE)\n', (1847, 1879), True, 'import voluptuous as vol\n'), ((2023, 2038), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (2033, 2038), True, 'import voluptuous as vol\n'), ((2040, 2065), 'voluptuous.Clamp', 'vol.Clamp', ([], {'min': '(0)', 'max': '(100)'}), '(min=0, max=100)\n', (2049, 2065), True, 'import voluptuous as vol\n'), ((4920, 4967), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['"""Error during state cache update"""'], {}), "('Error during state cache update')\n", (4932, 4967), False, 'from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n'), ((1343, 1366), 'voluptuous.Required', 'vol.Required', (['CONF_HOST'], {}), '(CONF_HOST)\n', (1355, 1366), True, 'import voluptuous as vol\n'), ((1430, 1475), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (1442, 1475), True, 'import voluptuous as vol\n'), ((1368, 1408), 'voluptuous.All', 'vol.All', (['ipaddress.ip_address', 'cv.string'], {}), '(ipaddress.ip_address, cv.string)\n', (1375, 1408), True, 'import voluptuous as vol\n')]
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import digitalio
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
kbd = Keyboard(usb_hid.devices)
# define buttons. these can be any physical switches/buttons, but the values
# here work out-of-the-box with a CircuitPlayground Express' A and B buttons.
swap = digitalio.DigitalInOut(board.D4)
swap.direction = digitalio.Direction.INPUT
swap.pull = digitalio.Pull.DOWN
search = digitalio.DigitalInOut(board.D5)
search.direction = digitalio.Direction.INPUT
search.pull = digitalio.Pull.DOWN
while True:
# press ALT+TAB to swap windows
if swap.value:
kbd.send(Keycode.ALT, Keycode.TAB)
# press CTRL+K, which in a web browser will open the search dialog
elif search.value:
kbd.send(Keycode.CONTROL, Keycode.K)
time.sleep(0.1)
|
[
"digitalio.DigitalInOut",
"adafruit_hid.keyboard.Keyboard",
"time.sleep"
] |
[((243, 268), 'adafruit_hid.keyboard.Keyboard', 'Keyboard', (['usb_hid.devices'], {}), '(usb_hid.devices)\n', (251, 268), False, 'from adafruit_hid.keyboard import Keyboard\n'), ((432, 464), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.D4'], {}), '(board.D4)\n', (454, 464), False, 'import digitalio\n'), ((550, 582), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.D5'], {}), '(board.D5)\n', (572, 582), False, 'import digitalio\n'), ((918, 933), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (928, 933), False, 'import time\n')]
|
import socket
import re
import backoff
from . import basetest
from .runner import CfLocalRunnerWithPostgreSQL
# Constants
KAFKA_CLUSTER_IMAGE_NAME = "johnnypark/kafka-zookeeper"
KAFKA_CLUSTER_IMAGE_VERSION = "2.4.0"
KAFKA_CLUSTER_NAME = "kafka-cluster"
KAFKA_CONNECT_URL = "http://localhost:8083"
KAFKA_PG_CONNECTOR_NAME = "mx-databroker-PostgreSQL-source-connector"
KAFKA_PG_CONNECTOR_STATUS_API = "{}/connectors/{}/status".format(
KAFKA_CONNECT_URL,
KAFKA_PG_CONNECTOR_NAME,
)
KAFKA_BROKER_PORT = 9092
KAFKA_ZOOKEEPER_PORT = 2181
DATABROKER_TOPIC_FORMAT_VERSION = "1_0_0"
POSTGRES_DB_DOCKER_IMAGE = "debezium/postgres"
POSTGRES_DB_VERSION = "9.6-alpine"
MAX_RETRY_COUNT = 8
BACKOFF_TIME = 10
class CfLocalRunnerWithKafka(CfLocalRunnerWithPostgreSQL):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._database_postgres_image = POSTGRES_DB_DOCKER_IMAGE
self._database_postgres_version = POSTGRES_DB_VERSION
self._kafka_container_name = "{}-{}".format(
self._app_name, KAFKA_CLUSTER_NAME
)
def _get_environment(self, env_vars):
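        # Expose the local Kafka broker address to the Mendix runtime through the
        # MX_MyFirstModule_broker_url constant so the test app can reach the cluster.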
environment = super()._get_environment(env_vars)
environment.update(
{
"MX_MyFirstModule_broker_url": "{}:{}".format(
self.get_host(),
KAFKA_BROKER_PORT,
)
}
)
return environment
def _start_kafka_cluster(self):
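        # Run a single-node Kafka + ZooKeeper container next to the app container,
        # publishing the broker port and advertising the runner's host address.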
result = self._cmd(
(
"docker",
"run",
"--name",
self._kafka_container_name,
"-p",
"{}:{}".format(KAFKA_BROKER_PORT, KAFKA_BROKER_PORT),
"-e",
"ADVERTISED_HOST={}".format(self._host),
"-e",
"NUM_PARTITIONS={}".format(3),
"-d",
"{}:{}".format(
KAFKA_CLUSTER_IMAGE_NAME,
KAFKA_CLUSTER_IMAGE_VERSION,
),
)
)
if not result[1]:
raise RuntimeError(
"Cannot create {} container: {}".format(
KAFKA_CLUSTER_NAME,
result[0],
)
)
def stage(self, *args, **kwargs):
result = super().stage(*args, **kwargs)
self._start_kafka_cluster()
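        # Block until the broker port accepts TCP connections (exponential backoff, 30s cap).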
@backoff.on_predicate(backoff.expo, lambda x: x > 0, max_time=30)
def _await_kafka_cluster():
return socket.socket(
socket.AF_INET, socket.SOCK_STREAM
).connect_ex(("localhost", KAFKA_BROKER_PORT))
_await_kafka_cluster()
return result
def is_debezium_running(self):
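        # Poll the Kafka Connect REST API for the PostgreSQL source connector status.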
return self.run_on_container("curl " + KAFKA_PG_CONNECTOR_STATUS_API)
def is_azkarra_running(self):
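        # List the topics inside the Kafka container and verify that the three
        # databroker connect topics plus at least one public topic exist.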
topics = self.run_on_container(
"./opt/kafka_2.12-{}/bin/kafka-topics.sh --list --zookeeper localhost:{}".format(
KAFKA_CLUSTER_IMAGE_VERSION,
KAFKA_ZOOKEEPER_PORT,
),
target_container=self._kafka_container_name,
)
expect_public_topic_pattern = r".*?\.{}".format(
DATABROKER_TOPIC_FORMAT_VERSION
)
return (
len(
re.findall(
r"(mx-databroker-connect-(?:configs|offsets|status))",
topics,
)
)
== 3
and len(re.findall(expect_public_topic_pattern, topics)) > 0
)
class TestCaseDataBroker(basetest.BaseTestWithPostgreSQL):
def _init_cflocal_runner(self, *args, **kwargs):
return CfLocalRunnerWithKafka(*args, **kwargs)
def test_databroker_running(self):
# os.environ[
# "PACKAGE_URL"
# ] = "https://dghq119eo3niv.cloudfront.net/test-app/MyProducer902.mda"
self.stage_container(
package="https://dghq119eo3niv.cloudfront.net/test-app/MyProducer902.mda",
env_vars={
"DATABROKER_ENABLED": "true",
"FORCED_MXRUNTIME_URL": "https://dghq119eo3niv.cloudfront.net/",
},
)
self.start_container()
# check app is running
self.assert_app_running()
@backoff.on_exception(
backoff.constant,
Exception,
interval=BACKOFF_TIME,
max_tries=MAX_RETRY_COUNT,
)
def check_if_dbz_running():
return self._runner.is_debezium_running()
response = check_if_dbz_running()
assert str(response).find('"state":"RUNNING"') > 0
# check azkarra is running by verify expected topics have been created
assert self._runner.is_azkarra_running()
# check streaming service
output = self.get_recent_logs()
assert output is not None
assert (
str(output).find("State transition from REBALANCING to RUNNING")
>= 0
)
|
[
"socket.socket",
"re.findall",
"backoff.on_predicate",
"backoff.on_exception"
] |
[((2509, 2573), 'backoff.on_predicate', 'backoff.on_predicate', (['backoff.expo', '(lambda x: x > 0)'], {'max_time': '(30)'}), '(backoff.expo, lambda x: x > 0, max_time=30)\n', (2529, 2573), False, 'import backoff\n'), ((4465, 4568), 'backoff.on_exception', 'backoff.on_exception', (['backoff.constant', 'Exception'], {'interval': 'BACKOFF_TIME', 'max_tries': 'MAX_RETRY_COUNT'}), '(backoff.constant, Exception, interval=BACKOFF_TIME,\n max_tries=MAX_RETRY_COUNT)\n', (4485, 4568), False, 'import backoff\n'), ((2631, 2680), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2644, 2680), False, 'import socket\n'), ((3448, 3520), 're.findall', 're.findall', (['"""(mx-databroker-connect-(?:configs|offsets|status))"""', 'topics'], {}), "('(mx-databroker-connect-(?:configs|offsets|status))', topics)\n", (3458, 3520), False, 'import re\n'), ((3638, 3685), 're.findall', 're.findall', (['expect_public_topic_pattern', 'topics'], {}), '(expect_public_topic_pattern, topics)\n', (3648, 3685), False, 'import re\n')]
|
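The runner in the record above waits for the Kafka broker by polling its port with backoff.on_predicate and retries the Debezium status check with backoff.on_exception. Below is a minimal standalone sketch of the same polling pattern, independent of the record's CfLocalRunner plumbing; the port value and helper names are illustrative assumptions, not values taken from the record.

import socket
import backoff

KAFKA_BROKER_PORT = 9092  # assumed value for illustration

@backoff.on_predicate(backoff.expo, lambda rc: rc > 0, max_time=30)
def await_port(port):
    # connect_ex() returns 0 once something accepts connections on the port,
    # so backoff keeps retrying (with exponential waits) while it is non-zero.
    return socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(("localhost", port))

@backoff.on_exception(backoff.constant, Exception, interval=5, max_tries=10)
def check_status(check):
    # Re-invoke the supplied callable until it stops raising or tries run out.
    return check()

# check_status returns as soon as the callable succeeds:
assert check_status(lambda: "RUNNING") == "RUNNING"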
'''
Created on Nov 9, 2011
@author: ppa
'''
from analyzerdam.baseDAM import BaseDAM
from analyzerdam.excelLib import ExcelLib
from analyzer.model import TICK_FIELDS, QUOTE_FIELDS, Quote, Tick
from analyzer.lib.errors import UfException, Errors
from os import path
import logging
LOG = logging.getLogger()
class ExcelDAM(BaseDAM):
''' Excel DAO '''
QUOTE = 'quote'
TICK = 'tick'
def __init__(self):
''' constructor '''
super(ExcelDAM, self).__init__()
self.__dir = None
def targetPath(self, kind):
return path.join(self.__dir, "%s-%s.xls" % (self.symbol, kind) )
def __findRange(self, excelLib, start, end):
''' return low and high as excel range '''
inc = 1
low = 0
high = 0
dates = excelLib.readCol(0, 1)
for index, date in enumerate(dates):
if int(start) <= int(date):
low = index + inc
break
if low:
for index, date in reversed(list(enumerate(dates))):
if int(date) <= int(end):
high = index + inc
break
return low, high
def __readData(self, targetPath, start, end):
''' read data '''
ret = []
if not path.exists(targetPath):
LOG.error("Target file doesn't exist: %s" % path.abspath(targetPath) )
return ret
with ExcelLib(fileName = targetPath, mode = ExcelLib.READ_MODE) as excel:
low, high = self.__findRange(excel, start, end)
for index in range(low, high + 1):
ret.append(excel.readRow(index))
return ret
def __writeData(self, targetPath, fields, rows):
''' write data '''
if path.exists(targetPath):
LOG.error("Target file exists: %s" % path.abspath(targetPath) )
raise UfException(Errors.FILE_EXIST, "can't write to a existing file") #because xlwt doesn't support it
with ExcelLib(fileName = targetPath, mode = ExcelLib.WRITE_MODE) as excel:
excel.writeRow(0, fields)
for index, row in enumerate(rows):
excel.writeRow(index+1, row)
def readQuotes(self, start, end):
''' read quotes '''
quotes = self.__readData(self.targetPath(ExcelDAM.QUOTE), start, end)
return [Quote(*quote) for quote in quotes]
def writeQuotes(self, quotes):
''' write quotes '''
self.__writeData(self.targetPath(ExcelDAM.QUOTE),
QUOTE_FIELDS,
[[getattr(quote, field) for field in QUOTE_FIELDS] for quote in quotes])
def readTicks(self, start, end):
''' read ticks '''
ticks = self.__readData(self.targetPath(ExcelDAM.TICK), start, end)
return [Tick(*tick) for tick in ticks]
def writeTicks(self, ticks):
        ''' write ticks '''
self.__writeData(self.targetPath(ExcelDAM.TICK),
TICK_FIELDS,
[[getattr(tick, field) for field in TICK_FIELDS] for tick in ticks])
def setDir(self, path):
''' set dir '''
self.__dir = path
|
[
"analyzer.model.Quote",
"os.path.abspath",
"analyzer.model.Tick",
"analyzerdam.excelLib.ExcelLib",
"os.path.exists",
"analyzer.lib.errors.UfException",
"os.path.join",
"logging.getLogger"
] |
[((301, 320), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (318, 320), False, 'import logging\n'), ((588, 644), 'os.path.join', 'path.join', (['self.__dir', "('%s-%s.xls' % (self.symbol, kind))"], {}), "(self.__dir, '%s-%s.xls' % (self.symbol, kind))\n", (597, 644), False, 'from os import path\n'), ((1823, 1846), 'os.path.exists', 'path.exists', (['targetPath'], {}), '(targetPath)\n', (1834, 1846), False, 'from os import path\n'), ((1326, 1349), 'os.path.exists', 'path.exists', (['targetPath'], {}), '(targetPath)\n', (1337, 1349), False, 'from os import path\n'), ((1475, 1529), 'analyzerdam.excelLib.ExcelLib', 'ExcelLib', ([], {'fileName': 'targetPath', 'mode': 'ExcelLib.READ_MODE'}), '(fileName=targetPath, mode=ExcelLib.READ_MODE)\n', (1483, 1529), False, 'from analyzerdam.excelLib import ExcelLib\n'), ((1944, 2008), 'analyzer.lib.errors.UfException', 'UfException', (['Errors.FILE_EXIST', '"""can\'t write to a existing file"""'], {}), '(Errors.FILE_EXIST, "can\'t write to a existing file")\n', (1955, 2008), False, 'from analyzer.lib.errors import UfException, Errors\n'), ((2058, 2113), 'analyzerdam.excelLib.ExcelLib', 'ExcelLib', ([], {'fileName': 'targetPath', 'mode': 'ExcelLib.WRITE_MODE'}), '(fileName=targetPath, mode=ExcelLib.WRITE_MODE)\n', (2066, 2113), False, 'from analyzerdam.excelLib import ExcelLib\n'), ((2427, 2440), 'analyzer.model.Quote', 'Quote', (['*quote'], {}), '(*quote)\n', (2432, 2440), False, 'from analyzer.model import TICK_FIELDS, QUOTE_FIELDS, Quote, Tick\n'), ((2891, 2902), 'analyzer.model.Tick', 'Tick', (['*tick'], {}), '(*tick)\n', (2895, 2902), False, 'from analyzer.model import TICK_FIELDS, QUOTE_FIELDS, Quote, Tick\n'), ((1408, 1432), 'os.path.abspath', 'path.abspath', (['targetPath'], {}), '(targetPath)\n', (1420, 1432), False, 'from os import path\n'), ((1898, 1922), 'os.path.abspath', 'path.abspath', (['targetPath'], {}), '(targetPath)\n', (1910, 1922), False, 'from os import path\n')]
|
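The __findRange helper in the record above scans a column of integer-encoded dates and returns 1-based low/high row indices bounding [start, end]. The same scan can be sketched over a plain Python list, independent of ExcelLib; the function name and sample dates below are illustrative assumptions.

def find_range(dates, start, end):
    # dates: ascending sequence of int-encoded dates such as 20111109.
    # Returns (low, high) 1-based indices bounding [start, end], or (0, 0)
    # when no entry falls inside the range.
    low = high = 0
    for index, date in enumerate(dates):
        if int(start) <= int(date):
            low = index + 1
            break
    if low:
        for index, date in reversed(list(enumerate(dates))):
            if int(date) <= int(end):
                high = index + 1
                break
    return low, high

assert find_range([20111101, 20111105, 20111109], 20111102, 20111109) == (2, 3)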
#!/usr/bin/env python
"""steam-swissapiknife test suite"""
from steamswissapiknife import main
import unittest
import os
from contextlib import contextmanager
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
key = os.environ['STEAM_API_KEY']
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def test_interface_wiki_output():
parser = main.parse_args(['-f', 'wiki', '-i', 'ITFItems_440', key])
expected = """Page URL: http://wiki.teamfortress.com/wiki/WebAPI/GetGoldenWrenches
== URL ==
<nowiki>GET http://api.steampowered.com/ITFItems_440/GetGoldenWrenches/v2</nowiki>
== Method-specific parameters ==
== Result data =="""
with captured_output() as (out, err):
main.main(parser)
output = out.getvalue().strip()
assert(output == expected)
def test_interface_list_output():
parser = main.parse_args(['-f', 'list', '-i', 'ITFItems_440', key])
expected = """GET http://api.steampowered.com/ITFItems_440/GetGoldenWrenches/v2?key=%s""" % (key)
with captured_output() as (out, err):
main.main(parser)
output = out.getvalue().strip()
assert(output == expected)
def test_method_wiki_output():
parser = main.parse_args(['-f', 'wiki', '-m', 'GetGoldenWrenches', key])
expected = """Page URL: http://wiki.teamfortress.com/wiki/WebAPI/GetGoldenWrenches
== URL ==
<nowiki>GET http://api.steampowered.com/ITFItems_440/GetGoldenWrenches/v2</nowiki>
== Method-specific parameters ==
== Result data =="""
with captured_output() as (out, err):
main.main(parser)
output = out.getvalue().strip()
assert(output == expected)
def test_method_list_output():
parser = main.parse_args(['-m', 'GetGoldenWrenches', key])
expected = """GET http://api.steampowered.com/ITFItems_440/GetGoldenWrenches/v2?key=%s""" % (key)
with captured_output() as (out, err):
main.main(parser)
output = out.getvalue().strip()
assert(output == expected)
|
[
"steamswissapiknife.main.main",
"io.StringIO",
"steamswissapiknife.main.parse_args"
] |
[((645, 703), 'steamswissapiknife.main.parse_args', 'main.parse_args', (["['-f', 'wiki', '-i', 'ITFItems_440', key]"], {}), "(['-f', 'wiki', '-i', 'ITFItems_440', key])\n", (660, 703), False, 'from steamswissapiknife import main\n'), ((1129, 1187), 'steamswissapiknife.main.parse_args', 'main.parse_args', (["['-f', 'list', '-i', 'ITFItems_440', key]"], {}), "(['-f', 'list', '-i', 'ITFItems_440', key])\n", (1144, 1187), False, 'from steamswissapiknife import main\n'), ((1474, 1537), 'steamswissapiknife.main.parse_args', 'main.parse_args', (["['-f', 'wiki', '-m', 'GetGoldenWrenches', key]"], {}), "(['-f', 'wiki', '-m', 'GetGoldenWrenches', key])\n", (1489, 1537), False, 'from steamswissapiknife import main\n'), ((1960, 2009), 'steamswissapiknife.main.parse_args', 'main.parse_args', (["['-m', 'GetGoldenWrenches', key]"], {}), "(['-m', 'GetGoldenWrenches', key])\n", (1975, 2009), False, 'from steamswissapiknife import main\n'), ((365, 375), 'io.StringIO', 'StringIO', ([], {}), '()\n', (373, 375), False, 'from io import StringIO\n'), ((377, 387), 'io.StringIO', 'StringIO', ([], {}), '()\n', (385, 387), False, 'from io import StringIO\n'), ((992, 1009), 'steamswissapiknife.main.main', 'main.main', (['parser'], {}), '(parser)\n', (1001, 1009), False, 'from steamswissapiknife import main\n'), ((1340, 1357), 'steamswissapiknife.main.main', 'main.main', (['parser'], {}), '(parser)\n', (1349, 1357), False, 'from steamswissapiknife import main\n'), ((1826, 1843), 'steamswissapiknife.main.main', 'main.main', (['parser'], {}), '(parser)\n', (1835, 1843), False, 'from steamswissapiknife import main\n'), ((2162, 2179), 'steamswissapiknife.main.main', 'main.main', (['parser'], {}), '(parser)\n', (2171, 2179), False, 'from steamswissapiknife import main\n')]
|
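The test module above asserts on printed output by temporarily swapping sys.stdout and sys.stderr for StringIO buffers inside a context manager. Here is that pattern in isolation, as a sketch that is not tied to the steam-swissapiknife CLI.

import sys
from contextlib import contextmanager
from io import StringIO

@contextmanager
def captured_output():
    # Redirect stdout/stderr to in-memory buffers for the duration of the block.
    new_out, new_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield new_out, new_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err

with captured_output() as (out, err):
    print("hello")
assert out.getvalue().strip() == "hello"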
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
if communication == 'NCCL':
self.skipTest('b/170672646: it crashes with NCCL and group size one')
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
if device == 'GPU':
self.skipTest('b/170980122')
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
if device == 'GPU':
self.skipTest('b/170980122')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
# This timeout comes from param solution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
if device == 'GPU':
self.skipTest('b/170980122')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
[
"tensorflow.python.platform.test.main",
"threading.Thread",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context._reset_context",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.ops.collective_ops.all_reduce_v2",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.constant_op.constant",
"time.time",
"tensorflow.python.eager.context.ensure_initialized",
"tensorflow.python.ops.collective_ops.all_gather_v2",
"tensorflow.python.framework.ops.device",
"time.sleep",
"tensorflow.python.distribute.combinations.NamedObject",
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.test_util.set_logical_devices_to_at_least"
] |
[((2388, 2461), 'tensorflow.python.distribute.combinations.combine', 'combinations.combine', ([], {'device': '"""CPU"""', 'communication': '"""RING"""', 'required_gpus': '(0)'}), "(device='CPU', communication='RING', required_gpus=0)\n", (2408, 2461), False, 'from tensorflow.python.distribute import combinations\n'), ((2468, 2555), 'tensorflow.python.distribute.combinations.combine', 'combinations.combine', ([], {'device': '"""GPU"""', 'communication': "['RING', 'NCCL']", 'required_gpus': '(2)'}), "(device='GPU', communication=['RING', 'NCCL'],\n required_gpus=2)\n", (2488, 2555), False, 'from tensorflow.python.distribute import combinations\n'), ((20785, 20809), 'tensorflow.python.eager.context._reset_context', 'context._reset_context', ([], {}), '()\n', (20807, 20809), False, 'from tensorflow.python.eager import context\n'), ((20812, 20863), 'tensorflow.python.distribute.test_util.set_logical_devices_to_at_least', 'test_util.set_logical_devices_to_at_least', (['"""CPU"""', '(4)'], {}), "('CPU', 4)\n", (20853, 20863), False, 'from tensorflow.python.distribute import test_util\n'), ((20866, 20894), 'tensorflow.python.eager.context.ensure_initialized', 'context.ensure_initialized', ([], {}), '()\n', (20892, 20894), False, 'from tensorflow.python.eager import context\n'), ((20926, 20956), 'tensorflow.python.compat.v2_compat.enable_v2_behavior', 'v2_compat.enable_v2_behavior', ([], {}), '()\n', (20954, 20956), False, 'from tensorflow.python.compat import v2_compat\n'), ((20959, 20970), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (20968, 20970), False, 'from tensorflow.python.platform import test\n'), ((1714, 1744), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['group_size'], {}), '(group_size)\n', (1732, 1744), False, 'from tensorflow.python.ops import array_ops\n'), ((1761, 1790), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['group_key'], {}), '(group_key)\n', (1779, 1790), False, 'from tensorflow.python.ops import array_ops\n'), ((1810, 1842), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['instance_key'], {}), '(instance_key)\n', (1828, 1842), False, 'from tensorflow.python.ops import array_ops\n'), ((1854, 1944), 'tensorflow.python.ops.collective_ops.all_reduce_v2', '_collective_ops.all_reduce_v2', (['t', 'group_size', 'group_key', 'instance_key', '*args'], {}), '(t, group_size, group_key, instance_key, *args,\n **kwargs)\n', (1883, 1944), True, 'from tensorflow.python.ops import collective_ops as _collective_ops\n'), ((2091, 2121), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['group_size'], {}), '(group_size)\n', (2109, 2121), False, 'from tensorflow.python.ops import array_ops\n'), ((2138, 2167), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['group_key'], {}), '(group_key)\n', (2156, 2167), False, 'from tensorflow.python.ops import array_ops\n'), ((2187, 2219), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['instance_key'], {}), '(instance_key)\n', (2205, 2219), False, 'from tensorflow.python.ops import array_ops\n'), ((2231, 2321), 'tensorflow.python.ops.collective_ops.all_gather_v2', '_collective_ops.all_gather_v2', (['t', 'group_size', 'group_key', 'instance_key', '*args'], {}), '(t, group_size, group_key, instance_key, *args,\n **kwargs)\n', (2260, 2321), True, 'from tensorflow.python.ops import collective_ops as _collective_ops\n'), ((7991, 8021), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', 
(['in_value'], {}), '(in_value)\n', (8011, 8021), False, 'from tensorflow.python.framework import constant_op\n'), ((10798, 10825), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (10818, 10825), False, 'from tensorflow.python.framework import constant_op\n'), ((10953, 10986), 'threading.Thread', 'threading.Thread', ([], {'target': 'abort_fn'}), '(target=abort_fn)\n', (10969, 10986), False, 'import threading\n'), ((12458, 12485), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (12478, 12485), False, 'from tensorflow.python.framework import constant_op\n'), ((13001, 13034), 'threading.Thread', 'threading.Thread', ([], {'target': 'abort_fn'}), '(target=abort_fn)\n', (13017, 13034), False, 'import threading\n'), ((13867, 13891), 'tensorflow.python.eager.context._reset_context', 'context._reset_context', ([], {}), '()\n', (13889, 13891), False, 'from tensorflow.python.eager import context\n'), ((14348, 14375), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (14368, 14375), False, 'from tensorflow.python.framework import constant_op\n'), ((14975, 15008), 'threading.Thread', 'threading.Thread', ([], {'target': 'abort_fn'}), '(target=abort_fn)\n', (14991, 15008), False, 'import threading\n'), ((17448, 17459), 'time.time', 'time.time', ([], {}), '()\n', (17457, 17459), False, 'import time\n'), ((18031, 18073), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (18051, 18073), False, 'from tensorflow.python.framework import constant_op\n'), ((19335, 19377), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (19355, 19377), False, 'from tensorflow.python.framework import constant_op\n'), ((3592, 3619), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (3612, 3619), False, 'from tensorflow.python.framework import constant_op\n'), ((5037, 5064), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (5057, 5064), False, 'from tensorflow.python.framework import constant_op\n'), ((8032, 8048), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (8042, 8048), False, 'from tensorflow.python.framework import ops\n'), ((8300, 8316), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (8310, 8316), False, 'from tensorflow.python.framework import ops\n'), ((10852, 10865), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10862, 10865), False, 'import time\n'), ((12145, 12181), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['collective_fn'], {}), '(collective_fn)\n', (12166, 12181), False, 'from tensorflow.python.eager import def_function\n'), ((12834, 12870), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['collective_fn'], {}), '(collective_fn)\n', (12855, 12870), False, 'from tensorflow.python.eager import def_function\n'), ((12900, 12913), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (12910, 12913), False, 'import time\n'), ((14086, 14122), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['collective_fn'], {}), '(collective_fn)\n', (14107, 14122), False, 'from tensorflow.python.eager import def_function\n'), 
((14712, 14748), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['collective_fn'], {}), '(collective_fn)\n', (14733, 14748), False, 'from tensorflow.python.eager import def_function\n'), ((14874, 14887), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14884, 14887), False, 'import time\n'), ((15731, 15767), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['collective_fn'], {}), '(collective_fn)\n', (15752, 15767), False, 'from tensorflow.python.eager import def_function\n'), ((17630, 17641), 'time.time', 'time.time', ([], {}), '()\n', (17639, 17641), False, 'import time\n'), ((3195, 3211), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (3205, 3211), False, 'from tensorflow.python.framework import ops\n'), ((3232, 3259), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (3252, 3259), False, 'from tensorflow.python.framework import constant_op\n'), ((3717, 3733), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (3727, 3733), False, 'from tensorflow.python.framework import ops\n'), ((3975, 3991), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev1'], {}), '(dev1)\n', (3985, 3991), False, 'from tensorflow.python.framework import ops\n'), ((4640, 4656), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (4650, 4656), False, 'from tensorflow.python.framework import ops\n'), ((4677, 4704), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (4697, 4704), False, 'from tensorflow.python.framework import constant_op\n'), ((5162, 5178), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (5172, 5178), False, 'from tensorflow.python.framework import ops\n'), ((5420, 5436), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev1'], {}), '(dev1)\n', (5430, 5436), False, 'from tensorflow.python.framework import ops\n'), ((6038, 6056), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (6054, 6056), False, 'from tensorflow.python.eager import context\n'), ((6628, 6644), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (6638, 6644), False, 'from tensorflow.python.framework import ops\n'), ((6805, 6821), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev1'], {}), '(dev1)\n', (6815, 6821), False, 'from tensorflow.python.framework import ops\n'), ((6982, 6998), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev2'], {}), '(dev2)\n', (6992, 6998), False, 'from tensorflow.python.framework import ops\n'), ((7159, 7175), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev3'], {}), '(dev3)\n', (7169, 7175), False, 'from tensorflow.python.framework import ops\n'), ((8659, 8677), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (8675, 8677), False, 'from tensorflow.python.eager import context\n'), ((11197, 11213), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (11207, 11213), False, 'from tensorflow.python.framework import ops\n'), ((11533, 11549), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (11543, 11549), False, 'from tensorflow.python.framework import ops\n'), ((13343, 13359), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (13353, 13359), False, 
'from tensorflow.python.framework import ops\n'), ((13679, 13695), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (13689, 13695), False, 'from tensorflow.python.framework import ops\n'), ((15106, 15122), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (15116, 15122), False, 'from tensorflow.python.framework import ops\n'), ((15442, 15458), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (15452, 15458), False, 'from tensorflow.python.framework import ops\n'), ((18262, 18278), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (18272, 18278), False, 'from tensorflow.python.framework import ops\n'), ((18843, 18859), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev1'], {}), '(dev1)\n', (18853, 18859), False, 'from tensorflow.python.framework import ops\n'), ((19941, 19957), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev0'], {}), '(dev0)\n', (19951, 19957), False, 'from tensorflow.python.framework import ops\n'), ((20528, 20544), 'tensorflow.python.framework.ops.device', 'ops.device', (['dev1'], {}), '(dev1)\n', (20538, 20544), False, 'from tensorflow.python.framework import ops\n'), ((9070, 9103), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_value'], {}), '(input_value)\n', (9090, 9103), False, 'from tensorflow.python.framework import constant_op\n'), ((2687, 2734), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""v1"""', 'CollectiveOpsV1'], {}), "('v1', CollectiveOpsV1)\n", (2711, 2734), False, 'from tensorflow.python.distribute import combinations\n'), ((2752, 2799), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""v2"""', 'CollectiveOpsV2'], {}), "('v2', CollectiveOpsV2)\n", (2776, 2799), False, 'from tensorflow.python.distribute import combinations\n'), ((10872, 10889), 'tensorflow.python.eager.context.context', 'context.context', ([], {}), '()\n', (10887, 10889), False, 'from tensorflow.python.eager import context\n'), ((11943, 11961), 'tensorflow.python.framework.ops.device', 'ops.device', (['device'], {}), '(device)\n', (11953, 11961), False, 'from tensorflow.python.framework import ops\n'), ((12558, 12576), 'tensorflow.python.framework.ops.device', 'ops.device', (['device'], {}), '(device)\n', (12568, 12576), False, 'from tensorflow.python.framework import ops\n'), ((12920, 12937), 'tensorflow.python.eager.context.context', 'context.context', ([], {}), '()\n', (12935, 12937), False, 'from tensorflow.python.eager import context\n'), ((14510, 14528), 'tensorflow.python.framework.ops.device', 'ops.device', (['device'], {}), '(device)\n', (14520, 14528), False, 'from tensorflow.python.framework import ops\n'), ((14894, 14911), 'tensorflow.python.eager.context.context', 'context.context', ([], {}), '()\n', (14909, 14911), False, 'from tensorflow.python.eager import context\n'), ((9847, 9913), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_reduce"""', 'CollectiveOpsV1.all_reduce'], {}), "('all_reduce', CollectiveOpsV1.all_reduce)\n", (9871, 9913), False, 'from tensorflow.python.distribute import combinations\n'), ((9972, 10041), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_reduce_v2"""', 'CollectiveOpsV2.all_reduce'], {}), "('all_reduce_v2', CollectiveOpsV2.all_reduce)\n", (9996, 10041), False, 'from 
tensorflow.python.distribute import combinations\n'), ((10100, 10166), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_gather"""', 'CollectiveOpsV1.all_gather'], {}), "('all_gather', CollectiveOpsV1.all_gather)\n", (10124, 10166), False, 'from tensorflow.python.distribute import combinations\n'), ((10225, 10294), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_gather_v2"""', 'CollectiveOpsV2.all_gather'], {}), "('all_gather_v2', CollectiveOpsV2.all_gather)\n", (10249, 10294), False, 'from tensorflow.python.distribute import combinations\n'), ((17071, 17099), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['tensor'], {}), '(tensor)\n', (17091, 17099), False, 'from tensorflow.python.framework import constant_op\n'), ((19464, 19482), 'tensorflow.python.framework.ops.device', 'ops.device', (['device'], {}), '(device)\n', (19474, 19482), False, 'from tensorflow.python.framework import ops\n'), ((15894, 15960), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_reduce"""', 'CollectiveOpsV1.all_reduce'], {}), "('all_reduce', CollectiveOpsV1.all_reduce)\n", (15918, 15960), False, 'from tensorflow.python.distribute import combinations\n'), ((16019, 16088), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_reduce_v2"""', 'CollectiveOpsV2.all_reduce'], {}), "('all_reduce_v2', CollectiveOpsV2.all_reduce)\n", (16043, 16088), False, 'from tensorflow.python.distribute import combinations\n'), ((16147, 16213), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_gather"""', 'CollectiveOpsV1.all_gather'], {}), "('all_gather', CollectiveOpsV1.all_gather)\n", (16171, 16213), False, 'from tensorflow.python.distribute import combinations\n'), ((16272, 16341), 'tensorflow.python.distribute.combinations.NamedObject', 'combinations.NamedObject', (['"""all_gather_v2"""', 'CollectiveOpsV2.all_gather'], {}), "('all_gather_v2', CollectiveOpsV2.all_gather)\n", (16296, 16341), False, 'from tensorflow.python.distribute import combinations\n'), ((6729, 6754), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (6749, 6754), False, 'from tensorflow.python.framework import constant_op\n'), ((6906, 6931), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(2.0)'], {}), '(2.0)\n', (6926, 6931), False, 'from tensorflow.python.framework import constant_op\n'), ((7083, 7108), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3.0)'], {}), '(3.0)\n', (7103, 7108), False, 'from tensorflow.python.framework import constant_op\n'), ((7260, 7285), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(4.0)'], {}), '(4.0)\n', (7280, 7285), False, 'from tensorflow.python.framework import constant_op\n')]
|
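The timeout tests in the record above follow a common shape: take time.time() before a call that is expected to raise DeadlineExceededError, assert on the error, then check that at least `timeout` seconds elapsed. Below is a self-contained sketch of that timing pattern with a plain TimeoutError standing in for the collective op; the helper is an illustrative assumption, not TensorFlow API.

import time

def blocking_call(timeout):
    # Stand-in for a collective op that blocks until its deadline expires.
    time.sleep(timeout)
    raise TimeoutError("Collective has timed out during execution")

timeout = 0.2
start_time = time.time()
try:
    blocking_call(timeout)
except TimeoutError:
    pass
elapsed = time.time() - start_time
assert elapsed >= timeout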
from rest_framework.serializers import ModelSerializer
from formidable.constants import ID, NAME, DESCRIPTION, SECTIONS
from formidable.models import Form
from formidable.serializers.section import SectionMinimalSerializer
class FormSerializer(ModelSerializer):
sections = SectionMinimalSerializer(many=True, read_only=True)
class Meta:
model = Form
exclude = "created", "modified"
class FormDetailSerializer(ModelSerializer):
sections = SectionMinimalSerializer(many=True, read_only=True)
class Meta:
model = Form
fields = ID, NAME, DESCRIPTION, SECTIONS
read_only_fields = fields
|
[
"formidable.serializers.section.SectionMinimalSerializer"
] |
[((280, 331), 'formidable.serializers.section.SectionMinimalSerializer', 'SectionMinimalSerializer', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (304, 331), False, 'from formidable.serializers.section import SectionMinimalSerializer\n'), ((472, 523), 'formidable.serializers.section.SectionMinimalSerializer', 'SectionMinimalSerializer', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (496, 523), False, 'from formidable.serializers.section import SectionMinimalSerializer\n')]
|
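In the Meta classes above, the exclude and fields assignments rely on bare tuple packing: the commas, not parentheses, build the tuple. A two-line illustration:

exclude = "created", "modified"
assert exclude == ("created", "modified") and isinstance(exclude, tuple)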
import logging
from logging.handlers import RotatingFileHandler
from os import path
# 3 MB max files, up to 2 backup files.
logging.basicConfig(format='%(asctime)s %(levelname)s - %(message)s - [%(funcName)s:%(lineno)d]',
level=logging.INFO,
handlers=[RotatingFileHandler(path.relpath('bot.log'), mode='a', maxBytes=3*1024*1024,
backupCount=2, encoding=None, delay=0)])
logger = logging.getLogger('pasoapasobot')
|
[
"os.path.relpath",
"logging.getLogger"
] |
[((471, 504), 'logging.getLogger', 'logging.getLogger', (['"""pasoapasobot"""'], {}), "('pasoapasobot')\n", (488, 504), False, 'import logging\n'), ((313, 336), 'os.path.relpath', 'path.relpath', (['"""bot.log"""'], {}), "('bot.log')\n", (325, 336), False, 'from os import path\n')]
|
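The snippet above configures a 3 MB, two-backup rotating log file through logging.basicConfig. An equivalent sketch with an explicit handler object makes the rotation policy and formatter easier to see; it reuses the record's file name and logger name, but the explicit-handler form is an alternative, not the record's code.

import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler("bot.log", mode="a", maxBytes=3 * 1024 * 1024,
                              backupCount=2, encoding=None, delay=0)
handler.setFormatter(logging.Formatter(
    "%(asctime)s %(levelname)s - %(message)s - [%(funcName)s:%(lineno)d]"))
logger = logging.getLogger("pasoapasobot")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("logger configured")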
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of adaptive softmax.
See the papers https://arxiv.org/abs/1609.04309 and
https://arxiv.org/abs/1809.10853 for more details.
"""
import math
from typing import Dict, Sequence, Union
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import vocab_embeddings
import tensorflow.compat.v1 as tf
class _Cluster(object):
"""Helper class for adaptive embeddings specifying a cluster of tokens.
Essentially a wrapper around a vocab embedding for the cluster with additional
metadata so that we can apply the embedding to the actual ids and hidden
states.
"""
def __init__(self, embedding, start_token_id, end_token_id,
length_projection_factor, vocab_dim):
"""Cluster constructor.
Args:
embedding: a FactorizedVocabEmbedding or transformer.VocabEmbedding, the
vocab embedding to use for the cluster.
start_token_id: an integer, the inclusive id of the first token in the
cluster.
end_token_id: an integer, the exclusive id of the last token in the
cluster.
length_projection_factor: a float between 0 and 1, the sequence length
dimension will be projected down to this number times the sequence
length dimension to contain the elements in this cluster. If the input
contains too many tokens in the cluster, tokens later in the input will
be ignored.
vocab_dim: an mtf.Dimension, the dimension the embedding uses as its
vocab.
"""
self._embedding = embedding
self._start_token_id = start_token_id
self._end_token_id = end_token_id
self._length_projection_factor = length_projection_factor
self._vocab_dim = vocab_dim
@property
def end_token_id(self):
return self._end_token_id
@property
def length_projection_factor(self):
return self._length_projection_factor
def ids_to_embedding(self, ids, context):
"""Ids to embeddings with ids not in cluster mapped to the zero vector."""
ids -= self._start_token_id
# The mtf.gather in the embedding's ids_to_embedding implementation will
# cause the one hot representations of tokens greater than cluster vocab
# dimension size to be the zero vector. Thus the embeddings for those tokens
# will be the zero vector.
ids = mtf.where(mtf.greater_equal(ids, 0), ids, self._vocab_dim.size)
# Handle the case of the head cluster where we will have entries at the end
# corresponding to the tail clusters.
ids = mtf.where(
mtf.less(ids, self._end_token_id - self._start_token_id),
ids,
self._vocab_dim.size,
)
return self._embedding.ids_to_embedding(ids, context)
def get_cluster_mask(self, targets):
"""Computes mask over the targets masking out tokens not in the cluster."""
return mtf.logical_and(
mtf.greater_equal(targets, self._start_token_id),
mtf.less(targets, self._end_token_id))
def get_cluster_length_dim(self, length_dim):
"""Returns dimension used instead of sequence length for the cluster."""
cluster_length = math.ceil(self._length_projection_factor * length_dim.size)
return mtf.Dimension(length_dim.name, int(cluster_length))
def get_project_to_cluster_length(self, cluster_mask, dtype):
"""Returns projection from length dim to the shorter cluster length dim."""
seq_length_dim = cluster_mask.shape.get_dim_by_name("length")
cluster_length_dim = self.get_cluster_length_dim(seq_length_dim)
return mtf.cast(cluster_mask, dtype) * mtf.one_hot(
mtf.cumsum(mtf.cast(cluster_mask, tf.int32), seq_length_dim) - 1,
output_dim=cluster_length_dim,
dtype=dtype)
def compute_loss(self, decoder, hidden, targets, context):
"""Computes the loss during training."""
logits = self._embedding.hidden_to_logits(hidden, context=context)
soft_targets = mtf.one_hot(
targets - self._start_token_id,
self._vocab_dim,
dtype=context.activation_dtype)
loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, soft_targets, self._vocab_dim, z_loss=decoder.z_loss)
padding_mask = mtf.layers.weights_nonzero(
targets, dtype=context.activation_dtype)
return (mtf.reduce_sum(loss * padding_mask) /
decoder.loss_denominator(targets, context.num_microbatches))
def compute_log_softmax(self, hidden, context):
"""Returns the log softmax of logits computed from the hidden state."""
logits = self._embedding.hidden_to_logits(hidden, context=context)
return mtf.log_softmax(logits, reduced_dim=self._vocab_dim)
def get_log_softmax_prefix(self, log_softmax, end_index):
"""Returns first end_index entries in log_softmax along the vocab dim."""
prefix_dim = mtf.Dimension(self._vocab_dim.name, end_index)
indices = mtf.mtf_range(
log_softmax.mesh, dim=self._vocab_dim, dtype=tf.int32)
prefix_indices = mtf.where(mtf.less(indices, end_index), indices, -1)
projection = mtf.one_hot(
prefix_indices, prefix_dim, dtype=log_softmax.dtype)
return mtf.einsum([log_softmax, projection], reduced_dims=[self._vocab_dim])
def get_log_softmax_value(self, log_softmax, index):
"""Returns the entry at index of the log_softmax along the vocab dim."""
return mtf.gather(log_softmax, index, dim=self._vocab_dim)
@gin.configurable
class AdaptiveSoftmaxVocabEmbedding(object):
"""Vocab embedding implementing the adaptive softmax.
The adaptive softmax was first introduced in this paper
(https://arxiv.org/abs/1609.04309). Note that this implementation is actually
most similar to the adaptive vocab embeddings in
https://arxiv.org/abs/1809.10853 as it supports having different embedding
sizes for different clusters.
The adaptive softmax works by factorizing the traditional softmax over
multiple clusters:
p(v|h) = p(v|c,h) p(c|h),
where both probability distributions take the form of a softmax.
Further speed up is achieved by putting the class containing the most
frequently occurring tokens in the "head" cluster. Essentially, those tokens
are included as "classes" in the p(c|h) softmax. Thus computing their
probabilities requires only single softmax evaluation.
This implementation differs from vocab_embeddings.AdaptiveVocabEmbedding. That
implementation only supports variable embeddings sizes across clusters. This
implementation also supports the adaptive softmax.
A few conditions must be met in order to use this vocab:
- Unitransformer.shared_embedding_and_softmax_weights = True.
- If training, then
      Unitransformer.loss_fn = adaptive_softmax.adaptive_softmax_loss_fn.
- Label smoothing is not supported and will be ignored silently.
- loss_on_targets_only is not supported and will be ignored silently.
"""
def __init__(self,
mesh: mtf.Mesh,
vocab_dim: mtf.Dimension,
output_dim: mtf.Dimension,
variable_dtype: mtf.VariableDType,
name: str,
ensemble_dim: mtf.Dimension,
clusters: Sequence[Dict[str, Union[int, float]]] = gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
The clustering parameters are specified by the `clusters` argument. It is a
list of dicts with keys:
- token_count: The number of tokens in the cluster.
- embedding_size: (optional) The hidden dimension size of the cluster's
embedding. Defaults to the model dimension size.
- length_projection_factor: (optional) Since MTF can't handle variable
length dimensions, we project from the sequence length dimension to a
dimension of size length_projection_factor * sequence_length during
training. This can save compute time and resources if the cluster has
many tokens that appear infrequently. If all of the tokens belonging to
the cluster cannot fit within this reduced dimension, some will be
      discarded and ignored for the purposes of computing loss. Defaults to 1.
Ignored for the head (first) cluster and not during training.
The first cluster will become the head cluster.
For example, let's say we have a vocab size of 500k and pass as clusters:
[
{"token_count": 50000, "embedding_size": 1024},
{"token_count": 100000, "embedding_size": 256},
{"token_count": 350000, "embedding_size": 64},
]
Then tokens with ids 0 (inclusive) to 50k (exclusive) will be in the first
cluster with embedding size of 1024, tokens with ids 50k to 150k will be in
the second cluster with embedding size of 256, and tokens with ids 150k to
500k will be in the third cluster with embedding size of 64.
Args:
mesh: a mtf.Mesh, the mesh used to layout the tensors.
vocab_dim: a mtf.Dimension, the dimension corresponding to vocabulary.
output_dim: a mtf.Dimension, the dimension corresponding to the model
hidden states.
variable_dtype: a mtf.VariableDType, the datatype information for the
variables used in the embedding tensors.
name: a string, a name to base variable names off of.
ensemble_dim: a mtf.Dimension, the dimension used for ensembling.
Absolutely no guarantees that this code will work with ensembling.
clusters: a list(dict), specification of the clusters. See above for more
information.
Raises:
ValueError: The sum of the token counts across the clusters does not equal
the vocabulary size or a length_projection_factor is not in the range
(0, 1].
"""
self._mesh = mesh
self._variable_dtype = variable_dtype
self._name = name
self._ensemble_dim = ensemble_dim
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._num_clusters = len(clusters)
token_counts = [cluster["token_count"] for cluster in clusters]
if sum(token_counts) != vocab_dim.size:
raise ValueError(
"The cluster token counts {} do not sum to the vocab size {}.".format(
token_counts, vocab_dim.size))
self._tail_clusters = []
start_token_id = 0
for i, cluster_spec in enumerate(clusters):
cluster = self._create_cluster(cluster_spec, i, start_token_id)
if i == 0:
self._head_cluster = cluster
else:
self._tail_clusters.append(cluster)
start_token_id += cluster_spec["token_count"]
def _create_cluster(self, cluster_spec, index, start_token_id):
"""Creates a cluster given its spec."""
token_count = cluster_spec["token_count"]
embedding_size = cluster_spec.get("embedding_size", self._output_dim.size)
length_projection_factor = cluster_spec.get("length_projection_factor", 1)
if length_projection_factor <= 0 or length_projection_factor > 1:
raise ValueError(
"Invalid length_projection_factor of {}. Must be in range (0, 1]"
.format(length_projection_factor))
if index == 0:
# Include the entries for the tail clusters in the head cluster "vocab".
cluster_vocab_dim = mtf.Dimension(self._vocab_dim.name,
token_count + self._num_clusters - 1)
else:
cluster_vocab_dim = mtf.Dimension(self._vocab_dim.name, token_count)
if embedding_size == self._output_dim.size:
# In this case we don't need to up project from the embedding space to
# the model state space.
cluster_embedding = transformer.VocabEmbedding(
mesh=self._mesh,
vocab_dim=cluster_vocab_dim,
output_dim=self._output_dim,
variable_dtype=self._variable_dtype,
name="{}_{}".format(self._name, index),
ensemble_dim=self._ensemble_dim)
else:
cluster_embedding = vocab_embeddings.FactorizedVocabEmbedding(
mesh=self._mesh,
vocab_dim=cluster_vocab_dim,
output_dim=self._output_dim,
variable_dtype=self._variable_dtype,
name="{}_{}".format(self._name, index),
ensemble_dim=self._ensemble_dim,
inner_dimension_size=embedding_size)
return _Cluster(
embedding=cluster_embedding,
start_token_id=start_token_id,
end_token_id=start_token_id + token_count,
length_projection_factor=length_projection_factor,
vocab_dim=cluster_vocab_dim)
def ids_to_embedding(self, ids: mtf.Tensor, context) -> mtf.Tensor:
all_clusters = self._tail_clusters + [self._head_cluster]
# Ids not in each cluster will be mapped to the zero vector. Since clusters
# are disjoint, this sum is correct.
return sum(
cluster.ids_to_embedding(ids, context) for cluster in all_clusters)
def hidden_to_logits(self, hidden: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Function called by mtf transformer to get the logits.
The benefit from the adaptive softmax comes from not having to compute the
logits over all of the vocab during training. Thus, we use the somewhat
hacky solution of returning the hidden states during training and then using
them to compute the loss in a custom loss function.
    When not training, this method will be true to its name and return the
logits corresponding to the hidden state.
Args:
hidden: an mtf.Tensor, hidden model states of the final decoder layer.
context: a transformer.Context, the context used for the call to the
transformer.
Returns:
an mtf.Tensor
"""
if context.mode == tf.estimator.ModeKeys.TRAIN:
return hidden
else:
return self._hidden_to_logits(hidden, context)
def _hidden_to_logits(self, hidden, context):
"""Actually compute the logits over the entire vocab."""
head_size = self._head_cluster.end_token_id
# Note that computing the log softmax is equivalent to computing the logits.
head_log_softmax = self._head_cluster.compute_log_softmax(hidden, context)
logits = [
self._head_cluster.get_log_softmax_prefix(head_log_softmax, head_size)
]
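    # Adaptive-softmax factorization: the log-probability of a token in tail
    # cluster i is the head log-softmax entry of that cluster's placeholder
    # node plus the log-softmax within the tail cluster itself.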
for i, cluster in enumerate(self._tail_clusters):
tail_log_softmax = cluster.compute_log_softmax(hidden, context)
cluster_softmax = self._head_cluster.get_log_softmax_value(
head_log_softmax, head_size + i)
logits.append(cluster_softmax + tail_log_softmax)
return mtf.concat(logits, concat_dim_name=self._vocab_dim.name)
def compute_loss(self, decoder: transformer.Unitransformer,
hidden: mtf.Tensor, targets: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Returns the loss without computing a softmax over the entire vocab."""
loss = 0
tail_cluster_masks = []
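    # Each tail cluster contributes a loss only on the positions whose targets
    # it owns; the head cluster is then trained with those positions remapped
    # to the per-cluster placeholder ids appended to the head vocabulary.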
for cluster in self._tail_clusters:
cluster_mask = cluster.get_cluster_mask(targets)
tail_cluster_masks.append(cluster_mask)
if cluster.length_projection_factor == 1:
targets_in_cluster = mtf.where(cluster_mask, targets, 0)
hidden_in_cluster = mtf.where(cluster_mask, hidden, 0)
else:
# TODO(mmatena): Unfold the batch dim to get a super long sequence dim
# to reduce the risk of overflowing the projection.
proj_to_cluster_len = cluster.get_project_to_cluster_length(
cluster_mask, dtype=targets.dtype)
targets_in_cluster = mtf.einsum(
[proj_to_cluster_len, targets],
reduced_dims=[targets.shape.get_dim_by_name("length")])
hidden_in_cluster = mtf.einsum(
[mtf.cast(proj_to_cluster_len, hidden.dtype), hidden],
reduced_dims=[hidden.shape.get_dim_by_name("length")])
loss += cluster.compute_loss(decoder, hidden_in_cluster,
targets_in_cluster, context)
tail_clusters_dim = mtf.Dimension("tail_clusters", len(tail_cluster_masks))
tail_node_targets = mtf.reduce_sum(
mtf.stack([(self._head_cluster.end_token_id + i) *
mtf.cast(mask, targets.dtype)
for i, mask in enumerate(tail_cluster_masks)],
tail_clusters_dim.name),
reduced_dim=tail_clusters_dim)
head_targets = mtf.where(
mtf.cast(tail_node_targets, tf.bool), tail_node_targets, targets)
loss += self._head_cluster.compute_loss(decoder, hidden, head_targets,
context)
return loss
@gin.configurable
def adaptive_softmax_loss_fn(decoder: transformer.Unitransformer,
context: transformer.Context, logits: mtf.Tensor,
targets: mtf.Tensor,
output_vocab_dim: mtf.Dimension) -> mtf.Tensor:
"""Custom loss to use when training with an adaptive softmax.
Embedding and softmax weights must be shared in order for this function to
  work. Note that label smoothing and loss_on_targets_only are not supported and
will be silently ignored.
Args:
decoder: a transformer.Unitransformer
context: a transformer.Context
logits: an mtf.Tensor, note that this will actually be the hidden state of
the final decoder layer
targets: an mtf.Tensor
output_vocab_dim: an mtf.Dimension
Returns:
the loss
"""
del output_vocab_dim
hidden = logits
vocab_embedding = context.shared_params["embedding"]
return vocab_embedding.compute_loss(
decoder, hidden=hidden, targets=targets, context=context)
|
[
"mesh_tensorflow.einsum",
"mesh_tensorflow.gather",
"mesh_tensorflow.concat",
"math.ceil",
"mesh_tensorflow.Dimension",
"mesh_tensorflow.layers.weights_nonzero",
"mesh_tensorflow.log_softmax",
"mesh_tensorflow.layers.softmax_cross_entropy_with_logits",
"mesh_tensorflow.cast",
"mesh_tensorflow.reduce_sum",
"mesh_tensorflow.one_hot",
"mesh_tensorflow.less",
"mesh_tensorflow.where",
"mesh_tensorflow.greater_equal",
"mesh_tensorflow.mtf_range"
] |
[((3763, 3822), 'math.ceil', 'math.ceil', (['(self._length_projection_factor * length_dim.size)'], {}), '(self._length_projection_factor * length_dim.size)\n', (3772, 3822), False, 'import math\n'), ((4553, 4650), 'mesh_tensorflow.one_hot', 'mtf.one_hot', (['(targets - self._start_token_id)', 'self._vocab_dim'], {'dtype': 'context.activation_dtype'}), '(targets - self._start_token_id, self._vocab_dim, dtype=context.\n activation_dtype)\n', (4564, 4650), True, 'import mesh_tensorflow as mtf\n'), ((4682, 4793), 'mesh_tensorflow.layers.softmax_cross_entropy_with_logits', 'mtf.layers.softmax_cross_entropy_with_logits', (['logits', 'soft_targets', 'self._vocab_dim'], {'z_loss': 'decoder.z_loss'}), '(logits, soft_targets, self.\n _vocab_dim, z_loss=decoder.z_loss)\n', (4726, 4793), True, 'import mesh_tensorflow as mtf\n'), ((4818, 4885), 'mesh_tensorflow.layers.weights_nonzero', 'mtf.layers.weights_nonzero', (['targets'], {'dtype': 'context.activation_dtype'}), '(targets, dtype=context.activation_dtype)\n', (4844, 4885), True, 'import mesh_tensorflow as mtf\n'), ((5228, 5280), 'mesh_tensorflow.log_softmax', 'mtf.log_softmax', (['logits'], {'reduced_dim': 'self._vocab_dim'}), '(logits, reduced_dim=self._vocab_dim)\n', (5243, 5280), True, 'import mesh_tensorflow as mtf\n'), ((5437, 5483), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['self._vocab_dim.name', 'end_index'], {}), '(self._vocab_dim.name, end_index)\n', (5450, 5483), True, 'import mesh_tensorflow as mtf\n'), ((5499, 5567), 'mesh_tensorflow.mtf_range', 'mtf.mtf_range', (['log_softmax.mesh'], {'dim': 'self._vocab_dim', 'dtype': 'tf.int32'}), '(log_softmax.mesh, dim=self._vocab_dim, dtype=tf.int32)\n', (5512, 5567), True, 'import mesh_tensorflow as mtf\n'), ((5668, 5732), 'mesh_tensorflow.one_hot', 'mtf.one_hot', (['prefix_indices', 'prefix_dim'], {'dtype': 'log_softmax.dtype'}), '(prefix_indices, prefix_dim, dtype=log_softmax.dtype)\n', (5679, 5732), True, 'import mesh_tensorflow as mtf\n'), ((5754, 5823), 'mesh_tensorflow.einsum', 'mtf.einsum', (['[log_softmax, projection]'], {'reduced_dims': '[self._vocab_dim]'}), '([log_softmax, projection], reduced_dims=[self._vocab_dim])\n', (5764, 5823), True, 'import mesh_tensorflow as mtf\n'), ((5968, 6019), 'mesh_tensorflow.gather', 'mtf.gather', (['log_softmax', 'index'], {'dim': 'self._vocab_dim'}), '(log_softmax, index, dim=self._vocab_dim)\n', (5978, 6019), True, 'import mesh_tensorflow as mtf\n'), ((15139, 15195), 'mesh_tensorflow.concat', 'mtf.concat', (['logits'], {'concat_dim_name': 'self._vocab_dim.name'}), '(logits, concat_dim_name=self._vocab_dim.name)\n', (15149, 15195), True, 'import mesh_tensorflow as mtf\n'), ((2993, 3018), 'mesh_tensorflow.greater_equal', 'mtf.greater_equal', (['ids', '(0)'], {}), '(ids, 0)\n', (3010, 3018), True, 'import mesh_tensorflow as mtf\n'), ((3198, 3254), 'mesh_tensorflow.less', 'mtf.less', (['ids', '(self._end_token_id - self._start_token_id)'], {}), '(ids, self._end_token_id - self._start_token_id)\n', (3206, 3254), True, 'import mesh_tensorflow as mtf\n'), ((3519, 3567), 'mesh_tensorflow.greater_equal', 'mtf.greater_equal', (['targets', 'self._start_token_id'], {}), '(targets, self._start_token_id)\n', (3536, 3567), True, 'import mesh_tensorflow as mtf\n'), ((3577, 3614), 'mesh_tensorflow.less', 'mtf.less', (['targets', 'self._end_token_id'], {}), '(targets, self._end_token_id)\n', (3585, 3614), True, 'import mesh_tensorflow as mtf\n'), ((4177, 4206), 'mesh_tensorflow.cast', 'mtf.cast', (['cluster_mask', 'dtype'], {}), '(cluster_mask, dtype)\n', 
(4185, 4206), True, 'import mesh_tensorflow as mtf\n'), ((4908, 4943), 'mesh_tensorflow.reduce_sum', 'mtf.reduce_sum', (['(loss * padding_mask)'], {}), '(loss * padding_mask)\n', (4922, 4943), True, 'import mesh_tensorflow as mtf\n'), ((5608, 5636), 'mesh_tensorflow.less', 'mtf.less', (['indices', 'end_index'], {}), '(indices, end_index)\n', (5616, 5636), True, 'import mesh_tensorflow as mtf\n'), ((11850, 11923), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['self._vocab_dim.name', '(token_count + self._num_clusters - 1)'], {}), '(self._vocab_dim.name, token_count + self._num_clusters - 1)\n', (11863, 11923), True, 'import mesh_tensorflow as mtf\n'), ((12000, 12048), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['self._vocab_dim.name', 'token_count'], {}), '(self._vocab_dim.name, token_count)\n', (12013, 12048), True, 'import mesh_tensorflow as mtf\n'), ((16957, 16993), 'mesh_tensorflow.cast', 'mtf.cast', (['tail_node_targets', 'tf.bool'], {}), '(tail_node_targets, tf.bool)\n', (16965, 16993), True, 'import mesh_tensorflow as mtf\n'), ((15721, 15756), 'mesh_tensorflow.where', 'mtf.where', (['cluster_mask', 'targets', '(0)'], {}), '(cluster_mask, targets, 0)\n', (15730, 15756), True, 'import mesh_tensorflow as mtf\n'), ((15785, 15819), 'mesh_tensorflow.where', 'mtf.where', (['cluster_mask', 'hidden', '(0)'], {}), '(cluster_mask, hidden, 0)\n', (15794, 15819), True, 'import mesh_tensorflow as mtf\n'), ((4241, 4273), 'mesh_tensorflow.cast', 'mtf.cast', (['cluster_mask', 'tf.int32'], {}), '(cluster_mask, tf.int32)\n', (4249, 4273), True, 'import mesh_tensorflow as mtf\n'), ((16293, 16336), 'mesh_tensorflow.cast', 'mtf.cast', (['proj_to_cluster_len', 'hidden.dtype'], {}), '(proj_to_cluster_len, hidden.dtype)\n', (16301, 16336), True, 'import mesh_tensorflow as mtf\n'), ((16741, 16770), 'mesh_tensorflow.cast', 'mtf.cast', (['mask', 'targets.dtype'], {}), '(mask, targets.dtype)\n', (16749, 16770), True, 'import mesh_tensorflow as mtf\n')]
|
# streamClientTCP.py
__author__ = 'William'
import json
from socket import *
# Set server ip and port
serverName = '192.168.43.28'
serverPort = 12000
# Create socket
clientSocket = socket(AF_INET, SOCK_STREAM)
# Connect to socket
clientSocket.connect((serverName, serverPort))
# Set request message to send
messageSizeInBytes = 100
secondsBetweenTransfers = 0.01
nrOfTransfers = 10000
sendInfo = (messageSizeInBytes, secondsBetweenTransfers, nrOfTransfers)
dataString = json.dumps(sendInfo)
# Print request message
print(dataString)
# Encode and send the request message
clientSocket.send(str.encode(dataString))
# Receive the requested number of transfers
for x in range(1, nrOfTransfers+1):
    print("Transfer nr: "+str(x))
    # Receive one transfer (note: recv() may return fewer than
    # messageSizeInBytes bytes, since TCP is a byte stream)
    receivedData = clientSocket.recv(messageSizeInBytes)
    # Print length of transferred data
    print("Length (B): "+str(len(receivedData))+"\n")
# Close socket
clientSocket.close()
|
[
"json.dumps"
] |
[((474, 494), 'json.dumps', 'json.dumps', (['sendInfo'], {}), '(sendInfo)\n', (484, 494), False, 'import json\n')]
|
"""
Created on Mon Nov 23 2020
@author: <NAME>
"""
import numpy as np
from PIL import Image
import cv2
import time
import copy
import arcpy
from arcpy import env
from arcpy.sa import Viewshed2
#from arcpy.da import *
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import matplotlib.pyplot as plt
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import math
#env.scratchWorkspace = r"in_memory"
# print('ClearWorkspaceCache_management: ', arcpy.ClearWorkspaceCache_management())
arcpy.ClearWorkspaceCache_management()
env.scratchWorkspace = r"in_memory"
#env.workspace = r"../data/space/"
#env.workspace = r"C:/Users/Akmaral/Desktop/coverage/test4/shape_file_gen/"
env.overwriteOutput = True
env.outputCoordinateSystem = arcpy.SpatialReference("WGS 1984 UTM Zone 18N")
env.geographicTransformations = "Arc_1950_To_WGS_1984_5; PSAD_1956_To_WGS_1984_6"
#env.parallelProcessingFactor = "200%"
env.processorType = "GPU"
env.gpuID = "0"
env.compression = "LZ77" #"LZ77" #"JPEG" # LZW
env.tileSize = "128 128"
env.pyramid = "PYRAMIDS -1 CUBIC LZ77 NO_SKIP"
# arcpy.Delete_management("in_memory")
class ViewshedCoverageEnv(gym.Env):
"""
Description:
Viewshed analysis on raster data
Source:
ArcGIS function
Observation:
Type: Image
Actions:
Type: Discrete
Num Action
0 Pan +5 deg
1 Pan -5 deg
        2     Tilt +3 deg
        3     Tilt -3 deg
        4     Zoom in  (x1.25)
        5     Zoom out (x1.25)
    Reward:
        Newly covered pixels of the target coverage map at each step
Starting State:
Init image of the city
Episode termination:
        Episode length > 200 steps (self.max_iter)
"""
metadata = {'render.modes': ['human']}
def __init__(self):
# import image of city
self.city_array = np.array((Image.open(r"../data/images/RasterAstanaCroppedZero.png")), dtype=np.uint16) #.resize((900,600))
# self.city_array = self.city_array/100
print('+++ ', np.max(np.max(self.city_array)), np.min(np.min(self.city_array)))
self.city_array = self.city_array/100 - 285 # convert to meter
print('Original Image: ', type(self.city_array), self.city_array.shape)
# crop the image with center at camera
self.camera_location = (3073, 11684, 350) # x,y,z coordinate # (11685, 7074, 350) - RasterAstana.png
# self.camera_location = (3073, 11684, 350) # x,y,z coordinate # (11685, 7074, 350) - RasterAstana.png
self.coverage_radius = 2000 # .. km square from the center
self.city_array = self.city_array[self.camera_location[1]-self.coverage_radius:self.camera_location[1]+self.coverage_radius,
self.camera_location[0]-self.coverage_radius:self.camera_location[0]+self.coverage_radius]
# resize the image
# self.city_array = self.city_array[2500:3500, 2500:3500]#np.resize(self.city_array, (1000,1000))
# self.city_array_res = self.city_array[0:1000, 0:1000]
        self.im_height, self.im_width = self.city_array.shape # numpy shape is (rows, cols), i.e. (height, width)
print('Cropped Image: ', type(self.city_array), self.city_array.shape)
print('Range Image: ', np.min(self.city_array), np.max(self.city_array))
# input raster
self.input_raster = arcpy.NumPyArrayToRaster(self.city_array)
# input shapefile
self.shape_file = r"../data/input_shapefile/1/points_XYTableToPoint_second.shp"
# CAMERA params
self.camera_number = 1
self.camera_location_cropped = (int(self.coverage_radius), int(self.coverage_radius), self.camera_location[2]-285)
print('Camera Loc: ', self.camera_location_cropped)
#
        self.max_distance_min_zoom = 100 # at min zoom (20mm) the max visible distance is 100
        self.max_distance_max_zoom = 4000 # at max zoom (800mm) the max visible distance is 4000
# PTZ
self.pan_pos = 0
self.tilt_pos = -45
        self.zoom_pos = 20 # focal length in mm: 20 (min) to 800 (max)
self.delta_pan = 5 # deg
self.delta_tilt = 3 # deg
self.delta_zoom = 1.25 # 1.25x times
self.horizon_fov = 21 # 21 # Field of View deg
self.vertical_fov = 11.8 # 11.8 # Field of View deg
self.zoom_distance = self.max_distance_min_zoom
# VIEWSHED params
self.init_x = self.camera_location_cropped[0] # self.im_width/2 #310
self.init_y = self.camera_location_cropped[1] # self.im_height/2 #80
self.observer_height = self.camera_location_cropped[2] + 5 # height
self.analysis_type = "FREQUENCY"
self.analysis_method = "PERIMETER_SIGHTLINES"
self.azimuth1 = self.pan_pos - self.horizon_fov/2
self.azimuth2 = self.pan_pos + self.horizon_fov/2
self.vertical_lower_angle = self.tilt_pos - self.vertical_fov/2
self.vertical_upper_angle = self.tilt_pos + self.vertical_fov/2
self.radius_is_3d = 'True'
self.inner_radius = 0
self.outer_radius = self.zoom_distance
# GYM env params
self.observation_space = spaces.Box(low=0, high=255, shape=(self.im_width,self.im_height, 1), dtype = np.uint8)
self.action_space = spaces.Discrete(6) # 6 different actions
self.state = np.zeros((self.im_height, self.im_width)) # self.city_Array
# render
self.max_render = 100
self.is_render = 'True'
self.iteration = 0
self.info = 0
self.info_x = 0.0
self.info_y = 0.0
self.seed(0)
# reward
self.ratio_threshhold = 0.02
self.reward_good_step = 1
self.reward_bad_step = -0.05
self.max_iter = 200
# input
self.input_total_coverage = np.asarray(Image.open(r"../data/images/RasterTotalCoverage4.png"))
#self.input_total_coverage = np.asarray(Image.open(r"../data/images/RasterTotalCoverage4Resized.png"))
self.rad_matrix, self.angle_matrix = self.create_cartesian()
def step(self, action):
#assert self.action_space.contains(action)
# this function needs to do:
# map the "action" to CELL value update in shapefile (actions x observers)
# action [0 ... N] --- > action type x observerN
# here assumption is that action will be 1xD array for all N cameras, and should be interpreted as which action to which observer
# for 1 camera
action_type = action # %cameraN
observer_n = self.camera_number #action//actionN + 1
#print('action', action) # [0 ... 5]
#print('action_type',action_type) # [0 ... 5]
#print('observerN',observerN ) # [1 ... ]
self.update_shapefile_discrete(self.shape_file, action_type, observer_n)
# create the viewshed
output_array, visible_area = self.create_viewshed(self.input_raster, self.shape_file)
output_array2, visible_area2 = self.get_coverage_fast()
self.testing_im = output_array2
# interpret the viewshed output to some value - state , reward etc
# next_state ?
next_state = output_array
ratio = visible_area/output_array.size
# for rendering
self.state = output_array
self.info = ratio
#reward ?
#reward = visible_area/output_array.size
#done ?
crossed_map = np.multiply(self.input_total_coverage,(output_array))
crossed_points = (crossed_map > 0).astype(int)
crossed_area = crossed_points.sum()
reward = crossed_area
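        # Reward = number of pixels that are both visible in this viewshed and
        # still uncovered in the remaining total-coverage map.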
# if ratio > self.ratio_threshhold:
# reward = self.reward_good_step + ratio*5
# else:
# reward = self.reward_bad_step + ratio*5
if self.iteration > self.max_iter:
done = 1
else:
done = 0
self.iteration = self.iteration + 1
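        # Mark the newly seen pixels as covered so they cannot be rewarded
        # again on later steps.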
self.input_total_coverage = np.multiply(self.input_total_coverage,(1-output_array))
next_state = np.stack((self.input_total_coverage, next_state), axis = 0)
return next_state, reward, done
def seed(self, seed = None):
        self.np_random, seed = seeding.np_random(seed)  # seed the RNG with the provided value
return [seed]
def reset(self):
print('Env reset ...')
self.reset_shapefile(self.shape_file)
self.state = np.zeros((self.im_height, self.im_width)) # self.state
self.iteration = 0
next_state = np.stack((self.input_total_coverage, self.state), axis = 0)
return next_state
def render(self, mode='human'):
mode = 0 # 0 - black/white ; 1 - rgb
if mode == 1:
city_gray = np.array(self.city_array, dtype=np.uint8)
show_array = np.stack((city_gray,)*3, axis=-1)
show_array[:,:,2] = self.state*255
show_array = cv2.resize(show_array, (1000,1000), interpolation = cv2.INTER_AREA)
else:
show_array = np.array(self.state*255, dtype='uint8')
show_array = cv2.resize(show_array, (1000,1000), interpolation = cv2.INTER_AREA)
# if mode == 1:
# city_gray1 = np.array(self.city_array, dtype=np.uint8)
# show_array1 = np.stack((city_gray1,)*3, axis= -1)
# show_array1[:,:,2] = self.testing_im*255
# show_array1 = cv2.resize(show_array1, (1000,1000), interpolation = cv2.INTER_AREA)
# else:
# show_array1 = self.testing_im
# print('****** ', np.max(np.max(self.testing_im)))
# show_array1 = cv2.resize(show_array1, (800,800), interpolation = cv2.INTER_AREA)
# if self.is_render == 'True' and self.iteration < self.max_render :
# print('render --- ratio --- ', self.info)
# cv2.startWindowThread()
# cv2.namedWindow("preview")
# cv2.imshow("preview", show_array)
# #cv2.imshow("GET COVERAGE", show_array1)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
try:
cv2.startWindowThread()
cv2.namedWindow("preview")
cv2.imshow("preview", show_array)
cv2.namedWindow("COVERAGE")
#show_array1 = cv2.resize(self.input_total_coverage, (1000,1000), interpolation = cv2.INTER_AREA)
#cv2.imshow("COVERAGE", show_array1)
array = np.array(self.testing_im*255, dtype='uint8')
show_array1 = cv2.resize(array, (1000,1000), interpolation = cv2.INTER_AREA)
cv2.imshow("COVERAGE", show_array1)
#cv2.imshow("COVERAGE", show_array1)
cv2.waitKey(100)
#if cv2.waitKey(1)& 0xFF == ord('q'):
# quit()
except KeyboardInterrupt:
cv2.destroyAllWindows()
# quit()
def close(self):
pass
def reset_shapefile(self, shape_file):
#print('Reset init camera locations')
fieldlist=['AZIMUTH1','AZIMUTH2']
tokens=['SHAPE@X','SHAPE@Y']
with arcpy.da.UpdateCursor(shape_file,tokens+fieldlist) as cursor:
for row in cursor:
row[0]= self.init_x
row[1]= self.init_y
row[2]= self.azimuth1
row[3]= self.azimuth2
cursor.updateRow(row)
del cursor
def update_shapefile_discrete(self, shape_file, action_type, observer_n):
# Type: Discrete
# Num Action
# 0 Pan +5 deg
# 1 Pan -5 deg
        # 2 Tilt +3 deg
        # 3 Tilt -3 deg
        # 4 Zoom in  (x1.25)
        # 5 Zoom out (x1.25)
if action_type == 0: # rotate + delta
print('... pan right')
# update camera/ptz setting
self.pan_pos += self.delta_pan
if self.pan_pos >= 360:
self.pan_pos -= 360
elif action_type == 1: # rotate - delta deg
print('... pan left')
# update camera/ptz setting
self.pan_pos -= self.delta_pan
if self.pan_pos < 0:
self.pan_pos += 360
elif action_type == 2: # tilt + deg
print('... tilt up')
# update camera/ptz setting
self.tilt_pos += self.delta_tilt
if self.tilt_pos > 20:
self.tilt_pos = 20
elif action_type == 3: # tilt - deg
print('... tilt down')
# update camera/ptz setting
self.tilt_pos -= self.delta_tilt
if self.tilt_pos < -45:
self.tilt_pos = -45
elif action_type == 4: # zoom + in
print('... zoom in')
# update camera/ptz setting
self.zoom_pos *= self.delta_zoom
self.horizon_fov /= self.delta_zoom
self.vertical_fov /= self.delta_zoom
self.zoom_distance *= self.delta_zoom
# boundaries
if self.zoom_pos > 800:
self.zoom_pos = 800
if self.horizon_fov < 0.5:
self.horizon_fov = 0.5
if self.vertical_fov < 0.3:
self.vertical_fov = 0.3
if self.zoom_distance > self.max_distance_max_zoom:
self.zoom_distance = self.max_distance_max_zoom
elif action_type == 5: # zoom - out
print('... zoom out')
# update camera/ptz setting
self.zoom_pos /= self.delta_zoom
self.horizon_fov *= self.delta_zoom
self.vertical_fov *= self.delta_zoom
self.zoom_distance /= self.delta_zoom
# boundaries
if self.zoom_pos < 20:
self.zoom_pos = 20
if self.horizon_fov > 21:
self.horizon_fov = 21
if self.vertical_fov > 11.8:
self.vertical_fov = 11.8
if self.zoom_distance < self.max_distance_min_zoom:
self.zoom_distance = self.max_distance_min_zoom
else:
pass
print('No action done ..')
def create_viewshed(self, input_raster, shape_file):
# UPDATE viewshed params
self.azimuth1 = self.pan_pos - self.horizon_fov/2
if self.azimuth1 < 0:
self.azimuth1 += 360
self.azimuth2 = self.pan_pos + self.horizon_fov/2
# second
# self.azimuth2 = self.pan_pos - self.horizon_fov/2
# self.azimuth2 = 90 - self.azimuth2
# if self.azimuth2 < 0:
# self.azimuth2 += 360
# self.azimuth1 = self.azimuth2 - self.horizon_fov
# temp_angle = self.pan_pos
# temp_angle = 90 - temp_angle
# if temp_angle < 0:
# temp_angle += 360
#
# self.azimuth1 = temp_angle - self.horizon_fov/2
# #self.azimuth1 = 90 - self.azimuth1
# if self.azimuth1 < 0:
# self.azimuth1 += 360
# self.azimuth2 = temp_angle + self.horizon_fov/2
self.vertical_lower_angle = self.tilt_pos - self.vertical_fov/2
self.vertical_upper_angle = self.tilt_pos + self.vertical_fov/2
self.outer_radius = self.zoom_distance
# print('Elapsed time for viewshed: ', time.time() - start_t)
print('1 - camera : pan_pos {}, tilt_pos {} , zoom_pos {}, horizon_fov {}, vertical_fov {}, zoom_distance {}'.format(
self.pan_pos, self.tilt_pos, self.zoom_pos, self.horizon_fov, self.vertical_fov, self.zoom_distance))
print('2 - viewshed : azimuth1 {}, azimuth2 {} , vertical_lower_angle {}, vertical_upper_angle {}, outer_radius {}'.format(
self.azimuth1, self.azimuth2, self.vertical_lower_angle, self.vertical_upper_angle, self.outer_radius))
start_t = time.time()
#self.azimuth1 = 315 #int(input("s1 "))
#self.azimuth2 = 45 #int(input("s2 "))
# self.vertical_lower_angle = -90
# self.vertical_upper_angle = 90
outViewshed2 = Viewshed2(in_raster=self.input_raster, in_observer_features= self.shape_file, out_agl_raster= "", analysis_type= self.analysis_type,
vertical_error= 0, out_observer_region_relationship_table= "", refractivity_coefficient= 0.13,
surface_offset= 0, observer_offset = 0, observer_elevation = self.observer_height, inner_radius= self.inner_radius,
outer_radius= self.outer_radius, inner_radius_is_3d = self.radius_is_3d, outer_radius_is_3d = self.radius_is_3d,
horizontal_start_angle= self.azimuth1, horizontal_end_angle= self.azimuth2, vertical_upper_angle = self.vertical_upper_angle,
vertical_lower_angle= self.vertical_lower_angle, analysis_method=self.analysis_method)
# # # manual
# outViewshed2 = Viewshed2(in_raster=self.input_raster, in_observer_features= self.shape_file, out_agl_raster= "", analysis_type= self.analysis_type,
# vertical_error= 0, out_observer_region_relationship_table= "", refractivity_coefficient= 0.13,
# surface_offset= 0, observer_offset = 0, observer_elevation = 70, inner_radius= 0,
# outer_radius= 200, inner_radius_is_3d = self.radius_is_3d, outer_radius_is_3d = self.radius_is_3d,
# horizontal_start_angle= 0, horizontal_end_angle= 360, vertical_upper_angle = 25.9,
# vertical_lower_angle= -56, analysis_method=self.analysis_method)
#print('--------------- finished -----------------')
print('Elapsed time for viewshed: ', time.time() - start_t)
# extract the array
output_array = arcpy.RasterToNumPyArray(outViewshed2) # output array -> each cell how many observer can see that pixel
# not visible cells will have value of zero
output_array[output_array == 255] = 0
visible_points = output_array > 0
visible_area = visible_points.sum()
print('visible_points ', visible_area)
# save
# im = Image.fromarray(output_array*255)
# im.save("../data/images/RasterTotalCoverage4.png")
return output_array, visible_area
#
# def get_coverage(self):
# start_t = time.time()
# output_array = np.zeros((self.im_height, self.im_width))
#
# temp_angle = self.pan_pos
# # temp_angle = 450-temp_angle
# # if temp_angle >= 360:
# # temp_angle -= 360
#
#
# # temp_angle = 90-self.pan_pos
# # if temp_angle < -180:
# # temp_angle = 90 + (temp_angle + 180)
# #
# # print('test: ', temp_angle, self.pan_pos)
#
#
# # #self.azimuth1 = temp_angle - self.horizon_fov/2
# #self.azimuth1 = 90 - self.azimuth1
# #if self.azimuth1 < 0:
# # self.azimuth1 += 360
# #self.azimuth2 = temp_angle + self.horizon_fov/2
#
# horizon_start = temp_angle - self.horizon_fov/2
# horizon_end = temp_angle + self.horizon_fov/2
# if horizon_start < 0:
# horizon_start += 360
#
# # if horizon_start <= -180:
# # horizon_end = 180 + (horizon_start + 180)
# #
# # if horizon_end > 180:
# # horizon_start = -180 + (horizon_end - 180)
#
# vertical_start = self.tilt_pos - self.vertical_fov/2
# vertical_end = self.tilt_pos + self.vertical_fov/2
#
# if vertical_start < 0 and vertical_end < 0:
#
# radius_inner = self.observer_height*math.tan(math.radians(90+vertical_start))
# radius_outer = self.observer_height*math.tan(math.radians(90+vertical_end))
# if radius_outer > self.zoom_distance:
# radius_outer = self.zoom_distance
#
# # print('rad ---> ', radius_inner, radius_outer)
# # print('hor ---> ', horizon_start, horizon_end)
#
# for i in range(1500, 2500):
# for j in range(1500, 2500):
#
# point_rad = math.sqrt((self.coverage_radius-i)**2 + (self.coverage_radius-j)**2)
# #if i == self.coverage_radius:
# # point_angle = 0
# #else:
# #point_angle = 90-math.degrees(math.atan((self.coverage_radius-j)/(self.coverage_radius-i)))
# # point_angle = math.degrees(math.atan2((self.coverage_radius-j),(i-self.coverage_radius)))
# # if point_angle < 0:
# # point_angle += 360
#
# point_angle = math.degrees(math.atan2((self.coverage_radius-i),(j-self.coverage_radius)))
# point_angle *= -1
# point_angle += 90
#
# #point_angle += 90
# #if point_angle > 360:
# # point_angle -= 360
#
# if point_angle < 0:
# point_angle += 360
#
# inside_rad = radius_inner < point_rad < radius_outer
#
# # case 1
#
# if horizon_start < horizon_end:
# output_array[i,j] = (horizon_start < point_angle and point_angle < horizon_end) and inside_rad
# else:
# output_array[i,j] = (horizon_start < point_angle or point_angle < horizon_end) and inside_rad
#
# #output_array[i,j] = point_angle > horizon_start #
# # output_array[i,j] = (radius_inner < point_rad < radius_outer) and (horizon_start < point_angle < horizon_end)
#
# #point_rad = np.sqrt((self.city_array-self.coverage_radius)**2 + (self.city_array-self.coverage_radius)**2)
# #point_angle = -np.degrees(np.arctan((self.coverage_radius - self.city_array)/((self.coverage_radius - self.city_array).transpose())))
#
#
# #output_array = point_rad > 2000
#
#
# #output_array = (radius_inner < point_rad).astype(int) * (radius_outer > point_rad).astype(int) * (horizon_start < point_angle).astype(int) * (point_angle < horizon_end).astype(int)
# print('Elapsed time for coverage: ', time.time() - start_t)
#
# output_array = output_array.astype(int)
# print('*** ', type(output_array), output_array.shape)
#
# visible_points = (output_array > 0).astype(int)
# visible_area = 0 # visible_points.sum()
#
# else:
# visible_area = 0
#
# return output_array, visible_area
def create_cartesian(self):
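        # Precompute, for every pixel, its distance (in cells) and its angle in
        # the same convention as the pan angle, relative to the camera at the
        # center of the cropped raster. get_coverage_fast() uses these matrices
        # for vectorised sector-membership tests instead of per-pixel loops.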
rad_matrix = np.zeros((self.im_height, self.im_width))
angle_matrix = np.zeros((self.im_height, self.im_width))
for i in range(self.im_height):
for j in range(self.im_width):
point_rad = math.sqrt((self.coverage_radius-i)**2 + (self.coverage_radius-j)**2)
point_angle = math.degrees(math.atan2((self.coverage_radius-i),(j-self.coverage_radius)))
point_angle *= -1
point_angle += 90
if point_angle < 0:
point_angle += 360
rad_matrix[i,j] = point_rad
angle_matrix[i,j] = point_angle
return rad_matrix, angle_matrix
def get_coverage_fast(self):
start_t = time.time()
output_array = np.zeros((self.im_height, self.im_width))
temp_angle = self.pan_pos
# temp_angle = 450-temp_angle
# if temp_angle >= 360:
# temp_angle -= 360
# temp_angle = 90-self.pan_pos
# if temp_angle < -180:
# temp_angle = 90 + (temp_angle + 180)
#
# print('test: ', temp_angle, self.pan_pos)
# #self.azimuth1 = temp_angle - self.horizon_fov/2
#self.azimuth1 = 90 - self.azimuth1
#if self.azimuth1 < 0:
# self.azimuth1 += 360
#self.azimuth2 = temp_angle + self.horizon_fov/2
horizon_start = temp_angle - self.horizon_fov/2
horizon_end = temp_angle + self.horizon_fov/2
if horizon_start < 0:
horizon_start += 360
if horizon_end >= 360:
horizon_end -= 360
# if horizon_start <= -180:
# horizon_end = 180 + (horizon_start + 180)
#
# if horizon_end > 180:
# horizon_start = -180 + (horizon_end - 180)
vertical_start = self.tilt_pos - self.vertical_fov/2
vertical_end = self.tilt_pos + self.vertical_fov/2
if vertical_start < 0 and vertical_end < 0:
radius_inner = self.observer_height*math.tan(math.radians(90+vertical_start))
radius_outer = self.observer_height*math.tan(math.radians(90+vertical_end))
if radius_outer > self.zoom_distance:
radius_outer = self.zoom_distance
# matrix
rad_matrix, angle_matrix = self.rad_matrix, self.angle_matrix
#inside_rad = radius_inner < rad_matrix and rad_matrix < radius_outer
inside_rad = np.multiply( np.greater_equal(rad_matrix, radius_inner), np.greater_equal(radius_outer, rad_matrix))
# if horizon_start < horizon_end:
# inside_angle = (horizon_start < point_angle and point_angle < horizon_end)
# else:
# inside_angle = (horizon_start < point_angle or point_angle < horizon_end)
#
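            # Handle the horizontal FOV wrapping past 0/360 deg: in the wrapped
            # case a bearing lies inside the sector if it is >= horizon_start OR
            # <= horizon_end, so the boolean masks are combined with np.add
            # (acting as a logical OR) instead of np.multiply (logical AND).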
if horizon_start < horizon_end:
inside_angle = np.multiply(np.greater_equal(angle_matrix, horizon_start), np.greater_equal(horizon_end, angle_matrix))
else:
inside_angle = np.add(np.greater_equal(angle_matrix, horizon_start), np.greater_equal(horizon_end, angle_matrix))
inside_sector = np.multiply(inside_rad, inside_angle)
print('Here --- ', inside_rad.shape, inside_angle.shape, inside_sector.shape)
            print('2 - coverage : horizon_start {}, horizon_end {}, vertical_start {}, vertical_end {}, radius_inner {}, radius_outer {}'.format(
                horizon_start, horizon_end, vertical_start, vertical_end, radius_inner, radius_outer))
output_array = inside_sector
print('Elapsed time for coverage: ', time.time() - start_t)
output_array = output_array.astype(int)
print('*** ', type(output_array), output_array.shape)
visible_points = (output_array > 0).astype(int)
visible_area = 0 # visible_points.sum()
else:
print('Tilt Angle is larger than zero !!!')
visible_area = 0
return output_array, visible_area
|
[
"math.atan2",
"gym.spaces.Discrete",
"arcpy.sa.Viewshed2",
"cv2.startWindowThread",
"arcpy.ClearWorkspaceCache_management",
"cv2.imshow",
"gym.utils.seeding.np_random",
"numpy.multiply",
"math.radians",
"numpy.max",
"cv2.destroyAllWindows",
"arcpy.NumPyArrayToRaster",
"cv2.resize",
"arcpy.SpatialReference",
"numpy.stack",
"arcpy.RasterToNumPyArray",
"arcpy.da.UpdateCursor",
"math.sqrt",
"cv2.waitKey",
"numpy.min",
"numpy.greater_equal",
"numpy.zeros",
"time.time",
"PIL.Image.open",
"numpy.array",
"gym.spaces.Box",
"cv2.namedWindow"
] |
[((518, 556), 'arcpy.ClearWorkspaceCache_management', 'arcpy.ClearWorkspaceCache_management', ([], {}), '()\n', (554, 556), False, 'import arcpy\n'), ((762, 809), 'arcpy.SpatialReference', 'arcpy.SpatialReference', (['"""WGS 1984 UTM Zone 18N"""'], {}), "('WGS 1984 UTM Zone 18N')\n", (784, 809), False, 'import arcpy\n'), ((3378, 3419), 'arcpy.NumPyArrayToRaster', 'arcpy.NumPyArrayToRaster', (['self.city_array'], {}), '(self.city_array)\n', (3402, 3419), False, 'import arcpy\n'), ((5290, 5380), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(self.im_width, self.im_height, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(self.im_width, self.im_height, 1), dtype\n =np.uint8)\n', (5300, 5380), False, 'from gym import error, spaces, utils\n'), ((5405, 5423), 'gym.spaces.Discrete', 'spaces.Discrete', (['(6)'], {}), '(6)\n', (5420, 5423), False, 'from gym import error, spaces, utils\n'), ((5468, 5509), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (5476, 5509), True, 'import numpy as np\n'), ((7541, 7593), 'numpy.multiply', 'np.multiply', (['self.input_total_coverage', 'output_array'], {}), '(self.input_total_coverage, output_array)\n', (7552, 7593), True, 'import numpy as np\n'), ((8078, 8134), 'numpy.multiply', 'np.multiply', (['self.input_total_coverage', '(1 - output_array)'], {}), '(self.input_total_coverage, 1 - output_array)\n', (8089, 8134), True, 'import numpy as np\n'), ((8156, 8213), 'numpy.stack', 'np.stack', (['(self.input_total_coverage, next_state)'], {'axis': '(0)'}), '((self.input_total_coverage, next_state), axis=0)\n', (8164, 8213), True, 'import numpy as np\n'), ((8323, 8342), 'gym.utils.seeding.np_random', 'seeding.np_random', ([], {}), '()\n', (8340, 8342), False, 'from gym.utils import seeding\n'), ((8485, 8526), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (8493, 8526), True, 'import numpy as np\n'), ((8588, 8645), 'numpy.stack', 'np.stack', (['(self.input_total_coverage, self.state)'], {'axis': '(0)'}), '((self.input_total_coverage, self.state), axis=0)\n', (8596, 8645), True, 'import numpy as np\n'), ((15835, 15846), 'time.time', 'time.time', ([], {}), '()\n', (15844, 15846), False, 'import time\n'), ((16051, 16739), 'arcpy.sa.Viewshed2', 'Viewshed2', ([], {'in_raster': 'self.input_raster', 'in_observer_features': 'self.shape_file', 'out_agl_raster': '""""""', 'analysis_type': 'self.analysis_type', 'vertical_error': '(0)', 'out_observer_region_relationship_table': '""""""', 'refractivity_coefficient': '(0.13)', 'surface_offset': '(0)', 'observer_offset': '(0)', 'observer_elevation': 'self.observer_height', 'inner_radius': 'self.inner_radius', 'outer_radius': 'self.outer_radius', 'inner_radius_is_3d': 'self.radius_is_3d', 'outer_radius_is_3d': 'self.radius_is_3d', 'horizontal_start_angle': 'self.azimuth1', 'horizontal_end_angle': 'self.azimuth2', 'vertical_upper_angle': 'self.vertical_upper_angle', 'vertical_lower_angle': 'self.vertical_lower_angle', 'analysis_method': 'self.analysis_method'}), "(in_raster=self.input_raster, in_observer_features=self.shape_file,\n out_agl_raster='', analysis_type=self.analysis_type, vertical_error=0,\n out_observer_region_relationship_table='', refractivity_coefficient=\n 0.13, surface_offset=0, observer_offset=0, observer_elevation=self.\n observer_height, inner_radius=self.inner_radius, outer_radius=self.\n outer_radius, inner_radius_is_3d=self.radius_is_3d, 
outer_radius_is_3d=\n self.radius_is_3d, horizontal_start_angle=self.azimuth1,\n horizontal_end_angle=self.azimuth2, vertical_upper_angle=self.\n vertical_upper_angle, vertical_lower_angle=self.vertical_lower_angle,\n analysis_method=self.analysis_method)\n", (16060, 16739), False, 'from arcpy.sa import Viewshed2\n'), ((17848, 17886), 'arcpy.RasterToNumPyArray', 'arcpy.RasterToNumPyArray', (['outViewshed2'], {}), '(outViewshed2)\n', (17872, 17886), False, 'import arcpy\n'), ((22883, 22924), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (22891, 22924), True, 'import numpy as np\n'), ((22948, 22989), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (22956, 22989), True, 'import numpy as np\n'), ((23610, 23621), 'time.time', 'time.time', ([], {}), '()\n', (23619, 23621), False, 'import time\n'), ((23645, 23686), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (23653, 23686), True, 'import numpy as np\n'), ((1803, 1859), 'PIL.Image.open', 'Image.open', (['"""../data/images/RasterAstanaCroppedZero.png"""'], {}), "('../data/images/RasterAstanaCroppedZero.png')\n", (1813, 1859), False, 'from PIL import Image\n'), ((3276, 3299), 'numpy.min', 'np.min', (['self.city_array'], {}), '(self.city_array)\n', (3282, 3299), True, 'import numpy as np\n'), ((3301, 3324), 'numpy.max', 'np.max', (['self.city_array'], {}), '(self.city_array)\n', (3307, 3324), True, 'import numpy as np\n'), ((5948, 6001), 'PIL.Image.open', 'Image.open', (['"""../data/images/RasterTotalCoverage4.png"""'], {}), "('../data/images/RasterTotalCoverage4.png')\n", (5958, 6001), False, 'from PIL import Image\n'), ((8808, 8849), 'numpy.array', 'np.array', (['self.city_array'], {'dtype': 'np.uint8'}), '(self.city_array, dtype=np.uint8)\n', (8816, 8849), True, 'import numpy as np\n'), ((8875, 8910), 'numpy.stack', 'np.stack', (['((city_gray,) * 3)'], {'axis': '(-1)'}), '((city_gray,) * 3, axis=-1)\n', (8883, 8910), True, 'import numpy as np\n'), ((8981, 9047), 'cv2.resize', 'cv2.resize', (['show_array', '(1000, 1000)'], {'interpolation': 'cv2.INTER_AREA'}), '(show_array, (1000, 1000), interpolation=cv2.INTER_AREA)\n', (8991, 9047), False, 'import cv2\n'), ((9088, 9129), 'numpy.array', 'np.array', (['(self.state * 255)'], {'dtype': '"""uint8"""'}), "(self.state * 255, dtype='uint8')\n", (9096, 9129), True, 'import numpy as np\n'), ((9153, 9219), 'cv2.resize', 'cv2.resize', (['show_array', '(1000, 1000)'], {'interpolation': 'cv2.INTER_AREA'}), '(show_array, (1000, 1000), interpolation=cv2.INTER_AREA)\n', (9163, 9219), False, 'import cv2\n'), ((10140, 10163), 'cv2.startWindowThread', 'cv2.startWindowThread', ([], {}), '()\n', (10161, 10163), False, 'import cv2\n'), ((10176, 10202), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview"""'], {}), "('preview')\n", (10191, 10202), False, 'import cv2\n'), ((10215, 10248), 'cv2.imshow', 'cv2.imshow', (['"""preview"""', 'show_array'], {}), "('preview', show_array)\n", (10225, 10248), False, 'import cv2\n'), ((10261, 10288), 'cv2.namedWindow', 'cv2.namedWindow', (['"""COVERAGE"""'], {}), "('COVERAGE')\n", (10276, 10288), False, 'import cv2\n'), ((10470, 10516), 'numpy.array', 'np.array', (['(self.testing_im * 255)'], {'dtype': '"""uint8"""'}), "(self.testing_im * 255, dtype='uint8')\n", (10478, 10516), True, 'import numpy as np\n'), ((10541, 10602), 'cv2.resize', 'cv2.resize', (['array', '(1000, 1000)'], {'interpolation': 
'cv2.INTER_AREA'}), '(array, (1000, 1000), interpolation=cv2.INTER_AREA)\n', (10551, 10602), False, 'import cv2\n'), ((10616, 10651), 'cv2.imshow', 'cv2.imshow', (['"""COVERAGE"""', 'show_array1'], {}), "('COVERAGE', show_array1)\n", (10626, 10651), False, 'import cv2\n'), ((10715, 10731), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (10726, 10731), False, 'import cv2\n'), ((11117, 11170), 'arcpy.da.UpdateCursor', 'arcpy.da.UpdateCursor', (['shape_file', '(tokens + fieldlist)'], {}), '(shape_file, tokens + fieldlist)\n', (11138, 11170), False, 'import arcpy\n'), ((26061, 26098), 'numpy.multiply', 'np.multiply', (['inside_rad', 'inside_angle'], {}), '(inside_rad, inside_angle)\n', (26072, 26098), True, 'import numpy as np\n'), ((1977, 2000), 'numpy.max', 'np.max', (['self.city_array'], {}), '(self.city_array)\n', (1983, 2000), True, 'import numpy as np\n'), ((2010, 2033), 'numpy.min', 'np.min', (['self.city_array'], {}), '(self.city_array)\n', (2016, 2033), True, 'import numpy as np\n'), ((10853, 10876), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10874, 10876), False, 'import cv2\n'), ((17773, 17784), 'time.time', 'time.time', ([], {}), '()\n', (17782, 17784), False, 'import time\n'), ((23104, 23180), 'math.sqrt', 'math.sqrt', (['((self.coverage_radius - i) ** 2 + (self.coverage_radius - j) ** 2)'], {}), '((self.coverage_radius - i) ** 2 + (self.coverage_radius - j) ** 2)\n', (23113, 23180), False, 'import math\n'), ((25348, 25390), 'numpy.greater_equal', 'np.greater_equal', (['rad_matrix', 'radius_inner'], {}), '(rad_matrix, radius_inner)\n', (25364, 25390), True, 'import numpy as np\n'), ((25392, 25434), 'numpy.greater_equal', 'np.greater_equal', (['radius_outer', 'rad_matrix'], {}), '(radius_outer, rad_matrix)\n', (25408, 25434), True, 'import numpy as np\n'), ((23217, 23279), 'math.atan2', 'math.atan2', (['(self.coverage_radius - i)', '(j - self.coverage_radius)'], {}), '(self.coverage_radius - i, j - self.coverage_radius)\n', (23227, 23279), False, 'import math\n'), ((24910, 24943), 'math.radians', 'math.radians', (['(90 + vertical_start)'], {}), '(90 + vertical_start)\n', (24922, 24943), False, 'import math\n'), ((25000, 25031), 'math.radians', 'math.radians', (['(90 + vertical_end)'], {}), '(90 + vertical_end)\n', (25012, 25031), False, 'import math\n'), ((25791, 25836), 'numpy.greater_equal', 'np.greater_equal', (['angle_matrix', 'horizon_start'], {}), '(angle_matrix, horizon_start)\n', (25807, 25836), True, 'import numpy as np\n'), ((25838, 25881), 'numpy.greater_equal', 'np.greater_equal', (['horizon_end', 'angle_matrix'], {}), '(horizon_end, angle_matrix)\n', (25854, 25881), True, 'import numpy as np\n'), ((25939, 25984), 'numpy.greater_equal', 'np.greater_equal', (['angle_matrix', 'horizon_start'], {}), '(angle_matrix, horizon_start)\n', (25955, 25984), True, 'import numpy as np\n'), ((25986, 26029), 'numpy.greater_equal', 'np.greater_equal', (['horizon_end', 'angle_matrix'], {}), '(horizon_end, angle_matrix)\n', (26002, 26029), True, 'import numpy as np\n'), ((26534, 26545), 'time.time', 'time.time', ([], {}), '()\n', (26543, 26545), False, 'import time\n')]
|
import os
import shutil
import tensorflow as tf
import torch
from torch_mimicry.metrics import compute_fid
from torch_mimicry.metrics.inception_model import inception_utils
from torch_mimicry.nets.gan import gan
class ExampleGen(gan.BaseGenerator):
def __init__(self,
bottom_width=4,
nz=4,
ngf=256,
loss_type='gan',
*args,
**kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type=loss_type,
*args,
**kwargs)
def forward(self, x):
output = torch.ones(x.shape[0], 3, 32, 32)
return output
class TestComputeFID:
def setup(self):
self.netG = ExampleGen()
self.num_real_samples = 10
self.num_fake_samples = 10
self.batch_size = 10
self.device = torch.device("cpu")
# Create inception graph once.
self.inception_path = './metrics/inception_model'
if not os.path.exists(self.inception_path):
os.makedirs(self.inception_path)
inception_utils.create_inception_graph(self.inception_path)
# Directory
self.log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_log")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
def test_compute_gen_dist_stats(self):
if self.device.index is not None:
# Avoid unbounded memory usage
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.15,
visible_device_list=str(
self.device.index))
config = tf.ConfigProto(gpu_options=gpu_options)
else:
config = tf.ConfigProto(device_count={'GPU': 0})
with tf.compat.v1.Session(config=config) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m_fake, s_fake = compute_fid.compute_gen_dist_stats(
netG=self.netG,
num_samples=self.num_fake_samples,
sess=sess,
device=self.device,
seed=0,
batch_size=self.batch_size,
print_every=1)
assert m_fake.shape == (2048, )
assert s_fake.shape == (2048, 2048)
def test_compute_real_dist_stats(self):
if self.device.index is not None:
# Avoid unbounded memory usage
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.15,
visible_device_list=str(
self.device.index))
config = tf.ConfigProto(gpu_options=gpu_options)
else:
config = tf.ConfigProto(device_count={'GPU': 0})
with tf.compat.v1.Session(config=config) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m_real, s_real = compute_fid.compute_real_dist_stats(
num_samples=self.num_real_samples,
sess=sess,
dataset_name='fake_data',
batch_size=self.batch_size,
stats_file=None,
log_dir=self.log_dir,
seed=0,
verbose=True)
assert m_real.shape == (2048, )
assert s_real.shape == (2048, 2048)
def test_fid_score(self):
score = compute_fid.fid_score(num_real_samples=self.num_real_samples,
num_fake_samples=self.num_fake_samples,
netG=self.netG,
device=self.device,
seed=99,
batch_size=self.batch_size,
dataset_name='fake_data',
log_dir=self.log_dir)
assert type(score) == float
def teardown(self):
shutil.rmtree(self.log_dir)
del self.netG
if __name__ == "__main__":
test = TestComputeFID()
test.setup()
test.test_compute_gen_dist_stats()
test.test_compute_real_dist_stats()
test.test_fid_score()
test.teardown()
|
[
"torch.ones",
"torch_mimicry.metrics.compute_fid.fid_score",
"os.path.abspath",
"os.makedirs",
"os.path.exists",
"tensorflow.ConfigProto",
"tensorflow.compat.v1.Session",
"torch_mimicry.metrics.compute_fid.compute_real_dist_stats",
"torch_mimicry.metrics.compute_fid.compute_gen_dist_stats",
"torch.device",
"shutil.rmtree",
"torch_mimicry.metrics.inception_model.inception_utils.create_inception_graph",
"tensorflow.compat.v1.global_variables_initializer"
] |
[((719, 752), 'torch.ones', 'torch.ones', (['x.shape[0]', '(3)', '(32)', '(32)'], {}), '(x.shape[0], 3, 32, 32)\n', (729, 752), False, 'import torch\n'), ((975, 994), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (987, 994), False, 'import torch\n'), ((1199, 1258), 'torch_mimicry.metrics.inception_model.inception_utils.create_inception_graph', 'inception_utils.create_inception_graph', (['self.inception_path'], {}), '(self.inception_path)\n', (1237, 1258), False, 'from torch_mimicry.metrics.inception_model import inception_utils\n'), ((3706, 3941), 'torch_mimicry.metrics.compute_fid.fid_score', 'compute_fid.fid_score', ([], {'num_real_samples': 'self.num_real_samples', 'num_fake_samples': 'self.num_fake_samples', 'netG': 'self.netG', 'device': 'self.device', 'seed': '(99)', 'batch_size': 'self.batch_size', 'dataset_name': '"""fake_data"""', 'log_dir': 'self.log_dir'}), "(num_real_samples=self.num_real_samples,\n num_fake_samples=self.num_fake_samples, netG=self.netG, device=self.\n device, seed=99, batch_size=self.batch_size, dataset_name='fake_data',\n log_dir=self.log_dir)\n", (3727, 3941), False, 'from torch_mimicry.metrics import compute_fid\n'), ((4265, 4292), 'shutil.rmtree', 'shutil.rmtree', (['self.log_dir'], {}), '(self.log_dir)\n', (4278, 4292), False, 'import shutil\n'), ((1108, 1143), 'os.path.exists', 'os.path.exists', (['self.inception_path'], {}), '(self.inception_path)\n', (1122, 1143), False, 'import os\n'), ((1157, 1189), 'os.makedirs', 'os.makedirs', (['self.inception_path'], {}), '(self.inception_path)\n', (1168, 1189), False, 'import os\n'), ((1423, 1451), 'os.path.exists', 'os.path.exists', (['self.log_dir'], {}), '(self.log_dir)\n', (1437, 1451), False, 'import os\n'), ((1465, 1490), 'os.makedirs', 'os.makedirs', (['self.log_dir'], {}), '(self.log_dir)\n', (1476, 1490), False, 'import os\n'), ((1907, 1946), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1921, 1946), True, 'import tensorflow as tf\n'), ((1983, 2022), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (1997, 2022), True, 'import tensorflow as tf\n'), ((2037, 2072), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (2057, 2072), True, 'import tensorflow as tf\n'), ((2178, 2355), 'torch_mimicry.metrics.compute_fid.compute_gen_dist_stats', 'compute_fid.compute_gen_dist_stats', ([], {'netG': 'self.netG', 'num_samples': 'self.num_fake_samples', 'sess': 'sess', 'device': 'self.device', 'seed': '(0)', 'batch_size': 'self.batch_size', 'print_every': '(1)'}), '(netG=self.netG, num_samples=self.\n num_fake_samples, sess=sess, device=self.device, seed=0, batch_size=\n self.batch_size, print_every=1)\n', (2212, 2355), False, 'from torch_mimicry.metrics import compute_fid\n'), ((2969, 3008), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2983, 3008), True, 'import tensorflow as tf\n'), ((3045, 3084), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (3059, 3084), True, 'import tensorflow as tf\n'), ((3099, 3134), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (3119, 3134), True, 'import tensorflow as tf\n'), ((3240, 3446), 'torch_mimicry.metrics.compute_fid.compute_real_dist_stats', 'compute_fid.compute_real_dist_stats', ([], 
{'num_samples': 'self.num_real_samples', 'sess': 'sess', 'dataset_name': '"""fake_data"""', 'batch_size': 'self.batch_size', 'stats_file': 'None', 'log_dir': 'self.log_dir', 'seed': '(0)', 'verbose': '(True)'}), "(num_samples=self.num_real_samples, sess\n =sess, dataset_name='fake_data', batch_size=self.batch_size, stats_file\n =None, log_dir=self.log_dir, seed=0, verbose=True)\n", (3275, 3446), False, 'from torch_mimicry.metrics import compute_fid\n'), ((1332, 1357), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1347, 1357), False, 'import os\n'), ((2103, 2146), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2144, 2146), True, 'import tensorflow as tf\n'), ((3165, 3208), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (3206, 3208), True, 'import tensorflow as tf\n')]
|
from setuptools import setup
def read(file):
return open(file, 'r').read()
LONG_DESCRIPTION = read('README.md')
LICENSE = read('LICENSE.txt')
setup(
name='OceanLab',
version='0.1.0',
packages=['OceanLab'],
include_package_data=True,
description='Python functions for Physical Oceanography',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
download_url = 'https://pypi.python.org/pypi/OceanLab',
url='https://github.com/iuryt/OceanLab',
author='<NAME>',
author_email='<EMAIL>',
license=LICENSE,
py_modules=['OA','EOF','DYN'],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires = [
'seawater ~= 3.3',
'numpy ~= 1.18',
'scipy ~= 1.6',
'xarray ~= 0.18',
'dask ~= 2021.06',
'dask[distributed] ~= 2021.06'
],
)
|
[
"setuptools.setup"
] |
[((149, 976), 'setuptools.setup', 'setup', ([], {'name': '"""OceanLab"""', 'version': '"""0.1.0"""', 'packages': "['OceanLab']", 'include_package_data': '(True)', 'description': '"""Python functions for Physical Oceanography"""', 'long_description': 'LONG_DESCRIPTION', 'long_description_content_type': '"""text/markdown"""', 'download_url': '"""https://pypi.python.org/pypi/OceanLab"""', 'url': '"""https://github.com/iuryt/OceanLab"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': 'LICENSE', 'py_modules': "['OA', 'EOF', 'DYN']", 'classifiers': "['Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']", 'install_requires': "['seawater ~= 3.3', 'numpy ~= 1.18', 'scipy ~= 1.6', 'xarray ~= 0.18',\n 'dask ~= 2021.06', 'dask[distributed] ~= 2021.06']"}), "(name='OceanLab', version='0.1.0', packages=['OceanLab'],\n include_package_data=True, description=\n 'Python functions for Physical Oceanography', long_description=\n LONG_DESCRIPTION, long_description_content_type='text/markdown',\n download_url='https://pypi.python.org/pypi/OceanLab', url=\n 'https://github.com/iuryt/OceanLab', author='<NAME>', author_email=\n '<EMAIL>', license=LICENSE, py_modules=['OA', 'EOF', 'DYN'],\n classifiers=['Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'], install_requires=[\n 'seawater ~= 3.3', 'numpy ~= 1.18', 'scipy ~= 1.6', 'xarray ~= 0.18',\n 'dask ~= 2021.06', 'dask[distributed] ~= 2021.06'])\n", (154, 976), False, 'from setuptools import setup\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
#
# Test proper accounting with malleable transactions
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, connect_nodes, \
sync_blocks, gather_inputs
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
mining_reward = 10
starting_balance = mining_reward * 25
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("")
# First: use raw transaction API to send (starting_balance - (mining_reward - 2)) BTC to node1_address,
# but don't broadcast:
(total_in, inputs) = gather_inputs(self.nodes[0], (starting_balance - (mining_reward - 2)))
change_address = self.nodes[0].getnewaddress("")
outputs = {}
outputs[change_address] = (mining_reward - 2)
outputs[node1_address] = (starting_balance - (mining_reward - 2))
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two transaction from node[0] to node[1]; the
# second must spend change from the first because the first
# spends all mature inputs:
txid1 = self.nodes[0].sendfrom("", node1_address, (starting_balance - (mining_reward - 2)), 0)
txid2 = self.nodes[0].sendfrom("", node1_address, 5, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus mining_reward for another
# matured block, minus (starting_balance - (mining_reward - 2)), minus 5, and minus transaction fees:
expected = starting_balance
if self.options.mine_block: expected += mining_reward
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's total balance should be its starting balance plus both transaction amounts:
assert_equal(self.nodes[1].getbalance(""), starting_balance - (tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend to miner:
self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -1)
assert_equal(tx2["confirmations"], -1)
# Node0's total balance should be starting balance, plus (mining_reward * 2) for
# two more matured blocks, minus (starting_balance - (mining_reward - 2)) for the double-spend:
expected = starting_balance + (mining_reward * 2) - (starting_balance - (mining_reward - 2))
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Node1's total balance should be its starting balance plus the amount of the mutated send:
assert_equal(self.nodes[1].getbalance(""), starting_balance + (starting_balance - (mining_reward - 2)))
if __name__ == '__main__':
TxnMallTest().main()
|
[
"test_framework.util.gather_inputs",
"test_framework.util.connect_nodes",
"test_framework.util.assert_equal",
"test_framework.util.sync_blocks"
] |
[((1388, 1456), 'test_framework.util.gather_inputs', 'gather_inputs', (['self.nodes[0]', '(starting_balance - (mining_reward - 2))'], {}), '(self.nodes[0], starting_balance - (mining_reward - 2))\n', (1401, 1456), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((1803, 1846), 'test_framework.util.assert_equal', 'assert_equal', (["doublespend['complete']", '(True)'], {}), "(doublespend['complete'], True)\n", (1815, 1846), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((3567, 3598), 'test_framework.util.connect_nodes', 'connect_nodes', (['self.nodes[1]', '(2)'], {}), '(self.nodes[1], 2)\n', (3580, 3598), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((3684, 3707), 'test_framework.util.sync_blocks', 'sync_blocks', (['self.nodes'], {}), '(self.nodes)\n', (3695, 3707), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((3904, 3942), 'test_framework.util.assert_equal', 'assert_equal', (["tx1['confirmations']", '(-1)'], {}), "(tx1['confirmations'], -1)\n", (3916, 3942), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((3951, 3989), 'test_framework.util.assert_equal', 'assert_equal', (["tx2['confirmations']", '(-1)'], {}), "(tx2['confirmations'], -1)\n", (3963, 3989), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((2305, 2333), 'test_framework.util.sync_blocks', 'sync_blocks', (['self.nodes[0:2]'], {}), '(self.nodes[0:2])\n', (2316, 2333), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((2931, 2968), 'test_framework.util.assert_equal', 'assert_equal', (["tx1['confirmations']", '(1)'], {}), "(tx1['confirmations'], 1)\n", (2943, 2968), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((2981, 3018), 'test_framework.util.assert_equal', 'assert_equal', (["tx2['confirmations']", '(1)'], {}), "(tx2['confirmations'], 1)\n", (2993, 3018), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((3248, 3285), 'test_framework.util.assert_equal', 'assert_equal', (["tx1['confirmations']", '(0)'], {}), "(tx1['confirmations'], 0)\n", (3260, 3285), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n'), ((3298, 3335), 'test_framework.util.assert_equal', 'assert_equal', (["tx2['confirmations']", '(0)'], {}), "(tx2['confirmations'], 0)\n", (3310, 3335), False, 'from test_framework.util import assert_equal, connect_nodes, sync_blocks, gather_inputs\n')]
|
from typing import Any, List, Literal, TypedDict
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Element import FHIR_Element
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Quantity import FHIR_Quantity
from .FHIR_string import FHIR_string
# The detailed description of a substance, typically at a level beyond what is used for prescribing.
FHIR_SubstanceSpecification_Moiety = TypedDict(
"FHIR_SubstanceSpecification_Moiety",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Role that the moiety is playing.
"role": FHIR_CodeableConcept,
# Identifier by which this moiety substance is known.
"identifier": FHIR_Identifier,
# Textual name for this moiety substance.
"name": FHIR_string,
# Extensions for name
"_name": FHIR_Element,
# Stereochemistry type.
"stereochemistry": FHIR_CodeableConcept,
# Optical activity type.
"opticalActivity": FHIR_CodeableConcept,
# Molecular formula.
"molecularFormula": FHIR_string,
# Extensions for molecularFormula
"_molecularFormula": FHIR_Element,
# Quantitative value for this moiety.
"amountQuantity": FHIR_Quantity,
# Quantitative value for this moiety.
"amountString": str,
# Extensions for amountString
"_amountString": FHIR_Element,
},
total=False,
)
|
[
"typing.TypedDict"
] |
[((406, 911), 'typing.TypedDict', 'TypedDict', (['"""FHIR_SubstanceSpecification_Moiety"""', "{'id': FHIR_string, 'extension': List[Any], 'modifierExtension': List[Any],\n 'role': FHIR_CodeableConcept, 'identifier': FHIR_Identifier, 'name':\n FHIR_string, '_name': FHIR_Element, 'stereochemistry':\n FHIR_CodeableConcept, 'opticalActivity': FHIR_CodeableConcept,\n 'molecularFormula': FHIR_string, '_molecularFormula': FHIR_Element,\n 'amountQuantity': FHIR_Quantity, 'amountString': str, '_amountString':\n FHIR_Element}"], {'total': '(False)'}), "('FHIR_SubstanceSpecification_Moiety', {'id': FHIR_string,\n 'extension': List[Any], 'modifierExtension': List[Any], 'role':\n FHIR_CodeableConcept, 'identifier': FHIR_Identifier, 'name':\n FHIR_string, '_name': FHIR_Element, 'stereochemistry':\n FHIR_CodeableConcept, 'opticalActivity': FHIR_CodeableConcept,\n 'molecularFormula': FHIR_string, '_molecularFormula': FHIR_Element,\n 'amountQuantity': FHIR_Quantity, 'amountString': str, '_amountString':\n FHIR_Element}, total=False)\n", (415, 911), False, 'from typing import Any, List, Literal, TypedDict\n')]
|
from __future__ import print_function
import warnings
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
import numpy
from six.moves import input
# from theano.compat.six.moves import input
# Because many people neglected to run the pylearn2/utils/setup.py script
# separately, we compile the necessary Cython extensions here but because
# Cython is not a strict dependency, we issue a warning when it is not
# available.
try:
from Cython.Distutils import build_ext
cython_available = True
except ImportError:
warnings.warn("Cython was not found and hence pylearn2.utils._window_flip "
"and pylearn2.utils._video and classes that depend on them "
"(e.g. pylearn2.train_extensions.window_flip) will not be "
"available")
cython_available = False
if cython_available:
cmdclass = {'build_ext': build_ext}
ext_modules = [Extension("pylearn2.utils._window_flip",
["pylearn2/utils/_window_flip.pyx"],
include_dirs=[numpy.get_include()]),
Extension("pylearn2.utils._video",
["pylearn2/utils/_video.pyx"],
include_dirs=[numpy.get_include()])]
else:
cmdclass = {}
ext_modules = []
# Inform user of setup.py develop preference
class pylearn2_install(install):
def run(self):
        print("Because Pylearn2 is under heavy development, we generally do "
              "not advise using the `setup.py install` command. Please "
"consider using the `setup.py develop` command instead for the "
"following reasons:\n\n1. Using `setup.py install` creates a "
"copy of the Pylearn2 source code in your Python installation "
"path. In order to update Pylearn2 afterwards you will need to "
"rerun `setup.py install` (!). Simply using `git pull` to "
"update your local copy of Pylearn2 code will not suffice. \n\n"
"2. When using `sudo` to install Pylearn2, all files, "
"including the tutorials, will be copied to a directory owned "
"by root. Not only is running tutorials as root unsafe, it "
"also means that all Pylearn2-related environment variables "
"which were defined for the user will be unavailable.\n\n"
"Pressing enter will continue the installation of Pylearn2 in "
"`develop` mode instead. Note that this means that you need to "
"keep this folder with the Pylearn2 code in its current "
"location. If you know what you are doing, and are very sure "
"that you want to install Pylearn2 using the `install` "
"command instead, please type `install`.\n")
mode = None
while mode not in ['', 'install', 'develop', 'cancel']:
if mode is not None:
print("Please try again")
mode = input("Installation mode: [develop]/install/cancel: ")
if mode in ['', 'develop']:
self.distribution.run_command('develop')
if mode == 'install':
return install.run(self)
cmdclass.update({'install': pylearn2_install})
setup(
cmdclass=cmdclass,
ext_modules=ext_modules,
name='pylearn2',
version='0.1dev',
packages=find_packages(),
description='A machine learning library built on top of Theano.',
license='BSD 3-clause license',
long_description=open('README.rst', 'rb').read().decode('utf8'),
dependency_links=['git+http://github.com/Theano/Theano.git#egg=Theano'],
install_requires=['numpy>=1.5', 'pyyaml', 'argparse', "Theano"],
scripts=['bin/pylearn2-plot-monitor', 'bin/pylearn2-print-monitor',
'bin/pylearn2-show-examples', 'bin/pylearn2-show-weights',
'bin/pylearn2-train'],
package_data={
'': ['*.cu', '*.cuh', '*.h'],
},
)
|
[
"numpy.get_include",
"warnings.warn",
"setuptools.command.install.install.run",
"six.moves.input",
"setuptools.find_packages"
] |
[((576, 786), 'warnings.warn', 'warnings.warn', (['"""Cython was not found and hence pylearn2.utils._window_flip and pylearn2.utils._video and classes that depend on them (e.g. pylearn2.train_extensions.window_flip) will not be available"""'], {}), "(\n 'Cython was not found and hence pylearn2.utils._window_flip and pylearn2.utils._video and classes that depend on them (e.g. pylearn2.train_extensions.window_flip) will not be available'\n )\n", (589, 786), False, 'import warnings\n'), ((3424, 3439), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (3437, 3439), False, 'from setuptools import setup, find_packages, Extension\n'), ((3050, 3104), 'six.moves.input', 'input', (['"""Installation mode: [develop]/install/cancel: """'], {}), "('Installation mode: [develop]/install/cancel: ')\n", (3055, 3104), False, 'from six.moves import input\n'), ((3243, 3260), 'setuptools.command.install.install.run', 'install.run', (['self'], {}), '(self)\n', (3254, 3260), False, 'from setuptools.command.install import install\n'), ((1100, 1119), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1117, 1119), False, 'import numpy\n'), ((1280, 1299), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1297, 1299), False, 'import numpy\n')]
|
from unittest import TestCase
import numpy as np
from scvi.dataset import (
SyntheticDataset,
SyntheticRandomDataset,
SyntheticDatasetCorr,
ZISyntheticDatasetCorr,
)
from .utils import unsupervised_training_one_epoch
class TestSyntheticDataset(TestCase):
def test_train_one(self):
dataset = SyntheticDataset(batch_size=10, nb_genes=10)
unsupervised_training_one_epoch(dataset)
def test_RandomDataset_populate_and_train_one(self):
dataset = SyntheticRandomDataset(save_path="tests/data")
unsupervised_training_one_epoch(dataset)
def test_DatasetCorr_populate_and_train_one(self):
dataset = SyntheticDatasetCorr(n_cells_cluster=10)
self.assertListEqual(
np.unique(dataset.labels).tolist(), np.arange(dataset.n_clusters).tolist()
)
unsupervised_training_one_epoch(dataset)
def test_ZIDatasetCorr_populate_and_train_one(self):
dataset = ZISyntheticDatasetCorr(n_cells_cluster=10)
unsupervised_training_one_epoch(dataset)
def test_corr_zeros(self):
# Test hierarchy of zeros
nb_data = SyntheticDatasetCorr()
zi_data = ZISyntheticDatasetCorr()
zi_zeros_frac = (zi_data.X == 0).mean()
nb_zeros_frac = (nb_data.X == 0).mean()
# nb is not zero inflated
# zi is zero inflated for all genes
# We expect the number of zeros to organize accordingly
self.assertLess(nb_zeros_frac, zi_zeros_frac)
# We enforce that the zero inflated model has at least 20% of zeros
self.assertGreaterEqual(zi_zeros_frac, 0.2)
|
[
"scvi.dataset.SyntheticDataset",
"scvi.dataset.ZISyntheticDatasetCorr",
"scvi.dataset.SyntheticRandomDataset",
"numpy.arange",
"scvi.dataset.SyntheticDatasetCorr",
"numpy.unique"
] |
[((323, 367), 'scvi.dataset.SyntheticDataset', 'SyntheticDataset', ([], {'batch_size': '(10)', 'nb_genes': '(10)'}), '(batch_size=10, nb_genes=10)\n', (339, 367), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((493, 539), 'scvi.dataset.SyntheticRandomDataset', 'SyntheticRandomDataset', ([], {'save_path': '"""tests/data"""'}), "(save_path='tests/data')\n", (515, 539), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((663, 703), 'scvi.dataset.SyntheticDatasetCorr', 'SyntheticDatasetCorr', ([], {'n_cells_cluster': '(10)'}), '(n_cells_cluster=10)\n', (683, 703), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((956, 998), 'scvi.dataset.ZISyntheticDatasetCorr', 'ZISyntheticDatasetCorr', ([], {'n_cells_cluster': '(10)'}), '(n_cells_cluster=10)\n', (978, 998), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((1132, 1154), 'scvi.dataset.SyntheticDatasetCorr', 'SyntheticDatasetCorr', ([], {}), '()\n', (1152, 1154), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((1173, 1197), 'scvi.dataset.ZISyntheticDatasetCorr', 'ZISyntheticDatasetCorr', ([], {}), '()\n', (1195, 1197), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((746, 771), 'numpy.unique', 'np.unique', (['dataset.labels'], {}), '(dataset.labels)\n', (755, 771), True, 'import numpy as np\n'), ((782, 811), 'numpy.arange', 'np.arange', (['dataset.n_clusters'], {}), '(dataset.n_clusters)\n', (791, 811), True, 'import numpy as np\n')]
|
# --------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# path - variable storing the file path (assumed to be predefined by the execution environment)
df = pd.read_csv(path)
df.head()
X = df.drop(["Price"] , axis = 1)
y = df["Price"]
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3, random_state = 6)
corr = X_train.corr()
print(corr)
#Code starts here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
r2 = r2_score(y_test,y_pred)
# --------------
from sklearn.linear_model import Lasso
# Code starts here
lasso = Lasso()
lasso.fit(X_train, y_train)
lasso_pred = lasso.predict(X_test)
r2_lasso = r2_score(y_test,lasso_pred)
# --------------
from sklearn.linear_model import Ridge
# Code starts here
ridge = Ridge()
ridge.fit(X_train, y_train)
ridge_pred = ridge.predict(X_test)
r2_ridge = r2_score(y_test,ridge_pred)
# Code ends here
# --------------
from sklearn.model_selection import cross_val_score
#Code starts here
regressor = LinearRegression()
score = cross_val_score(regressor,X_train,y_train, cv = 10)
mean_score = score.mean()
print(mean_score)
# --------------
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
#Code starts here
model= make_pipeline(PolynomialFeatures(2), LinearRegression())
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
r2_poly = r2_score(y_test, y_pred)
print(r2_poly)
|
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.Ridge",
"sklearn.metrics.r2_score",
"sklearn.model_selection.cross_val_score",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.linear_model.Lasso"
] |
[((155, 172), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (166, 172), True, 'import pandas as pd\n'), ((273, 326), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(6)'}), '(X, y, test_size=0.3, random_state=6)\n', (289, 326), False, 'from sklearn.model_selection import train_test_split\n'), ((531, 549), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (547, 549), False, 'from sklearn.linear_model import LinearRegression\n'), ((627, 651), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (635, 651), False, 'from sklearn.metrics import r2_score\n'), ((740, 747), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (745, 747), False, 'from sklearn.linear_model import Lasso\n'), ((828, 856), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'lasso_pred'], {}), '(y_test, lasso_pred)\n', (836, 856), False, 'from sklearn.metrics import r2_score\n'), ((945, 952), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (950, 952), False, 'from sklearn.linear_model import Ridge\n'), ((1032, 1060), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'ridge_pred'], {}), '(y_test, ridge_pred)\n', (1040, 1060), False, 'from sklearn.metrics import r2_score\n'), ((1187, 1205), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1203, 1205), False, 'from sklearn.linear_model import LinearRegression\n'), ((1215, 1266), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['regressor', 'X_train', 'y_train'], {'cv': '(10)'}), '(regressor, X_train, y_train, cv=10)\n', (1230, 1266), False, 'from sklearn.model_selection import cross_val_score\n'), ((1597, 1621), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1605, 1621), False, 'from sklearn.metrics import r2_score\n'), ((1478, 1499), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(2)'], {}), '(2)\n', (1496, 1499), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1501, 1519), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1517, 1519), False, 'from sklearn.linear_model import LinearRegression\n')]
|
"""Tests for http_router"""
from datetime import datetime
from bareasgi import (
HttpRequest,
HttpResponse
)
from bareasgi.application import DEFAULT_NOT_FOUND_RESPONSE
from bareasgi.basic_router import BasicHttpRouter
async def ok_handler(_request: HttpRequest) -> HttpResponse:
"""Return OK"""
return HttpResponse(200)
def test_literal_paths():
"""Test for literal paths"""
basic_route_handler = BasicHttpRouter(DEFAULT_NOT_FOUND_RESPONSE)
basic_route_handler.add({'GET'}, '/foo/bar/grum', ok_handler)
handler, matches = basic_route_handler.resolve('GET', '/foo/bar/grum')
assert handler is ok_handler
assert matches == {}
def test_literal_path_with_trailing_slash():
"""Test for literal path with trailing slash"""
basic_route_handler = BasicHttpRouter(DEFAULT_NOT_FOUND_RESPONSE)
basic_route_handler.add({'GET'}, '/foo/bar/grum/', ok_handler)
handler, matches = basic_route_handler.resolve('GET', '/foo/bar/grum/')
assert handler is ok_handler
assert matches == {}
def test_variable_paths():
"""Test for path including a variable"""
basic_route_handler = BasicHttpRouter(DEFAULT_NOT_FOUND_RESPONSE)
basic_route_handler.add({'GET'}, '/foo/{name}/grum', ok_handler)
handler, matches = basic_route_handler.resolve('GET', '/foo/bar/grum')
assert handler is ok_handler
assert 'name' in matches
assert matches['name'] == 'bar'
def test_variable_path_with_type():
"""Test for path with typed variable"""
basic_route_handler = BasicHttpRouter(DEFAULT_NOT_FOUND_RESPONSE)
basic_route_handler.add({'GET'}, '/foo/{id:int}/grum', ok_handler)
handler, matches = basic_route_handler.resolve('GET', '/foo/123/grum')
assert handler is ok_handler
assert 'id' in matches
assert matches['id'] == 123
def test_variable_path_with_type_and_format():
"""Test for path with typed variable and format"""
basic_route_handler = BasicHttpRouter(DEFAULT_NOT_FOUND_RESPONSE)
basic_route_handler.add(
{'GET'}, '/foo/{date_of_birth:datetime:%Y-%m-%d}/grum', ok_handler)
handler, matches = basic_route_handler.resolve(
'GET', '/foo/2001-12-31/grum')
assert handler is ok_handler
assert 'date_of_birth' in matches
assert matches['date_of_birth'] == datetime(2001, 12, 31)
def test_path_type():
"""Test for path type"""
basic_route_handler = BasicHttpRouter(DEFAULT_NOT_FOUND_RESPONSE)
basic_route_handler.add({'GET'}, '/ui/{rest:path}', ok_handler)
handler, matches = basic_route_handler.resolve('GET', '/ui/index.html')
assert handler is ok_handler
assert 'rest' in matches
assert matches['rest'] == 'index.html'
handler, matches = basic_route_handler.resolve('GET', '/ui/')
assert handler is ok_handler
assert 'rest' in matches
assert matches['rest'] == ''
handler, matches = basic_route_handler.resolve(
'GET', '/ui/folder/other.html')
assert handler is ok_handler
assert 'rest' in matches
assert matches['rest'] == 'folder/other.html'
|
[
"bareasgi.HttpResponse",
"bareasgi.basic_router.BasicHttpRouter",
"datetime.datetime"
] |
[((322, 339), 'bareasgi.HttpResponse', 'HttpResponse', (['(200)'], {}), '(200)\n', (334, 339), False, 'from bareasgi import HttpRequest, HttpResponse\n'), ((427, 470), 'bareasgi.basic_router.BasicHttpRouter', 'BasicHttpRouter', (['DEFAULT_NOT_FOUND_RESPONSE'], {}), '(DEFAULT_NOT_FOUND_RESPONSE)\n', (442, 470), False, 'from bareasgi.basic_router import BasicHttpRouter\n'), ((796, 839), 'bareasgi.basic_router.BasicHttpRouter', 'BasicHttpRouter', (['DEFAULT_NOT_FOUND_RESPONSE'], {}), '(DEFAULT_NOT_FOUND_RESPONSE)\n', (811, 839), False, 'from bareasgi.basic_router import BasicHttpRouter\n'), ((1142, 1185), 'bareasgi.basic_router.BasicHttpRouter', 'BasicHttpRouter', (['DEFAULT_NOT_FOUND_RESPONSE'], {}), '(DEFAULT_NOT_FOUND_RESPONSE)\n', (1157, 1185), False, 'from bareasgi.basic_router import BasicHttpRouter\n'), ((1537, 1580), 'bareasgi.basic_router.BasicHttpRouter', 'BasicHttpRouter', (['DEFAULT_NOT_FOUND_RESPONSE'], {}), '(DEFAULT_NOT_FOUND_RESPONSE)\n', (1552, 1580), False, 'from bareasgi.basic_router import BasicHttpRouter\n'), ((1950, 1993), 'bareasgi.basic_router.BasicHttpRouter', 'BasicHttpRouter', (['DEFAULT_NOT_FOUND_RESPONSE'], {}), '(DEFAULT_NOT_FOUND_RESPONSE)\n', (1965, 1993), False, 'from bareasgi.basic_router import BasicHttpRouter\n'), ((2403, 2446), 'bareasgi.basic_router.BasicHttpRouter', 'BasicHttpRouter', (['DEFAULT_NOT_FOUND_RESPONSE'], {}), '(DEFAULT_NOT_FOUND_RESPONSE)\n', (2418, 2446), False, 'from bareasgi.basic_router import BasicHttpRouter\n'), ((2301, 2323), 'datetime.datetime', 'datetime', (['(2001)', '(12)', '(31)'], {}), '(2001, 12, 31)\n', (2309, 2323), False, 'from datetime import datetime\n')]
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from typing import BinaryIO, Iterator, TextIO, Union
from airbyte_cdk.logger import AirbyteLogger
class StorageFile(ABC):
def __init__(self, url: str, provider: dict):
"""
:param url: value yielded by filepath_iterator() in [Incremental]FileStream class. Blob/File path.
:param provider: provider specific mapping as described in spec.json
"""
self.url = url
self._provider = provider
self.logger = AirbyteLogger()
@property
@abstractmethod
def last_modified(self) -> datetime:
"""
Override this to implement provider-specific logic
:return: last_modified property of the blob/file
"""
@contextmanager
@abstractmethod
def open(self, binary: bool) -> Iterator[Union[TextIO, BinaryIO]]:
"""
Override this to implement provider-specific logic.
It should yield exactly one TextIO or BinaryIO, that being the opened file-like object.
Note: This must work as described in https://docs.python.org/3/library/contextlib.html#contextlib.contextmanager.
Using contextmanager eliminates need to write all the boilerplate management code in this class.
See S3File() for example implementation.
:param binary: whether or not to open file as binary
:return: file-like object
"""
|
[
"airbyte_cdk.logger.AirbyteLogger"
] |
[((1673, 1688), 'airbyte_cdk.logger.AirbyteLogger', 'AirbyteLogger', ([], {}), '()\n', (1686, 1688), False, 'from airbyte_cdk.logger import AirbyteLogger\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv("titanic.csv")#task 1 and task 2
data.columns
a = data.Survived
len(a)
data.Survived.value_counts()#task 3
data2 = pd.read_csv("titanic.csv", index_col = "PassengerId")
data2
b = data2.loc[data2.Survived == 1]
c = data2.loc[data2.Survived == 0]
print("Amount of passengers:",int(len(a)))
print("Amount of survived passengers:",int(len(b)))
print("Amount of victims:",int(len(c)))
d = (len(b)/len(a))*100
print("The percent of survived passengers equals:",int(d),"%")#attempt at a percentage calculation, but a one-line solution definitely exists :)
plt.hist(data["Age"])#task 4
data.Age.mean()#mean of Age
data.Age.describe()# second calculation method
temp1 = data2.loc[:20,["Survived","Sex", "Name"]]#task 5
temp1
temp1.describe()
temp2 = temp1.loc[temp1.Survived == 1]
temp2.Survived.value_counts()
temp3 = temp1.loc[temp1.Survived == 0]
temp3.Survived.value_counts()
|
[
"pandas.read_csv",
"matplotlib.pyplot.hist"
] |
[((61, 87), 'pandas.read_csv', 'pd.read_csv', (['"""titanic.csv"""'], {}), "('titanic.csv')\n", (72, 87), True, 'import pandas as pd\n'), ((211, 262), 'pandas.read_csv', 'pd.read_csv', (['"""titanic.csv"""'], {'index_col': '"""PassengerId"""'}), "('titanic.csv', index_col='PassengerId')\n", (222, 262), True, 'import pandas as pd\n'), ((656, 677), 'matplotlib.pyplot.hist', 'plt.hist', (["data['Age']"], {}), "(data['Age'])\n", (664, 677), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import collections
import board_cfg_lib
import common
PCI_HEADER = r"""
#ifndef PCI_DEVICES_H_
#define PCI_DEVICES_H_
"""
PCI_END_HEADER = r"""
#endif /* PCI_DEVICES_H_ */"""
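# HI_MMIO_OFFSET tracks the running size of BARs remapped above 4G (relative to
# PTDEV_HI_MMIO_START); its final value is emitted as PTDEV_HI_MMIO_SIZE.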
HI_MMIO_OFFSET = 0
class Bar_Mem:
def __init__(self):
self.addr = 0
self.remapped = False
class Bar_Attr:
def __init__(self):
self.name = 0
self.remappable = True
class Pci_Dev_Bar_Desc:
def __init__(self):
self.pci_dev_dic = {}
self.pci_bar_dic = {}
PCI_DEV_BAR_DESC = Pci_Dev_Bar_Desc()
def get_value_after_str(line, key):
    """ Get the value that follows the given key string """
idx = 0
line_in_list = line.split()
for idx_key, val in enumerate(line_in_list):
if val == key:
idx = idx_key
break
return line_in_list[idx + 1]
def check_bar_remappable(line):
#TODO: check device BAR remappable per ACPI table
return True
def get_size(line):
# get size string from format, Region n: Memory at x ... [size=NK]
size_str = line.split()[-1].strip(']').split('=')[1]
if 'G' in size_str:
size = int(size_str.strip('G')) * common.SIZE_G
elif 'M' in size_str:
size = int(size_str.strip('M')) * common.SIZE_M
elif 'K' in size_str:
size = int(size_str.strip('K')) * common.SIZE_K
else:
size = int(size_str)
return size
# round up the running bar_addr to the size of the incoming bar "line"
def remap_bar_addr_to_high(bar_addr, line):
"""Generate vbar address"""
global HI_MMIO_OFFSET
size = get_size(line)
cur_addr = common.round_up(bar_addr, size)
HI_MMIO_OFFSET = cur_addr + size
return cur_addr
def parser_pci():
""" Parse PCI lines """
cur_bdf = 0
prev_bdf = 0
tmp_bar_dic = {}
bar_addr = bar_num = '0'
cal_sub_pci_name = []
pci_lines = board_cfg_lib.get_info(
common.BOARD_INFO_FILE, "<PCI_DEVICE>", "</PCI_DEVICE>")
for line in pci_lines:
tmp_bar_mem = Bar_Mem()
# get pci bar information into PCI_DEV_BAR_DESC
if "Region" in line and "Memory at" in line:
#ignore memory region from SR-IOV capabilities
if "size=" not in line:
continue
bar_addr = int(get_value_after_str(line, "at"), 16)
bar_num = line.split()[1].strip(':')
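            # BARs that sit outside the 2G-4G window are remapped into the high MMIO
            # region (emitted later as PTDEV_HI_MMIO_START + offset), if remappable.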
if bar_addr >= common.SIZE_4G or bar_addr < common.SIZE_2G:
if not tmp_bar_attr.remappable:
continue
bar_addr = remap_bar_addr_to_high(HI_MMIO_OFFSET, line)
tmp_bar_mem.remapped = True
tmp_bar_mem.addr = hex(bar_addr)
tmp_bar_dic[int(bar_num)] = tmp_bar_mem
else:
tmp_bar_attr = Bar_Attr()
prev_bdf = cur_bdf
pci_bdf = line.split()[0]
tmp_bar_attr.name = " ".join(line.split(':')[1].split()[1:])
# remove '[*]' in pci subname
if '[' in tmp_bar_attr.name:
tmp_bar_attr.name = tmp_bar_attr.name.rsplit('[', 1)[0]
cal_sub_pci_name.append(tmp_bar_attr.name)
tmp_bar_attr.remappable = check_bar_remappable(line)
PCI_DEV_BAR_DESC.pci_dev_dic[pci_bdf] = tmp_bar_attr
cur_bdf = pci_bdf
if not prev_bdf:
prev_bdf = cur_bdf
if tmp_bar_dic and cur_bdf != prev_bdf:
PCI_DEV_BAR_DESC.pci_bar_dic[prev_bdf] = tmp_bar_dic
# clear the tmp_bar_dic before store the next dic
tmp_bar_dic = {}
# output all the pci device list to pci_device.h
sub_name_count = collections.Counter(cal_sub_pci_name)
if tmp_bar_dic:
PCI_DEV_BAR_DESC.pci_bar_dic[cur_bdf] = tmp_bar_dic
return sub_name_count
def write_pbdf(i_cnt, bdf, bar_attr, config):
"""
    Parse and generate pbdf
    :param i_cnt: the number of pci devices that have the same PCI sub class name
    :param bdf: a string that contains the BDF
    :param bar_attr: a class that contains the PCI BAR attributes
    :param config: a file object that the PCI information is written to
"""
# if there is only one host bridge, then will discard the index of suffix
if i_cnt == 0 and bar_attr.name.upper() == "HOST BRIDGE":
tmp_sub_name = "_".join(bar_attr.name.split()).upper()
else:
if '-' in bar_attr.name:
tmp_sub_name = common.undline_name(bar_attr.name) + "_" + str(i_cnt)
else:
tmp_sub_name = "_".join(bar_attr.name.split()).upper() + "_" + str(i_cnt)
bus = int(bdf.split(':')[0], 16)
dev = int(bdf.split(':')[1].split('.')[0], 16)
fun = int(bdf.split('.')[1], 16)
print("#define %-32s" % tmp_sub_name, end="", file=config)
print(" .pbdf.bits = {{.b = 0x{:02X}U, .d = 0x{:02X}U, .f = 0x{:02X}U}}".format(
bus, dev, fun), end="", file=config)
if not bar_attr.remappable:
align = ' ' * 48
print("\n{}/* TODO: add {} 64bit BAR support */".format(align, tmp_sub_name), file=config)
return
def write_vbar(i_cnt, bdf, pci_bar_dic, bar_attr, config):
"""
    Parse and generate vbar
    :param i_cnt: the number of pci devices that have the same PCI sub class name
    :param bdf: a string that contains the BDF
    :param pci_bar_dic: a dictionary of pci vbar entries keyed by BDF
    :param bar_attr: a class that contains the PCI BAR attributes
    :param config: a file object that the PCI information is written to
"""
tail = 0
align = ' ' * 48
ptdev_mmio_str = ''
tmp_sub_name = common.undline_name(bar_attr.name) + "_" + str(i_cnt)
if bdf in pci_bar_dic.keys():
bar_list = list(pci_bar_dic[bdf].keys())
bar_len = len(bar_list)
bar_num = 0
for bar_i in bar_list:
if not bar_attr.remappable:
return
if tail == 0:
print(", \\", file=config)
tail += 1
bar_num += 1
bar_val = pci_bar_dic[bdf][bar_i].addr
if pci_bar_dic[bdf][bar_i].remapped:
ptdev_mmio_str = 'PTDEV_HI_MMIO_START + '
if bar_num == bar_len:
print("{}.vbar_base[{}] = {}{}UL".format(align, bar_i, ptdev_mmio_str, bar_val), file=config)
else:
print("{}.vbar_base[{}] = {}{}UL, \\".format(
align, bar_i, ptdev_mmio_str, bar_val), file=config)
else:
print("", file=config)
def generate_file(config):
"""
    Get PCI devices and generate pci_devices.h
    :param config: a file object that the PCI information is written to
"""
# write the license into pci
print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
# add bios and base board info
board_cfg_lib.handle_bios_info(config)
# write the header into pci
print("{0}".format(PCI_HEADER), file=config)
sub_name_count = parser_pci()
print("#define %-32s" % "PTDEV_HI_MMIO_SIZE", " {}UL".format(hex(HI_MMIO_OFFSET)), file=config)
compared_bdf = []
for cnt_sub_name in sub_name_count.keys():
i_cnt = 0
for bdf, bar_attr in PCI_DEV_BAR_DESC.pci_dev_dic.items():
if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
compared_bdf.append(bdf)
else:
continue
print("",file=config)
write_pbdf(i_cnt, bdf, bar_attr, config)
write_vbar(i_cnt, bdf, PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr, config)
i_cnt += 1
# write the end to the pci devices
print("{0}".format(PCI_END_HEADER), file=config)
|
[
"common.undline_name",
"board_cfg_lib.get_info",
"common.round_up",
"collections.Counter",
"board_cfg_lib.handle_bios_info"
] |
[((1688, 1719), 'common.round_up', 'common.round_up', (['bar_addr', 'size'], {}), '(bar_addr, size)\n', (1703, 1719), False, 'import common\n'), ((1951, 2030), 'board_cfg_lib.get_info', 'board_cfg_lib.get_info', (['common.BOARD_INFO_FILE', '"""<PCI_DEVICE>"""', '"""</PCI_DEVICE>"""'], {}), "(common.BOARD_INFO_FILE, '<PCI_DEVICE>', '</PCI_DEVICE>')\n", (1973, 2030), False, 'import board_cfg_lib\n'), ((3728, 3765), 'collections.Counter', 'collections.Counter', (['cal_sub_pci_name'], {}), '(cal_sub_pci_name)\n', (3747, 3765), False, 'import collections\n'), ((6879, 6917), 'board_cfg_lib.handle_bios_info', 'board_cfg_lib.handle_bios_info', (['config'], {}), '(config)\n', (6909, 6917), False, 'import board_cfg_lib\n'), ((5671, 5705), 'common.undline_name', 'common.undline_name', (['bar_attr.name'], {}), '(bar_attr.name)\n', (5690, 5705), False, 'import common\n'), ((4503, 4537), 'common.undline_name', 'common.undline_name', (['bar_attr.name'], {}), '(bar_attr.name)\n', (4522, 4537), False, 'import common\n')]
|
import argparse
import os
import random
from math import log10
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import Variable, grad
from data.UData import CreateDataLoader
from models.CascadeNeXT import *
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--test', action='store_true', help='test option')
parser.add_argument('--adv', action='store_true', help='adversarial training option')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--testBatch', type=int, default=4, help='input test batch size')
parser.add_argument('--cut', type=int, default=2, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--advW', type=float, default=0.0001, help='adversarial weight, default=0.0001')
parser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.9')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--optim', action='store_true', help='load optimizer\'s checkpoint')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=2345')
parser.add_argument('--baseGeni', type=int, default=0, help='start base of pure pair L1 loss')
parser.add_argument('--geni', type=int, default=0, help='continue gen image num')
parser.add_argument('--epoi', type=int, default=0, help='continue epoch num')
parser.add_argument('--env', type=str, default=None, help='tensorboard env')
opt = parser.parse_args()
print(opt)
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
gen_iterations = opt.geni
try:
os.makedirs(opt.outf)
except OSError:
pass
# random seed setup
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end
writer = SummaryWriter(log_dir=opt.env, comment='this is great')
dataloader_train, dataloader_test = CreateDataLoader(opt)
netG = Pyramid()
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = PatchD()
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion_GAN = torch.nn.BCEWithLogitsLoss()
criterion_L1 = nn.L1Loss()
criterion_L2 = nn.MSELoss()
one = torch.FloatTensor([1])
mone = one * -1
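# Constant +1 / -1 tensors used as gradient arguments for backward() in the
# WGAN-style critic and generator updates below.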
if opt.cuda:
netD.cuda()
netG.cuda()
criterion_L1.cuda()
criterion_L2.cuda()
criterion_GAN.cuda()
one, mone = one.cuda(), mone.cuda()
# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))
if opt.optim:
optimizerG.load_state_dict(torch.load('%s/optimG_checkpoint.pth' % opt.outf))
optimizerD.load_state_dict(torch.load('%s/optimD_checkpoint.pth' % opt.outf))
schedulerG = lr_scheduler.ReduceLROnPlateau(optimizerG, mode='max', verbose=True, min_lr=0.0000005,
patience=5) # 1.5*10^5 iter
schedulerD = lr_scheduler.ReduceLROnPlateau(optimizerD, mode='max', verbose=True, min_lr=0.0000005,
patience=5) # 1.5*10^5 iter
def calc_gradient_penalty(netD, real_data, fake_data):
# print "real_data: ", real_data.size(), fake_data.size()
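    # WGAN-GP style penalty: evaluate the critic on random interpolations between
    # real and fake samples and push its gradient norm towards 1 (soft Lipschitz constraint).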
alpha = torch.rand(opt.batchSize, 1, 1, 1)
# alpha = alpha.expand(opt.batchSize, real_data.nelement() / opt.batchSize).contiguous().view(opt.batchSize, 3, 64,
# 64)
alpha = alpha.cuda() if opt.cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if opt.cuda:
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda() if opt.cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
return gradient_penalty
flag = 1
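# One-shot flag: the fixed target/blur sample batch is saved and logged only on
# the first generator iteration (flag is decremented after use).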
for epoch in range(opt.epoi, opt.niter):
epoch_loss = 0
epoch_iter_count = 0
for extra in range(2 * (opt.Diters + 1)):
data_iter = iter(dataloader_train)
iter_count = 0
while iter_count < len(dataloader_train):
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters():
p.requires_grad = False # to avoid computation
# train the discriminator Diters times
Diters = opt.Diters
if gen_iterations < opt.baseGeni or not opt.adv: # L1 stage
Diters = 0
j = 0
while j < Diters and iter_count < len(dataloader_train):
j += 1
netD.zero_grad()
data = data_iter.next()
iter_count += 1
if opt.cuda:
data = [x.cuda() for x in data]
real_bim, real_sim = data[0:3], data[3:]
# train with fake
fake_Vsim = netG(Variable(real_bim[2], volatile=True))
errD_fake = netD(Variable(torch.cat([fake_Vsim[2].data, real_bim[2]], 1))).mean(0).view(1)
errD_fake.backward(one, retain_graph=True) # backward on score on real
errD_real = netD(Variable(torch.cat([real_sim[2], real_bim[2]], 1))).mean(0).view(1)
errD_real.backward(mone, retain_graph=True) # backward on score on real
errD = errD_real - errD_fake
# gradient penalty
gradient_penalty = calc_gradient_penalty(netD, torch.cat([real_sim[2], real_bim[2]], 1),
torch.cat([fake_Vsim[2], real_bim[2]], 1))
gradient_penalty.backward()
optimizerD.step()
############################
# (2) Update G network
############################
if iter_count < len(dataloader_train):
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters():
p.requires_grad = True # to avoid computation
netG.zero_grad()
data = data_iter.next()
iter_count += 1
if opt.cuda:
data = [x.cuda() for x in data]
real_bim, real_sim = data[0:3], data[3:]
if flag: # fix samples
writer.add_image('target imgs', vutils.make_grid(real_sim[2].mul(0.5).add(0.5), nrow=16))
writer.add_image('blur imgs', vutils.make_grid(real_bim[2].mul(0.5).add(0.5), nrow=16))
vutils.save_image(real_sim[2].mul(0.5).add(0.5),
'%s/sharp_samples' % opt.outf + '.png')
vutils.save_image(real_bim[2].mul(0.5).add(0.5),
'%s/blur_samples' % opt.outf + '.png')
fixed_blur = real_bim[2]
flag -= 1
fake = netG(Variable(real_bim[2]))
if gen_iterations < opt.baseGeni or not opt.adv:
contentLoss = criterion_L2(fake[2].mul(0.5).add(0.5), Variable(real_sim[2].mul(0.5).add(0.5)))
epoch_loss += 10 * log10(1 / contentLoss.data[0])
epoch_iter_count += 1
contentLoss += criterion_L2(fake[1].mul(0.5).add(0.5), Variable(real_sim[1].mul(0.5).add(0.5)))
contentLoss += criterion_L2(fake[0].mul(0.5).add(0.5), Variable(real_sim[0].mul(0.5).add(0.5)))
contentLoss /= 3.0
contentLoss.backward()
errG = contentLoss
else:
errG = netD(torch.cat([fake[2], Variable(real_bim[2])], 1)).mean(0).view(1) * opt.advW
errG.backward(mone, retain_graph=True)
contentLoss = criterion_L2(fake[2].mul(0.5).add(0.5), Variable(real_sim[2].mul(0.5).add(0.5)))
epoch_loss += 10 * log10(1 / contentLoss.data[0])
epoch_iter_count += 1
contentLoss += criterion_L2(fake[1].mul(0.5).add(0.5), Variable(real_sim[1].mul(0.5).add(0.5)))
contentLoss += criterion_L2(fake[0].mul(0.5).add(0.5), Variable(real_sim[0].mul(0.5).add(0.5)))
contentLoss /= 3.0
contentLoss.backward()
optimizerG.step()
############################
# (3) Report & 100 Batch checkpoint
############################
if gen_iterations < opt.baseGeni or not opt.adv:
writer.add_scalar('MSE Loss', contentLoss.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] err_G: %f'
% (epoch, opt.niter, iter_count + extra * len(dataloader_train),
len(dataloader_train) * 2 * (opt.Diters + 1), gen_iterations, contentLoss.data[0]))
else:
writer.add_scalar('MSE Loss', contentLoss.data[0], gen_iterations)
writer.add_scalar('wasserstein distance', errD.data[0], gen_iterations)
writer.add_scalar('errD_real', errD_real.data[0], gen_iterations)
writer.add_scalar('errD_fake', errD_fake.data[0], gen_iterations)
writer.add_scalar('Gnet loss toward real', errG.data[0], gen_iterations)
writer.add_scalar('gradient_penalty', gradient_penalty.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'
% (epoch, opt.niter, iter_count + extra * len(dataloader_train),
len(dataloader_train) * 2 * (opt.Diters + 1),
gen_iterations, errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0],
contentLoss.data[0]))
if gen_iterations % 100 == 0:
fake = netG(Variable(fixed_blur, volatile=True))
writer.add_image('deblur imgs', vutils.make_grid(fake[2].data.mul(0.5).add(0.5).clamp(0, 1), nrow=16),
gen_iterations)
if gen_iterations % 1000 == 0:
for name, param in netG.named_parameters():
writer.add_histogram('netG ' + name, param.clone().cpu().data.numpy(), gen_iterations)
for name, param in netD.named_parameters():
writer.add_histogram('netD ' + name, param.clone().cpu().data.numpy(), gen_iterations)
vutils.save_image(fake[2].data.mul(0.5).add(0.5),
'%s/fake_samples_gen_iter_%08d.png' % (opt.outf, gen_iterations))
gen_iterations += 1
if opt.test:
if epoch % 5 == 0:
avg_psnr = 0
for batch in dataloader_test:
input, target = [x.cuda() for x in batch]
prediction = netG(Variable(input, volatile=True))
mse = criterion_L2(prediction[2].mul(0.5).add(0.5), Variable(target.mul(0.5).add(0.5)))
psnr = 10 * log10(1 / mse.data[0])
avg_psnr += psnr
avg_psnr = avg_psnr / len(dataloader_test)
writer.add_scalar('Test epoch PSNR', avg_psnr, epoch)
print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr))
avg_psnr = epoch_loss / epoch_iter_count
writer.add_scalar('Train epoch PSNR', avg_psnr, epoch)
schedulerG.step(avg_psnr)
schedulerD.step(avg_psnr)
# do checkpointing
if opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)
torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)
elif epoch % opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
|
[
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"os.makedirs",
"torch.autograd.Variable",
"data.UData.CreateDataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"math.log10",
"random.seed"
] |
[((377, 402), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (400, 402), False, 'import argparse\n'), ((2927, 2954), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2938, 2954), False, 'import random\n'), ((3105, 3160), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'opt.env', 'comment': '"""this is great"""'}), "(log_dir=opt.env, comment='this is great')\n", (3118, 3160), False, 'from tensorboardX import SummaryWriter\n'), ((3198, 3219), 'data.UData.CreateDataLoader', 'CreateDataLoader', (['opt'], {}), '(opt)\n', (3214, 3219), False, 'from data.UData import CreateDataLoader\n'), ((4085, 4184), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'lr_scheduler.ReduceLROnPlateau', (['optimizerG'], {'mode': '"""max"""', 'verbose': '(True)', 'min_lr': '(5e-07)', 'patience': '(5)'}), "(optimizerG, mode='max', verbose=True, min_lr\n =5e-07, patience=5)\n", (4115, 4184), True, 'import torch.optim.lr_scheduler as lr_scheduler\n'), ((4258, 4357), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'lr_scheduler.ReduceLROnPlateau', (['optimizerD'], {'mode': '"""max"""', 'verbose': '(True)', 'min_lr': '(5e-07)', 'patience': '(5)'}), "(optimizerD, mode='max', verbose=True, min_lr\n =5e-07, patience=5)\n", (4288, 4357), True, 'import torch.optim.lr_scheduler as lr_scheduler\n'), ((2821, 2842), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (2832, 2842), False, 'import os\n'), ((5000, 5042), 'torch.autograd.Variable', 'Variable', (['interpolates'], {'requires_grad': '(True)'}), '(interpolates, requires_grad=True)\n', (5008, 5042), False, 'from torch.autograd import Variable, grad\n'), ((6743, 6779), 'torch.autograd.Variable', 'Variable', (['real_bim[2]'], {'volatile': '(True)'}), '(real_bim[2], volatile=True)\n', (6751, 6779), False, 'from torch.autograd import Variable, grad\n'), ((8828, 8849), 'torch.autograd.Variable', 'Variable', (['real_bim[2]'], {}), '(real_bim[2])\n', (8836, 8849), False, 'from torch.autograd import Variable, grad\n'), ((11819, 11854), 'torch.autograd.Variable', 'Variable', (['fixed_blur'], {'volatile': '(True)'}), '(fixed_blur, volatile=True)\n', (11827, 11854), False, 'from torch.autograd import Variable, grad\n'), ((12805, 12835), 'torch.autograd.Variable', 'Variable', (['input'], {'volatile': '(True)'}), '(input, volatile=True)\n', (12813, 12835), False, 'from torch.autograd import Variable, grad\n'), ((12969, 12991), 'math.log10', 'log10', (['(1 / mse.data[0])'], {}), '(1 / mse.data[0])\n', (12974, 12991), False, 'from math import log10\n'), ((9071, 9101), 'math.log10', 'log10', (['(1 / contentLoss.data[0])'], {}), '(1 / contentLoss.data[0])\n', (9076, 9101), False, 'from math import log10\n'), ((9842, 9872), 'math.log10', 'log10', (['(1 / contentLoss.data[0])'], {}), '(1 / contentLoss.data[0])\n', (9847, 9872), False, 'from math import log10\n'), ((9573, 9594), 'torch.autograd.Variable', 'Variable', (['real_bim[2]'], {}), '(real_bim[2])\n', (9581, 9594), False, 'from torch.autograd import Variable, grad\n')]
|
# -*- coding: utf-8 -*-
import os
import tempfile
import unittest
from django.conf import settings
from django.core.cache import cache
from django.core.validators import ValidationError
import mock
from nose.tools import assert_raises, eq_, raises
from amo.utils import (cache_ns_key, escape_all, LocalFileStorage,
resize_image, rm_local_tmp_dir, slugify, slug_validator)
u = u'Ελληνικά'
def test_slug_validator():
eq_(slug_validator(u.lower()), None)
eq_(slug_validator('-'.join([u.lower(), u.lower()])), None)
assert_raises(ValidationError, slug_validator, '234.add')
assert_raises(ValidationError, slug_validator, 'a a a')
assert_raises(ValidationError, slug_validator, 'tags/')
def test_slugify():
x = '-'.join([u, u])
y = ' - '.join([u, u])
def check(x, y):
eq_(slugify(x), y)
slug_validator(slugify(x))
s = [
('xx x - "#$@ x', 'xx-x-x'),
(u'Bän...g (bang)', u'bäng-bang'),
(u, u.lower()),
(x, x.lower()),
(y, x.lower()),
(' a ', 'a'),
('tags/', 'tags'),
('holy_wars', 'holy_wars'),
# I don't really care what slugify returns. Just don't crash.
(u'x荿', u'x\u837f'),
(u'ϧ蒬蓣', u'\u03e7\u84ac\u84e3'),
(u'¿x', u'x'),
]
for val, expected in s:
yield check, val, expected
def test_resize_image():
# src and dst shouldn't be the same.
assert_raises(Exception, resize_image, 't', 't', 'z')
def test_resize_transparency():
src = os.path.join(settings.ROOT, 'apps', 'amo', 'tests',
'images', 'transparent.png')
dest = tempfile.mkstemp(dir=settings.TMP_PATH)[1]
expected = src.replace('.png', '-expected.png')
try:
resize_image(src, dest, (32, 32), remove_src=False, locally=True)
with open(dest) as dfh:
with open(expected) as efh:
assert dfh.read() == efh.read()
finally:
if os.path.exists(dest):
os.remove(dest)
class TestLocalFileStorage(unittest.TestCase):
def setUp(self):
self.tmp = tempfile.mkdtemp()
self.stor = LocalFileStorage()
def tearDown(self):
rm_local_tmp_dir(self.tmp)
def test_read_write(self):
fn = os.path.join(self.tmp, 'somefile.txt')
with self.stor.open(fn, 'w') as fd:
fd.write('stuff')
with self.stor.open(fn, 'r') as fd:
eq_(fd.read(), 'stuff')
def test_non_ascii_filename(self):
fn = os.path.join(self.tmp, u'Ivan Krsti\u0107.txt')
with self.stor.open(fn, 'w') as fd:
fd.write('stuff')
with self.stor.open(fn, 'r') as fd:
eq_(fd.read(), 'stuff')
def test_non_ascii_content(self):
fn = os.path.join(self.tmp, 'somefile.txt')
with self.stor.open(fn, 'w') as fd:
fd.write(u'Ivan Krsti\u0107.txt'.encode('utf8'))
with self.stor.open(fn, 'r') as fd:
eq_(fd.read().decode('utf8'), u'Ivan Krsti\u0107.txt')
def test_make_file_dirs(self):
dp = os.path.join(self.tmp, 'path', 'to')
self.stor.open(os.path.join(dp, 'file.txt'), 'w').close()
assert os.path.exists(self.stor.path(dp)), (
'Directory not created: %r' % dp)
def test_do_not_make_file_dirs_when_reading(self):
fpath = os.path.join(self.tmp, 'file.txt')
with open(fpath, 'w') as fp:
fp.write('content')
# Make sure this doesn't raise an exception.
self.stor.open(fpath, 'r').close()
def test_make_dirs_only_once(self):
dp = os.path.join(self.tmp, 'path', 'to')
with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fd:
fd.write('stuff')
# Make sure it doesn't try to make the dir twice
with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fd:
fd.write('stuff')
with self.stor.open(os.path.join(dp, 'file.txt'), 'r') as fd:
eq_(fd.read(), 'stuff')
def test_delete_empty_dir(self):
dp = os.path.join(self.tmp, 'path')
os.mkdir(dp)
self.stor.delete(dp)
eq_(os.path.exists(dp), False)
@raises(OSError)
def test_cannot_delete_non_empty_dir(self):
dp = os.path.join(self.tmp, 'path')
with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fp:
fp.write('stuff')
self.stor.delete(dp)
def test_delete_file(self):
dp = os.path.join(self.tmp, 'path')
fn = os.path.join(dp, 'file.txt')
with self.stor.open(fn, 'w') as fp:
fp.write('stuff')
self.stor.delete(fn)
eq_(os.path.exists(fn), False)
eq_(os.path.exists(dp), True)
class TestCacheNamespaces(unittest.TestCase):
def setUp(self):
cache.clear()
self.namespace = 'redis-is-dead'
@mock.patch('amo.utils.epoch')
def test_no_preexisting_key(self, epoch_mock):
epoch_mock.return_value = 123456
eq_(cache_ns_key(self.namespace), '123456:ns:%s' % self.namespace)
@mock.patch('amo.utils.epoch')
def test_no_preexisting_key_incr(self, epoch_mock):
epoch_mock.return_value = 123456
eq_(cache_ns_key(self.namespace, increment=True),
'123456:ns:%s' % self.namespace)
@mock.patch('amo.utils.epoch')
def test_key_incr(self, epoch_mock):
epoch_mock.return_value = 123456
cache_ns_key(self.namespace) # Sets ns to 123456
ns_key = cache_ns_key(self.namespace, increment=True)
expected = '123457:ns:%s' % self.namespace
eq_(ns_key, expected)
eq_(cache_ns_key(self.namespace), expected)
class TestEscapeAll(unittest.TestCase):
def test_basics(self):
x = '-'.join([u, u])
y = ' - '.join([u, u])
tests = [
('<script>alert("BALL SO HARD")</script>',
'<script>alert("BALL SO HARD")</script>'),
(u'Bän...g (bang)', u'Bän...g (bang)'),
(u, u),
(x, x),
(y, y),
(u'x荿', u'x\u837f'),
(u'ϧ蒬蓣', u'\u03e7\u0383\u84ac\u84e3'),
(u'¿x', u'¿x'),
]
for val, expected in tests:
eq_(escape_all(val), expected)
def test_nested(self):
value = '<script>alert("BALL SO HARD")</script>'
expected = '<script>alert("BALL SO HARD")</script>'
test = {
'string': value,
'dict': {'x': value},
'list': [value],
'bool': True,
}
res = escape_all(test)
eq_(res['string'], expected)
eq_(res['dict'], {'x': expected})
eq_(res['list'], [expected])
eq_(res['bool'], True)
def test_without_linkify(self):
value = '<button>http://firefox.com</button>'
expected = '<button>http://firefox.com</button>'
test = {
'string': value,
'dict': {'x': value},
'list': [value],
'bool': True,
}
res = escape_all(test, linkify=False)
eq_(res['string'], expected)
eq_(res['dict'], {'x': expected})
eq_(res['list'], [expected])
eq_(res['bool'], True)
|
[
"amo.utils.LocalFileStorage",
"amo.utils.rm_local_tmp_dir",
"os.mkdir",
"amo.utils.escape_all",
"os.remove",
"tempfile.mkstemp",
"django.core.cache.cache.clear",
"os.path.exists",
"mock.patch",
"tempfile.mkdtemp",
"amo.utils.resize_image",
"nose.tools.eq_",
"nose.tools.assert_raises",
"amo.utils.cache_ns_key",
"nose.tools.raises",
"os.path.join",
"amo.utils.slugify"
] |
[((555, 612), 'nose.tools.assert_raises', 'assert_raises', (['ValidationError', 'slug_validator', '"""234.add"""'], {}), "(ValidationError, slug_validator, '234.add')\n", (568, 612), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((617, 672), 'nose.tools.assert_raises', 'assert_raises', (['ValidationError', 'slug_validator', '"""a a a"""'], {}), "(ValidationError, slug_validator, 'a a a')\n", (630, 672), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((677, 732), 'nose.tools.assert_raises', 'assert_raises', (['ValidationError', 'slug_validator', '"""tags/"""'], {}), "(ValidationError, slug_validator, 'tags/')\n", (690, 732), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((1447, 1500), 'nose.tools.assert_raises', 'assert_raises', (['Exception', 'resize_image', '"""t"""', '"""t"""', '"""z"""'], {}), "(Exception, resize_image, 't', 't', 'z')\n", (1460, 1500), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((1545, 1630), 'os.path.join', 'os.path.join', (['settings.ROOT', '"""apps"""', '"""amo"""', '"""tests"""', '"""images"""', '"""transparent.png"""'], {}), "(settings.ROOT, 'apps', 'amo', 'tests', 'images', 'transparent.png'\n    )\n", (1557, 1630), False, 'import os\n'), ((4194, 4209), 'nose.tools.raises', 'raises', (['OSError'], {}), '(OSError)\n', (4200, 4209), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((4869, 4898), 'mock.patch', 'mock.patch', (['"""amo.utils.epoch"""'], {}), "('amo.utils.epoch')\n", (4879, 4898), False, 'import mock\n'), ((5072, 5101), 'mock.patch', 'mock.patch', (['"""amo.utils.epoch"""'], {}), "('amo.utils.epoch')\n", (5082, 5101), False, 'import mock\n'), ((5308, 5337), 'mock.patch', 'mock.patch', (['"""amo.utils.epoch"""'], {}), "('amo.utils.epoch')\n", (5318, 5337), False, 'import mock\n'), ((1660, 1699), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': 'settings.TMP_PATH'}), '(dir=settings.TMP_PATH)\n', (1676, 1699), False, 'import tempfile\n'), ((1772, 1837), 'amo.utils.resize_image', 'resize_image', (['src', 'dest', '(32, 32)'], {'remove_src': '(False)', 'locally': '(True)'}), '(src, dest, (32, 32), remove_src=False, locally=True)\n', (1784, 1837), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((1982, 2002), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (1996, 2002), False, 'import os\n'), ((2122, 2140), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2138, 2140), False, 'import tempfile\n'), ((2161, 2179), 'amo.utils.LocalFileStorage', 'LocalFileStorage', ([], {}), '()\n', (2177, 2179), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((2213, 2239), 'amo.utils.rm_local_tmp_dir', 'rm_local_tmp_dir', (['self.tmp'], {}), '(self.tmp)\n', (2229, 2239), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((2285, 2323), 'os.path.join', 'os.path.join', (['self.tmp', '"""somefile.txt"""'], {}), "(self.tmp, 'somefile.txt')\n", (2297, 2323), False, 'import os\n'), ((2531, 2573), 'os.path.join', 'os.path.join', (['self.tmp', 'u"""Ivan Krstić.txt"""'], {}), "(self.tmp, u'Ivan Krstić.txt')\n", (2543, 2573), False, 'import os\n'), ((2785, 2823), 'os.path.join', 'os.path.join', (['self.tmp', '"""somefile.txt"""'], {}), "(self.tmp, 'somefile.txt')\n", (2797, 2823), False, 'import os\n'), ((3089, 3125), 'os.path.join', 'os.path.join', (['self.tmp', '"""path"""', '"""to"""'], {}), "(self.tmp, 'path', 'to')\n", (3101, 3125), False, 'import os\n'), ((3363, 3397), 'os.path.join', 'os.path.join', (['self.tmp', '"""file.txt"""'], {}), "(self.tmp, 'file.txt')\n", (3375, 3397), False, 'import os\n'), ((3617, 3653), 'os.path.join', 'os.path.join', (['self.tmp', '"""path"""', '"""to"""'], {}), "(self.tmp, 'path', 'to')\n", (3629, 3653), False, 'import os\n'), ((4068, 4098), 'os.path.join', 'os.path.join', (['self.tmp', '"""path"""'], {}), "(self.tmp, 'path')\n", (4080, 4098), False, 'import os\n'), ((4107, 4119), 'os.mkdir', 'os.mkdir', (['dp'], {}), '(dp)\n', (4115, 4119), False, 'import os\n'), ((4271, 4301), 'os.path.join', 'os.path.join', (['self.tmp', '"""path"""'], {}), "(self.tmp, 'path')\n", (4283, 4301), False, 'import os\n'), ((4477, 4507), 'os.path.join', 'os.path.join', (['self.tmp', '"""path"""'], {}), "(self.tmp, 'path')\n", (4489, 4507), False, 'import os\n'), ((4521, 4549), 'os.path.join', 'os.path.join', (['dp', '"""file.txt"""'], {}), "(dp, 'file.txt')\n", (4533, 4549), False, 'import os\n'), ((4808, 4821), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (4819, 4821), False, 'from django.core.cache import cache\n'), ((5428, 5456), 'amo.utils.cache_ns_key', 'cache_ns_key', (['self.namespace'], {}), '(self.namespace)\n', (5440, 5456), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((5495, 5539), 'amo.utils.cache_ns_key', 'cache_ns_key', (['self.namespace'], {'increment': '(True)'}), '(self.namespace, increment=True)\n', (5507, 5539), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((5599, 5620), 'nose.tools.eq_', 'eq_', (['ns_key', 'expected'], {}), '(ns_key, expected)\n', (5602, 5620), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((6577, 6593), 'amo.utils.escape_all', 'escape_all', (['test'], {}), '(test)\n', (6587, 6593), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((6603, 6631), 'nose.tools.eq_', 'eq_', (["res['string']", 'expected'], {}), "(res['string'], expected)\n", (6606, 6631), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((6640, 6673), 'nose.tools.eq_', 'eq_', (["res['dict']", "{'x': expected}"], {}), "(res['dict'], {'x': expected})\n", (6643, 6673), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((6682, 6710), 'nose.tools.eq_', 'eq_', (["res['list']", '[expected]'], {}), "(res['list'], [expected])\n", (6685, 6710), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((6719, 6741), 'nose.tools.eq_', 'eq_', (["res['bool']", '(True)'], {}), "(res['bool'], True)\n", (6722, 6741), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((7062, 7093), 'amo.utils.escape_all', 'escape_all', (['test'], {'linkify': '(False)'}), '(test, linkify=False)\n', (7072, 7093), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((7103, 7131), 'nose.tools.eq_', 'eq_', (["res['string']", 'expected'], {}), "(res['string'], expected)\n", (7106, 7131), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((7140, 7173), 'nose.tools.eq_', 'eq_', (["res['dict']", "{'x': expected}"], {}), "(res['dict'], {'x': expected})\n", (7143, 7173), False, 'from nose.tools 
import assert_raises, eq_, raises\n'), ((7182, 7210), 'nose.tools.eq_', 'eq_', (["res['list']", '[expected]'], {}), "(res['list'], [expected])\n", (7185, 7210), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((7219, 7241), 'nose.tools.eq_', 'eq_', (["res['bool']", '(True)'], {}), "(res['bool'], True)\n", (7222, 7241), False, 'from nose.tools import assert_raises, eq_, raises\n'), ((841, 851), 'amo.utils.slugify', 'slugify', (['x'], {}), '(x)\n', (848, 851), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((879, 889), 'amo.utils.slugify', 'slugify', (['x'], {}), '(x)\n', (886, 889), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((2016, 2031), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (2025, 2031), False, 'import os\n'), ((4161, 4179), 'os.path.exists', 'os.path.exists', (['dp'], {}), '(dp)\n', (4175, 4179), False, 'import os\n'), ((4665, 4683), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (4679, 4683), False, 'import os\n'), ((4704, 4722), 'os.path.exists', 'os.path.exists', (['dp'], {}), '(dp)\n', (4718, 4722), False, 'import os\n'), ((5003, 5031), 'amo.utils.cache_ns_key', 'cache_ns_key', (['self.namespace'], {}), '(self.namespace)\n', (5015, 5031), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((5211, 5255), 'amo.utils.cache_ns_key', 'cache_ns_key', (['self.namespace'], {'increment': '(True)'}), '(self.namespace, increment=True)\n', (5223, 5255), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((5633, 5661), 'amo.utils.cache_ns_key', 'cache_ns_key', (['self.namespace'], {}), '(self.namespace)\n', (5645, 5661), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((3682, 3710), 'os.path.join', 'os.path.join', (['dp', '"""file.txt"""'], {}), "(dp, 'file.txt')\n", (3694, 3710), False, 'import os\n'), ((3839, 3867), 'os.path.join', 'os.path.join', (['dp', '"""file.txt"""'], {}), "(dp, 'file.txt')\n", (3851, 3867), False, 'import os\n'), ((3939, 3967), 'os.path.join', 'os.path.join', (['dp', '"""file.txt"""'], {}), "(dp, 'file.txt')\n", (3951, 3967), False, 'import os\n'), ((4330, 4358), 'os.path.join', 'os.path.join', (['dp', '"""file.txt"""'], {}), "(dp, 'file.txt')\n", (4342, 4358), False, 'import os\n'), ((6233, 6248), 'amo.utils.escape_all', 'escape_all', (['val'], {}), '(val)\n', (6243, 6248), False, 'from amo.utils import cache_ns_key, escape_all, LocalFileStorage, resize_image, rm_local_tmp_dir, slugify, slug_validator\n'), ((3149, 3177), 'os.path.join', 'os.path.join', (['dp', '"""file.txt"""'], {}), "(dp, 'file.txt')\n", (3161, 3177), False, 'import os\n')]
|
from discord.ext import commands
import requests
import discord
class Ipfy(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('Ipfy cog loaded successfully')
    @commands.command(aliases=['ip'],description="Shows the info about the given ip/website")
    async def ipinfo(self, ctx, ip_address=None):
        if ip_address is None:
await ctx.send('You forgot ip')
else:
#ip_address = int(ip_address)
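            # ip-api.com accepts a numeric "fields" bitmask; 17000447 presumably
            # encodes the set of response fields that are read from the JSON below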
URL = f'http://ip-api.com/json/{ip_address}?fields=17000447'
def check_valid_status_code(request):
if request.status_code == 200:
return request.json()
return False
def get_info():
request = requests.get(URL)
data = check_valid_status_code(request)
return data
infoip = get_info()
            # get_info() returns False on a non-200 response, so guard before indexing
            if not infoip or infoip.get('status') == "fail":
await ctx.channel.send(
"Couldn't get info from API. Try again later.")
else:
embed = discord.Embed(
timestamp=ctx.message.created_at,
title='Ip Info',
description='Tells info about IP/Domain',
color=0xff0000)
embed.add_field(
name='Status', value=infoip['status'])
embed.add_field(
name='IP ADDRESS', value=infoip['query'])
embed.add_field(
name='Country Code',
value=infoip['countryCode'])
embed.add_field(
name='Country Name', value=infoip['country'])
embed.add_field(
name='Region Code', value=infoip['region'])
embed.add_field(
name='Region Name',
value=infoip['regionName'])
embed.add_field(
name='City', value=infoip['city'])
embed.add_field(
name='Zip Code', value=infoip['zip'])
embed.add_field(
name='Time Zone', value=infoip['timezone'])
embed.add_field(
name='Latitude', value=infoip['lat'])
embed.add_field(
name='Longitude', value=infoip['lon'])
embed.add_field(name='ISP', value=infoip['isp'])
embed.add_field(name='ORG', value=infoip['org'])
embed.add_field(
name='Mobile', value=infoip['mobile'])
embed.add_field(
name='Hosting', value=infoip['hosting'])
embed.add_field(
name='Proxy', value=infoip['proxy'])
embed.set_footer(
text=f'Requested By: {ctx.author.name}',
icon_url=f'{ctx.author.avatar_url}')
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Ipfy(client))
|
[
"discord.Embed",
"discord.ext.commands.command",
"requests.get",
"discord.ext.commands.Cog.listener"
] |
[((159, 182), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (180, 182), False, 'from discord.ext import commands\n'), ((265, 359), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['ip']", 'description': '"""Shows the info about the given ip/webiste"""'}), "(aliases=['ip'], description=\n 'Shows the info about the given ip/webiste')\n", (281, 359), False, 'from discord.ext import commands\n'), ((828, 845), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (840, 845), False, 'import requests\n'), ((1197, 1323), 'discord.Embed', 'discord.Embed', ([], {'timestamp': 'ctx.message.created_at', 'title': '"""Ip Info"""', 'description': '"""Tells info about IP/Domain"""', 'color': '(16711680)'}), "(timestamp=ctx.message.created_at, title='Ip Info',\n description='Tells info about IP/Domain', color=16711680)\n", (1210, 1323), False, 'import discord\n')]
|
import types
import typing
import traceback
import contextlib
import sys
from mitmproxy import exceptions
from mitmproxy import eventsequence
from mitmproxy import controller
from mitmproxy import flow
from . import ctx
import pprint
def _get_name(itm):
return getattr(itm, "name", itm.__class__.__name__.lower())
def cut_traceback(tb, func_name):
"""
Cut off a traceback at the function with the given name.
The func_name's frame is excluded.
Args:
tb: traceback object, as returned by sys.exc_info()[2]
func_name: function name
Returns:
Reduced traceback.
"""
tb_orig = tb
for _, _, fname, _ in traceback.extract_tb(tb):
tb = tb.tb_next
if fname == func_name:
break
return tb or tb_orig
@contextlib.contextmanager
def safecall():
try:
yield
except (exceptions.AddonHalt, exceptions.OptionsError):
raise
except Exception:
etype, value, tb = sys.exc_info()
tb = cut_traceback(tb, "invoke_addon")
ctx.log.error(
"Addon error: %s" % "".join(
traceback.format_exception(etype, value, tb)
)
)
class Loader:
"""
A loader object is passed to the load() event when addons start up.
"""
def __init__(self, master):
self.master = master
def add_option(
self,
name: str,
typespec: type,
default: typing.Any,
help: str,
choices: typing.Optional[typing.Sequence[str]] = None
) -> None:
"""
Add an option to mitmproxy.
Help should be a single paragraph with no linebreaks - it will be
reflowed by tools. Information on the data type should be omitted -
it will be generated and added by tools as needed.
"""
if name in self.master.options:
existing = self.master.options._options[name]
same_signature = (
existing.name == name and
existing.typespec == typespec and
existing.default == default and
existing.help == help and
existing.choices == choices
)
if same_signature:
return
else:
ctx.log.warn("Over-riding existing option %s" % name)
self.master.options.add_option(
name,
typespec,
default,
help,
choices
)
def add_command(self, path: str, func: typing.Callable) -> None:
self.master.commands.add(path, func)
def traverse(chain):
"""
Recursively traverse an addon chain.
"""
for a in chain:
yield a
if hasattr(a, "addons"):
yield from traverse(a.addons)
class AddonManager:
def __init__(self, master):
self.lookup = {}
self.chain = []
self.master = master
master.options.changed.connect(self._configure_all)
def _configure_all(self, options, updated):
self.trigger("configure", updated)
def clear(self):
"""
Remove all addons.
"""
for a in self.chain:
self.invoke_addon(a, "done")
self.lookup = {}
self.chain = []
def get(self, name):
"""
Retrieve an addon by name. Addon names are equal to the .name
attribute on the instance, or the lower case class name if that
does not exist.
"""
return self.lookup.get(name, None)
def register(self, addon):
"""
Register an addon, call its load event, and then register all its
sub-addons. This should be used by addons that dynamically manage
addons.
If the calling addon is already running, it should follow with
running and configure events. Must be called within a current
context.
"""
for a in traverse([addon]):
name = _get_name(a)
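            # addon names must be unique across all registered addons,
            # including nested sub-addons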
if name in self.lookup:
raise exceptions.AddonManagerError(
"An addon called '%s' already exists." % name
)
l = Loader(self.master)
self.invoke_addon(addon, "load", l)
for a in traverse([addon]):
name = _get_name(a)
self.lookup[name] = a
for a in traverse([addon]):
self.master.commands.collect_commands(a)
self.master.options.process_deferred()
return addon
def add(self, *addons):
"""
Add addons to the end of the chain, and run their load event.
If any addon has sub-addons, they are registered.
"""
for i in addons:
self.chain.append(self.register(i))
def remove(self, addon):
"""
Remove an addon and all its sub-addons.
If the addon is not in the chain - that is, if it's managed by a
parent addon - it's the parent's responsibility to remove it from
its own addons attribute.
"""
for a in traverse([addon]):
n = _get_name(a)
if n not in self.lookup:
raise exceptions.AddonManagerError("No such addon: %s" % n)
self.chain = [i for i in self.chain if i is not a]
del self.lookup[_get_name(a)]
self.invoke_addon(addon, "done")
def __len__(self):
return len(self.chain)
def __str__(self):
return pprint.pformat([str(i) for i in self.chain])
def __contains__(self, item):
name = _get_name(item)
return name in self.lookup
async def handle_lifecycle(self, name, message):
"""
Handle a lifecycle event.
"""
if not hasattr(message, "reply"): # pragma: no cover
raise exceptions.ControlException(
"Message %s has no reply attribute" % message
)
# We can use DummyReply objects multiple times. We only clear them up on
# the next handler so that we can access value and state in the
# meantime.
if isinstance(message.reply, controller.DummyReply):
message.reply.reset()
self.trigger(name, message)
if message.reply.state == "start":
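            # no addon took over the reply, so take, ack and commit it here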
message.reply.take()
if not message.reply.has_message:
message.reply.ack()
message.reply.commit()
if isinstance(message.reply, controller.DummyReply):
message.reply.mark_reset()
if isinstance(message, flow.Flow):
self.trigger("update", [message])
def invoke_addon(self, addon, name, *args, **kwargs):
"""
Invoke an event on an addon and all its children.
"""
if name not in eventsequence.Events:
raise exceptions.AddonManagerError("Unknown event: %s" % name)
for a in traverse([addon]):
func = getattr(a, name, None)
if func:
if callable(func):
func(*args, **kwargs)
elif isinstance(func, types.ModuleType):
# we gracefully exclude module imports with the same name as hooks.
# For example, a user may have "from mitmproxy import log" in an addon,
# which has the same name as the "log" hook. In this particular case,
# we end up in an error loop because we "log" this error.
pass
else:
raise exceptions.AddonManagerError(
"Addon handler {} ({}) not callable".format(name, a)
)
def trigger(self, name, *args, **kwargs):
"""
Trigger an event across all addons.
"""
for i in self.chain:
try:
with safecall():
self.invoke_addon(i, name, *args, **kwargs)
except exceptions.AddonHalt:
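                # an addon halted processing; stop propagating this event to the
                # remaining addons in the chain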
return
|
[
"mitmproxy.exceptions.ControlException",
"traceback.format_exception",
"sys.exc_info",
"mitmproxy.exceptions.AddonManagerError",
"traceback.extract_tb"
] |
[((664, 688), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {}), '(tb)\n', (684, 688), False, 'import traceback\n'), ((979, 993), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (991, 993), False, 'import sys\n'), ((5869, 5943), 'mitmproxy.exceptions.ControlException', 'exceptions.ControlException', (["('Message %s has no reply attribute' % message)"], {}), "('Message %s has no reply attribute' % message)\n", (5896, 5943), False, 'from mitmproxy import exceptions\n'), ((6881, 6937), 'mitmproxy.exceptions.AddonManagerError', 'exceptions.AddonManagerError', (["('Unknown event: %s' % name)"], {}), "('Unknown event: %s' % name)\n", (6909, 6937), False, 'from mitmproxy import exceptions\n'), ((4098, 4173), 'mitmproxy.exceptions.AddonManagerError', 'exceptions.AddonManagerError', (['("An addon called \'%s\' already exists." % name)'], {}), '("An addon called \'%s\' already exists." % name)\n', (4126, 4173), False, 'from mitmproxy import exceptions\n'), ((5233, 5286), 'mitmproxy.exceptions.AddonManagerError', 'exceptions.AddonManagerError', (["('No such addon: %s' % n)"], {}), "('No such addon: %s' % n)\n", (5261, 5286), False, 'from mitmproxy import exceptions\n'), ((1121, 1165), 'traceback.format_exception', 'traceback.format_exception', (['etype', 'value', 'tb'], {}), '(etype, value, tb)\n', (1147, 1165), False, 'import traceback\n')]
|
import gettext
import json
from os import path
from django.conf import settings
from django.test import (
RequestFactory, SimpleTestCase, TestCase, modify_settings,
override_settings,
)
from django.test.selenium import SeleniumTestCase
from django.urls import reverse
from django.utils.translation import (
LANGUAGE_SESSION_KEY, get_language, override,
)
from django.views.i18n import JavaScriptCatalog, get_formats
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class SetLanguageTests(TestCase):
"""Test the django.views.i18n.set_language view."""
def _get_inactive_language_code(self):
"""Return language code for a language which is not activated."""
current_language = get_language()
return [code for code, name in settings.LANGUAGES if not code == current_language][0]
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i_should_not_be_used/')
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# The language is set in a cookie.
language_cookie = self.client.cookies[settings.LANGUAGE_COOKIE_NAME]
self.assertEqual(language_cookie.value, lang_code)
self.assertEqual(language_cookie['domain'], '')
self.assertEqual(language_cookie['path'], '/')
self.assertEqual(language_cookie['max-age'], '')
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_http_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe" and its scheme is https if the request was sent over https.
"""
lang_code = self._get_inactive_language_code()
non_https_next_url = 'http://testserver/redirection/'
post_data = {'language': lang_code, 'next': non_https_next_url}
# Insecure URL in POST data.
response = self.client.post('/i18n/setlang/', data=post_data, secure=True)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# Insecure URL in HTTP referer.
response = self.client.post('/i18n/setlang/', secure=True, HTTP_REFERER=non_https_next_url)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_redirect_to_referer(self):
"""
The set_language view redirects to the URL in the referer header when
there isn't a "next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i18n/')
self.assertRedirects(response, '/i18n/', fetch_redirect_response=False)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_default_redirect(self):
"""
The set_language view redirects to '/' when there isn't a referer or
"next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data)
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_performs_redirect_for_ajax_if_explicitly_requested(self):
"""
The set_language view redirects to the "next" parameter for AJAX calls.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_redirect_to_referer_for_ajax(self):
"""
The set_language view doesn't redirect to the HTTP referer header for
AJAX calls.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
headers = {'HTTP_REFERER': '/', 'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
response = self.client.post('/i18n/setlang/', post_data, **headers)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_default_redirect_for_ajax(self):
"""
The set_language view returns 204 for AJAX calls by default.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next_for_ajax(self):
"""
The fallback to root URL for the set_language view works for AJAX calls.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
# we force saving language to a cookie rather than a session
# by excluding session middleware and those which do require it
test_settings = {
'MIDDLEWARE': ['django.middleware.common.CommonMiddleware'],
'LANGUAGE_COOKIE_NAME': 'mylanguage',
'LANGUAGE_COOKIE_AGE': 3600 * 7 * 2,
'LANGUAGE_COOKIE_DOMAIN': '.example.com',
'LANGUAGE_COOKIE_PATH': '/test/',
}
with self.settings(**test_settings):
post_data = {'language': 'pl', 'next': '/views/'}
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
def test_setlang_decodes_http_referer_url(self):
"""
The set_language view decodes the HTTP_REFERER URL.
"""
# The URL & view must exist for this to work as a regression test.
self.assertEqual(reverse('with_parameter', kwargs={'parameter': 'x'}), '/test-setlang/x/')
lang_code = self._get_inactive_language_code()
encoded_url = '/test-setlang/%C3%A4/' # (%C3%A4 decodes to ä)
response = self.client.post('/i18n/setlang/', {'language': lang_code}, HTTP_REFERER=encoded_url)
self.assertRedirects(response, encoded_url, fetch_redirect_response=False)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
@modify_settings(MIDDLEWARE={
'append': 'django.middleware.locale.LocaleMiddleware',
})
def test_lang_from_translated_i18n_pattern(self):
response = self.client.post(
'/i18n/setlang/', data={'language': 'nl'},
follow=True, HTTP_REFERER='/en/translated/'
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'nl')
self.assertRedirects(response, '/nl/vertaald/')
# And reverse
response = self.client.post(
'/i18n/setlang/', data={'language': 'en'},
follow=True, HTTP_REFERER='/nl/vertaald/'
)
self.assertRedirects(response, '/en/translated/')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NViewTests(SimpleTestCase):
"""Test django.views.i18n views other than set_language."""
@override_settings(LANGUAGE_CODE='de')
def test_get_formats(self):
formats = get_formats()
# Test 3 possible types in get_formats: integer, string, and list.
self.assertEqual(formats['FIRST_DAY_OF_WEEK'], 0)
self.assertEqual(formats['DECIMAL_SEPARATOR'], '.')
self.assertEqual(formats['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
trans_txt = catalog.gettext('this is to be translated')
response = self.client.get('/jsi18n/')
self.assertEqual(response['Content-Type'], 'text/javascript; charset="utf-8"')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check unicode strings
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, '"month name\\u0004May": "mai"', 1)
@override_settings(USE_I18N=False)
def test_jsi18n_USE_I18N_False(self):
response = self.client.get('/jsi18n/')
# default plural function
self.assertContains(response, 'django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };')
self.assertNotContains(response, 'var newcatalog =')
def test_jsoni18n(self):
"""
The json_catalog returns the language catalog and settings as JSON.
"""
with override('de'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertEqual(data['formats']['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
self.assertEqual(data['formats']['FIRST_DAY_OF_WEEK'], 0)
self.assertIn('plural', data)
self.assertEqual(data['catalog']['month name\x04May'], 'Mai')
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertEqual(data['plural'], '(n != 1)')
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsoni18n_with_missing_en_files(self):
"""
Same as above for the json_catalog view. Here we also check for the
expected JSON format.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertIn('plural', data)
self.assertEqual(data['catalog'], {})
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertIsNone(data['plural'])
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
self.assertNotContains(response, "Untranslated string")
def test_i18n_fallback_language_plural(self):
"""
The fallback to a language with less plural forms maintains the real
language's number of plural forms and correct translations.
"""
with self.settings(LANGUAGE_CODE='pt'), override('ru'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3 p3', '{count} plural3 p3s', '{count} plural3 p3t']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s', '']
)
with self.settings(LANGUAGE_CODE='ru'), override('pt'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3', '{count} plural3s']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s']
)
def test_i18n_english_variant(self):
with override('en-gb'):
response = self.client.get('/jsi18n/')
self.assertIn(
'"this color is to be translated": "this colour is to be translated"',
response.context['catalog_str']
)
def test_i18n_language_non_english_default(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English, the selected language
        is English and there is no 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_non_english_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
available. The Javascript i18n view must return a NON empty language catalog
with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
# Force a language via GET otherwise the gettext functions are a noop!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def test_i18n_language_english_default(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
base_trans_string = 'il faut traduire cette cha\\u00eene de caract\\u00e8res de '
app1_trans_string = base_trans_string + 'app1'
app2_trans_string = base_trans_string + 'app2'
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app1/')
self.assertContains(response, app1_trans_string)
self.assertNotContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app2/')
self.assertNotContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def test_i18n_different_non_english_languages(self):
"""
Similar to above but with neither default or requested language being
English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_with_locale_paths(self):
extended_locale_paths = settings.LOCALE_PATHS + [
path.join(
path.dirname(path.dirname(path.abspath(__file__))),
'app3',
'locale',
),
]
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_unknown_package_error(self):
view = JavaScriptCatalog.as_view()
request = RequestFactory().get('/')
msg = 'Invalid package(s) provided to JavaScriptCatalog: unknown_package'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package')
msg += ',unknown_package2'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package+unknown_package2')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18nSeleniumTests(SeleniumTestCase):
# The test cases use fixtures & translations from these apps.
available_apps = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'view_tests',
]
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get(self.live_server_url + '/jsi18n_template/')
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
elem = self.selenium.find_element_by_id("formats")
self.assertEqual(
elem.text,
"DATE_INPUT_FORMATS is an object; DECIMAL_SEPARATOR is a string; FIRST_DAY_OF_WEEK is a number;"
)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
@override_settings(LANGUAGE_CODE='fr')
def test_multiple_catalogs(self):
self.selenium.get(self.live_server_url + '/jsi18n_multi_catalogs/')
elem = self.selenium.find_element_by_id('app1string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app1')
elem = self.selenium.find_element_by_id('app2string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app2')
|
[
"gettext.translation",
"os.path.abspath",
"django.test.RequestFactory",
"django.test.modify_settings",
"django.views.i18n.JavaScriptCatalog.as_view",
"django.utils.translation.get_language",
"json.dumps",
"django.utils.translation.override",
"django.urls.reverse",
"django.test.override_settings",
"django.views.i18n.get_formats"
] |
[((463, 512), 'django.test.override_settings', 'override_settings', ([], {'ROOT_URLCONF': '"""view_tests.urls"""'}), "(ROOT_URLCONF='view_tests.urls')\n", (480, 512), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((8746, 8795), 'django.test.override_settings', 'override_settings', ([], {'ROOT_URLCONF': '"""view_tests.urls"""'}), "(ROOT_URLCONF='view_tests.urls')\n", (8763, 8795), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((19568, 19617), 'django.test.override_settings', 'override_settings', ([], {'ROOT_URLCONF': '"""view_tests.urls"""'}), "(ROOT_URLCONF='view_tests.urls')\n", (19585, 19617), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((8066, 8153), 'django.test.modify_settings', 'modify_settings', ([], {'MIDDLEWARE': "{'append': 'django.middleware.locale.LocaleMiddleware'}"}), "(MIDDLEWARE={'append':\n 'django.middleware.locale.LocaleMiddleware'})\n", (8081, 8153), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((8902, 8939), 'django.test.override_settings', 'override_settings', ([], {'LANGUAGE_CODE': '"""de"""'}), "(LANGUAGE_CODE='de')\n", (8919, 8939), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((10257, 10290), 'django.test.override_settings', 'override_settings', ([], {'USE_I18N': '(False)'}), '(USE_I18N=False)\n', (10274, 10290), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((15088, 15149), 'django.test.modify_settings', 'modify_settings', ([], {'INSTALLED_APPS': "{'append': 'view_tests.app0'}"}), "(INSTALLED_APPS={'append': 'view_tests.app0'})\n", (15103, 15149), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((16308, 16371), 'django.test.modify_settings', 'modify_settings', ([], {'INSTALLED_APPS': "{'append': ['view_tests.app5']}"}), "(INSTALLED_APPS={'append': ['view_tests.app5']})\n", (16323, 16371), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((16746, 16832), 'django.test.modify_settings', 'modify_settings', ([], {'INSTALLED_APPS': "{'append': ['view_tests.app1', 'view_tests.app2']}"}), "(INSTALLED_APPS={'append': ['view_tests.app1',\n 'view_tests.app2']})\n", (16761, 16832), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((18067, 18153), 'django.test.modify_settings', 'modify_settings', ([], {'INSTALLED_APPS': "{'append': ['view_tests.app3', 'view_tests.app4']}"}), "(INSTALLED_APPS={'append': ['view_tests.app3',\n 'view_tests.app4']})\n", (18082, 18153), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((19871, 19908), 'django.test.override_settings', 'override_settings', ([], {'LANGUAGE_CODE': '"""de"""'}), "(LANGUAGE_CODE='de')\n", (19888, 19908), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((20930, 21016), 'django.test.modify_settings', 'modify_settings', ([], {'INSTALLED_APPS': "{'append': ['view_tests.app1', 'view_tests.app2']}"}), "(INSTALLED_APPS={'append': ['view_tests.app1',\n 'view_tests.app2']})\n", (20945, 21016), False, 
'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((21018, 21055), 'django.test.override_settings', 'override_settings', ([], {'LANGUAGE_CODE': '"""fr"""'}), "(LANGUAGE_CODE='fr')\n", (21035, 21055), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((748, 762), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (760, 762), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((8990, 9003), 'django.views.i18n.get_formats', 'get_formats', ([], {}), '()\n', (9001, 9003), False, 'from django.views.i18n import JavaScriptCatalog, get_formats\n'), ((19139, 19166), 'django.views.i18n.JavaScriptCatalog.as_view', 'JavaScriptCatalog.as_view', ([], {}), '()\n', (19164, 19166), False, 'from django.views.i18n import JavaScriptCatalog, get_formats\n'), ((6316, 6339), 'django.urls.reverse', 'reverse', (['"""set_language"""'], {}), "('set_language')\n", (6323, 6339), False, 'from django.urls import reverse\n'), ((7593, 7645), 'django.urls.reverse', 'reverse', (['"""with_parameter"""'], {'kwargs': "{'parameter': 'x'}"}), "('with_parameter', kwargs={'parameter': 'x'})\n", (7600, 7645), False, 'from django.urls import reverse\n'), ((10728, 10742), 'django.utils.translation.override', 'override', (['"""de"""'], {}), "('de')\n", (10736, 10742), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((11865, 11882), 'django.utils.translation.override', 'override', (['"""en-us"""'], {}), "('en-us')\n", (11873, 11882), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((12242, 12259), 'django.utils.translation.override', 'override', (['"""en-us"""'], {}), "('en-us')\n", (12250, 12259), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((12914, 12928), 'django.utils.translation.override', 'override', (['"""fi"""'], {}), "('fi')\n", (12922, 12928), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((13382, 13396), 'django.utils.translation.override', 'override', (['"""ru"""'], {}), "('ru')\n", (13390, 13396), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((13858, 13872), 'django.utils.translation.override', 'override', (['"""pt"""'], {}), "('pt')\n", (13866, 13872), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((14308, 14325), 'django.utils.translation.override', 'override', (['"""en-gb"""'], {}), "('en-gb')\n", (14316, 14325), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((14946, 14963), 'django.utils.translation.override', 'override', (['"""en-us"""'], {}), "('en-us')\n", (14954, 14963), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((15515, 15532), 'django.utils.translation.override', 'override', (['"""en-us"""'], {}), "('en-us')\n", (15523, 15532), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((15949, 15965), 'django.utils.translation.override', 'override', (['"""none"""'], {}), "('none')\n", (15957, 15965), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((16557, 16571), 'django.utils.translation.override', 'override', (['"""fr"""'], {}), "('fr')\n", (16565, 16571), 
False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((17492, 17506), 'django.utils.translation.override', 'override', (['"""fr"""'], {}), "('fr')\n", (17500, 17506), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((18374, 18391), 'django.utils.translation.override', 'override', (['"""es-ar"""'], {}), "('es-ar')\n", (18382, 18391), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((9457, 9476), 'django.utils.translation.override', 'override', (['lang_code'], {}), '(lang_code)\n', (9465, 9476), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((9504, 9560), 'gettext.translation', 'gettext.translation', (['"""djangojs"""', 'locale_dir', '[lang_code]'], {}), "('djangojs', locale_dir, [lang_code])\n", (9523, 9560), False, 'import gettext\n'), ((18915, 18932), 'django.utils.translation.override', 'override', (['"""es-ar"""'], {}), "('es-ar')\n", (18923, 18932), False, 'from django.utils.translation import LANGUAGE_SESSION_KEY, get_language, override\n'), ((19185, 19201), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (19199, 19201), False, 'from django.test import RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings\n'), ((10048, 10069), 'json.dumps', 'json.dumps', (['trans_txt'], {}), '(trans_txt)\n', (10058, 10069), False, 'import json\n'), ((18710, 18732), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (18722, 18732), False, 'from os import path\n')]
|
# BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import inspect
import itertools
import logging
import os
import platform
import re
import sys
import threading
import time
import warnings
from copy import deepcopy
from typing import Optional, Tuple
import elasticapm
from elasticapm.conf import Config, VersionedConfig, constants
from elasticapm.conf.constants import ERROR
from elasticapm.metrics.base_metrics import MetricsRegistry
from elasticapm.traces import Tracer, execution_context
from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap
from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform
from elasticapm.utils.logging import get_logger
from elasticapm.utils.module_import import import_string
__all__ = ("Client",)
CLIENT_SINGLETON = None
class Client(object):
"""
The base ElasticAPM client, which handles communication over the
HTTP API to the APM Server.
Will read default configuration from the environment variable
``ELASTIC_APM_APP_NAME`` and ``ELASTIC_APM_SECRET_TOKEN``
if available. ::
>>> from elasticapm import Client
>>> # Read configuration from environment
>>> client = Client()
>>> # Configure the client manually
>>> client = Client(
>>> include_paths=['my.package'],
>>> service_name='myapp',
>>> secret_token='secret_token',
>>> )
>>> # Record an exception
>>> try:
>>> 1/0
>>> except ZeroDivisionError:
>>> ident = client.capture_exception()
>>> print ("Exception caught; reference is %%s" %% ident)
"""
logger = get_logger("elasticapm")
def __init__(self, config=None, **inline):
# configure loggers first
cls = self.__class__
self.logger = get_logger("%s.%s" % (cls.__module__, cls.__name__))
self.error_logger = get_logger("elasticapm.errors")
self._pid = None
self._thread_starter_lock = threading.Lock()
self._thread_managers = {}
self.tracer = None
self.processors = []
self.filter_exception_types_dict = {}
self._service_info = None
# setting server_version here is mainly used for testing
self.server_version = inline.pop("server_version", None)
self.check_python_version()
config = Config(config, inline_dict=inline)
if config.errors:
for msg in config.errors.values():
self.error_logger.error(msg)
config.disable_send = True
if config.service_name == "python_service":
self.logger.warning("No custom SERVICE_NAME was set -- using non-descript default 'python_service'")
self.config = VersionedConfig(config, version=None)
# Insert the log_record_factory into the logging library
# The LogRecordFactory functionality is only available on python 3.2+
if compat.PY3 and not self.config.disable_log_record_factory:
record_factory = logging.getLogRecordFactory()
# Only way to know if it's wrapped is to create a log record
throwaway_record = record_factory(__name__, logging.DEBUG, __file__, 252, "dummy_msg", [], None)
if not hasattr(throwaway_record, "elasticapm_labels"):
self.logger.debug("Inserting elasticapm log_record_factory into logging")
# Late import due to circular imports
import elasticapm.handlers.logging as elastic_logging
new_factory = elastic_logging.log_record_factory(record_factory)
logging.setLogRecordFactory(new_factory)
headers = {
"Content-Type": "application/x-ndjson",
"Content-Encoding": "gzip",
"User-Agent": self.get_user_agent(),
}
transport_kwargs = {
"headers": headers,
"verify_server_cert": self.config.verify_server_cert,
"server_cert": self.config.server_cert,
"timeout": self.config.server_timeout,
"processors": self.load_processors(),
}
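        # make sure the server URL ends with "/" so urljoin appends the events API
        # path instead of replacing the last path segment of a configured base path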
self._api_endpoint_url = compat.urlparse.urljoin(
self.config.server_url if self.config.server_url.endswith("/") else self.config.server_url + "/",
constants.EVENTS_API_PATH,
)
transport_class = import_string(self.config.transport_class)
self._transport = transport_class(url=self._api_endpoint_url, client=self, **transport_kwargs)
self.config.transport = self._transport
self._thread_managers["transport"] = self._transport
for exc_to_filter in self.config.filter_exception_types or []:
exc_to_filter_type = exc_to_filter.split(".")[-1]
exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module
if platform.python_implementation() == "PyPy":
# PyPy introduces a `_functools.partial.__call__` frame due to our use
# of `partial` in AbstractInstrumentedModule
skip_modules = ("elasticapm.", "_functools")
else:
skip_modules = ("elasticapm.",)
self.tracer = Tracer(
frames_collector_func=lambda: list(
stacks.iter_stack_frames(
start_frame=inspect.currentframe(), skip_top_modules=skip_modules, config=self.config
)
),
frames_processing_func=lambda frames: self._get_stack_info_for_trace(
frames,
library_frame_context_lines=self.config.source_lines_span_library_frames,
in_app_frame_context_lines=self.config.source_lines_span_app_frames,
with_locals=self.config.collect_local_variables in ("all", "transactions"),
locals_processor_func=lambda local_var: varmap(
lambda k, v: shorten(
v,
list_length=self.config.local_var_list_max_length,
string_length=self.config.local_var_max_length,
dict_length=self.config.local_var_dict_max_length,
),
local_var,
),
),
queue_func=self.queue,
config=self.config,
agent=self,
)
self.include_paths_re = stacks.get_path_regex(self.config.include_paths) if self.config.include_paths else None
self.exclude_paths_re = stacks.get_path_regex(self.config.exclude_paths) if self.config.exclude_paths else None
self._metrics = MetricsRegistry(self)
for path in self.config.metrics_sets:
self._metrics.register(path)
if self.config.breakdown_metrics:
self._metrics.register("elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
if self.config.prometheus_metrics:
self._metrics.register("elasticapm.metrics.sets.prometheus.PrometheusMetrics")
if self.config.metrics_interval:
self._thread_managers["metrics"] = self._metrics
compat.atexit_register(self.close)
if self.config.central_config:
self._thread_managers["config"] = self.config
else:
self._config_updater = None
if self.config.use_elastic_excepthook:
self.original_excepthook = sys.excepthook
sys.excepthook = self._excepthook
if config.enabled:
self.start_threads()
# Save this Client object as the global CLIENT_SINGLETON
set_client(self)
def start_threads(self):
current_pid = os.getpid()
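        # restart worker threads if the process has forked since they were last
        # started (e.g. under pre-forking servers such as gunicorn or uWSGI)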
if self._pid != current_pid:
with self._thread_starter_lock:
self.logger.debug("Detected PID change from %r to %r, starting threads", self._pid, current_pid)
for manager_type, manager in sorted(
self._thread_managers.items(), key=lambda item: item[1].start_stop_order
):
self.logger.debug("Starting %s thread", manager_type)
manager.start_thread(pid=current_pid)
self._pid = current_pid
def get_handler(self, name):
return import_string(name)
def capture(self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs):
"""
Captures and processes an event and pipes it off to Client.send.
"""
if not self.config.is_recording:
return
if event_type == "Exception":
# never gather log stack for exceptions
stack = False
data = self._build_msg_for_logging(
event_type, date=date, context=context, custom=custom, stack=stack, handled=handled, **kwargs
)
if data:
# queue data, and flush the queue if this is an unhandled exception
self.queue(ERROR, data, flush=not handled)
return data["id"]
def capture_message(self, message=None, param_message=None, **kwargs):
"""
Creates an event from ``message``.
>>> client.capture_message('My event just happened!')
"""
return self.capture("Message", message=message, param_message=param_message, **kwargs)
def capture_exception(self, exc_info=None, handled=True, **kwargs):
"""
Creates an event from an exception.
>>> try:
>>> exc_info = sys.exc_info()
>>> client.capture_exception(exc_info)
>>> finally:
>>> del exc_info
If exc_info is not provided, or is set to True, then this method will
perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
for you.
"""
return self.capture("Exception", exc_info=exc_info, handled=handled, **kwargs)
def queue(self, event_type, data, flush=False):
if self.config.disable_send:
return
self.start_threads()
if flush and is_master_process():
# don't flush in uWSGI master process to avoid ending up in an unpredictable threading state
flush = False
self._transport.queue(event_type, data, flush)
def begin_transaction(self, transaction_type, trace_parent=None, start=None):
"""
Register the start of a transaction on the client
:param transaction_type: type of the transaction, e.g. "request"
:param trace_parent: an optional TraceParent object for distributed tracing
:param start: override the start timestamp, mostly useful for testing
:return: the started transaction object
"""
if self.config.is_recording:
return self.tracer.begin_transaction(transaction_type, trace_parent=trace_parent, start=start)
def end_transaction(self, name=None, result="", duration=None):
"""
End the current transaction.
:param name: optional name of the transaction
:param result: result of the transaction, e.g. "OK" or "HTTP 2xx"
:param duration: override duration, mostly useful for testing
:return: the ended transaction object
"""
transaction = self.tracer.end_transaction(result, name, duration=duration)
return transaction
def close(self):
if self.config.enabled:
with self._thread_starter_lock:
for _, manager in sorted(self._thread_managers.items(), key=lambda item: item[1].start_stop_order):
manager.stop_thread()
global CLIENT_SINGLETON
CLIENT_SINGLETON = None
def get_service_info(self):
if self._service_info:
return self._service_info
language_version = platform.python_version()
if hasattr(sys, "pypy_version_info"):
runtime_version = ".".join(map(str, sys.pypy_version_info[:3]))
else:
runtime_version = language_version
result = {
"name": keyword_field(self.config.service_name),
"environment": keyword_field(self.config.environment),
"version": keyword_field(self.config.service_version),
"agent": {"name": "python", "version": elasticapm.VERSION},
"language": {"name": "python", "version": keyword_field(platform.python_version())},
"runtime": {
"name": keyword_field(platform.python_implementation()),
"version": keyword_field(runtime_version),
},
}
if self.config.framework_name:
result["framework"] = {
"name": keyword_field(self.config.framework_name),
"version": keyword_field(self.config.framework_version),
}
if self.config.service_node_name:
result["node"] = {"configured_name": keyword_field(self.config.service_node_name)}
self._service_info = result
return result
def get_process_info(self):
return {
"pid": os.getpid(),
"ppid": os.getppid() if hasattr(os, "getppid") else None,
"argv": sys.argv,
"title": None, # Note: if we implement this, the value needs to be wrapped with keyword_field
}
def get_system_info(self):
system_data = {
"hostname": keyword_field(self.config.hostname),
"architecture": platform.machine(),
"platform": platform.system().lower(),
}
system_data.update(cgroup.get_cgroup_container_metadata())
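        # KUBERNETES_POD_NAME (commonly set via the Downward API) takes precedence;
        # otherwise fall back to the hostname, which defaults to the pod name in Kubernetes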
pod_name = os.environ.get("KUBERNETES_POD_NAME") or system_data["hostname"]
changed = False
if "kubernetes" in system_data:
k8s = system_data["kubernetes"]
k8s["pod"]["name"] = pod_name
else:
k8s = {"pod": {"name": pod_name}}
# get kubernetes metadata from environment
if "KUBERNETES_NODE_NAME" in os.environ:
k8s["node"] = {"name": os.environ["KUBERNETES_NODE_NAME"]}
changed = True
if "KUBERNETES_NAMESPACE" in os.environ:
k8s["namespace"] = os.environ["KUBERNETES_NAMESPACE"]
changed = True
if "KUBERNETES_POD_UID" in os.environ:
# this takes precedence over any value from /proc/self/cgroup
k8s["pod"]["uid"] = os.environ["KUBERNETES_POD_UID"]
changed = True
if changed:
system_data["kubernetes"] = k8s
return system_data
def get_cloud_info(self):
"""
Detects if the app is running in a cloud provider and fetches relevant
metadata from the cloud provider's metadata endpoint.
"""
provider = str(self.config.cloud_provider).lower()
if not provider or provider == "none" or provider == "false":
return {}
if provider == "aws":
data = cloud.aws_metadata()
if not data:
self.logger.warning("Cloud provider {0} defined, but no metadata was found.".format(provider))
return data
elif provider == "gcp":
data = cloud.gcp_metadata()
if not data:
self.logger.warning("Cloud provider {0} defined, but no metadata was found.".format(provider))
return data
elif provider == "azure":
data = cloud.azure_metadata()
if not data:
self.logger.warning("Cloud provider {0} defined, but no metadata was found.".format(provider))
return data
elif provider == "auto" or provider == "true":
# Trial and error
data = {}
data = cloud.aws_metadata()
if data:
return data
data = cloud.gcp_metadata()
if data:
return data
data = cloud.azure_metadata()
return data
else:
self.logger.warning("Unknown value for CLOUD_PROVIDER, skipping cloud metadata: {}".format(provider))
return {}
def get_user_agent(self) -> str:
"""
Compiles the user agent, which will be added as a header to all requests
to the APM Server
"""
if self.config.service_version:
service_version = re.sub(r"[^\t _\x21-\x27\x2a-\x5b\x5d-\x7e\x80-\xff]", "_", self.config.service_version)
return "apm-agent-python/{} ({} {})".format(elasticapm.VERSION, self.config.service_name, service_version)
else:
return "apm-agent-python/{} ({})".format(elasticapm.VERSION, self.config.service_name)
def build_metadata(self):
data = {
"service": self.get_service_info(),
"process": self.get_process_info(),
"system": self.get_system_info(),
"cloud": self.get_cloud_info(),
}
if not data["cloud"]:
data.pop("cloud")
if self.config.global_labels:
data["labels"] = enforce_label_format(self.config.global_labels)
return data
def _build_msg_for_logging(
self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs
):
"""
Captures, processes and serializes an event into a dict object
"""
transaction = execution_context.get_transaction()
span = execution_context.get_span()
if transaction:
transaction_context = deepcopy(transaction.context)
else:
transaction_context = {}
event_data = {}
if custom is None:
custom = {}
if date is not None:
warnings.warn(
"The date argument is no longer evaluated and will be removed in a future release", DeprecationWarning
)
date = time.time()
if stack is None:
stack = self.config.auto_log_stacks
if context:
transaction_context.update(context)
context = transaction_context
else:
context = transaction_context
event_data["context"] = context
if transaction and transaction.labels:
context["tags"] = deepcopy(transaction.labels)
# if '.' not in event_type:
# Assume it's a builtin
event_type = "elasticapm.events.%s" % event_type
handler = self.get_handler(event_type)
result = handler.capture(self, **kwargs)
if self._filter_exception_type(result):
return
# data (explicit) culprit takes over auto event detection
culprit = result.pop("culprit", None)
if custom.get("culprit"):
culprit = custom.pop("culprit")
for k, v in compat.iteritems(result):
if k not in event_data:
event_data[k] = v
log = event_data.get("log", {})
if stack and "stacktrace" not in log:
if stack is True:
frames = stacks.iter_stack_frames(skip=3, config=self.config)
else:
frames = stack
frames = stacks.get_stack_info(
frames,
with_locals=self.config.collect_local_variables in ("errors", "all"),
library_frame_context_lines=self.config.source_lines_error_library_frames,
in_app_frame_context_lines=self.config.source_lines_error_app_frames,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=lambda local_var: varmap(
lambda k, v: shorten(
v,
list_length=self.config.local_var_list_max_length,
string_length=self.config.local_var_max_length,
dict_length=self.config.local_var_dict_max_length,
),
local_var,
),
)
log["stacktrace"] = frames
if "stacktrace" in log and not culprit:
culprit = stacks.get_culprit(log["stacktrace"], self.config.include_paths, self.config.exclude_paths)
if "level" in log and isinstance(log["level"], compat.integer_types):
log["level"] = logging.getLevelName(log["level"]).lower()
if log:
event_data["log"] = log
if culprit:
event_data["culprit"] = culprit
if "custom" in context:
context["custom"].update(custom)
else:
context["custom"] = custom
# Make sure all data is coerced
event_data = transform(event_data)
if "exception" in event_data:
event_data["exception"]["handled"] = bool(handled)
event_data["timestamp"] = int(date * 1000000)
if transaction:
if transaction.trace_parent:
event_data["trace_id"] = transaction.trace_parent.trace_id
# parent id might already be set in the handler
event_data.setdefault("parent_id", span.id if span else transaction.id)
event_data["transaction_id"] = transaction.id
event_data["transaction"] = {
"sampled": transaction.is_sampled,
"type": transaction.transaction_type,
"name": transaction.name,
}
return event_data
def _filter_exception_type(self, data):
exception = data.get("exception")
if not exception:
return False
exc_type = exception.get("type")
exc_module = exception.get("module")
if exc_module == "None":
exc_module = None
if exc_type in self.filter_exception_types_dict:
exc_to_filter_module = self.filter_exception_types_dict[exc_type]
if not exc_to_filter_module or exc_to_filter_module == exc_module:
if exc_module:
exc_name = "%s.%s" % (exc_module, exc_type)
else:
exc_name = exc_type
self.logger.debug("Ignored %s exception due to exception type filter", exc_name)
return True
return False
def _get_stack_info_for_trace(
self,
frames,
library_frame_context_lines=None,
in_app_frame_context_lines=None,
with_locals=True,
locals_processor_func=None,
):
"""Overrideable in derived clients to add frames/info, e.g. templates"""
return stacks.get_stack_info(
frames,
library_frame_context_lines=library_frame_context_lines,
in_app_frame_context_lines=in_app_frame_context_lines,
with_locals=with_locals,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=locals_processor_func,
)
def _excepthook(self, type_, value, traceback):
try:
self.original_excepthook(type_, value, traceback)
except Exception:
self.capture_exception(handled=False)
finally:
self.capture_exception(exc_info=(type_, value, traceback), handled=False)
def load_processors(self):
"""
Loads processors from self.config.processors, as well as constants.HARDCODED_PROCESSORS.
Duplicate processors (based on the path) will be discarded.
:return: a list of callables
"""
processors = itertools.chain(self.config.processors, constants.HARDCODED_PROCESSORS)
seen = {}
# setdefault has the nice property that it returns the value that it just set on the dict
return [seen.setdefault(path, import_string(path)) for path in processors if path not in seen]
def should_ignore_url(self, url):
if self.config.transaction_ignore_urls:
for pattern in self.config.transaction_ignore_urls:
if pattern.match(url):
return True
return False
def check_python_version(self):
v = tuple(map(int, platform.python_version_tuple()[:2]))
if v == (2, 7):
warnings.warn(
(
"The Elastic APM agent will stop supporting Python 2.7 starting in 6.0.0 -- "
"Please upgrade to Python 3.5+ to continue to use the latest features."
),
PendingDeprecationWarning,
)
elif v < (3, 5):
warnings.warn("The Elastic APM agent only supports Python 3.5+", DeprecationWarning)
def check_server_version(
self, gte: Optional[Tuple[int, ...]] = None, lte: Optional[Tuple[int, ...]] = None
) -> bool:
"""
Check APM Server version against greater-or-equal and/or lower-or-equal limits, provided as tuples of integers.
If server_version is not set, always returns True.
:param gte: a tuple of ints describing the greater-or-equal limit, e.g. (7, 16)
:param lte: a tuple of ints describing the lower-or-equal limit, e.g. (7, 99)
:return: bool
"""
if not self.server_version:
return True
gte = gte or (0,)
lte = lte or (2 ** 32,) # let's assume APM Server version will never be greater than 2^32
return bool(gte <= self.server_version <= lte)
class DummyClient(Client):
"""Sends messages into an empty void"""
def send(self, url, **kwargs):
return None
def get_client() -> Client:
return CLIENT_SINGLETON
def set_client(client: Client):
global CLIENT_SINGLETON
if CLIENT_SINGLETON:
logger = get_logger("elasticapm")
logger.warning("Client object is being set more than once", stack_info=True)
CLIENT_SINGLETON = client
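

# --- Editor's sketch (not part of the original module): minimal end-to-end use
# of the client API defined above. DummyClient discards everything it would
# send, so this runs without an APM Server; "example-service" is a made-up
# value, and passing config options as keyword arguments is an assumption
# based on how Config/inline options are referenced earlier in this file.
if __name__ == "__main__":
    client = DummyClient(service_name="example-service", disable_send=True)
    client.begin_transaction("request")
    try:
        1 / 0
    except ZeroDivisionError:
        client.capture_exception(handled=True)
    client.end_transaction(name="GET /example", result="HTTP 500")
    client.close()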
|
[
"platform.python_version",
"os.getppid",
"elasticapm.utils.cloud.aws_metadata",
"elasticapm.utils.compat.iteritems",
"elasticapm.utils.stacks.get_stack_info",
"elasticapm.conf.VersionedConfig",
"elasticapm.utils.stacks.get_culprit",
"logging.getLevelName",
"elasticapm.utils.stacks.get_path_regex",
"platform.python_version_tuple",
"elasticapm.utils.encoding.transform",
"elasticapm.utils.cgroup.get_cgroup_container_metadata",
"threading.Lock",
"elasticapm.utils.encoding.keyword_field",
"platform.machine",
"itertools.chain",
"elasticapm.conf.Config",
"re.sub",
"logging.setLogRecordFactory",
"copy.deepcopy",
"logging.getLogRecordFactory",
"elasticapm.utils.module_import.import_string",
"elasticapm.handlers.logging.log_record_factory",
"elasticapm.utils.logging.get_logger",
"elasticapm.utils.cloud.gcp_metadata",
"elasticapm.metrics.base_metrics.MetricsRegistry",
"elasticapm.utils.encoding.shorten",
"inspect.currentframe",
"platform.system",
"elasticapm.traces.execution_context.get_transaction",
"elasticapm.utils.compat.atexit_register",
"platform.python_implementation",
"elasticapm.utils.stacks.iter_stack_frames",
"elasticapm.utils.cloud.azure_metadata",
"os.getpid",
"elasticapm.utils.is_master_process",
"elasticapm.traces.execution_context.get_span",
"time.time",
"os.environ.get",
"elasticapm.utils.encoding.enforce_label_format",
"warnings.warn"
] |
[((3222, 3246), 'elasticapm.utils.logging.get_logger', 'get_logger', (['"""elasticapm"""'], {}), "('elasticapm')\n", (3232, 3246), False, 'from elasticapm.utils.logging import get_logger\n'), ((3380, 3432), 'elasticapm.utils.logging.get_logger', 'get_logger', (["('%s.%s' % (cls.__module__, cls.__name__))"], {}), "('%s.%s' % (cls.__module__, cls.__name__))\n", (3390, 3432), False, 'from elasticapm.utils.logging import get_logger\n'), ((3461, 3492), 'elasticapm.utils.logging.get_logger', 'get_logger', (['"""elasticapm.errors"""'], {}), "('elasticapm.errors')\n", (3471, 3492), False, 'from elasticapm.utils.logging import get_logger\n'), ((3555, 3571), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3569, 3571), False, 'import threading\n'), ((3929, 3963), 'elasticapm.conf.Config', 'Config', (['config'], {'inline_dict': 'inline'}), '(config, inline_dict=inline)\n', (3935, 3963), False, 'from elasticapm.conf import Config, VersionedConfig, constants\n'), ((4308, 4345), 'elasticapm.conf.VersionedConfig', 'VersionedConfig', (['config'], {'version': 'None'}), '(config, version=None)\n', (4323, 4345), False, 'from elasticapm.conf import Config, VersionedConfig, constants\n'), ((5928, 5970), 'elasticapm.utils.module_import.import_string', 'import_string', (['self.config.transport_class'], {}), '(self.config.transport_class)\n', (5941, 5970), False, 'from elasticapm.utils.module_import import import_string\n'), ((8232, 8253), 'elasticapm.metrics.base_metrics.MetricsRegistry', 'MetricsRegistry', (['self'], {}), '(self)\n', (8247, 8253), False, 'from elasticapm.metrics.base_metrics import MetricsRegistry\n'), ((8718, 8752), 'elasticapm.utils.compat.atexit_register', 'compat.atexit_register', (['self.close'], {}), '(self.close)\n', (8740, 8752), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((9254, 9265), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9263, 9265), False, 'import os\n'), ((9846, 9865), 'elasticapm.utils.module_import.import_string', 'import_string', (['name'], {}), '(name)\n', (9859, 9865), False, 'from elasticapm.utils.module_import import import_string\n'), ((13348, 13373), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (13371, 13373), False, 'import platform\n'), ((18878, 18913), 'elasticapm.traces.execution_context.get_transaction', 'execution_context.get_transaction', ([], {}), '()\n', (18911, 18913), False, 'from elasticapm.traces import Tracer, execution_context\n'), ((18929, 18957), 'elasticapm.traces.execution_context.get_span', 'execution_context.get_span', ([], {}), '()\n', (18955, 18957), False, 'from elasticapm.traces import Tracer, execution_context\n'), ((19376, 19387), 'time.time', 'time.time', ([], {}), '()\n', (19385, 19387), False, 'import time\n'), ((20275, 20299), 'elasticapm.utils.compat.iteritems', 'compat.iteritems', (['result'], {}), '(result)\n', (20291, 20299), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((22162, 22183), 'elasticapm.utils.encoding.transform', 'transform', (['event_data'], {}), '(event_data)\n', (22171, 22183), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((24033, 24343), 'elasticapm.utils.stacks.get_stack_info', 'stacks.get_stack_info', (['frames'], {'library_frame_context_lines': 'library_frame_context_lines', 'in_app_frame_context_lines': 'in_app_frame_context_lines', 'with_locals': 'with_locals', 'include_paths_re': 'self.include_paths_re', 
'exclude_paths_re': 'self.exclude_paths_re', 'locals_processor_func': 'locals_processor_func'}), '(frames, library_frame_context_lines=\n library_frame_context_lines, in_app_frame_context_lines=\n in_app_frame_context_lines, with_locals=with_locals, include_paths_re=\n self.include_paths_re, exclude_paths_re=self.exclude_paths_re,\n locals_processor_func=locals_processor_func)\n', (24054, 24343), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((25007, 25078), 'itertools.chain', 'itertools.chain', (['self.config.processors', 'constants.HARDCODED_PROCESSORS'], {}), '(self.config.processors, constants.HARDCODED_PROCESSORS)\n', (25022, 25078), False, 'import itertools\n'), ((27167, 27191), 'elasticapm.utils.logging.get_logger', 'get_logger', (['"""elasticapm"""'], {}), "('elasticapm')\n", (27177, 27191), False, 'from elasticapm.utils.logging import get_logger\n'), ((4589, 4618), 'logging.getLogRecordFactory', 'logging.getLogRecordFactory', ([], {}), '()\n', (4616, 4618), False, 'import logging\n'), ((6492, 6524), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (6522, 6524), False, 'import platform\n'), ((8000, 8048), 'elasticapm.utils.stacks.get_path_regex', 'stacks.get_path_regex', (['self.config.include_paths'], {}), '(self.config.include_paths)\n', (8021, 8048), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((8120, 8168), 'elasticapm.utils.stacks.get_path_regex', 'stacks.get_path_regex', (['self.config.exclude_paths'], {}), '(self.config.exclude_paths)\n', (8141, 8168), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((11614, 11633), 'elasticapm.utils.is_master_process', 'is_master_process', ([], {}), '()\n', (11631, 11633), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((13596, 13635), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.service_name'], {}), '(self.config.service_name)\n', (13609, 13635), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((13664, 13702), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.environment'], {}), '(self.config.environment)\n', (13677, 13702), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((13727, 13769), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.service_version'], {}), '(self.config.service_version)\n', (13740, 13769), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((14615, 14626), 'os.getpid', 'os.getpid', ([], {}), '()\n', (14624, 14626), False, 'import os\n'), ((14925, 14960), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.hostname'], {}), '(self.config.hostname)\n', (14938, 14960), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((14990, 15008), 'platform.machine', 'platform.machine', ([], {}), '()\n', (15006, 15008), False, 'import platform\n'), ((15098, 15136), 'elasticapm.utils.cgroup.get_cgroup_container_metadata', 'cgroup.get_cgroup_container_metadata', ([], {}), '()\n', (15134, 15136), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((15157, 15194), 'os.environ.get', 'os.environ.get', 
(['"""KUBERNETES_POD_NAME"""'], {}), "('KUBERNETES_POD_NAME')\n", (15171, 15194), False, 'import os\n'), ((16473, 16493), 'elasticapm.utils.cloud.aws_metadata', 'cloud.aws_metadata', ([], {}), '()\n', (16491, 16493), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((17862, 17963), 're.sub', 're.sub', (['"""[^\\\\t _\\\\x21-\\\\x27\\\\x2a-\\\\x5b\\\\x5d-\\\\x7e\\\\x80-\\\\xff]"""', '"""_"""', 'self.config.service_version'], {}), "('[^\\\\t _\\\\x21-\\\\x27\\\\x2a-\\\\x5b\\\\x5d-\\\\x7e\\\\x80-\\\\xff]', '_', self.\n config.service_version)\n", (17868, 17963), False, 'import re\n'), ((18554, 18601), 'elasticapm.utils.encoding.enforce_label_format', 'enforce_label_format', (['self.config.global_labels'], {}), '(self.config.global_labels)\n', (18574, 18601), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((19016, 19045), 'copy.deepcopy', 'deepcopy', (['transaction.context'], {}), '(transaction.context)\n', (19024, 19045), False, 'from copy import deepcopy\n'), ((19213, 19340), 'warnings.warn', 'warnings.warn', (['"""The date argument is no longer evaluated and will be removed in a future release"""', 'DeprecationWarning'], {}), "(\n 'The date argument is no longer evaluated and will be removed in a future release'\n , DeprecationWarning)\n", (19226, 19340), False, 'import warnings\n'), ((19745, 19773), 'copy.deepcopy', 'deepcopy', (['transaction.labels'], {}), '(transaction.labels)\n', (19753, 19773), False, 'from copy import deepcopy\n'), ((21610, 21706), 'elasticapm.utils.stacks.get_culprit', 'stacks.get_culprit', (["log['stacktrace']", 'self.config.include_paths', 'self.config.exclude_paths'], {}), "(log['stacktrace'], self.config.include_paths, self.\n config.exclude_paths)\n", (21628, 21706), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((25679, 25877), 'warnings.warn', 'warnings.warn', (['"""The Elastic APM agent will stop supporting Python 2.7 starting in 6.0.0 -- Please upgrade to Python 3.5+ to continue to use the latest features."""', 'PendingDeprecationWarning'], {}), "(\n 'The Elastic APM agent will stop supporting Python 2.7 starting in 6.0.0 -- Please upgrade to Python 3.5+ to continue to use the latest features.'\n , PendingDeprecationWarning)\n", (25692, 25877), False, 'import warnings\n'), ((5114, 5164), 'elasticapm.handlers.logging.log_record_factory', 'elastic_logging.log_record_factory', (['record_factory'], {}), '(record_factory)\n', (5148, 5164), True, 'import elasticapm.handlers.logging as elastic_logging\n'), ((5181, 5221), 'logging.setLogRecordFactory', 'logging.setLogRecordFactory', (['new_factory'], {}), '(new_factory)\n', (5208, 5221), False, 'import logging\n'), ((14065, 14095), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['runtime_version'], {}), '(runtime_version)\n', (14078, 14095), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((14221, 14262), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.framework_name'], {}), '(self.config.framework_name)\n', (14234, 14262), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((14291, 14335), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.framework_version'], {}), '(self.config.framework_version)\n', (14304, 14335), False, 'from elasticapm.utils.encoding import 
enforce_label_format, keyword_field, shorten, transform\n'), ((14442, 14486), 'elasticapm.utils.encoding.keyword_field', 'keyword_field', (['self.config.service_node_name'], {}), '(self.config.service_node_name)\n', (14455, 14486), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n'), ((14648, 14660), 'os.getppid', 'os.getppid', ([], {}), '()\n', (14658, 14660), False, 'import os\n'), ((16705, 16725), 'elasticapm.utils.cloud.gcp_metadata', 'cloud.gcp_metadata', ([], {}), '()\n', (16723, 16725), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((20513, 20565), 'elasticapm.utils.stacks.iter_stack_frames', 'stacks.iter_stack_frames', ([], {'skip': '(3)', 'config': 'self.config'}), '(skip=3, config=self.config)\n', (20537, 20565), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((25233, 25252), 'elasticapm.utils.module_import.import_string', 'import_string', (['path'], {}), '(path)\n', (25246, 25252), False, 'from elasticapm.utils.module_import import import_string\n'), ((26015, 26103), 'warnings.warn', 'warnings.warn', (['"""The Elastic APM agent only supports Python 3.5+"""', 'DeprecationWarning'], {}), "('The Elastic APM agent only supports Python 3.5+',\n DeprecationWarning)\n", (26028, 26103), False, 'import warnings\n'), ((13911, 13936), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (13934, 13936), False, 'import platform\n'), ((14003, 14035), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (14033, 14035), False, 'import platform\n'), ((15034, 15051), 'platform.system', 'platform.system', ([], {}), '()\n', (15049, 15051), False, 'import platform\n'), ((16939, 16961), 'elasticapm.utils.cloud.azure_metadata', 'cloud.azure_metadata', ([], {}), '()\n', (16959, 16961), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((21808, 21842), 'logging.getLevelName', 'logging.getLevelName', (["log['level']"], {}), "(log['level'])\n", (21828, 21842), False, 'import logging\n'), ((25605, 25636), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (25634, 25636), False, 'import platform\n'), ((17248, 17268), 'elasticapm.utils.cloud.aws_metadata', 'cloud.aws_metadata', ([], {}), '()\n', (17266, 17268), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((17337, 17357), 'elasticapm.utils.cloud.gcp_metadata', 'cloud.gcp_metadata', ([], {}), '()\n', (17355, 17357), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((17426, 17448), 'elasticapm.utils.cloud.azure_metadata', 'cloud.azure_metadata', ([], {}), '()\n', (17446, 17448), False, 'from elasticapm.utils import cgroup, cloud, compat, is_master_process, stacks, varmap\n'), ((6944, 6966), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (6964, 6966), False, 'import inspect\n'), ((21155, 21325), 'elasticapm.utils.encoding.shorten', 'shorten', (['v'], {'list_length': 'self.config.local_var_list_max_length', 'string_length': 'self.config.local_var_max_length', 'dict_length': 'self.config.local_var_dict_max_length'}), '(v, list_length=self.config.local_var_list_max_length, string_length\n =self.config.local_var_max_length, dict_length=self.config.\n local_var_dict_max_length)\n', (21162, 21325), False, 'from elasticapm.utils.encoding import 
enforce_label_format, keyword_field, shorten, transform\n'), ((7521, 7691), 'elasticapm.utils.encoding.shorten', 'shorten', (['v'], {'list_length': 'self.config.local_var_list_max_length', 'string_length': 'self.config.local_var_max_length', 'dict_length': 'self.config.local_var_dict_max_length'}), '(v, list_length=self.config.local_var_list_max_length, string_length\n =self.config.local_var_max_length, dict_length=self.config.\n local_var_dict_max_length)\n', (7528, 7691), False, 'from elasticapm.utils.encoding import enforce_label_format, keyword_field, shorten, transform\n')]
|
from .client import PingboardClient
__version__ = '0.0.6'
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('pyngboard').addHandler(NullHandler())
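
# --- Editor's note (not in the original file): the NullHandler above keeps the
# library silent by default, per the usual library-logging convention. A
# consuming application that wants to see pyngboard's log output can opt in,
# e.g. (handler/level choices are only an example):
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)
#     logging.getLogger('pyngboard').setLevel(logging.DEBUG)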
|
[
"logging.getLogger",
"logging.NullHandler"
] |
[((292, 305), 'logging.NullHandler', 'NullHandler', ([], {}), '()\n', (303, 305), False, 'from logging import NullHandler\n'), ((250, 280), 'logging.getLogger', 'logging.getLogger', (['"""pyngboard"""'], {}), "('pyngboard')\n", (267, 280), False, 'import logging\n')]
|
import chainer
from chainer.dataset import dataset_mixin
class Cifar10Dataset(dataset_mixin.DatasetMixin):
def __init__(self, split='train'):
x_train, x_test = chainer.datasets.get_cifar10(ndim=3, withlabel=False,
scale=1.0)
if split == 'train':
self.imgs = x_train
        elif split == 'test':
            self.imgs = x_test
        else:
            raise ValueError("split must be 'train' or 'test', got {!r}".format(split))
self.imgs = self.imgs * 2 - 1.0 # [0, 1] to [-1.0, 1.0]
def __len__(self):
return len(self.imgs)
def get_example(self, index):
return self.imgs[index]
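

# --- Editor's sketch (not part of the original file): how this dataset is
# typically consumed; assumes chainer is installed and the CIFAR-10 archive can
# be downloaded or is already cached by chainer.datasets.get_cifar10.
if __name__ == "__main__":
    dataset = Cifar10Dataset(split="test")
    img = dataset.get_example(0)
    # the test split holds 10000 images of shape (3, 32, 32), rescaled to [-1, 1]
    print(len(dataset), img.shape, img.min(), img.max())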
|
[
"chainer.datasets.get_cifar10"
] |
[((174, 238), 'chainer.datasets.get_cifar10', 'chainer.datasets.get_cifar10', ([], {'ndim': '(3)', 'withlabel': '(False)', 'scale': '(1.0)'}), '(ndim=3, withlabel=False, scale=1.0)\n', (202, 238), False, 'import chainer\n')]
|
# Copyright (c) 2012--2014 King's College London
# Created by the Software Development Team <http://soft-dev.org/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from .production import Production
from grammar_parser.gparser import Terminal, Nonterminal, Epsilon
from .constants import LR0, LR1, LALR
class SyntaxTableElement(object):
def __init__(self, action):
self.action = action
def __eq__(self, other):
return self.action == other.action
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.action)
class FinishSymbol(object):
def __init__(self, name="eos"):
self.name = name
def __eq__(self, other):
return isinstance(other, FinishSymbol)
def __hash__(self):
        # XXX hack: may cause errors if the grammar contains a symbol with the same representation
return hash("FinishSymbol(%s)" % (self.name))
def __repr__(self):
return "$(%s)" % self.name
class Goto(SyntaxTableElement): pass
class Shift(SyntaxTableElement): pass
class Reduce(SyntaxTableElement):
def __init__(self, action):
self.action = action
def amount(self):
if len(self.action.right) > 0 and self.action.right[-1] == Terminal("<eos>"):
return len(self.action.right) - 1
if self.action.right == [Epsilon()]:
return 0
return len(self.action.right)
class Accept(SyntaxTableElement):
def __init__(self, action=None):
self.action = None
class SyntaxTable(object):
def __init__(self, prod_ids, lr_type=LR0):
self.lr_type = lr_type
self.prod_ids = prod_ids
def build(self, graph, precedences=[]):
self.table = [{} for _ in range(len(graph.state_sets))]
symbols = graph.get_symbols()
symbols.add(FinishSymbol())
for i in range(len(graph.state_sets)):
# accept, reduce
state_set = graph.get_state_set(i)
for state in state_set.elements:
if state.isfinal():
if state.p.left is None:
self.table[i][FinishSymbol()] = Accept()
else:
if self.lr_type in [LR1, LALR]:
lookahead = state_set.lookaheads[state]
else:
lookahead = symbols
for s in lookahead:
newaction = Reduce(state.p)
if s in self.table[i]:
oldaction = self.table[i][s]
newaction = self.resolve_conflict(i, s, oldaction, newaction, precedences)
if newaction:
self.table[i][s] = newaction
else:
del self.table[i][s]
# shift, goto
for s in symbols:
dest = graph.follow(i, s)
if dest:
if isinstance(s, Terminal):
action = Shift(dest)
if isinstance(s, Nonterminal):
action = Goto(dest)
if s in self.table[i]:
action = self.resolve_conflict(i, s, self.table[i][s], action, precedences)
if action:
self.table[i][s] = action
else:
del self.table[i][s]
def resolve_conflict(self, state, symbol, oldaction, newaction, precedences):
# input: old_action, lookup_symbol, new_action
# return: action/error
# shift/reduce or reduce/shift
# get precedence and associativity
newassoc = self.find_assoc(symbol, precedences)
if oldaction.action.prec:
# old production has a precedence attached to it
symbol = Terminal(oldaction.action.prec)
oldassoc = self.find_assoc(symbol, precedences)
else:
# otherwise use precedence from last terminal in production body
prev_terminal = self.get_last_terminal(oldaction)
oldassoc = self.find_assoc(prev_terminal, precedences)
# if oldaction and lookup symbol have precedences & associativity
# and conflict is shift/reduce
if oldassoc and newassoc and not self.is_reduce_reduce(oldaction, newaction):
if oldassoc[1] > newassoc[1]:
# previous action has higher precedence -> do nothing
return oldaction
elif oldassoc[1] < newassoc[1]:
                # previous action has lower precedence -> override action
return newaction
else:
# both precedences are equal, use associativity
if newassoc[0] == "%left":
# left binding -> reduce
return self.get_reduce(oldaction, newaction)
elif newassoc[0] == "%right":
# right binding -> shift
return self.get_shift(oldaction, newaction)
elif newassoc[0] == "%nonassoc":
# parsing error
return None
else:
# use built in fixes and print warning
# shift/reduce: shift
# reduce/reduce: use earlier reduce
if self.is_reduce_reduce(oldaction, newaction):
if self.prod_ids:
action = oldaction if self.prod_ids[oldaction.action] < self.prod_ids[newaction.action] else newaction
else:
action = oldaction
print("Warning: Reduce/Reduce conflict in state %s with %s: %s vs. %s => Solved in favour of %s." % (state, symbol, oldaction, newaction, action))
return action
else:
print("Warning: Shift/Reduce conflict in state %s with %s: %s vs. %s => Solved by shift." % (state, symbol, oldaction, newaction))
return self.get_shift(oldaction, newaction)
print("Error: Shift/Reduce conflict in state %s with %s: %s vs. %s => Unsolved!" % (state, symbol, oldaction, newaction))
def is_reduce_reduce(self, a1, a2):
return isinstance(a1, Reduce) and isinstance(a2, Reduce)
def get_reduce(self, a1, a2):
if isinstance(a1, Reduce):
return a1
assert isinstance(a2, Reduce)
return a2
def get_shift(self, a1, a2):
if isinstance(a1, Shift):
return a1
assert isinstance(a2, Shift)
return a2
def find_assoc(self, symbol, precedences):
if not symbol:
return None
i = 0
for p in precedences:
name, terminals = p
if symbol.name in terminals:
return (name, i)
i += 1
def get_last_terminal(self, rule):
for symbol in reversed(rule.action.right):
if isinstance(symbol, Terminal):
return symbol
return None
def lookup(self, state_id, symbol):
try:
return self.table[state_id][symbol]
except KeyError:
return None
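
# --- Editor's note (not part of the original source): the `precedences`
# argument passed to build()/resolve_conflict() above is assumed to be an
# ordered list of (associativity, [terminal names]) pairs, lowest precedence
# first, mirroring yacc-style declarations, e.g.:
#
#     precedences = [("%nonassoc", ["<", ">"]),
#                    ("%left", ["+", "-"]),
#                    ("%left", ["*", "/"])]
#
# find_assoc() returns (associativity, index) for a terminal, and
# resolve_conflict() compares the indices to choose between shift and reduce,
# falling back to associativity when the precedences are equal.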
|
[
"grammar_parser.gparser.Terminal",
"grammar_parser.gparser.Epsilon"
] |
[((4883, 4914), 'grammar_parser.gparser.Terminal', 'Terminal', (['oldaction.action.prec'], {}), '(oldaction.action.prec)\n', (4891, 4914), False, 'from grammar_parser.gparser import Terminal, Nonterminal, Epsilon\n'), ((2211, 2228), 'grammar_parser.gparser.Terminal', 'Terminal', (['"""<eos>"""'], {}), "('<eos>')\n", (2219, 2228), False, 'from grammar_parser.gparser import Terminal, Nonterminal, Epsilon\n'), ((2309, 2318), 'grammar_parser.gparser.Epsilon', 'Epsilon', ([], {}), '()\n', (2316, 2318), False, 'from grammar_parser.gparser import Terminal, Nonterminal, Epsilon\n')]
|
from django.db import models
# Create your models here.
from datetime import datetime
__doc__ = """
Stores file paths
History records
Task information
Note: write operations on a Model in its Manager; do not put complex operations in the views
"""
class Host(models.Model):
ip = models.GenericIPAddressField(
'IP地址',
null=False,
blank=False,
default='127.0.0.1'
)
server = models.URLField(
'域名',
null=False,
blank=False,
default=""
)
gmt_create = models.DateTimeField(
"创建时间",
null=False,
auto_now_add=True
)
gmt_modified = models.DateTimeField(
'修改时间',
null=False,
auto_now=True,
)
class Meta:
db_table = 'jmeter_host'
ordering = ['-gmt_modified']
default_permissions = ('add', 'change')
class AbstractTask(models.Model):
name = models.CharField(
'任务名',
max_length=20,
blank=False,
null=False,
default=""
)
run_time = models.DateTimeField(
'执行时间',
null=False,
blank=False
)
loops = models.SmallIntegerField(
'循环次数',
null=False,
blank=False,
default=1
)
num_threads = models.PositiveIntegerField(
'线程数',
null=False,
blank=False,
default=1
)
scheduler = models.BooleanField(
'调度器',
null=False,
blank=False,
default=False
)
duration = models.PositiveIntegerField(
'持续时间',
null=False,
blank=False,
default=0
)
class Meta:
abstract = True
class Task(AbstractTask):
"""
"""
status = models.BooleanField(
'任务状态',
null=False,
blank=True,
default=True,
)
jmx_file = models.FilePathField(
null=False,
blank=False,
default=""
)
task_start_time = models.DateTimeField(
'任务开始时间',
null=False,
blank=False,
default="1970-01-01T00:00"
)
task_end_time = models.DateTimeField(
'任务结束时间',
null=False,
blank=False,
default="1970-01-01T00:00"
)
gmt_create = models.DateTimeField(
"创建时间",
null=False,
auto_now_add=True,
)
gmt_modified = models.DateTimeField(
'修改时间',
null=False,
auto_now=True,
)
class Meta:
db_table = 'jmeter_task'
ordering = ['-gmt_modified']
default_permissions = ('add', 'change')
permissions = (("can_run_task", "执行性能测试任务"),)
class TaskResult(AbstractTask):
"""
"""
jmx_file = models.FilePathField(
null=False,
blank=False,
default=""
)
data_files_id = models.CharField(
max_length=100,
null=False,
blank=False,
default=""
)
    # Success or failure
status = models.BooleanField(
'状态',
null=False,
blank=False
)
machines_id = models.CharField(
max_length=100,
null=False,
blank=False,
default=""
)
gmt_create = models.DateTimeField(
null=False,
auto_now_add=True
)
gmt_modified = models.DateTimeField(
null=False,
auto_now=True
)
class Meta:
db_table = 'jmeter_task_result'
ordering = ['-gmt_modified']
default_permissions = ('add', 'change')
class Files(models.Model):
"""
"""
name = models.CharField(
"文件名",
max_length=50,
null=False,
blank=False,
unique=True,
default=""
)
    # Status: 0 unknown, 1 does not exist, 2 exists
status = models.BooleanField(
blank=False,
null=False,
default=True
)
file_path = models.FilePathField(
'文件',
null=False,
blank=False,
)
task_data_file = models.ForeignKey(
Task,
related_name='task_data_file',
on_delete=models.CASCADE,
db_constraint=False,
null=False,
blank=False,
default=""
)
class Machine(models.Model):
"""
"""
name = models.CharField(
"机器名",
max_length=20,
null=False,
blank=False,
default=""
)
port = models.PositiveIntegerField(
'机器端口',
blank=False,
null=False,
default=22
)
ip = models.GenericIPAddressField(
'IP地址',
blank=False,
null=False,
default="127.0.0.1"
)
password = models.CharField(
'password',
max_length=50,
blank=False,
null=False,
default=""
)
task = models.ForeignKey(
Task,
related_name='machines',
on_delete=models.CASCADE,
db_constraint=False,
null=False,
blank=False,
default="",
verbose_name='任务'
)
# secret_key = models.FileField(
# '秘钥文件',
# null=False,
# blank=True
# )
status = models.BooleanField(
'状态, 离线/在线',
blank=False,
null=False,
default=0
)
is_slave = models.BooleanField(
'是否是从机器, 只允许一个主机器',
blank=False,
null=False,
default=False
)
# host = models.ForeignKey(
# Host,
# on_delete=models.CASCADE,
# db_constraint=False,
# null=False,
# blank=False
# )
gmt_create = models.DateTimeField(
"创建时间",
null=False,
auto_now_add=True,
)
gmt_modified = models.DateTimeField(
'修改时间',
null=False,
auto_now=True
)
def __str__(self):
return self.name
class Meta:
db_table = 'jmeter_machine'
ordering = ['-gmt_modified']
default_permissions = ('add', 'change')
class Config(models.Model):
"""
    Configuration
"""
jmeter_report_path = models.FilePathField(
'报告存放路径',
null=False,
blank=False,
default=""
)
jmeter_path = models.FilePathField(
'JMeter存放路径',
null=False,
blank=False,
default=""
)
jtl_path = models.FilePathField(
'Jtl文件存放路径',
null=False,
blank=False,
default=""
)
gmt_create = models.DateTimeField(
"创建时间",
null=False,
auto_now_add=True
)
gmt_modified = models.DateTimeField(
'修改时间',
null=False,
auto_now=True
)
class Meta:
db_table = 'jmeter_config'
ordering = ['-gmt_modified']
default_permissions = ('add', 'change')
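
# --- Editor's sketch (not part of the original file): typical ORM usage of the
# models above, assuming the app is installed and migrated; all field values
# are made up.
#
#     from django.utils import timezone
#     task = Task.objects.create(name="smoke", run_time=timezone.now(),
#                                num_threads=10, duration=60)
#     Machine.objects.create(name="slave-1", ip="10.0.0.2", port=22,
#                            password="***", task=task, is_slave=True)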
|
[
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.BooleanField",
"django.db.models.GenericIPAddressField",
"django.db.models.SmallIntegerField",
"django.db.models.DateTimeField",
"django.db.models.FilePathField"
] |
[((205, 292), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', (['"""IP地址"""'], {'null': '(False)', 'blank': '(False)', 'default': '"""127.0.0.1"""'}), "('IP地址', null=False, blank=False, default=\n '127.0.0.1')\n", (233, 292), False, 'from django.db import models\n'), ((340, 398), 'django.db.models.URLField', 'models.URLField', (['"""域名"""'], {'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "('域名', null=False, blank=False, default='')\n", (355, 398), False, 'from django.db import models\n'), ((455, 514), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'null': '(False)', 'auto_now_add': '(True)'}), "('创建时间', null=False, auto_now_add=True)\n", (475, 514), False, 'from django.db import models\n'), ((564, 619), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""修改时间"""'], {'null': '(False)', 'auto_now': '(True)'}), "('修改时间', null=False, auto_now=True)\n", (584, 619), False, 'from django.db import models\n'), ((836, 911), 'django.db.models.CharField', 'models.CharField', (['"""任务名"""'], {'max_length': '(20)', 'blank': '(False)', 'null': '(False)', 'default': '""""""'}), "('任务名', max_length=20, blank=False, null=False, default='')\n", (852, 911), False, 'from django.db import models\n'), ((973, 1026), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""执行时间"""'], {'null': '(False)', 'blank': '(False)'}), "('执行时间', null=False, blank=False)\n", (993, 1026), False, 'from django.db import models\n'), ((1069, 1137), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', (['"""循环次数"""'], {'null': '(False)', 'blank': '(False)', 'default': '(1)'}), "('循环次数', null=False, blank=False, default=1)\n", (1093, 1137), False, 'from django.db import models\n'), ((1194, 1264), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (['"""线程数"""'], {'null': '(False)', 'blank': '(False)', 'default': '(1)'}), "('线程数', null=False, blank=False, default=1)\n", (1221, 1264), False, 'from django.db import models\n'), ((1320, 1386), 'django.db.models.BooleanField', 'models.BooleanField', (['"""调度器"""'], {'null': '(False)', 'blank': '(False)', 'default': '(False)'}), "('调度器', null=False, blank=False, default=False)\n", (1339, 1386), False, 'from django.db import models\n'), ((1441, 1512), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (['"""持续时间"""'], {'null': '(False)', 'blank': '(False)', 'default': '(0)'}), "('持续时间', null=False, blank=False, default=0)\n", (1468, 1512), False, 'from django.db import models\n'), ((1662, 1727), 'django.db.models.BooleanField', 'models.BooleanField', (['"""任务状态"""'], {'null': '(False)', 'blank': '(True)', 'default': '(True)'}), "('任务状态', null=False, blank=True, default=True)\n", (1681, 1727), False, 'from django.db import models\n'), ((1783, 1840), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "(null=False, blank=False, default='')\n", (1803, 1840), False, 'from django.db import models\n'), ((1894, 1982), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""任务开始时间"""'], {'null': '(False)', 'blank': '(False)', 'default': '"""1970-01-01T00:00"""'}), "('任务开始时间', null=False, blank=False, default=\n '1970-01-01T00:00')\n", (1914, 1982), False, 'from django.db import models\n'), ((2036, 2124), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""任务结束时间"""'], {'null': '(False)', 'blank': '(False)', 'default': '"""1970-01-01T00:00"""'}), "('任务结束时间', 
null=False, blank=False, default=\n '1970-01-01T00:00')\n", (2056, 2124), False, 'from django.db import models\n'), ((2176, 2235), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'null': '(False)', 'auto_now_add': '(True)'}), "('创建时间', null=False, auto_now_add=True)\n", (2196, 2235), False, 'from django.db import models\n'), ((2286, 2341), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""修改时间"""'], {'null': '(False)', 'auto_now': '(True)'}), "('修改时间', null=False, auto_now=True)\n", (2306, 2341), False, 'from django.db import models\n'), ((2639, 2696), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "(null=False, blank=False, default='')\n", (2659, 2696), False, 'from django.db import models\n'), ((2748, 2817), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "(max_length=100, null=False, blank=False, default='')\n", (2764, 2817), False, 'from django.db import models\n'), ((2884, 2934), 'django.db.models.BooleanField', 'models.BooleanField', (['"""状态"""'], {'null': '(False)', 'blank': '(False)'}), "('状态', null=False, blank=False)\n", (2903, 2934), False, 'from django.db import models\n'), ((2984, 3053), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "(max_length=100, null=False, blank=False, default='')\n", (3000, 3053), False, 'from django.db import models\n'), ((3110, 3161), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(False)', 'auto_now_add': '(True)'}), '(null=False, auto_now_add=True)\n', (3130, 3161), False, 'from django.db import models\n'), ((3203, 3250), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(False)', 'auto_now': '(True)'}), '(null=False, auto_now=True)\n', (3223, 3250), False, 'from django.db import models\n'), ((3471, 3563), 'django.db.models.CharField', 'models.CharField', (['"""文件名"""'], {'max_length': '(50)', 'null': '(False)', 'blank': '(False)', 'unique': '(True)', 'default': '""""""'}), "('文件名', max_length=50, null=False, blank=False, unique=True,\n default='')\n", (3487, 3563), False, 'from django.db import models\n'), ((3654, 3712), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(False)', 'null': '(False)', 'default': '(True)'}), '(blank=False, null=False, default=True)\n', (3673, 3712), False, 'from django.db import models\n'), ((3760, 3811), 'django.db.models.FilePathField', 'models.FilePathField', (['"""文件"""'], {'null': '(False)', 'blank': '(False)'}), "('文件', null=False, blank=False)\n", (3780, 3811), False, 'from django.db import models\n'), ((3865, 4008), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Task'], {'related_name': '"""task_data_file"""', 'on_delete': 'models.CASCADE', 'db_constraint': '(False)', 'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "(Task, related_name='task_data_file', on_delete=models.\n CASCADE, db_constraint=False, null=False, blank=False, default='')\n", (3882, 4008), False, 'from django.db import models\n'), ((4123, 4198), 'django.db.models.CharField', 'models.CharField', (['"""机器名"""'], {'max_length': '(20)', 'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "('机器名', max_length=20, null=False, blank=False, default='')\n", (4139, 4198), False, 'from django.db import models\n'), ((4256, 4328), 
'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (['"""机器端口"""'], {'blank': '(False)', 'null': '(False)', 'default': '(22)'}), "('机器端口', blank=False, null=False, default=22)\n", (4283, 4328), False, 'from django.db import models\n'), ((4376, 4463), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', (['"""IP地址"""'], {'blank': '(False)', 'null': '(False)', 'default': '"""127.0.0.1"""'}), "('IP地址', blank=False, null=False, default=\n '127.0.0.1')\n", (4404, 4463), False, 'from django.db import models\n'), ((4512, 4597), 'django.db.models.CharField', 'models.CharField', (['"""password"""'], {'max_length': '(50)', 'blank': '(False)', 'null': '(False)', 'default': '""""""'}), "('password', max_length=50, blank=False, null=False, default=''\n )\n", (4528, 4597), False, 'from django.db import models\n'), ((4652, 4812), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Task'], {'related_name': '"""machines"""', 'on_delete': 'models.CASCADE', 'db_constraint': '(False)', 'null': '(False)', 'blank': '(False)', 'default': '""""""', 'verbose_name': '"""任务"""'}), "(Task, related_name='machines', on_delete=models.CASCADE,\n db_constraint=False, null=False, blank=False, default='', verbose_name='任务'\n )\n", (4669, 4812), False, 'from django.db import models\n'), ((4995, 5063), 'django.db.models.BooleanField', 'models.BooleanField', (['"""状态, 离线/在线"""'], {'blank': '(False)', 'null': '(False)', 'default': '(0)'}), "('状态, 离线/在线', blank=False, null=False, default=0)\n", (5014, 5063), False, 'from django.db import models\n'), ((5119, 5198), 'django.db.models.BooleanField', 'models.BooleanField', (['"""是否是从机器, 只允许一个主机器"""'], {'blank': '(False)', 'null': '(False)', 'default': '(False)'}), "('是否是从机器, 只允许一个主机器', blank=False, null=False, default=False)\n", (5138, 5198), False, 'from django.db import models\n'), ((5423, 5482), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'null': '(False)', 'auto_now_add': '(True)'}), "('创建时间', null=False, auto_now_add=True)\n", (5443, 5482), False, 'from django.db import models\n'), ((5533, 5588), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""修改时间"""'], {'null': '(False)', 'auto_now': '(True)'}), "('修改时间', null=False, auto_now=True)\n", (5553, 5588), False, 'from django.db import models\n'), ((5884, 5951), 'django.db.models.FilePathField', 'models.FilePathField', (['"""报告存放路径"""'], {'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "('报告存放路径', null=False, blank=False, default='')\n", (5904, 5951), False, 'from django.db import models\n'), ((6008, 6079), 'django.db.models.FilePathField', 'models.FilePathField', (['"""JMeter存放路径"""'], {'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "('JMeter存放路径', null=False, blank=False, default='')\n", (6028, 6079), False, 'from django.db import models\n'), ((6133, 6203), 'django.db.models.FilePathField', 'models.FilePathField', (['"""Jtl文件存放路径"""'], {'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "('Jtl文件存放路径', null=False, blank=False, default='')\n", (6153, 6203), False, 'from django.db import models\n'), ((6260, 6319), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'null': '(False)', 'auto_now_add': '(True)'}), "('创建时间', null=False, auto_now_add=True)\n", (6280, 6319), False, 'from django.db import models\n'), ((6369, 6424), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""修改时间"""'], {'null': '(False)', 'auto_now': '(True)'}), "('修改时间', null=False, 
auto_now=True)\n", (6389, 6424), False, 'from django.db import models\n')]
|
import json
import os
from constants import DATA_PATH
from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory
PARTITIONS = ['train', 'valid', 'test']
def get_data_splits(comment_type_str=None, ignore_ast=False):
"""Retrieves train/validation/test sets for the given comment_type_str.
comment_type_str -- Return, Param, Summary, or None (if None, uses all comment types)
ignore_ast -- Skip loading ASTs (they take a long time)"""
dataset, high_level_details = load_processed_data(comment_type_str, ignore_ast)
train_examples = dataset['train']
valid_examples = dataset['valid']
test_examples = dataset['test']
return train_examples, valid_examples, test_examples, high_level_details
def load_cleaned_test_set(comment_type_str=None):
"""Retrieves the ids corresponding to clean examples, for the given comment_type_str.
comment_type_str -- Return, Param, Summary, or None (if None, uses all comment types)"""
if not comment_type_str:
comment_types = [CommentCategory(category).name for category in CommentCategory]
else:
comment_types = [comment_type_str]
test_ids = []
for comment_type in comment_types:
resources_path = os.path.join(DATA_PATH, 'resources', comment_type, 'clean_test_ids.json')
with open(resources_path) as f:
test_ids.extend(json.load(f))
return test_ids
def load_processed_data(comment_type_str, ignore_ast):
"""Processes saved data for the given comment_type_str.
comment_type_str -- Return, Param, Summary, or None (if None, uses all comment types)
ignore_ast -- Skip loading ASTs (they take a long time)"""
if not comment_type_str:
comment_types = [CommentCategory(category).name for category in CommentCategory]
else:
comment_types = [comment_type_str]
print('Loading data from: {}'.format(comment_types))
dataset = dict()
high_level_details = dict()
for comment_type in comment_types:
path = os.path.join(DATA_PATH, comment_type)
loaded = load_raw_data_from_path(path)
category_high_level_details_path = os.path.join(DATA_PATH, 'resources', comment_type, 'high_level_details.json')
with open(category_high_level_details_path) as f:
category_high_level_details = json.load(f)
high_level_details.update(category_high_level_details)
if not ignore_ast:
ast_path = os.path.join(DATA_PATH, 'resources', comment_type, 'ast_objs.json')
with open(ast_path) as f:
ast_details = json.load(f)
for partition, examples in loaded.items():
if partition not in dataset:
dataset[partition] = []
if ignore_ast:
dataset[partition].extend(examples)
else:
for ex in examples:
ex_ast_info = ast_details[ex.id]
old_ast = DiffAST.from_json(ex_ast_info['old_ast'])
new_ast = DiffAST.from_json(ex_ast_info['new_ast'])
diff_ast = DiffAST.from_json(ex_ast_info['diff_ast'])
ast_ex = DiffASTExample(ex.id, ex.label, ex.comment_type, ex.old_comment_raw,
ex.old_comment_subtokens, ex.new_comment_raw, ex.new_comment_subtokens, ex.span_minimal_diff_comment_subtokens,
ex.old_code_raw, ex.old_code_subtokens, ex.new_code_raw, ex.new_code_subtokens,
ex.span_diff_code_subtokens, ex.token_diff_code_subtokens, old_ast, new_ast, diff_ast)
dataset[partition].append(ast_ex)
return dataset, high_level_details
def load_raw_data_from_path(path):
"""Reads saved partition-level data from a directory path"""
dataset = dict()
for partition in PARTITIONS:
dataset[partition] = []
dataset[partition].extend(read_diff_examples_from_file(os.path.join(path, '{}.json'.format(partition))))
return dataset
def read_diff_examples_from_file(filename):
"""Reads saved data from filename"""
with open(filename) as f:
data = json.load(f)
return [DiffExample(**d) for d in data]
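
# --- Editor's sketch (not part of the original module): how the loaders above
# are typically combined; assumes DATA_PATH points at the released data and
# 'Return' is one of the available comment categories.
#
#     train, valid, test, details = get_data_splits(comment_type_str='Return',
#                                                    ignore_ast=True)
#     clean_ids = set(load_cleaned_test_set('Return'))
#     clean_test = [ex for ex in test if ex.id in clean_ids]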
|
[
"json.load",
"data_utils.DiffAST.from_json",
"data_utils.DiffASTExample",
"data_utils.DiffExample",
"os.path.join",
"data_utils.CommentCategory"
] |
[((1240, 1313), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""resources"""', 'comment_type', '"""clean_test_ids.json"""'], {}), "(DATA_PATH, 'resources', comment_type, 'clean_test_ids.json')\n", (1252, 1313), False, 'import os\n'), ((2036, 2073), 'os.path.join', 'os.path.join', (['DATA_PATH', 'comment_type'], {}), '(DATA_PATH, comment_type)\n', (2048, 2073), False, 'import os\n'), ((2164, 2241), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""resources"""', 'comment_type', '"""high_level_details.json"""'], {}), "(DATA_PATH, 'resources', comment_type, 'high_level_details.json')\n", (2176, 2241), False, 'import os\n'), ((4187, 4199), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4196, 4199), False, 'import json\n'), ((4212, 4228), 'data_utils.DiffExample', 'DiffExample', ([], {}), '(**d)\n', (4223, 4228), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n'), ((2343, 2355), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2352, 2355), False, 'import json\n'), ((2471, 2538), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""resources"""', 'comment_type', '"""ast_objs.json"""'], {}), "(DATA_PATH, 'resources', comment_type, 'ast_objs.json')\n", (2483, 2538), False, 'import os\n'), ((1035, 1060), 'data_utils.CommentCategory', 'CommentCategory', (['category'], {}), '(category)\n', (1050, 1060), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n'), ((1382, 1394), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1391, 1394), False, 'import json\n'), ((1745, 1770), 'data_utils.CommentCategory', 'CommentCategory', (['category'], {}), '(category)\n', (1760, 1770), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n'), ((2607, 2619), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2616, 2619), False, 'import json\n'), ((2982, 3023), 'data_utils.DiffAST.from_json', 'DiffAST.from_json', (["ex_ast_info['old_ast']"], {}), "(ex_ast_info['old_ast'])\n", (2999, 3023), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n'), ((3054, 3095), 'data_utils.DiffAST.from_json', 'DiffAST.from_json', (["ex_ast_info['new_ast']"], {}), "(ex_ast_info['new_ast'])\n", (3071, 3095), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n'), ((3127, 3169), 'data_utils.DiffAST.from_json', 'DiffAST.from_json', (["ex_ast_info['diff_ast']"], {}), "(ex_ast_info['diff_ast'])\n", (3144, 3169), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n'), ((3200, 3571), 'data_utils.DiffASTExample', 'DiffASTExample', (['ex.id', 'ex.label', 'ex.comment_type', 'ex.old_comment_raw', 'ex.old_comment_subtokens', 'ex.new_comment_raw', 'ex.new_comment_subtokens', 'ex.span_minimal_diff_comment_subtokens', 'ex.old_code_raw', 'ex.old_code_subtokens', 'ex.new_code_raw', 'ex.new_code_subtokens', 'ex.span_diff_code_subtokens', 'ex.token_diff_code_subtokens', 'old_ast', 'new_ast', 'diff_ast'], {}), '(ex.id, ex.label, ex.comment_type, ex.old_comment_raw, ex.\n old_comment_subtokens, ex.new_comment_raw, ex.new_comment_subtokens, ex\n .span_minimal_diff_comment_subtokens, ex.old_code_raw, ex.\n old_code_subtokens, ex.new_code_raw, ex.new_code_subtokens, ex.\n span_diff_code_subtokens, ex.token_diff_code_subtokens, old_ast,\n new_ast, diff_ast)\n', (3214, 3571), False, 'from data_utils import DiffAST, DiffExample, DiffASTExample, CommentCategory\n')]
|
# Generated by Django 2.2.20 on 2021-06-03 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0009_graphitevariable_period'),
]
operations = [
migrations.AlterField(
model_name='graphitevariable',
name='period',
field=models.CharField(blank=True, default='', max_length=12),
),
]
|
[
"django.db.models.CharField"
] |
[((353, 408), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(12)'}), "(blank=True, default='', max_length=12)\n", (369, 408), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pymixer.py - wrapper for amixer command line tool
.. Created on 2017-07-05
.. Licence MIT
.. codeauthor:: <NAME> <<EMAIL>>, <EMAIL>
"""
import subprocess
import logging
class PyMixer(object):
"""
Wrapping class for linux command line ``amixer`` to get/set volume
**Example:**
.. code-block:: python
from pymixer import PyMixer
mixer = PyMixer()
# to set volume you can do just
mixer.set_volume(50)
# if you want to know current volume call
volume = mixer.get_volume()
"""
amixer = 'amixer' #: linux command
logger = None
def __init__(self):
self.logger = logging.getLogger("PyMixer")
def set_volume(self, value):
"""
Set volume to value (percent 0 - 100)
:param int value: volume to set (percent 0 - 100)
"""
if value < 0:
self.logger.debug("Norm value to: 0")
value = 0
if value > 100:
self.logger.debug("Norm value to: 100")
value = 100
volume = int((65536/100) * int(value))
subprocess.call([self.amixer, "set", "Master", "{}".format(volume)])
self.logger.info("Volume set to: {}%".format(value))
def get_volume(self):
"""
Get current level of volume
:return: percent of current volume
:rtype: int
"""
temp = subprocess.check_output([self.amixer, "get", "Master"], universal_newlines=True)
        pos = temp.find('[')
        # The fixed-width slice can include '%' and ']' (e.g. "[9%]"), which
        # makes int() fail for single-digit volumes, so keep only the digits
        # in front of the '%' sign.
        volume = temp[pos + 1:pos + 4].split('%')[0]
return int(volume)
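# Hedged usage sketch (added for illustration, not part of the original
# module); it assumes the `amixer` binary is available and exposes a
# "Master" control, as the class above already requires.
if __name__ == "__main__":
    mixer = PyMixer()
    current = mixer.get_volume()
    print("Current volume: {}%".format(current))
    mixer.set_volume(current)  # write the same value back (round-trip check)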
|
[
"subprocess.check_output",
"logging.getLogger"
] |
[((707, 735), 'logging.getLogger', 'logging.getLogger', (['"""PyMixer"""'], {}), "('PyMixer')\n", (724, 735), False, 'import logging\n'), ((1447, 1532), 'subprocess.check_output', 'subprocess.check_output', (["[self.amixer, 'get', 'Master']"], {'universal_newlines': '(True)'}), "([self.amixer, 'get', 'Master'], universal_newlines=True\n )\n", (1470, 1532), False, 'import subprocess\n')]
|
"""
Basic statistics module.
This module provides functions for calculating statistics of data, including
averages, variance, and standard deviation.
Calculating averages
--------------------
================== ==================================================
Function Description
================== ==================================================
mean Arithmetic mean (average) of data.
fmean Fast, floating point arithmetic mean.
geometric_mean Geometric mean of data.
harmonic_mean Harmonic mean of data.
median Median (middle value) of data.
median_low Low median of data.
median_high High median of data.
median_grouped Median, or 50th percentile, of grouped data.
mode Mode (most common value) of data.
multimode List of modes (most common values of data).
quantiles Divide data into intervals with equal probability.
================== ==================================================
Calculate the arithmetic mean ("the average") of data:
>>> mean([-1.0, 2.5, 3.25, 5.75])
2.625
Calculate the standard median of discrete data:
>>> median([2, 3, 4, 5])
3.5
Calculate the median, or 50th percentile, of data grouped into class intervals
centred on the data values provided. E.g. if your data points are rounded to
the nearest whole number:
>>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS
2.8333333333...
This should be interpreted in this way: you have two data points in the class
interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in
the class interval 3.5-4.5. The median of these data points is 2.8333...
Calculating variability or spread
---------------------------------
================== =============================================
Function Description
================== =============================================
pvariance Population variance of data.
variance Sample variance of data.
pstdev Population standard deviation of data.
stdev Sample standard deviation of data.
================== =============================================
Calculate the standard deviation of sample data:
>>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS
4.38961843444...
If you have previously calculated the mean, you can pass it as the optional
second argument to the four "spread" functions to avoid recalculating it:
>>> data = [1, 2, 2, 4, 4, 4, 5, 6]
>>> mu = mean(data)
>>> pvariance(data, mu)
2.5
Exceptions
----------
A single exception is defined: StatisticsError is a subclass of ValueError.
"""
__all__ = [
'NormalDist',
'StatisticsError',
'fmean',
'geometric_mean',
'harmonic_mean',
'mean',
'median',
'median_grouped',
'median_high',
'median_low',
'mode',
'multimode',
'pstdev',
'pvariance',
'quantiles',
'stdev',
'variance',
]
import math
import numbers
import random
from fractions import Fraction
from decimal import Decimal
from itertools import groupby
from bisect import bisect_left, bisect_right
from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum
from operator import itemgetter
from collections import Counter
# === Exceptions ===
class StatisticsError(ValueError):
pass
# === Private utilities ===
def _sum(data, start=0):
"""_sum(data [, start]) -> (type, sum, count)
Return a high-precision sum of the given numeric data as a fraction,
together with the type to be converted to and the count of items.
If optional argument ``start`` is given, it is added to the total.
If ``data`` is empty, ``start`` (defaulting to 0) is returned.
Examples
--------
>>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
(<class 'float'>, Fraction(11, 1), 5)
Some sources of round-off error will be avoided:
# Built-in sum returns zero.
>>> _sum([1e50, 1, -1e50] * 1000)
(<class 'float'>, Fraction(1000, 1), 3000)
Fractions and Decimals are also supported:
>>> from fractions import Fraction as F
>>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
(<class 'fractions.Fraction'>, Fraction(63, 20), 4)
>>> from decimal import Decimal as D
>>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
>>> _sum(data)
(<class 'decimal.Decimal'>, Fraction(6963, 10000), 4)
Mixed types are currently treated as an error, except that int is
allowed.
"""
count = 0
n, d = _exact_ratio(start)
partials = {d: n}
partials_get = partials.get
T = _coerce(int, type(start))
for typ, values in groupby(data, type):
T = _coerce(T, typ) # or raise TypeError
for n,d in map(_exact_ratio, values):
count += 1
partials[d] = partials_get(d, 0) + n
if None in partials:
# The sum will be a NAN or INF. We can ignore all the finite
# partials, and just look at this special one.
total = partials[None]
assert not _isfinite(total)
else:
# Sum all the partial sums using builtin sum.
# FIXME is this faster if we sum them in order of the denominator?
total = sum(Fraction(n, d) for d, n in sorted(partials.items()))
return (T, total, count)
def _isfinite(x):
try:
return x.is_finite() # Likely a Decimal.
except AttributeError:
return math.isfinite(x) # Coerces to float first.
def _coerce(T, S):
"""Coerce types T and S to a common type, or raise TypeError.
Coercion rules are currently an implementation detail. See the CoerceTest
test class in test_statistics for details.
"""
# See http://bugs.python.org/issue24068.
assert T is not bool, "initial type T is bool"
# If the types are the same, no need to coerce anything. Put this
# first, so that the usual case (no coercion needed) happens as soon
# as possible.
if T is S: return T
# Mixed int & other coerce to the other type.
if S is int or S is bool: return T
if T is int: return S
# If one is a (strict) subclass of the other, coerce to the subclass.
if issubclass(S, T): return S
if issubclass(T, S): return T
# Ints coerce to the other type.
if issubclass(T, int): return S
if issubclass(S, int): return T
# Mixed fraction & float coerces to float (or float subclass).
if issubclass(T, Fraction) and issubclass(S, float):
return S
if issubclass(T, float) and issubclass(S, Fraction):
return T
# Any other combination is disallowed.
msg = "don't know how to coerce %s and %s"
raise TypeError(msg % (T.__name__, S.__name__))
def _exact_ratio(x):
"""Return Real number x to exact (numerator, denominator) pair.
>>> _exact_ratio(0.25)
(1, 4)
x is expected to be an int, Fraction, Decimal or float.
"""
try:
# Optimise the common case of floats. We expect that the most often
# used numeric type will be builtin floats, so try to make this as
# fast as possible.
if type(x) is float or type(x) is Decimal:
return x.as_integer_ratio()
try:
# x may be an int, Fraction, or Integral ABC.
return (x.numerator, x.denominator)
except AttributeError:
try:
# x may be a float or Decimal subclass.
return x.as_integer_ratio()
except AttributeError:
# Just give up?
pass
except (OverflowError, ValueError):
# float NAN or INF.
assert not _isfinite(x)
return (x, None)
msg = "can't convert type '{}' to numerator/denominator"
raise TypeError(msg.format(type(x).__name__))
def _convert(value, T):
"""Convert value to given numeric type T."""
if type(value) is T:
# This covers the cases where T is Fraction, or where value is
# a NAN or INF (Decimal or float).
return value
if issubclass(T, int) and value.denominator != 1:
T = float
try:
# FIXME: what do we do if this overflows?
return T(value)
except TypeError:
if issubclass(T, Decimal):
return T(value.numerator)/T(value.denominator)
else:
raise
def _find_lteq(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
def _find_rteq(a, l, x):
'Locate the rightmost value exactly equal to x'
i = bisect_right(a, x, lo=l)
if i != (len(a)+1) and a[i-1] == x:
return i-1
raise ValueError
def _fail_neg(values, errmsg='negative value'):
"""Iterate over values, failing if any are less than zero."""
for x in values:
if x < 0:
raise StatisticsError(errmsg)
yield x
# === Measures of central tendency (averages) ===
def mean(data):
"""Return the sample arithmetic mean of data.
>>> mean([1, 2, 3, 4, 4])
2.8
>>> from fractions import Fraction as F
>>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])
Fraction(13, 21)
>>> from decimal import Decimal as D
>>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")])
Decimal('0.5625')
If ``data`` is empty, StatisticsError will be raised.
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
T, total, count = _sum(data)
assert count == n
return _convert(total/n, T)
def fmean(data):
"""Convert data to floats and compute the arithmetic mean.
This runs faster than the mean() function and it always returns a float.
The result is highly accurate but not as perfect as mean().
If the input dataset is empty, it raises a StatisticsError.
>>> fmean([3.5, 4.0, 5.25])
4.25
"""
try:
n = len(data)
except TypeError:
# Handle iterators that do not define __len__().
n = 0
def count(iterable):
nonlocal n
for n, x in enumerate(iterable, start=1):
yield x
total = fsum(count(data))
else:
total = fsum(data)
try:
return total / n
except ZeroDivisionError:
raise StatisticsError('fmean requires at least one data point') from None
def geometric_mean(data):
"""Convert data to floats and compute the geometric mean.
Raises a StatisticsError if the input dataset is empty,
if it contains a zero, or if it contains a negative value.
No special efforts are made to achieve exact results.
(However, this may change in the future.)
>>> round(geometric_mean([54, 24, 36]), 9)
36.0
"""
try:
return exp(fmean(map(log, data)))
except ValueError:
raise StatisticsError('geometric mean requires a non-empty dataset '
' containing positive numbers') from None
def harmonic_mean(data):
"""Return the harmonic mean of data.
The harmonic mean, sometimes called the subcontrary mean, is the
reciprocal of the arithmetic mean of the reciprocals of the data,
and is often appropriate when averaging quantities which are rates
or ratios, for example speeds. Example:
Suppose an investor purchases an equal value of shares in each of
three companies, with P/E (price/earning) ratios of 2.5, 3 and 10.
What is the average P/E ratio for the investor's portfolio?
>>> harmonic_mean([2.5, 3, 10]) # For an equal investment portfolio.
3.6
Using the arithmetic mean would give an average of about 5.167, which
is too high.
If ``data`` is empty, or any element is less than zero,
``harmonic_mean`` will raise ``StatisticsError``.
"""
# For a justification for using harmonic mean for P/E ratios, see
# http://fixthepitch.pellucid.com/comps-analysis-the-missing-harmony-of-summary-statistics/
# http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621087
if iter(data) is data:
data = list(data)
errmsg = 'harmonic mean does not support negative values'
n = len(data)
if n < 1:
raise StatisticsError('harmonic_mean requires at least one data point')
elif n == 1:
x = data[0]
if isinstance(x, (numbers.Real, Decimal)):
if x < 0:
raise StatisticsError(errmsg)
return x
else:
raise TypeError('unsupported type')
try:
T, total, count = _sum(1/x for x in _fail_neg(data, errmsg))
except ZeroDivisionError:
return 0
assert count == n
return _convert(n/total, T)
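# Worked arithmetic for the docstring example above (added for illustration):
# harmonic_mean([2.5, 3, 10]) = 3 / (1/2.5 + 1/3 + 1/10)
#                             = 3 / (2/5 + 1/3 + 1/10) = 3 / (5/6) = 3.6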
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
>>> median([1, 3, 5])
3
>>> median([1, 3, 5, 7])
4.0
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n%2 == 1:
return data[n//2]
else:
i = n//2
return (data[i - 1] + data[i])/2
def median_low(data):
"""Return the low median of numeric data.
When the number of data points is odd, the middle value is returned.
When it is even, the smaller of the two middle values is returned.
>>> median_low([1, 3, 5])
3
>>> median_low([1, 3, 5, 7])
3
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n%2 == 1:
return data[n//2]
else:
return data[n//2 - 1]
def median_high(data):
"""Return the high median of data.
When the number of data points is odd, the middle value is returned.
When it is even, the larger of the two middle values is returned.
>>> median_high([1, 3, 5])
3
>>> median_high([1, 3, 5, 7])
5
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
return data[n//2]
def median_grouped(data, interval=1):
"""Return the 50th percentile (median) of grouped continuous data.
>>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
3.7
>>> median_grouped([52, 52, 53, 54])
52.5
This calculates the median as the 50th percentile, and should be
used when your data is continuous and grouped. In the above example,
the values 1, 2, 3, etc. actually represent the midpoint of classes
0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in
class 3.5-4.5, and interpolation is used to estimate it.
Optional argument ``interval`` represents the class interval, and
defaults to 1. Changing the class interval naturally will change the
interpolated 50th percentile value:
>>> median_grouped([1, 3, 3, 5, 7], interval=1)
3.25
>>> median_grouped([1, 3, 3, 5, 7], interval=2)
3.5
This function does not check whether the data points are at least
``interval`` apart.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
elif n == 1:
return data[0]
# Find the value at the midpoint. Remember this corresponds to the
# centre of the class interval.
x = data[n//2]
for obj in (x, interval):
if isinstance(obj, (str, bytes)):
raise TypeError('expected number but got %r' % obj)
try:
L = x - interval/2 # The lower limit of the median interval.
except TypeError:
# Mixed type. For now we just coerce to float.
L = float(x) - float(interval)/2
# Uses bisection search to search for x in data with log(n) time complexity
# Find the position of leftmost occurrence of x in data
l1 = _find_lteq(data, x)
# Find the position of rightmost occurrence of x in data[l1...len(data)]
# Assuming always l1 <= l2
l2 = _find_rteq(data, l1, x)
cf = l1
f = l2 - l1 + 1
return L + interval*(n/2 - cf)/f
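# Worked steps for the first docstring example above (added for illustration):
# in [1, 2, 2, 3, 4, 4, 4, 4, 4, 5] the middle value is x = 4, so L = 3.5;
# cf = 4 points fall below the 3.5-4.5 class and f = 5 points fall inside it,
# giving 3.5 + 1*(10/2 - 4)/5 = 3.7, matching the docstring.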
def mode(data):
"""Return the most common data point from discrete or nominal data.
``mode`` assumes discrete data, and returns a single value. This is the
standard treatment of the mode as commonly taught in schools:
>>> mode([1, 1, 2, 3, 3, 3, 3, 4])
3
This also works with nominal (non-numeric) data:
>>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
'red'
If there are multiple modes, return the first one encountered.
>>> mode(['red', 'red', 'green', 'blue', 'blue'])
'red'
    If *data* is empty, ``mode`` raises StatisticsError.
"""
data = iter(data)
try:
return Counter(data).most_common(1)[0][0]
except IndexError:
raise StatisticsError('no mode for empty data') from None
def multimode(data):
"""Return a list of the most frequently occurring values.
Will return more than one result if there are multiple modes
or an empty list if *data* is empty.
>>> multimode('aabbbbbbbbcc')
['b']
>>> multimode('aabbbbccddddeeffffgg')
['b', 'd', 'f']
>>> multimode('')
[]
"""
counts = Counter(iter(data)).most_common()
maxcount, mode_items = next(groupby(counts, key=itemgetter(1)), (0, []))
return list(map(itemgetter(0), mode_items))
# Notes on methods for computing quantiles
# ----------------------------------------
#
# There is no one perfect way to compute quantiles. Here we offer
# two methods that serve common needs. Most other packages
# surveyed offered at least one or both of these two, making them
# "standard" in the sense of "widely-adopted and reproducible".
# They are also easy to explain, easy to compute manually, and have
# straight-forward interpretations that aren't surprising.
# The default method is known as "R6", "PERCENTILE.EXC", or "expected
# value of rank order statistics". The alternative method is known as
# "R7", "PERCENTILE.INC", or "mode of rank order statistics".
# For sample data where there is a positive probability for values
# beyond the range of the data, the R6 exclusive method is a
# reasonable choice. Consider a random sample of nine values from a
# population with a uniform distribution from 0.0 to 100.0. The
# distribution of the third ranked sample point is described by
# betavariate(alpha=3, beta=7) which has mode=0.250, median=0.286, and
# mean=0.300. Only the latter (which corresponds with R6) gives the
# desired cut point with 30% of the population falling below that
# value, making it comparable to a result from an inv_cdf() function.
# For describing population data where the end points are known to
# be included in the data, the R7 inclusive method is a reasonable
# choice. Instead of the mean, it uses the mode of the beta
# distribution for the interior points. Per Hyndman & Fan, "One nice
# property is that the vertices of Q7(p) divide the range into n - 1
# intervals, and exactly 100p% of the intervals lie to the left of
# Q7(p) and 100(1 - p)% of the intervals lie to the right of Q7(p)."
# If needed, other methods could be added. However, for now, the
# position is that fewer options make for easier choices and that
# external packages can be used for anything more advanced.
def quantiles(dist, /, *, n=4, method='exclusive'):
"""Divide *dist* into *n* continuous intervals with equal probability.
Returns a list of (n - 1) cut points separating the intervals.
Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles.
Set *n* to 100 for percentiles which gives the 99 cuts points that
    separate *dist* into 100 equal-sized groups.
The *dist* can be any iterable containing sample data or it can be
an instance of a class that defines an inv_cdf() method. For sample
data, the cut points are linearly interpolated between data points.
If *method* is set to *inclusive*, *dist* is treated as population
data. The minimum value is treated as the 0th percentile and the
maximum value is treated as the 100th percentile.
"""
if n < 1:
raise StatisticsError('n must be at least 1')
if hasattr(dist, 'inv_cdf'):
return [dist.inv_cdf(i / n) for i in range(1, n)]
data = sorted(dist)
ld = len(data)
if ld < 2:
raise StatisticsError('must have at least two data points')
if method == 'inclusive':
m = ld - 1
result = []
for i in range(1, n):
j = i * m // n
delta = i*m - j*n
interpolated = (data[j] * (n - delta) + data[j+1] * delta) / n
result.append(interpolated)
return result
if method == 'exclusive':
m = ld + 1
result = []
for i in range(1, n):
j = i * m // n # rescale i to m/n
j = 1 if j < 1 else ld-1 if j > ld-1 else j # clamp to 1 .. ld-1
delta = i*m - j*n # exact integer math
interpolated = (data[j-1] * (n - delta) + data[j] * delta) / n
result.append(interpolated)
return result
raise ValueError(f'Unknown method: {method!r}')
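# Hedged worked example for quantiles() (added for illustration): with
# data = [1, 2, 3, 4, 5] and the default n=4, the exclusive (R6) rule
# interpolates between neighbours while the inclusive (R7) rule pins the
# end points, giving:
#     >>> quantiles([1, 2, 3, 4, 5])
#     [1.5, 3.0, 4.5]
#     >>> quantiles([1, 2, 3, 4, 5], method='inclusive')
#     [2.0, 3.0, 4.0]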
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
"""Return sum of square deviations of sequence data.
If ``c`` is None, the mean is calculated in one pass, and the deviations
from the mean are calculated in a second pass. Otherwise, deviations are
calculated from ``c`` as given. Use the second case with care, as it can
lead to garbage results.
"""
if c is None:
c = mean(data)
T, total, count = _sum((x-c)**2 for x in data)
# The following sum should mathematically equal zero, but due to rounding
# error may not.
U, total2, count2 = _sum((x-c) for x in data)
assert T == U and count == count2
total -= total2**2/len(data)
assert not total < 0, 'negative sum of square deviations: %f' % total
return (T, total)
def variance(data, xbar=None):
"""Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
Examples:
>>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
>>> variance(data)
1.3720238095238095
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
>>> m = mean(data)
>>> variance(data, m)
1.3720238095238095
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('31.01875')
>>> from fractions import Fraction as F
>>> variance([F(1, 6), F(1, 2), F(5, 3)])
Fraction(67, 108)
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 2:
raise StatisticsError('variance requires at least two data points')
T, ss = _ss(data, xbar)
return _convert(ss/(n-1), T)
def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
Examples:
>>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
>>> pvariance(data)
1.25
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
>>> mu = mean(data)
>>> pvariance(data, mu)
1.25
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('24.815')
>>> from fractions import Fraction as F
>>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
Fraction(13, 72)
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
T, ss = _ss(data, mu)
return _convert(ss/n, T)
def stdev(data, xbar=None):
"""Return the square root of the sample variance.
See ``variance`` for arguments and other details.
>>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
1.0810874155219827
"""
var = variance(data, xbar)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
>>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
0.986893273527251
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
## Normal Distribution #####################################################
class NormalDist:
"Normal distribution of a random variable"
# https://en.wikipedia.org/wiki/Normal_distribution
# https://en.wikipedia.org/wiki/Variance#Properties
__slots__ = {
'_mu': 'Arithmetic mean of a normal distribution',
'_sigma': 'Standard deviation of a normal distribution',
}
def __init__(self, mu=0.0, sigma=1.0):
"NormalDist where mu is the mean and sigma is the standard deviation."
if sigma < 0.0:
raise StatisticsError('sigma must be non-negative')
self._mu = mu
self._sigma = sigma
@classmethod
def from_samples(cls, data):
"Make a normal distribution instance from sample data."
if not isinstance(data, (list, tuple)):
data = list(data)
xbar = fmean(data)
return cls(xbar, stdev(data, xbar))
def samples(self, n, *, seed=None):
"Generate *n* samples for a given mean and standard deviation."
gauss = random.gauss if seed is None else random.Random(seed).gauss
mu, sigma = self._mu, self._sigma
return [gauss(mu, sigma) for i in range(n)]
def pdf(self, x):
"Probability density function. P(x <= X < x+dx) / dx"
variance = self._sigma ** 2.0
if not variance:
raise StatisticsError('pdf() not defined when sigma is zero')
return exp((x - self._mu)**2.0 / (-2.0*variance)) / sqrt(tau*variance)
def cdf(self, x):
"Cumulative distribution function. P(X <= x)"
if not self._sigma:
raise StatisticsError('cdf() not defined when sigma is zero')
return 0.5 * (1.0 + erf((x - self._mu) / (self._sigma * sqrt(2.0))))
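    # Hedged sanity checks for pdf() and cdf() above (added for illustration):
    # for the standard normal, the density at 0 is 1/sqrt(2*pi) and the
    # cumulative probability at 0 is exactly one half:
    #     >>> round(NormalDist().pdf(0.0), 5)
    #     0.39894
    #     >>> NormalDist().cdf(0.0)
    #     0.5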
def inv_cdf(self, p):
"""Inverse cumulative distribution function. x : P(X <= x) = p
Finds the value of the random variable such that the probability of
the variable being less than or equal to that value equals the given
probability.
This function is also called the percent point function or quantile
function.
"""
if p <= 0.0 or p >= 1.0:
raise StatisticsError('p must be in the range 0.0 < p < 1.0')
if self._sigma <= 0.0:
raise StatisticsError('cdf() not defined when sigma at or below zero')
# There is no closed-form solution to the inverse CDF for the normal
# distribution, so we use a rational approximation instead:
# <NAME>. (1988). "Algorithm AS241: The Percentage Points of the
# Normal Distribution". Applied Statistics. Blackwell Publishing. 37
# (3): 477–484. doi:10.2307/2347330. JSTOR 2347330.
q = p - 0.5
if fabs(q) <= 0.425:
r = 0.180625 - q * q
num = (((((((2.50908_09287_30122_6727e+3 * r +
3.34305_75583_58812_8105e+4) * r +
6.72657_70927_00870_0853e+4) * r +
4.59219_53931_54987_1457e+4) * r +
1.37316_93765_50946_1125e+4) * r +
1.97159_09503_06551_4427e+3) * r +
1.33141_66789_17843_7745e+2) * r +
3.38713_28727_96366_6080e+0) * q
den = (((((((5.22649_52788_52854_5610e+3 * r +
2.87290_85735_72194_2674e+4) * r +
3.93078_95800_09271_0610e+4) * r +
2.12137_94301_58659_5867e+4) * r +
5.39419_60214_24751_1077e+3) * r +
6.87187_00749_20579_0830e+2) * r +
4.23133_30701_60091_1252e+1) * r +
1.0)
x = num / den
return self._mu + (x * self._sigma)
r = p if q <= 0.0 else 1.0 - p
r = sqrt(-log(r))
if r <= 5.0:
r = r - 1.6
num = (((((((7.74545_01427_83414_07640e-4 * r +
2.27238_44989_26918_45833e-2) * r +
2.41780_72517_74506_11770e-1) * r +
1.27045_82524_52368_38258e+0) * r +
3.64784_83247_63204_60504e+0) * r +
5.76949_72214_60691_40550e+0) * r +
4.63033_78461_56545_29590e+0) * r +
1.42343_71107_49683_57734e+0)
den = (((((((1.05075_00716_44416_84324e-9 * r +
5.47593_80849_95344_94600e-4) * r +
1.51986_66563_61645_71966e-2) * r +
1.48103_97642_74800_74590e-1) * r +
6.89767_33498_51000_04550e-1) * r +
1.67638_48301_83803_84940e+0) * r +
2.05319_16266_37758_82187e+0) * r +
1.0)
else:
r = r - 5.0
num = (((((((2.01033_43992_92288_13265e-7 * r +
2.71155_55687_43487_57815e-5) * r +
1.24266_09473_88078_43860e-3) * r +
2.65321_89526_57612_30930e-2) * r +
2.96560_57182_85048_91230e-1) * r +
1.78482_65399_17291_33580e+0) * r +
5.46378_49111_64114_36990e+0) * r +
6.65790_46435_01103_77720e+0)
den = (((((((2.04426_31033_89939_78564e-15 * r +
1.42151_17583_16445_88870e-7) * r +
1.84631_83175_10054_68180e-5) * r +
7.86869_13114_56132_59100e-4) * r +
1.48753_61290_85061_48525e-2) * r +
1.36929_88092_27358_05310e-1) * r +
5.99832_20655_58879_37690e-1) * r +
1.0)
x = num / den
if q < 0.0:
x = -x
return self._mu + (x * self._sigma)
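    # Hedged sanity check for inv_cdf() (added for illustration): the 50th
    # percentile maps back to mu for any sigma, e.g.
    #     >>> NormalDist().inv_cdf(0.5)
    #     0.0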
def overlap(self, other):
"""Compute the overlapping coefficient (OVL) between two normal distributions.
Measures the agreement between two normal probability distributions.
Returns a value between 0.0 and 1.0 giving the overlapping area in
the two underlying probability density functions.
>>> N1 = NormalDist(2.4, 1.6)
>>> N2 = NormalDist(3.2, 2.0)
>>> N1.overlap(N2)
0.8035050657330205
"""
# See: "The overlapping coefficient as a measure of agreement between
# probability distributions and point estimation of the overlap of two
# normal densities" -- <NAME> and <NAME> Jr
# http://dx.doi.org/10.1080/03610928908830127
if not isinstance(other, NormalDist):
raise TypeError('Expected another NormalDist instance')
X, Y = self, other
if (Y._sigma, Y._mu) < (X._sigma, X._mu): # sort to assure commutativity
X, Y = Y, X
X_var, Y_var = X.variance, Y.variance
if not X_var or not Y_var:
raise StatisticsError('overlap() not defined when sigma is zero')
dv = Y_var - X_var
dm = fabs(Y._mu - X._mu)
if not dv:
return 1.0 - erf(dm / (2.0 * X._sigma * sqrt(2.0)))
a = X._mu * Y_var - Y._mu * X_var
b = X._sigma * Y._sigma * sqrt(dm**2.0 + dv * log(Y_var / X_var))
x1 = (a + b) / dv
x2 = (a - b) / dv
return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2)))
@property
def mean(self):
"Arithmetic mean of the normal distribution."
return self._mu
@property
def stdev(self):
"Standard deviation of the normal distribution."
return self._sigma
@property
def variance(self):
"Square of the standard deviation."
return self._sigma ** 2.0
def __add__(x1, x2):
"""Add a constant or another NormalDist instance.
If *other* is a constant, translate mu by the constant,
leaving sigma unchanged.
If *other* is a NormalDist, add both the means and the variances.
Mathematically, this works only if the two distributions are
independent or if they are jointly normally distributed.
"""
if isinstance(x2, NormalDist):
return NormalDist(x1._mu + x2._mu, hypot(x1._sigma, x2._sigma))
return NormalDist(x1._mu + x2, x1._sigma)
def __sub__(x1, x2):
"""Subtract a constant or another NormalDist instance.
If *other* is a constant, translate by the constant mu,
leaving sigma unchanged.
If *other* is a NormalDist, subtract the means and add the variances.
Mathematically, this works only if the two distributions are
independent or if they are jointly normally distributed.
"""
if isinstance(x2, NormalDist):
return NormalDist(x1._mu - x2._mu, hypot(x1._sigma, x2._sigma))
return NormalDist(x1._mu - x2, x1._sigma)
def __mul__(x1, x2):
"""Multiply both mu and sigma by a constant.
Used for rescaling, perhaps to change measurement units.
Sigma is scaled with the absolute value of the constant.
"""
return NormalDist(x1._mu * x2, x1._sigma * fabs(x2))
def __truediv__(x1, x2):
"""Divide both mu and sigma by a constant.
Used for rescaling, perhaps to change measurement units.
Sigma is scaled with the absolute value of the constant.
"""
return NormalDist(x1._mu / x2, x1._sigma / fabs(x2))
def __pos__(x1):
"Return a copy of the instance."
return NormalDist(x1._mu, x1._sigma)
def __neg__(x1):
"Negates mu while keeping sigma the same."
return NormalDist(-x1._mu, x1._sigma)
__radd__ = __add__
def __rsub__(x1, x2):
"Subtract a NormalDist from a constant or another NormalDist."
return -(x1 - x2)
__rmul__ = __mul__
def __eq__(x1, x2):
"Two NormalDist objects are equal if their mu and sigma are both equal."
if not isinstance(x2, NormalDist):
return NotImplemented
        return (x1._mu, x1._sigma) == (x2._mu, x2._sigma)
def __hash__(self):
"NormalDist objects hash equal if their mu and sigma are both equal."
return hash((self._mu, self._sigma))
def __repr__(self):
return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})'
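# Hedged illustration (added, not part of the original module): adding two
# independent NormalDist instances adds the means and adds the variances,
# so sigma combines via hypot():
#     >>> NormalDist(10.0, 3.0) + NormalDist(2.0, 4.0)
#     NormalDist(mu=12.0, sigma=5.0)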
if __name__ == '__main__':
    # Show math operations computed analytically in comparison
    # to a Monte Carlo simulation of the same operations
from math import isclose
from operator import add, sub, mul, truediv
from itertools import repeat
import doctest
g1 = NormalDist(10, 20)
g2 = NormalDist(-5, 25)
# Test scaling by a constant
assert (g1 * 5 / 5).mean == g1.mean
assert (g1 * 5 / 5).stdev == g1.stdev
n = 100_000
G1 = g1.samples(n)
G2 = g2.samples(n)
for func in (add, sub):
print(f'\nTest {func.__name__} with another NormalDist:')
print(func(g1, g2))
print(NormalDist.from_samples(map(func, G1, G2)))
const = 11
for func in (add, sub, mul, truediv):
print(f'\nTest {func.__name__} with a constant:')
print(func(g1, const))
print(NormalDist.from_samples(map(func, G1, repeat(const))))
const = 19
for func in (add, sub, mul):
print(f'\nTest constant with {func.__name__}:')
print(func(const, g1))
print(NormalDist.from_samples(map(func, repeat(const), G1)))
def assert_close(G1, G2):
assert isclose(G1.mean, G1.mean, rel_tol=0.01), (G1, G2)
assert isclose(G1.stdev, G2.stdev, rel_tol=0.01), (G1, G2)
X = NormalDist(-105, 73)
Y = NormalDist(31, 47)
s = 32.75
n = 100_000
S = NormalDist.from_samples([x + s for x in X.samples(n)])
assert_close(X + s, S)
S = NormalDist.from_samples([x - s for x in X.samples(n)])
assert_close(X - s, S)
S = NormalDist.from_samples([x * s for x in X.samples(n)])
assert_close(X * s, S)
S = NormalDist.from_samples([x / s for x in X.samples(n)])
assert_close(X / s, S)
S = NormalDist.from_samples([x + y for x, y in zip(X.samples(n),
Y.samples(n))])
assert_close(X + Y, S)
S = NormalDist.from_samples([x - y for x, y in zip(X.samples(n),
Y.samples(n))])
assert_close(X - Y, S)
print(doctest.testmod())
|
[
"itertools.repeat",
"math.exp",
"math.hypot",
"math.fabs",
"math.sqrt",
"bisect.bisect_right",
"random.Random",
"fractions.Fraction",
"math.fsum",
"math.isclose",
"itertools.groupby",
"collections.Counter",
"math.log",
"math.isfinite",
"bisect.bisect_left",
"operator.itemgetter",
"doctest.testmod"
] |
[((4662, 4681), 'itertools.groupby', 'groupby', (['data', 'type'], {}), '(data, type)\n', (4669, 4681), False, 'from itertools import groupby\n'), ((8396, 8413), 'bisect.bisect_left', 'bisect_left', (['a', 'x'], {}), '(a, x)\n', (8407, 8413), False, 'from bisect import bisect_left, bisect_right\n'), ((8573, 8597), 'bisect.bisect_right', 'bisect_right', (['a', 'x'], {'lo': 'l'}), '(a, x, lo=l)\n', (8585, 8597), False, 'from bisect import bisect_left, bisect_right\n'), ((10253, 10263), 'math.fsum', 'fsum', (['data'], {}), '(data)\n', (10257, 10263), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((33571, 33590), 'math.fabs', 'fabs', (['(Y._mu - X._mu)'], {}), '(Y._mu - X._mu)\n', (33575, 33590), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((38043, 38082), 'math.isclose', 'isclose', (['G1.mean', 'G1.mean'], {'rel_tol': '(0.01)'}), '(G1.mean, G1.mean, rel_tol=0.01)\n', (38050, 38082), False, 'from math import isclose\n'), ((38108, 38149), 'math.isclose', 'isclose', (['G1.stdev', 'G2.stdev'], {'rel_tol': '(0.01)'}), '(G1.stdev, G2.stdev, rel_tol=0.01)\n', (38115, 38149), False, 'from math import isclose\n'), ((38958, 38975), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (38973, 38975), False, 'import doctest\n'), ((5429, 5445), 'math.isfinite', 'math.isfinite', (['x'], {}), '(x)\n', (5442, 5445), False, 'import math\n'), ((17553, 17566), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (17563, 17566), False, 'from operator import itemgetter\n'), ((26068, 26082), 'math.sqrt', 'math.sqrt', (['var'], {}), '(var)\n', (26077, 26082), False, 'import math\n'), ((26415, 26429), 'math.sqrt', 'math.sqrt', (['var'], {}), '(var)\n', (26424, 26429), False, 'import math\n'), ((27882, 27928), 'math.exp', 'exp', (['((x - self._mu) ** 2.0 / (-2.0 * variance))'], {}), '((x - self._mu) ** 2.0 / (-2.0 * variance))\n', (27885, 27928), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((27927, 27947), 'math.sqrt', 'sqrt', (['(tau * variance)'], {}), '(tau * variance)\n', (27931, 27947), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((29194, 29201), 'math.fabs', 'fabs', (['q'], {}), '(q)\n', (29198, 29201), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((5226, 5240), 'fractions.Fraction', 'Fraction', (['n', 'd'], {}), '(n, d)\n', (5234, 5240), False, 'from fractions import Fraction\n'), ((17508, 17521), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (17518, 17521), False, 'from operator import itemgetter\n'), ((27524, 27543), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (27537, 27543), False, 'import random\n'), ((30302, 30308), 'math.log', 'log', (['r'], {}), '(r)\n', (30305, 30308), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((34762, 34789), 'math.hypot', 'hypot', (['x1._sigma', 'x2._sigma'], {}), '(x1._sigma, x2._sigma)\n', (34767, 34789), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((35339, 35366), 'math.hypot', 'hypot', (['x1._sigma', 'x2._sigma'], {}), '(x1._sigma, x2._sigma)\n', (35344, 35366), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((35691, 35699), 'math.fabs', 'fabs', (['x2'], {}), '(x2)\n', (35695, 35699), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((35976, 35984), 'math.fabs', 'fabs', (['x2'], {}), '(x2)\n', (35980, 35984), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, 
fsum\n'), ((37775, 37788), 'itertools.repeat', 'repeat', (['const'], {}), '(const)\n', (37781, 37788), False, 'from itertools import repeat\n'), ((37976, 37989), 'itertools.repeat', 'repeat', (['const'], {}), '(const)\n', (37982, 37989), False, 'from itertools import repeat\n'), ((16949, 16962), 'collections.Counter', 'Counter', (['data'], {}), '(data)\n', (16956, 16962), False, 'from collections import Counter\n'), ((33770, 33788), 'math.log', 'log', (['(Y_var / X_var)'], {}), '(Y_var / X_var)\n', (33773, 33788), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((28190, 28199), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (28194, 28199), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n'), ((33662, 33671), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (33666, 33671), False, 'from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0053_ownedcard_skill'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='message',
field=models.CharField(max_length=300, choices=[(b'Added a card', 'Added a card'), (b'Idolized a card', 'Idolized a card'), (b'Max Leveled a card', 'Max Leveled a card'), (b'Max Bonded a card', 'Max Bonded a card'), (b'Rank Up', 'Rank Up'), (b'Ranked in event', 'Ranked in event')]),
preserve_default=True,
),
]
|
[
"django.db.models.CharField"
] |
[((348, 637), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'choices': "[(b'Added a card', 'Added a card'), (b'Idolized a card', 'Idolized a card'),\n (b'Max Leveled a card', 'Max Leveled a card'), (b'Max Bonded a card',\n 'Max Bonded a card'), (b'Rank Up', 'Rank Up'), (b'Ranked in event',\n 'Ranked in event')]"}), "(max_length=300, choices=[(b'Added a card', 'Added a card'),\n (b'Idolized a card', 'Idolized a card'), (b'Max Leveled a card',\n 'Max Leveled a card'), (b'Max Bonded a card', 'Max Bonded a card'), (\n b'Rank Up', 'Rank Up'), (b'Ranked in event', 'Ranked in event')])\n", (364, 637), False, 'from django.db import models, migrations\n')]
|
"""
This is a data preparation script for the ASpIRE dataset. The following description
is taken from the LDC website:
ASpIRE Development and Development Test Sets was developed for the Automatic Speech
recognition In Reverberant Environments (ASpIRE) Challenge sponsored by IARPA
(the Intelligent Advanced Research Projects Activity). It contains approximately 226
hours of English speech with transcripts and scoring files.
The ASpIRE challenge asked solvers to develop innovative speech recognition systems
that could be trained on conversational telephone speech, and yet work well on far-
field microphone data from noisy, reverberant rooms. Participants had the opportunity
to evaluate their techniques on a common set of challenging data that included
significant room noise and reverberation.
The data is provided in LDC catalog LDC2017S21. The audio data is a subset of Mixer 6
Speech (LDC2013S03), audio recordings of interviews, transcript readings and
conversational telephone speech collected by the Linguistic Data Consortium in 2009
and 2010 from native English speakers local to the Philadelphia area. The transcripts
were developed by Appen for the ASpIRE challenge.
Data is divided into development and development test sets.
There are 2 versions: "single" and "multi", which stand for single-channel and
multi-channel audio respectively. All audio is presented as single channel, 16kHz
16-bit Signed Integer PCM *.wav files. Transcripts are plain text tdf files or as STM
files. Scoring files (glm) are also included.
"""
import logging
import itertools
import tarfile
from collections import defaultdict
from pathlib import Path
from typing import Dict, Optional, Union, NamedTuple
from lhotse import validate_recordings_and_supervisions, fix_manifests
from lhotse.audio import Recording, RecordingSet, AudioSource
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, Seconds
class AspireSegmentAnnotation(NamedTuple):
session: str
speaker: str
start: Seconds
end: Seconds
text: str
def prepare_aspire(
corpus_dir: Pathlike, output_dir: Optional[Pathlike] = None, mic: str = "single"
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
"""
Returns the manifests which consist of the Recordings and Supervisions
:param corpus_dir: Pathlike, the path of the corpus dir (LDC2017S21).
:param output_dir: Pathlike, the path where to write the manifests.
:param mic: str, the microphone type, either "single" or "multi".
    :return: a Dict whose keys are the dataset parts ('dev' and 'dev_test'), and whose values are Dicts with the keys 'recordings' and 'supervisions'.
"""
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
assert mic in [
"single",
"multi",
], f"mic must be either 'single' or 'multi', got {mic}"
corpus_dir = corpus_dir / "IARPA-ASpIRE-Dev-Sets-v2.0" / "data"
audio_dir = corpus_dir / "dev_and_dev_test_audio"
stm_dir = corpus_dir / "dev_and_dev_test_STM_files"
if mic == "single":
audio_paths = {
"dev": audio_dir / "ASpIRE_single_dev",
"dev_test": audio_dir / "ASpIRE_single_dev_test",
}
stm_file = {
"dev": stm_dir / "dev.stm",
"dev_test": stm_dir / "dev_test.stm",
}
else:
audio_paths = {
"dev": audio_dir / "ASpIRE_multi_dev",
"dev_test": audio_dir / "ASpIRE_multi_dev_test",
}
stm_file = {
"dev": stm_dir / "multi_dev.stm",
"dev_test": stm_dir / "multi_dev_test.stm",
}
manifests = defaultdict(dict)
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
for part in ["dev", "dev_test"]:
recordings = []
supervisions = []
# Prepare the recordings
if mic == "single":
recording_set = RecordingSet.from_dir(audio_paths[part], "*.wav")
else:
import soundfile as sf
audio_groups = {
k: list(v)
for k, v in itertools.groupby(
sorted(audio_paths[part].glob("*.wav")),
key=lambda x: "_".join(x.stem.split("_")[:-1]),
)
} # group audios so that each entry is a session containing all channels
for session_name, audios in audio_groups.items():
audio_sf = sf.SoundFile(str(audios[0]))
recordings.append(
Recording(
id=session_name,
sources=[
AudioSource(
type="file",
channels=[int(audio.stem[-2:]) - 1],
source=str(audio),
)
for audio in sorted(audios)
],
sampling_rate=audio_sf.samplerate,
num_samples=audio_sf.frames,
duration=audio_sf.frames / audio_sf.samplerate,
)
)
recording_set = RecordingSet.from_recordings(recordings)
# Read STM file and prepare segments
segments = []
with open(stm_file[part]) as f:
for line in f:
session, _, speaker, start, end, text = line.strip().split(maxsplit=5)
segments.append(
AspireSegmentAnnotation(
session, speaker, float(start), float(end), text
)
)
# Group the segments by session and speaker
segments_grouped = defaultdict(list)
for segment in segments:
segments_grouped[(segment.session, segment.speaker)].append(segment)
# Create the supervisions
supervisions = []
for k, segs in segments_grouped.items():
session, speaker = k
supervisions += [
SupervisionSegment(
id=f"{session}-{speaker}-{i:03d}",
recording_id=session,
start=seg.start,
duration=round(seg.end - seg.start, 4),
speaker=speaker,
text=seg.text,
language="English",
)
for i, seg in enumerate(segs)
]
supervision_set = SupervisionSet.from_segments(supervisions)
recording_set, supervision_set = fix_manifests(recording_set, supervision_set)
validate_recordings_and_supervisions(recording_set, supervision_set)
if output_dir is not None:
supervision_set.to_file(output_dir / f"supervisions_{part}.jsonl")
recording_set.to_file(output_dir / f"recordings_{part}.jsonl")
manifests[part] = {"recordings": recording_set, "supervisions": supervision_set}
return manifests
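# Hedged usage sketch (added for illustration; the corpus path below is a
# placeholder): prepares both parts, writes the JSONL manifests into
# output_dir, and prints the manifest sizes.
if __name__ == "__main__":
    manifests = prepare_aspire(
        corpus_dir="/path/to/LDC2017S21",
        output_dir="data/manifests/aspire",
        mic="single",
    )
    for part, dsets in manifests.items():
        print(part, len(dsets["recordings"]), len(dsets["supervisions"]))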
|
[
"lhotse.audio.RecordingSet.from_dir",
"lhotse.supervision.SupervisionSet.from_segments",
"lhotse.fix_manifests",
"collections.defaultdict",
"lhotse.validate_recordings_and_supervisions",
"pathlib.Path",
"lhotse.audio.RecordingSet.from_recordings"
] |
[((2731, 2747), 'pathlib.Path', 'Path', (['corpus_dir'], {}), '(corpus_dir)\n', (2735, 2747), False, 'from pathlib import Path\n'), ((3707, 3724), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3718, 3724), False, 'from collections import defaultdict\n'), ((3778, 3794), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (3782, 3794), False, 'from pathlib import Path\n'), ((5827, 5844), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5838, 5844), False, 'from collections import defaultdict\n'), ((6578, 6620), 'lhotse.supervision.SupervisionSet.from_segments', 'SupervisionSet.from_segments', (['supervisions'], {}), '(supervisions)\n', (6606, 6620), False, 'from lhotse.supervision import SupervisionSegment, SupervisionSet\n'), ((6663, 6708), 'lhotse.fix_manifests', 'fix_manifests', (['recording_set', 'supervision_set'], {}), '(recording_set, supervision_set)\n', (6676, 6708), False, 'from lhotse import validate_recordings_and_supervisions, fix_manifests\n'), ((6717, 6785), 'lhotse.validate_recordings_and_supervisions', 'validate_recordings_and_supervisions', (['recording_set', 'supervision_set'], {}), '(recording_set, supervision_set)\n', (6753, 6785), False, 'from lhotse import validate_recordings_and_supervisions, fix_manifests\n'), ((4027, 4076), 'lhotse.audio.RecordingSet.from_dir', 'RecordingSet.from_dir', (['audio_paths[part]', '"""*.wav"""'], {}), "(audio_paths[part], '*.wav')\n", (4048, 4076), False, 'from lhotse.audio import Recording, RecordingSet, AudioSource\n'), ((5293, 5333), 'lhotse.audio.RecordingSet.from_recordings', 'RecordingSet.from_recordings', (['recordings'], {}), '(recordings)\n', (5321, 5333), False, 'from lhotse.audio import Recording, RecordingSet, AudioSource\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: health_assessment_rule_version.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from capacity_admin_sdk.model.health_assessment import health_assessment_event_score_config_item_pb2 as capacity__admin__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2
from capacity_admin_sdk.model.health_assessment import health_assessment_related_resource_score_config_item_pb2 as capacity__admin__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='health_assessment_rule_version.proto',
package='health_assessment',
syntax='proto3',
serialized_options=_b('ZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessment'),
serialized_pb=_b('\n$health_assessment_rule_version.proto\x12\x11health_assessment\x1aZcapacity_admin_sdk/model/health_assessment/health_assessment_event_score_config_item.proto\x1a\x65\x63\x61pacity_admin_sdk/model/health_assessment/health_assessment_related_resource_score_config_item.proto\"\xc6\x02\n\x1bHealthAssessmentRuleVersion\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06ruleId\x18\x02 \x01(\t\x12\x10\n\x08objectId\x18\x03 \x01(\t\x12Q\n\x10\x65ventScoreConfig\x18\x04 \x03(\x0b\x32\x37.health_assessment.HealthAssessmentEventScoreConfigItem\x12\x65\n\x1arelatedResourceScoreConfig\x18\x05 \x03(\x0b\x32\x41.health_assessment.HealthAssessmentRelatedResourceScoreConfigItem\x12\x18\n\x10\x65ventScoreWeight\x18\x06 \x01(\x05\x12\x1d\n\x15relatedResourceWeight\x18\x07 \x01(\x05\x42MZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessmentb\x06proto3')
,
dependencies=[capacity__admin__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2.DESCRIPTOR,capacity__admin__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2.DESCRIPTOR,])
_HEALTHASSESSMENTRULEVERSION = _descriptor.Descriptor(
name='HealthAssessmentRuleVersion',
full_name='health_assessment.HealthAssessmentRuleVersion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='health_assessment.HealthAssessmentRuleVersion.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ruleId', full_name='health_assessment.HealthAssessmentRuleVersion.ruleId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='health_assessment.HealthAssessmentRuleVersion.objectId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreConfig', full_name='health_assessment.HealthAssessmentRuleVersion.eventScoreConfig', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceScoreConfig', full_name='health_assessment.HealthAssessmentRuleVersion.relatedResourceScoreConfig', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreWeight', full_name='health_assessment.HealthAssessmentRuleVersion.eventScoreWeight', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceWeight', full_name='health_assessment.HealthAssessmentRuleVersion.relatedResourceWeight', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=255,
serialized_end=581,
)
_HEALTHASSESSMENTRULEVERSION.fields_by_name['eventScoreConfig'].message_type = capacity__admin__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2._HEALTHASSESSMENTEVENTSCORECONFIGITEM
_HEALTHASSESSMENTRULEVERSION.fields_by_name['relatedResourceScoreConfig'].message_type = capacity__admin__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2._HEALTHASSESSMENTRELATEDRESOURCESCORECONFIGITEM
DESCRIPTOR.message_types_by_name['HealthAssessmentRuleVersion'] = _HEALTHASSESSMENTRULEVERSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HealthAssessmentRuleVersion = _reflection.GeneratedProtocolMessageType('HealthAssessmentRuleVersion', (_message.Message,), {
'DESCRIPTOR' : _HEALTHASSESSMENTRULEVERSION,
'__module__' : 'health_assessment_rule_version_pb2'
# @@protoc_insertion_point(class_scope:health_assessment.HealthAssessmentRuleVersion)
})
_sym_db.RegisterMessage(HealthAssessmentRuleVersion)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
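# Hedged usage sketch (added for illustration; generated modules are normally
# left untouched, so code like this belongs in the application layer):
#     msg = HealthAssessmentRuleVersion(instanceId="abc", ruleId="rule-1",
#                                       objectId="HOST", eventScoreWeight=60)
#     payload = msg.SerializeToString()
#     restored = HealthAssessmentRuleVersion.FromString(payload)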
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] |
[((484, 510), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (508, 510), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((6348, 6547), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""HealthAssessmentRuleVersion"""', '(_message.Message,)', "{'DESCRIPTOR': _HEALTHASSESSMENTRULEVERSION, '__module__':\n 'health_assessment_rule_version_pb2'}"], {}), "('HealthAssessmentRuleVersion', (\n _message.Message,), {'DESCRIPTOR': _HEALTHASSESSMENTRULEVERSION,\n '__module__': 'health_assessment_rule_version_pb2'})\n", (6388, 6547), True, 'from google.protobuf import reflection as _reflection\n'), ((3832, 4217), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""eventScoreConfig"""', 'full_name': '"""health_assessment.HealthAssessmentRuleVersion.eventScoreConfig"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='eventScoreConfig', full_name=\n 'health_assessment.HealthAssessmentRuleVersion.eventScoreConfig', index\n =3, number=4, type=11, cpp_type=10, label=3, has_default_value=False,\n default_value=[], message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, serialized_options=None,\n file=DESCRIPTOR)\n", (3859, 4217), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4237, 4641), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""relatedResourceScoreConfig"""', 'full_name': '"""health_assessment.HealthAssessmentRuleVersion.relatedResourceScoreConfig"""', 'index': '(4)', 'number': '(5)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='relatedResourceScoreConfig', full_name=\n 'health_assessment.HealthAssessmentRuleVersion.relatedResourceScoreConfig',\n index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=\n False, default_value=[], message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)\n", (4264, 4641), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4662, 5044), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""eventScoreWeight"""', 'full_name': '"""health_assessment.HealthAssessmentRuleVersion.eventScoreWeight"""', 'index': '(5)', 'number': '(6)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='eventScoreWeight', full_name=\n 'health_assessment.HealthAssessmentRuleVersion.eventScoreWeight', index\n =5, number=6, type=5, cpp_type=1, label=1, has_default_value=False,\n default_value=0, message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, 
serialized_options=None,\n file=DESCRIPTOR)\n", (4689, 5044), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5064, 5455), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""relatedResourceWeight"""', 'full_name': '"""health_assessment.HealthAssessmentRuleVersion.relatedResourceWeight"""', 'index': '(6)', 'number': '(7)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='relatedResourceWeight', full_name=\n 'health_assessment.HealthAssessmentRuleVersion.relatedResourceWeight',\n index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False,\n default_value=0, message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, serialized_options=None,\n file=DESCRIPTOR)\n", (5091, 5455), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
# LIBTBX_SET_DISPATCHER_NAME prime.frame_extractor
"""
Author : Uervirojnangkoorn, M.
Desc : Takes the original code from xfel/command_line/frame_extractor.py
and adds per-scan extraction so that each scan is written out as a single pickle file.
"""
from __future__ import absolute_import, division, print_function
from dials.array_family import flex
from dials.util.options import (
Importer,
flatten_reflections,
flatten_experiments,
OptionParser,
)
from cctbx import crystal, miller
from cctbx.crystal_orientation import crystal_orientation
import iotbx.phil
import cctbx, os
from libtbx import easy_pickle
from six.moves import range
class ConstructFrame(object):
def get_template_pickle(self):
return {
"current_cb_op_to_primitive": 0,
"current_orientation": 0,
"distance": 0,
"effective_tiling": 0,
"mapped_predictions": [[]],
"max_signal": 0,
"ML_domain_size_ang": [0],
"ML_half_mosaicity_deg": [0],
"mosaicity": 0,
"model_partialities": [None],
"observations": [0],
"pointgroup": 0,
"residual": 0,
"sa_parameters": ["None"],
"wavelength": 0,
"xbeam": 0,
"ybeam": 0,
}
def __init__(self, reflections, experiment, scan_no):
# assemble template and unpack files
self.frame = self.get_template_pickle()
self.pixel_size = experiment.detector[0].get_pixel_size()[0]
if "intensity.prf.value" in reflections:
self.method = "prf" # integration by profile fitting
elif "intensity.sum.value" in reflections:
self.method = "sum" # integration by simple summation
# self.reflections = reflections.select(reflections['intensity.' + self.method + '.variance'] > 0) # keep only spots with sigmas above zero
self.reflections = reflections
self.scan_no = scan_no
self.reflections = self.reflections.select(
(self.reflections["xyzobs.px.value"].parts()[2] >= scan_no)
& (self.reflections["xyzobs.px.value"].parts()[2] < scan_no + 1)
) # select only reflections in the scan no.
self.xtal = experiment.crystal
self.beam_obj = experiment.beam
self.det = experiment.detector
self.gonio = experiment.goniometer
self.scan = experiment.scan
self.img_sweep = experiment.imageset
print(scan_no, len(self.reflections))
# experiment-dependent components ---------------------------------------------------------------------------
# get wavelength
def populate_wavelength(self):
assert self.beam_obj.get_wavelength() is not None, "no wavelength"
self.frame["wavelength"] = self.beam_obj.get_wavelength()
# get detector distance in mm
def populate_distance(self):
assert self.det[0].get_distance() is not None, "no detector distance"
self.frame["distance"] = self.det[0].get_distance()
# get xbeam and ybeam in mm
def populate_beam_dir(self):
assert self.beam_obj.get_s0() is not None, "no beam direction"
self.frame["xbeam"], self.frame["ybeam"] = self.det[0].get_beam_centre(
self.beam_obj.get_s0()
)
# get max signal
def populate_max_signal(self):
pass
# get effective tiling
def populate_effective_tiling(self):
pass
# indicate simulated annealing parameters, if present
def populate_sa_params(self):
pass
# crystal-dependent components ------------------------------------------------------------------------------
# generate a crystal orientation object from the A* matrix
def populate_orientation(self):
assert (
self.xtal.get_A_at_scan_point(self.scan_no) is not None
), "no crystal orientation matrix"
self.frame["current_orientation"] = [
crystal_orientation(self.xtal.get_A_at_scan_point(self.scan_no).elems, True)
]
# generate change-of-basis operation for current to primitive cell
def populate_op_to_primitive(self):
assert self.xtal.get_space_group() is not None, "no space group"
self.frame["current_cb_op_to_primitive"] = [
self.xtal.get_space_group().z2p_op()
]
# fetch the point group associated with the crystal
def populate_point_group(self):
assert self.xtal.get_space_group() is not None, "no space group"
self.frame["pointgroup"] = str(
self.xtal.get_space_group().build_derived_point_group().info()
)
# get mosaicity
def populate_mosaicity(self):
try:
self.frame["mosaicity"] = self.xtal.get_mosaicity()
        except AttributeError:
pass
# get any available ML values
def populate_ML_values(self):
try:
self.frame["ML_half_mosaicity_deg"] = [self.xtal.get_half_mosaicity_deg()]
except AttributeError:
pass
try:
self.frame["ML_domain_size_ang"] = [self.xtal.get_domain_size_ang()]
except AttributeError:
pass
# observations-dependent components -------------------------------------------------------------------------
# generate a miller array containing the Miller indices, intensities and variances for one frame
def populate_observations(self):
intensities = self.reflections["intensity." + self.method + ".value"]
variances = self.reflections["intensity." + self.method + ".variance"]
space_group = crystal.symmetry(
self.xtal.get_unit_cell(), str(self.xtal.get_space_group().info())
)
miller_set = miller.set(space_group, self.reflections["miller_index"])
self.frame["observations"][0] = cctbx.miller.array(
miller_set, intensities, flex.sqrt(variances)
).set_observation_type_xray_intensity()
# collect predicted spot positions
def populate_pixel_positions(self):
assert "xyzcal.px" in self.reflections, "no calculated spot positions"
self.frame["mapped_predictions"][0] = flex.vec2_double()
for i in range(len(self.reflections["xyzcal.px"])):
self.frame["mapped_predictions"][0].append(
tuple(self.reflections["xyzcal.px"][i][1::-1])
            )  # [1::-1] takes the first two elements in reverse order.
# generate a list of dictionaries containing a series of corrections for each predicted reflection
def populate_corrections(self):
assert (
"xyzobs.px.value" in self.reflections and "xyzcal.px" in self.reflections
), "no calculated or observed spot positions"
assert (
self.frame["xbeam"] != 0 and self.frame["ybeam"] != 0
), "invalid beam center"
self.frame["correction_vectors"] = [[]]
for idx in range(len(self.reflections["xyzobs.px.value"])):
if (
self.reflections["xyzcal.px"][idx][0:2]
!= self.reflections["xyzobs.px.value"][idx][0:2]
):
theoret_center = 1765 / 2, 1765 / 2
refined_center = (
self.frame["xbeam"] / self.pixel_size,
self.frame["ybeam"] / self.pixel_size,
) # px to mm conversion
hkl = self.reflections["miller_index"][idx]
obsspot = tuple(self.reflections["xyzobs.px.value"][idx][0:2])
predspot = tuple(self.reflections["xyzcal.px"][idx][0:2])
self.frame["correction_vectors"][0].append(
{
"refinedcenter": refined_center,
"hkl": hkl,
"setting_id": 0,
"azimuthal": 0,
"radial": 0,
"obsspot": obsspot,
"obscenter": theoret_center,
"predspot": predspot,
}
)
# get partialities
def populate_partialities(self):
pass
# produce residuals
def populate_residuals(self):
pass
# combine all of the above
def make_frame(self):
self.populate_wavelength()
self.populate_distance()
self.populate_beam_dir()
self.populate_max_signal()
self.populate_effective_tiling()
self.populate_sa_params()
self.populate_orientation()
self.populate_op_to_primitive()
self.populate_point_group()
self.populate_mosaicity()
self.populate_ML_values()
self.populate_observations()
self.populate_pixel_positions()
# self.populate_corrections() # works, but unnecessary
self.populate_partialities()
self.populate_residuals()
return self.frame
class ConstructFrameFromFiles(ConstructFrame):
def __init__(self, pickle_name, json_name, scan_no):
# load the integration.pickle file (reflection table) into memory and
# load the experiments.json file (json) into memory, piecewise.
        # check_format=False because we don't want to load any imagesets in the
        # experiment list
importer = Importer(
[pickle_name, json_name],
read_experiments=True,
read_reflections=True,
check_format=False,
)
if importer.unhandled:
print("unable to process:", importer.unhandled)
ConstructFrame.__init__(
self,
flatten_reflections(importer.reflections)[0],
flatten_experiments(importer.experiments)[0],
scan_no,
)
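# --- Hypothetical usage sketch (editor's addition, not part of the original script).
# Shows how a single scan could be extracted programmatically; the file names and
# scan number below are placeholders.
#
#   frame = ConstructFrameFromFiles(
#       "integrated.pickle", "experiments.json", scan_no=0
#   ).make_frame()
#   easy_pickle.dump("scan_0.pickle", frame)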
if __name__ == "__main__":
master_phil_scope = iotbx.phil.parse(
"""
pickle_name = None
.type = path
.help = path to a reflection table (integrated.pickle) file
json_name = None
.type = path
.help = path to an experiments.json file
output_dir = None
.type = path
.help = if set, path to directory to save the new pickle file
"""
)
parser = OptionParser(phil=master_phil_scope)
params, options = parser.parse_args(show_diff_phil=True)
# get scan range number
importer = Importer(
[params.pickle_name, params.json_name],
read_experiments=True,
read_reflections=True,
check_format=False,
)
if importer.unhandled:
print("unable to process:", importer.unhandled)
experiment = flatten_experiments(importer.experiments)[0]
scan = experiment.scan
for scan_no in range(scan.get_image_range()[0], scan.get_image_range()[1]):
# build each frame
frame = ConstructFrameFromFiles(
params.pickle_name, params.json_name, scan_no
).make_frame()
        if params.output_dir is not None:
assert os.path.isdir(params.output_dir)
basename = os.path.basename(params.pickle_name)
name = (
os.path.splitext(basename)[0] + "_extracted_" + str(scan_no) + ".pickle"
)
dest_path = os.path.join(params.output_dir, name)
assert not os.path.isfile(dest_path)
easy_pickle.dump(dest_path, frame)
|
[
"os.path.basename",
"os.path.isdir",
"cctbx.miller.set",
"dials.util.options.Importer",
"dials.array_family.flex.vec2_double",
"dials.array_family.flex.sqrt",
"dials.util.options.flatten_experiments",
"dials.util.options.flatten_reflections",
"os.path.isfile",
"libtbx.easy_pickle.dump",
"os.path.splitext",
"os.path.join",
"dials.util.options.OptionParser"
] |
[((10173, 10209), 'dials.util.options.OptionParser', 'OptionParser', ([], {'phil': 'master_phil_scope'}), '(phil=master_phil_scope)\n', (10185, 10209), False, 'from dials.util.options import Importer, flatten_reflections, flatten_experiments, OptionParser\n'), ((10314, 10432), 'dials.util.options.Importer', 'Importer', (['[params.pickle_name, params.json_name]'], {'read_experiments': '(True)', 'read_reflections': '(True)', 'check_format': '(False)'}), '([params.pickle_name, params.json_name], read_experiments=True,\n read_reflections=True, check_format=False)\n', (10322, 10432), False, 'from dials.util.options import Importer, flatten_reflections, flatten_experiments, OptionParser\n'), ((5766, 5823), 'cctbx.miller.set', 'miller.set', (['space_group', "self.reflections['miller_index']"], {}), "(space_group, self.reflections['miller_index'])\n", (5776, 5823), False, 'from cctbx import crystal, miller\n'), ((6195, 6213), 'dials.array_family.flex.vec2_double', 'flex.vec2_double', ([], {}), '()\n', (6211, 6213), False, 'from dials.array_family import flex\n'), ((9308, 9413), 'dials.util.options.Importer', 'Importer', (['[pickle_name, json_name]'], {'read_experiments': '(True)', 'read_reflections': '(True)', 'check_format': '(False)'}), '([pickle_name, json_name], read_experiments=True, read_reflections=\n True, check_format=False)\n', (9316, 9413), False, 'from dials.util.options import Importer, flatten_reflections, flatten_experiments, OptionParser\n'), ((10568, 10609), 'dials.util.options.flatten_experiments', 'flatten_experiments', (['importer.experiments'], {}), '(importer.experiments)\n', (10587, 10609), False, 'from dials.util.options import Importer, flatten_reflections, flatten_experiments, OptionParser\n'), ((10930, 10962), 'os.path.isdir', 'os.path.isdir', (['params.output_dir'], {}), '(params.output_dir)\n', (10943, 10962), False, 'import cctbx, os\n'), ((10986, 11022), 'os.path.basename', 'os.path.basename', (['params.pickle_name'], {}), '(params.pickle_name)\n', (11002, 11022), False, 'import cctbx, os\n'), ((11171, 11208), 'os.path.join', 'os.path.join', (['params.output_dir', 'name'], {}), '(params.output_dir, name)\n', (11183, 11208), False, 'import cctbx, os\n'), ((11270, 11304), 'libtbx.easy_pickle.dump', 'easy_pickle.dump', (['dest_path', 'frame'], {}), '(dest_path, frame)\n', (11286, 11304), False, 'from libtbx import easy_pickle\n'), ((9622, 9663), 'dials.util.options.flatten_reflections', 'flatten_reflections', (['importer.reflections'], {}), '(importer.reflections)\n', (9641, 9663), False, 'from dials.util.options import Importer, flatten_reflections, flatten_experiments, OptionParser\n'), ((9680, 9721), 'dials.util.options.flatten_experiments', 'flatten_experiments', (['importer.experiments'], {}), '(importer.experiments)\n', (9699, 9721), False, 'from dials.util.options import Importer, flatten_reflections, flatten_experiments, OptionParser\n'), ((11232, 11257), 'os.path.isfile', 'os.path.isfile', (['dest_path'], {}), '(dest_path)\n', (11246, 11257), False, 'import cctbx, os\n'), ((5921, 5941), 'dials.array_family.flex.sqrt', 'flex.sqrt', (['variances'], {}), '(variances)\n', (5930, 5941), False, 'from dials.array_family import flex\n'), ((11060, 11086), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (11076, 11086), False, 'import cctbx, os\n')]
|
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
import flask
import functools
import re
import werkzeug.exceptions
from .routing import get_request_arg, request_wants_json
def get_username():
return get_request_arg('username') or \
flask.request.cookies.get('username', None)
def validate_username(username):
"""
Raises a ValueError if the username is invalid
"""
if not username:
raise ValueError('username is required')
if not re.match('[a-z]', username):
raise ValueError('Must start with a lowercase letter')
    if not re.match(r'[a-z0-9\.\-_]+$', username):
raise ValueError('Only lowercase letters, numbers, periods, dashes and underscores allowed')
def requires_login(f=None, redirect=True):
"""
Decorator for views that require the user to be logged in
Keyword arguments:
f -- the function to decorate
redirect -- if True, this function may return a redirect
"""
if f is None:
# optional arguments are handled strangely
return functools.partial(requires_login, redirect=redirect)
@functools.wraps(f)
def decorated(*args, **kwargs):
username = get_username()
if not username:
# Handle missing username
if request_wants_json() or not redirect:
raise werkzeug.exceptions.Unauthorized()
else:
return flask.redirect(flask.url_for('digits.views.login', next=flask.request.path))
try:
# Validate username
validate_username(username)
except ValueError as e:
            raise werkzeug.exceptions.BadRequest('Invalid username - %s' % e)
return f(*args, **kwargs)
return decorated
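# Hypothetical usage sketch (editor's addition): applying the decorator to a Flask
# view. The "app" object and route are placeholders, not part of this module.
#
#   @app.route('/jobs/<job_id>')
#   @requires_login(redirect=False)
#   def show_job(job_id):
#       return flask.jsonify(job=job_id)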
def has_permission(job, action, username=None):
"""
Returns True if username can perform action on job
Arguments:
job -- the Job in question
action -- the action in question
Keyword arguments:
username -- the user in question (defaults to current user)
"""
if job.is_read_only():
return False
if username is None:
username = get_username()
if not username:
return False
if not job.username:
return True
return username == job.username
|
[
"functools.partial",
"re.match",
"flask.request.cookies.get",
"flask.url_for",
"functools.wraps"
] |
[((1125, 1143), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1140, 1143), False, 'import functools\n'), ((270, 313), 'flask.request.cookies.get', 'flask.request.cookies.get', (['"""username"""', 'None'], {}), "('username', None)\n", (295, 313), False, 'import flask\n'), ((497, 524), 're.match', 're.match', (['"""[a-z]"""', 'username'], {}), "('[a-z]', username)\n", (505, 524), False, 'import re\n'), ((600, 639), 're.match', 're.match', (['"""[a-z0-9\\\\.\\\\-_]+$"""', 'username'], {}), "('[a-z0-9\\\\.\\\\-_]+$', username)\n", (608, 639), False, 'import re\n'), ((1066, 1118), 'functools.partial', 'functools.partial', (['requires_login'], {'redirect': 'redirect'}), '(requires_login, redirect=redirect)\n', (1083, 1118), False, 'import functools\n'), ((1443, 1503), 'flask.url_for', 'flask.url_for', (['"""digits.views.login"""'], {'next': 'flask.request.path'}), "('digits.views.login', next=flask.request.path)\n", (1456, 1503), False, 'import flask\n')]
|
import json
import uuid
from collections import defaultdict
import factory
import wagtail_factories
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.serializers.json import DjangoJSONEncoder
from wagtail.core.blocks import RichTextBlock
from wagtail.core.rich_text import RichText
from hypha.apply.stream_forms import blocks as stream_blocks
__all__ = ['BLOCK_FACTORY_DEFINITION', 'FormFieldBlockFactory',
'CharFieldBlockFactory', 'NumberFieldBlockFactory',
'RadioFieldBlockFactory', 'UploadableMediaFactory',
'ImageFieldBlockFactory', 'FileFieldBlockFactory',
'MultiFileFieldBlockFactory']
class AnswerFactory(factory.Factory):
def _create(self, *args, sub_factory=None, **kwargs):
return sub_factory.make_answer(kwargs)
def _build(self, *args, sub_factory=None, **kwargs):
return sub_factory.make_answer(kwargs)
class AddFormFieldsMetaclass(factory.base.FactoryMetaClass):
def __new__(mcs, class_name, bases, attrs):
# Add the form field definitions to allow nested calls
field_factory = attrs.pop('field_factory', None)
if field_factory:
wrapped_factories = {
k: factory.SubFactory(AnswerFactory, sub_factory=v)
for k, v in field_factory.factories.items()
if issubclass(v, FormFieldBlockFactory)
}
attrs.update(wrapped_factories)
return super().__new__(mcs, class_name, bases, attrs)
class FormDataFactory(factory.Factory, metaclass=AddFormFieldsMetaclass):
@classmethod
def _create(self, *args, form_fields={}, for_factory=None, clean=False, **kwargs):
if form_fields and isinstance(form_fields, str):
form_fields = json.loads(form_fields)
form_definition = {
field['type']: field['id']
for field in form_fields
}
else:
form_definition = {
f.block_type: f.id
for f in form_fields or for_factory.Meta.model.form_fields.field.to_python(form_fields)
}
form_data = {}
for name, answer in kwargs.items():
try:
key = form_definition[name]
except KeyError:
# We are not using that field - don't add the submission data
pass
else:
form_data[key] = answer
if clean:
clean_object = for_factory()
clean_object.form_fields = form_fields
clean_object.form_data = form_data
clean_object.save()
form_data = clean_object.form_data.copy()
clean_object.delete()
return form_data
return form_data
@classmethod
def _build(self, *args, **kwargs):
return self._create(*args, **kwargs)
class ParagraphBlockFactory(wagtail_factories.blocks.BlockFactory):
class Meta:
model = RichTextBlock
@classmethod
def _create(cls, model_class, value):
value = RichText(value)
return super()._create(model_class, value)
class FormFieldBlockFactory(wagtail_factories.StructBlockFactory):
default_value = factory.Faker('sentence')
field_label = factory.Faker('sentence')
help_text = factory.LazyAttribute(lambda o: str(o._Resolver__step.builder.factory_meta.model))
class Meta:
model = stream_blocks.FormFieldBlock
@classmethod
def make_answer(cls, params=None):
return cls.default_value.generate(params or {})
@classmethod
def make_form_answer(cls, params=None):
if params:
return params
return cls.make_answer(params or {})
class CharFieldBlockFactory(FormFieldBlockFactory):
default_value = factory.Faker('sentence')
class Meta:
model = stream_blocks.CharFieldBlock
class TextFieldBlockFactory(FormFieldBlockFactory):
default_value = factory.Faker('sentence')
class Meta:
model = stream_blocks.TextFieldBlock
class DateFieldBlockFactory(FormFieldBlockFactory):
default_value = factory.Faker('date_object')
class Meta:
model = stream_blocks.DateFieldBlock
class TimeFieldBlockFactory(FormFieldBlockFactory):
default_value = factory.Faker('time_object')
class Meta:
model = stream_blocks.TimeFieldBlock
class DateTimeFieldBlockFactory(FormFieldBlockFactory):
default_value = factory.Faker('date_time')
class Meta:
model = stream_blocks.DateTimeFieldBlock
@classmethod
def make_form_answer(cls, params=None):
if params:
date_time = params
else:
date_time = super().make_form_answer(params)
return {
'date': str(date_time.date()),
'time': str(date_time.time()),
}
class NumberFieldBlockFactory(FormFieldBlockFactory):
default_value = 100
class Meta:
model = stream_blocks.NumberFieldBlock
@classmethod
def make_answer(cls, params=None):
return cls.default_value
class CheckboxFieldBlockFactory(FormFieldBlockFactory):
choices = ['check_one', 'check_two']
class Meta:
model = stream_blocks.CheckboxFieldBlock
@classmethod
def make_answer(cls, params=None):
return cls.choices[0]
class CheckboxesFieldBlockFactory(FormFieldBlockFactory):
checkboxes = ['check_multiple_one', 'check_multiple_two', 'check_multiple_three']
class Meta:
model = stream_blocks.CheckboxesFieldBlock
@classmethod
def make_answer(cls, params=None):
return cls.checkboxes[0:2]
class RadioFieldBlockFactory(FormFieldBlockFactory):
choices = ['first', 'second']
class Meta:
model = stream_blocks.RadioButtonsFieldBlock
@classmethod
def make_answer(cls, params=None):
return cls.choices[0]
class DropdownFieldBlockFactory(FormFieldBlockFactory):
choices = ['first', 'second']
class Meta:
model = stream_blocks.DropdownFieldBlock
@classmethod
def make_answer(cls, params=None):
return cls.choices[0]
class UploadableMediaFactory(FormFieldBlockFactory):
default_value = factory.django.FileField()
@classmethod
def make_answer(cls, params=None):
params = params or {}
params.setdefault('data', b'this is some content')
if params.get('filename') is None:
params['filename'] = 'example.pdf'
file_name, file = cls.default_value._make_content(params)
return SimpleUploadedFile(file_name, file.read())
class ImageFieldBlockFactory(UploadableMediaFactory):
default_value = factory.django.ImageField()
class Meta:
model = stream_blocks.ImageFieldBlock
class FileFieldBlockFactory(UploadableMediaFactory):
class Meta:
model = stream_blocks.FileFieldBlock
class MultiFileFieldBlockFactory(UploadableMediaFactory):
class Meta:
model = stream_blocks.MultiFileFieldBlock
@classmethod
def make_answer(cls, params=None):
return [UploadableMediaFactory.make_answer() for _ in range(2)]
class StreamFieldUUIDFactory(wagtail_factories.StreamFieldFactory):
def generate(self, step, params):
params = self.build_form(params)
blocks = super().generate(step, params)
ret_val = list()
# Convert to JSON so we can add id before create
for block_name, value in blocks:
block = self.factories[block_name]._meta.model()
value = block.get_prep_value(value)
ret_val.append({'type': block_name, 'value': value, 'id': str(uuid.uuid4())})
return json.dumps(ret_val, cls=DjangoJSONEncoder)
def build_form(self, data):
extras = defaultdict(dict)
exclusions = []
multiples = dict()
for field, value in data.items():
            # we don't care about position
name, attr = field.split('__')
if name == 'exclude':
exclusions.append(attr)
elif name == 'multiple':
multiples[attr] = value
else:
extras[name] = {attr: value}
defined_both = set(exclusions) & set(multiples)
if defined_both:
raise ValueError(
                "Can't exclude and provide multiple at the same time: {}".format(', '.join(defined_both))
)
form_fields = {}
field_count = 0
for field in self.factories:
if field == 'text_markup' or field in exclusions:
pass
else:
for _ in range(multiples.get(field, 1)):
form_fields[f'{field_count}__{field}__'] = ''
field_count += 1
for attr, value in extras[field].items():
form_fields[f'{field_count}__{field}__{attr}'] = value
return form_fields
def form_response(self, fields, field_values=None):
if not field_values:
field_values = {}
data = {
field.id: self.factories[field.block.name].make_form_answer(field_values.get(field.id, {}))
for field in fields
if hasattr(self.factories[field.block.name], 'make_form_answer')
}
return flatten_for_form(data)
BLOCK_FACTORY_DEFINITION = {
'text_markup': ParagraphBlockFactory,
'char': CharFieldBlockFactory,
'text': TextFieldBlockFactory,
'number': NumberFieldBlockFactory,
'checkbox': CheckboxFieldBlockFactory,
'radios': RadioFieldBlockFactory,
'dropdown': DropdownFieldBlockFactory,
'checkboxes': CheckboxesFieldBlockFactory,
'date': DateFieldBlockFactory,
'time': TimeFieldBlockFactory,
'datetime': DateTimeFieldBlockFactory,
'image': ImageFieldBlockFactory,
'file': FileFieldBlockFactory,
'multi_file': MultiFileFieldBlockFactory,
}
FormFieldsBlockFactory = StreamFieldUUIDFactory(BLOCK_FACTORY_DEFINITION)
def flatten_for_form(data, field_name='', number=False):
result = {}
for i, (field, value) in enumerate(data.items()):
if number:
field = f'{field_name}_{i}'
if isinstance(value, dict):
result.update(**flatten_for_form(value, field_name=field, number=True))
else:
result[field] = value
return result
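# Worked example (editor's addition; values are illustrative only):
#   flatten_for_form({'f1': 'answer', 'f2': {'date': '2020-01-01', 'time': '12:00'}})
#   == {'f1': 'answer', 'f2_0': '2020-01-01', 'f2_1': '12:00'}
# Nested dict answers (e.g. the datetime widget's date/time pair) are numbered into
# flat keys; scalar answers pass through unchanged.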
|
[
"factory.django.ImageField",
"wagtail.core.rich_text.RichText",
"factory.Faker",
"uuid.uuid4",
"json.loads",
"factory.django.FileField",
"factory.SubFactory",
"json.dumps",
"collections.defaultdict"
] |
[((3227, 3252), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {}), "('sentence')\n", (3240, 3252), False, 'import factory\n'), ((3271, 3296), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {}), "('sentence')\n", (3284, 3296), False, 'import factory\n'), ((3797, 3822), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {}), "('sentence')\n", (3810, 3822), False, 'import factory\n'), ((3959, 3984), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {}), "('sentence')\n", (3972, 3984), False, 'import factory\n'), ((4121, 4149), 'factory.Faker', 'factory.Faker', (['"""date_object"""'], {}), "('date_object')\n", (4134, 4149), False, 'import factory\n'), ((4286, 4314), 'factory.Faker', 'factory.Faker', (['"""time_object"""'], {}), "('time_object')\n", (4299, 4314), False, 'import factory\n'), ((4455, 4481), 'factory.Faker', 'factory.Faker', (['"""date_time"""'], {}), "('date_time')\n", (4468, 4481), False, 'import factory\n'), ((6202, 6228), 'factory.django.FileField', 'factory.django.FileField', ([], {}), '()\n', (6226, 6228), False, 'import factory\n'), ((6665, 6692), 'factory.django.ImageField', 'factory.django.ImageField', ([], {}), '()\n', (6690, 6692), False, 'import factory\n'), ((3071, 3086), 'wagtail.core.rich_text.RichText', 'RichText', (['value'], {}), '(value)\n', (3079, 3086), False, 'from wagtail.core.rich_text import RichText\n'), ((7661, 7703), 'json.dumps', 'json.dumps', (['ret_val'], {'cls': 'DjangoJSONEncoder'}), '(ret_val, cls=DjangoJSONEncoder)\n', (7671, 7703), False, 'import json\n'), ((7754, 7771), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (7765, 7771), False, 'from collections import defaultdict\n'), ((1776, 1799), 'json.loads', 'json.loads', (['form_fields'], {}), '(form_fields)\n', (1786, 1799), False, 'import json\n'), ((1228, 1276), 'factory.SubFactory', 'factory.SubFactory', (['AnswerFactory'], {'sub_factory': 'v'}), '(AnswerFactory, sub_factory=v)\n', (1246, 1276), False, 'import factory\n'), ((7630, 7642), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7640, 7642), False, 'import uuid\n')]
|
"""The db model for a user."""
from sqlalchemy import Column, func
from sqlalchemy.orm import relationship
from sqlalchemy.types import (
Boolean,
DateTime,
String,
)
from pornhub.db import base
class User(base):
"""The model for a user."""
__tablename__ = "user"
USER = "users"
MODEL = "model"
PORNSTAR = "pornstar"
key = Column(String, primary_key=True)
name = Column(String, unique=True)
user_type = Column(String)
subscribed = Column(Boolean, default=False, nullable=False)
last_scan = Column(DateTime)
created_at = Column(DateTime, server_default=func.now(), nullable=False)
clips = relationship("Clip")
def __init__(self, key, name, user_type):
"""Create a new user."""
self.key = key
self.name = name
self.user_type = user_type
def get_or_create(session, key, name, user_type):
"""Get an existing user or create a new one."""
user = session.query(User).get(key)
if user is None:
user = User(key, name, user_type)
session.add(user)
session.commit()
return user
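# Hypothetical usage sketch (editor's addition). Assumes a session obtained from the
# application's sessionmaker; the key and name values are placeholders.
#
#   user = get_or_create(session, key="ph-key", name="example", user_type=User.MODEL)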
|
[
"sqlalchemy.orm.relationship",
"sqlalchemy.func.now",
"sqlalchemy.Column"
] |
[((365, 397), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (371, 397), False, 'from sqlalchemy import Column, func\n'), ((409, 436), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)'}), '(String, unique=True)\n', (415, 436), False, 'from sqlalchemy import Column, func\n'), ((453, 467), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (459, 467), False, 'from sqlalchemy import Column, func\n'), ((485, 531), 'sqlalchemy.Column', 'Column', (['Boolean'], {'default': '(False)', 'nullable': '(False)'}), '(Boolean, default=False, nullable=False)\n', (491, 531), False, 'from sqlalchemy import Column, func\n'), ((549, 565), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (555, 565), False, 'from sqlalchemy import Column, func\n'), ((656, 676), 'sqlalchemy.orm.relationship', 'relationship', (['"""Clip"""'], {}), "('Clip')\n", (668, 676), False, 'from sqlalchemy.orm import relationship\n'), ((615, 625), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (623, 625), False, 'from sqlalchemy import Column, func\n')]
|
# Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
'''
batch and commandline utilities
'''
from __future__ import print_function
import gc
import os
import ssl
import sys
import site
import shlex
import logging
import warnings
import argparse
import platform
import resource
import subprocess
import time
if sys.version_info.major < 3 or sys.version_info.minor < 5:
warnings.warn('old python')
#pylint: disable=wrong-import-position
from pathlib import Path
try:
import numpy
except ImportError:
numpy = None
try:
from pynvml.smi import nvidia_smi
except ImportError:
nvidia_smi = None
CODE_RESET = '\033[0m'
CODE_BLACK = '\033[1;30m'
CODE_RED = '\033[1;31m'
CODE_GREEN = '\033[1;32m'
CODE_YELLOW = '\033[1;33m'
CODE_BLUE = '\033[1;34m'
CODE_MAGENTA = '\033[1;35m'
CODE_CYAN = '\033[1;36m'
CODE_WHITE = '\033[1;37m'
RUN_CMD_ALWAYS = 'RUN_CMD_ALWAYS'
RUN_CMD_CONFIRM = 'RUN_CMD_USER_CONFIRMATION'
RUN_CMD_NEVER = 'RUN_CMD_NEVER'
USER_CONFIRM_ALWAYS = False
def confirm(run_mode, cmd_str):
'optionally ask user for confirmation with info about a cmd about to be run'
#pylint: disable=global-statement
global USER_CONFIRM_ALWAYS
if run_mode == RUN_CMD_NEVER:
return False
if not USER_CONFIRM_ALWAYS and run_mode == RUN_CMD_CONFIRM:
c = input('run command [%s] ? (N)o / (Y)es / (A)lways:' % (cmd_str))
if not isinstance(c, str) or c == '':
return False
c = c.lower()
if c == 'n':
return False
if c == 'a':
USER_CONFIRM_ALWAYS = True
return True
def color_text(text, color, fmt=None):
'if color control string is not None, wrap like so: color|text|color_rest'
if text is None:
return None
if fmt is not None:
text = fmt % (text)
if color is None:
return str(text)
return color + str(text) + CODE_RESET
def black_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_BLACK, **kwargs)
def red_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_RED, **kwargs)
def green_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_GREEN, **kwargs)
def yellow_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_YELLOW, **kwargs)
def blue_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_BLUE, **kwargs)
def magenta_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_MAGENTA, **kwargs)
def cyan_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_CYAN, **kwargs)
def white_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_WHITE, **kwargs)
def color_code_stdout(color_code):
'write color code to stdout and flush'
if color_code is not None:
sys.stdout.write(color_code)
sys.stdout.flush()
def reset_color_code_stdout(color):
'reset stdout to non normal color code mode flush'
if color:
sys.stdout.write(CODE_RESET)
sys.stdout.flush()
def execute(
cmd,
run_mode=RUN_CMD_ALWAYS,
cwd=None,
output=False,
color=True,
log_level=logging.DEBUG,
env=None,
):
'''
execute a subprocess with
logging of commandline before output
optional color coded output
optional current working directory override
a run mode that can disable execution, ask for user confirmation, or execute
'''
nottext = color_text('not', CODE_RED) if color else 'not'
cmd = [str(x) for x in cmd]
cmd_str = color_text(subprocess.list2cmdline(cmd), CODE_GREEN)
go = confirm(run_mode, cmd_str)
verb = 'running' if go else nottext + ' running'
highlight_color = (CODE_YELLOW if go else CODE_GREEN) if color else None
result_color = CODE_CYAN if color else None
cwd_str = color_text(cwd, highlight_color)
cmd_str = color_text(subprocess.list2cmdline(cmd), highlight_color)
if cwd is None:
logging.log(log_level, '%s [%s]', verb, cmd_str)
else:
logging.log(log_level, 'from [%s] %s [%s]', cwd_str, verb, cmd_str)
if not go:
return None
if not output:
color_code_stdout(result_color)
try:
subprocess.check_call(cmd, cwd=cwd, env=env)
finally:
reset_color_code_stdout(color)
else:
return subprocess.check_output(cmd, cwd=cwd, env=env)
return None
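# Hypothetical usage sketch (editor's addition). The command is a placeholder; with
# output=True the captured bytes are returned instead of color-streamed to stdout.
#
#   listing = execute(['ls', '-la'], run_mode=RUN_CMD_CONFIRM, output=True)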
def execute_multiline_str(**kwargs):
'wraps execute by converting multiline "cmd" kwarg to strings'
cmd = kwargs.pop('cmd')
if cmd is None:
raise ValueError('expected multiline string keyword arg "cmd"')
lines = cmd.split('\n')
lines = [x.strip() for x in lines]
lines = [x for x in lines if not x.startswith('#')]
cmd = ' '.join(lines)
cmd = cmd.split(' ')
cmd = [x for x in cmd if x] # remove empty argv
execute(cmd, **kwargs)
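# Hypothetical usage sketch (editor's addition): comment lines ('#') and empty tokens
# are stripped before execution, so the command below reduces to
# ['tar', '-czvf', 'backup.tar.gz', 'data/'].
#
#   execute_multiline_str(cmd='''
#       tar
#       # verbose create with gzip
#       -czvf backup.tar.gz
#       data/
#   ''', run_mode=RUN_CMD_NEVER)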
def execute_callback(
message,
callback,
args,
kwargs,
run_mode=RUN_CMD_ALWAYS,
color=True,
log_arguments=True,
log_time=False,
log_level=logging.DEBUG,
):
'''
execute a python function with
a run mode that can disable execution, ask for user confirmation, or execute
'''
nottext = color_text('not', CODE_RED) if color else 'not'
go = confirm(run_mode, message)
verb = 'calling' if go else nottext + ' calling'
if log_arguments:
logging.log(
log_level,
'%s [%s.%s] with args %s and kwargs %s',
verb,
callback.__module__,
callback.__name__,
args,
kwargs,
)
else:
logging.log(
log_level,
'%s [%s.%s] to %s',
verb,
callback.__module__,
callback.__name__,
message,
)
if not go:
return None
if log_time:
with T(message + ' total'):
result = callback(*args, **kwargs)
else:
result = callback(*args, **kwargs)
return result
def set_log_level(level):
'set the global logging level'
logging.getLogger('').setLevel(level)
def setup_logging(
level=logging.DEBUG,
setup_matplotlib=True,
setup_lambda=False,
numpy_precision=3,
numpy_suppress=True,
numpy_linewidth=75,
stream=None,
color=True,
force_warning_modules=(
'boto3',
'botocore',
's3transfer',
'urllib3',
'websockets',
),
):
'setup reasonable logging defaults'
if setup_lambda:
color = False
logger = logging.getLogger()
logger.setLevel(level)
logger.propagate = False
for modname in force_warning_modules:
modlogger = logging.getLogger(modname)
modlogger.setLevel(logging.WARNING)
elif level == logging.INFO:
logging.basicConfig(level=logging.INFO, format='%(message)s', stream=stream)
else:
logging.basicConfig(
level=level,
format='%(levelname)s %(message)s',
stream=stream,
)
logger = logging.getLogger()
logger.propagate = False
for modname in force_warning_modules:
modlogger = logging.getLogger(modname)
modlogger.setLevel(logging.WARNING)
if setup_matplotlib:
# force matplotlib to never show debug info!
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
for num, name, color_code in [
(logging.CRITICAL, 'BAD ', CODE_RED),
(logging.ERROR, 'err ', CODE_RED),
(logging.WARNING, 'warn', CODE_WHITE),
(logging.INFO, 'info', CODE_BLACK),
(logging.DEBUG, 'dbg ', CODE_BLACK),
]:
#name = logging.getLevelName(num).lower().ljust(8)
resolved_name = name
if color:
resolved_name = color_text(name, color_code)
logging.addLevelName(num, resolved_name)
if numpy is not None:
numpy.set_printoptions(
precision=numpy_precision,
suppress=numpy_suppress,
linewidth=numpy_linewidth,
)
def setup_patching(setup_ssl=True):
'''
follow this guide to make sure models can be downloaded without error:
https://github.com/fchollet/deep-learning-models/issues/33#issuecomment-397257502
'''
if setup_ssl:
#pylint: disable=W0212
ssl._create_default_https_context = ssl._create_unverified_context
def setup_tensorflow():
'make tensorflow silent unless TF_CPP_MIN_LOG_LEVEL envvar found'
tf_log_key = 'TF_CPP_MIN_LOG_LEVEL'
tf_logger = logging.getLogger('tensorflow')
if tf_log_key not in os.environ:
os.environ[tf_log_key] = '3'
tf_logger.setLevel(logging.INFO)
else:
tf_logger.setLevel(logging.DEBUG)
# redirect stdout/stderr, import keras, then restore stdout/stderr
# avoids keras cluttering up the console during version or other query cmds
save_stdout, save_stderr = sys.stdout, sys.stderr
try:
sys.stdout = open(os.devnull, 'w')
sys.stderr = sys.stdout
#pylint: disable=unused-import,import-outside-toplevel
import tensorflow.keras
finally:
sys.stdout, sys.stderr = save_stdout, save_stderr
class HELP_FMT(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawTextHelpFormatter,
):
'''
composite class to provide both default args in help and raw help strings
goes to crazy lengths to split up lists of choices...
'''
def format_help(self):
tmp = argparse.HelpFormatter.format_help(self)
result = []
for line in tmp.split('\n'):
if '[' in line and '{' in line and line.count(',') > 5:
test = line
total_whitespace = line.count(' ')
test = test.strip()
leading = total_whitespace - test.count(' ')
if test[0] == '[' and test[-1] == ']':
test = test[1:-1]
                # use shlex to handle list tokenizing
# by turning lists into strings
test = test.replace('[', '"')
test = test.replace(']', '"')
test = test.replace('{', "'")
test = test.replace('}', "'")
test = test.replace(' ...', '')
parts = shlex.split(test, comments=False)
# remove crazy duplication of the same list
A = parts[-1]
B = parts[-2]
C = "'%s'" % (B)
if A == C:
parts.pop()
norm_line = ' '.join(parts)
indent = ' ' * leading
line = indent + ('\n ' + indent).join(norm_line.split(','))
result.append(line)
return '\n'.join(result)
VERBOSE_MAP = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
def add_verbose_parse_arg(parser):
'add verbosity levels to a parser'
if not getattr(parser, 'vm_build_utils_has_verbose', False):
parser.add_argument(
'-v',
'--verbose',
action='count',
help='verbose level... repeat up to 2 times',
)
parser.vm_build_utils_has_verbose = True
def set_log_level_from_args(args):
'args is a command line parser result - use it to configure logging'
if args.verbose is None:
args.verbose = 0
set_log_level(VERBOSE_MAP[args.verbose])
def add_run_mode_parse_arg(parser):
'add controls to run sub commands / persistent system operations'
if not getattr(parser, 'vm_build_utils_has_run_mode', False):
RUN_MODE_GROUP = parser.add_mutually_exclusive_group()
RUN_MODE_GROUP.add_argument(
'--run-never',
action='store_true',
help='no actions will be taken, only logging will be performed')
RUN_MODE_GROUP.add_argument(
'--run-confirm',
action='store_true',
help='actions will be performed with user confirmation')
RUN_MODE_GROUP.add_argument(
'--run-always',
action='store_true',
help='actions will be performed always [ default ]',
)
parser.vm_build_utils_has_run_mode = True
def setup_run_mode(args):
'args is a command line parser result - use it to configure the run mode'
if not args.run_confirm and not args.run_never:
args.run_always = True
result = None
if args.run_never:
result = RUN_CMD_NEVER
elif args.run_confirm:
result = RUN_CMD_CONFIRM
elif args.run_always:
result = RUN_CMD_ALWAYS
else:
raise ValueError('one of [run-never,run-confirm,run-always] must be True')
return result
def add_file_logging_parse_arg(parser):
'add file logging output + verbosity to a parser'
if not getattr(parser, 'vm_build_utils_has_file_log', False):
parser.add_argument(
'-fv',
'--file-verbose',
action='count',
help='verbose level for --file-log ... repeat up to 2 times',
)
parser.add_argument(
'--file-log',
default=None,
type=Path,
help='direct logging stream to this file in addition to stderr',
)
parser.vm_build_utils_has_file_log = True
def set_file_logging_from_args(args):
'args is a command line parser result - use it to configure file logging'
if args.file_log is None:
return
if args.file_verbose is None:
args.file_verbose = 0
level = VERBOSE_MAP[args.file_verbose]
file_log = logging.FileHandler(args.file_log, mode='w')
file_log.setLevel(level)
file_log.setFormatter(
logging.Formatter('%(levelname)s %(message)s', None, '%'))
logging.getLogger('').addHandler(file_log)
def finish_args(parser):
'add common arguments to a parser if not already added: verbose, run_mode'
add_verbose_parse_arg(parser)
add_file_logging_parse_arg(parser)
add_run_mode_parse_arg(parser)
return parser
def log_parsed_args(args_namespace, level=logging.DEBUG):
    'log each element in an argparser namespace'
items = dict(vars(args_namespace)).items()
key_whitespace_len = -1
for key, _ in items:
key_whitespace_len = max(key_whitespace_len, len(key))
key_whitespace_len += 2
newline_whitespace_len = key_whitespace_len + 6
newline_whitespace = ''.join(['\n'] + [' '] * newline_whitespace_len)
for key, value in items:
tmp = str(value)
if isinstance(value, list):
tmp = newline_whitespace.join([str(c) for c in value])
logging.log(level, '%s[%s]', key.rjust(key_whitespace_len), tmp)
def parse_args(parser, args=None, parse_known_args=False, return_unknown=False):
'parse, handle logging and run mode arguments'
finish_args(parser)
    unknown = None  # only populated when parse_known_args is True
    if parse_known_args:
        args, unknown = parser.parse_known_args(args=args)
    else:
        args = parser.parse_args(args=args)
set_log_level_from_args(args)
set_file_logging_from_args(args)
args.run_mode = setup_run_mode(args)
if return_unknown:
return args, unknown
return args
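# Hypothetical usage sketch (editor's addition): a minimal parser picks up the
# --verbose / --file-log / --run-* options via finish_args() inside parse_args().
#
#   parser = argparse.ArgumentParser(formatter_class=HELP_FMT)
#   parser.add_argument('--name', default='demo')
#   args = parse_args(parser, args=['-vv', '--run-never'])
#   # args.run_mode == RUN_CMD_NEVER and the log level is DEBUG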
KB = float(10**3)
GB = float(10**9) # 1000000000
MiB = float(2**20) # 1048576
GiB = float(2**30) # 1073741824
def current_platform_is_darwin():
'returns true if current system is darwin, false on linux or windows'
return platform.system().lower() == 'darwin'
def current_platform_is_linux():
'returns true if current system is linux, false on darwin or windows'
return platform.system().lower() == 'linux'
def get_rss():
'get high water mark resident memory usage'
rss_bytes = 0
maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if current_platform_is_darwin():
rss_bytes = maxrss
else:
rss_bytes = maxrss * KB
rss_gb = rss_bytes / GB
return rss_gb
def get_rss_and_total():
'resident and total physical memory in GB'
try:
total = (os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')) / GB
except ValueError:
total = -1
return (get_rss(), total)
def get_gpu_used_and_total():
'total physical memory in GB'
if nvidia_smi is None:
return 0, 0
nvsmi = nvidia_smi.getInstance()
qresult = nvsmi.DeviceQuery('memory.used, memory.total')
mem = qresult['gpu'][0]['fb_memory_usage']
assert mem['unit'] == 'MiB'
used = (mem['used'] * MiB) / GiB
total = (mem['total'] * MiB) / GiB
return used, total
class T(object):
'simple timer'
def __init__(self, name, level=logging.INFO):
self.name = name
self.start = self.end = self.interval = 0
self.level = level
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
gc.collect()
rss, total = get_rss_and_total()
gpu_used, gpu_total = get_gpu_used_and_total()
logging.log(
self.level,
'%s [%s sec] [%s/%s GB] [%s/%s GB gpu]',
self.name.rjust(40),
yellow_text('% 7.2f' % (self.interval)),
yellow_text('% 6.2f' % (rss)),
yellow_text('%02.2f' % (total)),
yellow_text('% 6.2f' % (gpu_used)),
yellow_text('%02.2f' % (gpu_total)),
)
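# Hypothetical usage sketch (editor's addition): time a block and log the elapsed
# seconds plus RSS/GPU memory when it exits.
#
#   with T('build squares'):
#       squares = [x * x for x in range(10 ** 6)]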
def format_size(byte_size):
'convert size in bytes to a human readable string'
if byte_size > 1000 * 1000:
return '%.1fMB' % (byte_size / 1000.0 / 1000)
if byte_size > 10 * 1000:
return '%ikB' % (byte_size / 1000)
if byte_size > 1000:
return '%.1fkB' % (byte_size / 1000.0)
return '%ibytes' % byte_size
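# Worked examples (editor's addition):
#   format_size(2_500_000) -> '2.5MB'
#   format_size(50_000)    -> '50kB'
#   format_size(1_500)     -> '1.5kB'
#   format_size(999)       -> '999bytes'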
def remove_prefix(value, prefix):
'remove string prefix'
if value.startswith(prefix):
return value[len(prefix):]
return value
def get_sitepackages_path():
'get path to python site-packages directory'
try:
return site.getsitepackages()[0]
except AttributeError:
for path in sys.path:
if 'local' in path:
continue
if 'site-packages' in path:
return path
raise ValueError('no site packages found')
def executable_path():
    'get a path to the python interpreter that can be tweaked via env var'
result = sys.executable
override = os.environ.get('VM_EXECUTABLE')
if override is not None:
result = override
result = str(result)
result = remove_prefix(result, '/System/Volumes/Data')
return Path(result)
def project_path_components():
'validate and return paths related to /comet/PROJECT/env/DEVREL/bin/python'
template_path = '"/comet/PROJECT/env/DEVREL/bin/python"'
err_msg = 'python path must be of the form %s' % (template_path)
python_exec = executable_path()
assert len(python_exec.parts) >= 6, err_msg
user_parts = python_exec.parts[:-4]
_env, dev_rel, _bin, _python = python_exec.parts[-4:]
assert (_env, _bin, _python) == ('env', 'bin', 'python'), err_msg
return user_parts, dev_rel
def project_path():
'abs path relative to the directory containing env/container/bin/python'
user_parts, _ = project_path_components()
return Path().joinpath(*user_parts)
def env_root(rel_path=''):
'abs path relative to the directory containing bin/python'
python_exec = executable_path()
bin_path = python_exec.parent.resolve()
env = bin_path.parent
if rel_path:
result = env / rel_path
else:
result = env
return result
|
[
"sys.stdout.write",
"logging.addLevelName",
"subprocess.list2cmdline",
"logging.Formatter",
"gc.collect",
"pathlib.Path",
"sys.stdout.flush",
"resource.getrusage",
"subprocess.check_call",
"numpy.set_printoptions",
"logging.FileHandler",
"logging.log",
"shlex.split",
"site.getsitepackages",
"subprocess.check_output",
"os.sysconf",
"time.perf_counter",
"platform.system",
"pynvml.smi.nvidia_smi.getInstance",
"logging.basicConfig",
"os.environ.get",
"warnings.warn",
"logging.getLogger",
"argparse.HelpFormatter.format_help"
] |
[((380, 407), 'warnings.warn', 'warnings.warn', (['"""old python"""'], {}), "('old python')\n", (393, 407), False, 'import warnings\n'), ((6927, 6946), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6944, 6946), False, 'import logging\n'), ((8334, 8365), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (8351, 8365), False, 'import logging\n'), ((12886, 12930), 'logging.FileHandler', 'logging.FileHandler', (['args.file_log'], {'mode': '"""w"""'}), "(args.file_log, mode='w')\n", (12905, 12930), False, 'import logging\n'), ((15427, 15451), 'pynvml.smi.nvidia_smi.getInstance', 'nvidia_smi.getInstance', ([], {}), '()\n', (15449, 15451), False, 'from pynvml.smi import nvidia_smi\n'), ((17400, 17431), 'os.environ.get', 'os.environ.get', (['"""VM_EXECUTABLE"""'], {}), "('VM_EXECUTABLE')\n", (17414, 17431), False, 'import os\n'), ((17570, 17582), 'pathlib.Path', 'Path', (['result'], {}), '(result)\n', (17574, 17582), False, 'from pathlib import Path\n'), ((2945, 2973), 'sys.stdout.write', 'sys.stdout.write', (['color_code'], {}), '(color_code)\n', (2961, 2973), False, 'import sys\n'), ((2978, 2996), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2994, 2996), False, 'import sys\n'), ((3104, 3132), 'sys.stdout.write', 'sys.stdout.write', (['CODE_RESET'], {}), '(CODE_RESET)\n', (3120, 3132), False, 'import sys\n'), ((3137, 3155), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3153, 3155), False, 'import sys\n'), ((3661, 3689), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['cmd'], {}), '(cmd)\n', (3684, 3689), False, 'import subprocess\n'), ((3980, 4008), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['cmd'], {}), '(cmd)\n', (4003, 4008), False, 'import subprocess\n'), ((4050, 4098), 'logging.log', 'logging.log', (['log_level', '"""%s [%s]"""', 'verb', 'cmd_str'], {}), "(log_level, '%s [%s]', verb, cmd_str)\n", (4061, 4098), False, 'import logging\n'), ((4111, 4178), 'logging.log', 'logging.log', (['log_level', '"""from [%s] %s [%s]"""', 'cwd_str', 'verb', 'cmd_str'], {}), "(log_level, 'from [%s] %s [%s]', cwd_str, verb, cmd_str)\n", (4122, 4178), False, 'import logging\n'), ((4393, 4439), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'cwd': 'cwd', 'env': 'env'}), '(cmd, cwd=cwd, env=env)\n', (4416, 4439), False, 'import subprocess\n'), ((5403, 5530), 'logging.log', 'logging.log', (['log_level', '"""%s [%s.%s] with args %s and kwargs %s"""', 'verb', 'callback.__module__', 'callback.__name__', 'args', 'kwargs'], {}), "(log_level, '%s [%s.%s] with args %s and kwargs %s', verb,\n callback.__module__, callback.__name__, args, kwargs)\n", (5414, 5530), False, 'import logging\n'), ((5602, 5703), 'logging.log', 'logging.log', (['log_level', '"""%s [%s.%s] to %s"""', 'verb', 'callback.__module__', 'callback.__name__', 'message'], {}), "(log_level, '%s [%s.%s] to %s', verb, callback.__module__,\n callback.__name__, message)\n", (5613, 5703), False, 'import logging\n'), ((6470, 6489), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6487, 6489), False, 'import logging\n'), ((7031, 7057), 'logging.getLogger', 'logging.getLogger', (['modname'], {}), '(modname)\n', (7048, 7057), False, 'import logging\n'), ((7188, 7219), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (7205, 7219), False, 'import logging\n'), ((7664, 7704), 'logging.addLevelName', 'logging.addLevelName', (['num', 'resolved_name'], {}), '(num, resolved_name)\n', (7684, 7704), 
False, 'import logging\n'), ((7734, 7839), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'precision': 'numpy_precision', 'suppress': 'numpy_suppress', 'linewidth': 'numpy_linewidth'}), '(precision=numpy_precision, suppress=numpy_suppress,\n linewidth=numpy_linewidth)\n', (7756, 7839), False, 'import numpy\n'), ((9223, 9263), 'argparse.HelpFormatter.format_help', 'argparse.HelpFormatter.format_help', (['self'], {}), '(self)\n', (9257, 9263), False, 'import argparse\n'), ((12989, 13046), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s %(message)s"""', 'None', '"""%"""'], {}), "('%(levelname)s %(message)s', None, '%')\n", (13006, 13046), False, 'import logging\n'), ((14902, 14942), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (14920, 14942), False, 'import resource\n'), ((15895, 15914), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15912, 15914), False, 'import time\n'), ((15976, 15995), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15993, 15995), False, 'import time\n'), ((16042, 16054), 'gc.collect', 'gc.collect', ([], {}), '()\n', (16052, 16054), False, 'import gc\n'), ((4278, 4322), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'cwd': 'cwd', 'env': 'env'}), '(cmd, cwd=cwd, env=env)\n', (4299, 4322), False, 'import subprocess\n'), ((6001, 6022), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (6018, 6022), False, 'import logging\n'), ((6607, 6633), 'logging.getLogger', 'logging.getLogger', (['modname'], {}), '(modname)\n', (6624, 6633), False, 'import logging\n'), ((6711, 6787), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(message)s"""', 'stream': 'stream'}), "(level=logging.INFO, format='%(message)s', stream=stream)\n", (6730, 6787), False, 'import logging\n'), ((6800, 6888), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level', 'format': '"""%(levelname)s %(message)s"""', 'stream': 'stream'}), "(level=level, format='%(levelname)s %(message)s', stream\n =stream)\n", (6819, 6888), False, 'import logging\n'), ((13050, 13071), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (13067, 13071), False, 'import logging\n'), ((17043, 17065), 'site.getsitepackages', 'site.getsitepackages', ([], {}), '()\n', (17063, 17065), False, 'import site\n'), ((18244, 18250), 'pathlib.Path', 'Path', ([], {}), '()\n', (18248, 18250), False, 'from pathlib import Path\n'), ((9891, 9924), 'shlex.split', 'shlex.split', (['test'], {'comments': '(False)'}), '(test, comments=False)\n', (9902, 9924), False, 'import shlex\n'), ((14621, 14638), 'platform.system', 'platform.system', ([], {}), '()\n', (14636, 14638), False, 'import platform\n'), ((14775, 14792), 'platform.system', 'platform.system', ([], {}), '()\n', (14790, 14792), False, 'import platform\n'), ((15183, 15209), 'os.sysconf', 'os.sysconf', (['"""SC_PAGE_SIZE"""'], {}), "('SC_PAGE_SIZE')\n", (15193, 15209), False, 'import os\n'), ((15212, 15239), 'os.sysconf', 'os.sysconf', (['"""SC_PHYS_PAGES"""'], {}), "('SC_PHYS_PAGES')\n", (15222, 15239), False, 'import os\n')]
|
"""Update old version URIs to be the same as the latest version
Revision ID: 2018_05_17_unify_uris
Revises: 2018_05_04_coalesce_contacts
Create Date: 2018-05-17 11:50:00.000000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "2018_05_17_unify_uris"
down_revision = "2018_05_04_coalesce_contacts"
branch_labels = None
depends_on = None
def upgrade():
# Find pages where old versions have a different URI from the latest version and update old URIs to match the new
op.get_bind()
op.execute(
"""
UPDATE page
SET uri = subquery.uri
FROM (SELECT guid, uri
FROM page
WHERE external_edit_summary = 'Technical change: Updated url to match page title.'
) AS subquery
WHERE page.guid = subquery.guid
AND page.uri != subquery.uri;
"""
)
def downgrade():
# No way to undo this!
pass
|
[
"alembic.op.execute",
"alembic.op.get_bind"
] |
[((534, 547), 'alembic.op.get_bind', 'op.get_bind', ([], {}), '()\n', (545, 547), False, 'from alembic import op\n'), ((552, 888), 'alembic.op.execute', 'op.execute', (['"""\n UPDATE page\n SET uri = subquery.uri\n FROM (SELECT guid, uri\n FROM page\n WHERE external_edit_summary = \'Technical change: Updated url to match page title.\'\n ) AS subquery\n WHERE page.guid = subquery.guid\n AND page.uri != subquery.uri;\n """'], {}), '(\n """\n UPDATE page\n SET uri = subquery.uri\n FROM (SELECT guid, uri\n FROM page\n WHERE external_edit_summary = \'Technical change: Updated url to match page title.\'\n ) AS subquery\n WHERE page.guid = subquery.guid\n AND page.uri != subquery.uri;\n """\n )\n', (562, 888), False, 'from alembic import op\n')]
|
"""
Models
"""
from typing import Iterator, List, Optional, Set
import mongoengine as me
from sni.esi.scope import EsiScope
import sni.utils as utils
class Alliance(me.Document):
"""
EVE alliance database model.
"""
SCHEMA_VERSION = 3
"""Latest schema version for this collection"""
_version = me.IntField(default=SCHEMA_VERSION)
"""Schema version of this document"""
alliance_id = me.IntField(unique=True)
"""Alliance id (according to the ESI)"""
alliance_name = me.StringField(required=True)
"""Self explanatory"""
authorized_to_login = me.BooleanField(default=None, null=True)
"""
Wether the members of this alliance are allowed to login to SNI. See
:meth:`sni.uac.uac.is_authorized_to_login`.
"""
executor_corporation_id = me.IntField(required=True)
"""Id of the executor of this alliance"""
mandatory_esi_scopes = me.ListField(
me.StringField(choices=EsiScope), default=list
)
"""Mandatory ESI scopes for the members of this alliance"""
ticker = me.StringField(required=True)
"""Ticker of the alliance"""
updated_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the last update of this document"""
meta = {"indexes": ["alliance_id", "alliance_name",]}
def __repr__(self) -> str:
return f"<Alliance: {self.alliance_id} {self.alliance_name}>"
@property
def ceo(self) -> "User":
"""
Returns the ceo of the executor corporation.
"""
return self.executor.ceo
def coalitions(self) -> List["Coalition"]:
"""
        Returns the list of coalitions this alliance is part of.
Todo:
Paginate the results
"""
return list(Coalition.objects(member_alliances=self))
def cumulated_mandatory_esi_scopes(self) -> Set[EsiScope]:
"""
Returns the list (although it really is a set) of all the ESI scopes
required by this alliance, and all the coalitions this alliance belongs
to.
"""
coalition_scopes = []
for coalition in self.coalitions():
coalition_scopes += coalition.mandatory_esi_scopes
return set(self.mandatory_esi_scopes + coalition_scopes)
@property
def executor(self) -> "Corporation":
"""
Returns the alliance's executor corporation as a
:class:`sni.user.Corporation` object.
"""
return Corporation.objects.get(
corporation_id=self.executor_corporation_id
)
def users(self) -> List["User"]:
"""
Return the member list of this alliance, according to the database.
This may not be up to date with the ESI.
"""
return list(self.user_iterator())
def user_iterator(self) -> Iterator["User"]:
"""
Returns an iterator over all the members of this alliance, according to
the database. This may not be up to date with the ESI.
"""
result = User.objects.aggregate(
[
{
"$lookup": {
"as": "corporation_data",
"foreignField": "_id",
"from": "corporation",
"localField": "corporation",
},
},
{"$unwind": "$corporation_data"},
{
"$lookup": {
"as": "alliance_data",
"foreignField": "_id",
"from": "alliance",
"localField": "corporation_data.alliance",
},
},
{"$unwind": "$alliance_data"},
{
"$match": {
"clearance_level": {"$gte": 0},
"alliance_data.alliance_id": self.alliance_id,
}
},
{
"$set": {
"character_name_lower": {"$toLower": "$character_name"}
}
},
{"$sort": {"character_name_lower": 1}},
{"$project": {"_id": True}},
]
)
for item in result:
yield User.objects(pk=item["_id"]).get()
class Corporation(me.Document):
"""
EVE corporation database model.
"""
SCHEMA_VERSION = 3
"""Latest schema version for this collection"""
_version = me.IntField(default=SCHEMA_VERSION)
"""Schema version of this document"""
authorized_to_login = me.BooleanField(default=None, null=True)
"""
    Whether the members of this corporation are allowed to login to SNI. See
:meth:`sni.uac.uac.is_authorized_to_login`.
"""
alliance = me.ReferenceField(
Alliance, default=None, null=True, required=False
)
"""Optional reference to the alliance this corporation belongs to"""
ceo_character_id = me.IntField(required=True)
"""Character id (according to the ESI) of the CEO. See also :meth:`sni.user.models.Corporation.ceo`."""
corporation_id = me.IntField(unique=True)
"""Id of the corporation (according to the ESI)"""
corporation_name = me.StringField(required=True)
"""Name of the corporation"""
mandatory_esi_scopes = me.ListField(
me.StringField(choices=EsiScope), default=list
)
"""Mandatory ESI scopes for the members of this corporation"""
ticker = me.StringField(required=True)
"""Ticker of the corporation"""
updated_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the last update of this document"""
meta = {"indexes": ["corporation_id", "corporation_name",]}
def __repr__(self) -> str:
return f"<Corporation: {self.corporation_id} {self.corporation_name}>"
@property
def ceo(self) -> "User":
"""
Returns the corporation's ceo as a :class:`sni.user` object.
"""
return User.objects.get(character_id=self.ceo_character_id)
def coalitions(self) -> List["Coalition"]:
"""
Returns the list of coalition this user is part of.
"""
result: Set[Coalition] = set(
Coalition.objects(member_corporations=self)
)
if self.alliance is not None:
result.update(self.alliance.coalitions())
return list(result)
def cumulated_mandatory_esi_scopes(self) -> Set[EsiScope]:
"""
Returns the list (although it really is a set) of all the ESI scopes
required by this corporation, alliance, and all the coalitions this
corporation is part of.
"""
alliance_scopes = (
self.alliance.mandatory_esi_scopes
if self.alliance is not None
else []
)
coalition_scopes = []
for coalition in self.coalitions():
coalition_scopes += coalition.mandatory_esi_scopes
return set(
self.mandatory_esi_scopes + alliance_scopes + coalition_scopes
)
def guests(self) -> List["User"]:
"""
Return the guest list of this corporation, according to the database. A
guest is a member with a clearance level of -1.
"""
return list(self.guest_iterator())
def guest_iterator(self) -> Iterator["User"]:
"""
Returns an iterator over all the guests of this corporation, according
to the database. A guest is a member with a clearance level of -1.
"""
result = User.objects.aggregate(
[
{
"$lookup": {
"as": "corporation_data",
"foreignField": "_id",
"from": "corporation",
"localField": "corporation",
},
},
{"$unwind": "$corporation_data"},
{
"$match": {
"clearance_level": {"$lt": 0},
"corporation_data.corporation_id": self.corporation_id,
}
},
{
"$set": {
"character_name_lower": {"$toLower": "$character_name"}
}
},
{"$sort": {"character_name_lower": 1}},
{"$project": {"_id": True}},
]
)
for item in result:
yield User.objects(pk=item["_id"]).get()
def users(self) -> List["User"]:
"""
Return the member list of this corporation, according to the database.
This may not be up to date with the ESI.
"""
return list(self.user_iterator())
def user_iterator(self) -> Iterator["User"]:
"""
Returns an iterator over all the members of this corporation, according
to the database. This may not be up to date with the ESI.
"""
result = User.objects.aggregate(
[
{
"$lookup": {
"as": "corporation_data",
"foreignField": "_id",
"from": "corporation",
"localField": "corporation",
},
},
{"$unwind": "$corporation_data"},
{
"$match": {
"clearance_level": {"$gte": 0},
"corporation_data.corporation_id": self.corporation_id,
}
},
{
"$set": {
"character_name_lower": {"$toLower": "$character_name"}
}
},
{"$sort": {"character_name_lower": 1}},
{"$project": {"_id": True}},
]
)
for item in result:
yield User.objects(pk=item["_id"]).get()
class Coalition(me.Document):
"""
EVE coalition. Coalitions are not formally represented in EVE, so they have
to be created manually. An alliance can be part of multiple coalitions.
"""
SCHEMA_VERSION = 6
"""Latest schema version for this collection"""
_version = me.IntField(default=SCHEMA_VERSION)
"""Schema version of this document"""
authorized_to_login = me.BooleanField(default=True, null=True)
"""
    Whether the members of this coalition are allowed to login to SNI. See
:meth:`sni.uac.uac.is_authorized_to_login`.
"""
created_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the creation of this document"""
mandatory_esi_scopes = me.ListField(
me.StringField(choices=EsiScope), default=list
)
"""Mandatory ESI scopes for the members of this coalition"""
member_alliances = me.ListField(me.ReferenceField(Alliance), default=list)
"""
List of references to the member alliances (NOT users, for that, see
:meth:`sni.user.models.Coalition.users` and
:meth:`sni.user.models.Coalition.user_iterator`.
"""
member_corporations = me.ListField(
me.ReferenceField(Corporation), default=list
)
"""
Corporations that are direct members of this coalition (i.e. not through an
alliance)
"""
coalition_name = me.StringField(required=True, unique=True)
"""Name of the coalition"""
ticker = me.StringField(default=str)
"""Ticker of the coalition"""
updated_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the last update of this document"""
meta = {"indexes": ["coalition_name",]}
def __repr__(self) -> str:
return f"<Coalition: {self.coalition_name}>"
def users(self) -> List["User"]:
"""
Return the member list of this coalition.
"""
return list(self.user_iterator())
def user_iterator(self) -> Iterator["User"]:
"""
Returns an iterator over all the members of this coalition.
"""
alliance_ids = [alliance.pk for alliance in self.member_alliances]
corporation_ids = [
corporation.pk for corporation in self.member_corporations
]
result = User.objects.aggregate(
[
{
"$lookup": {
"as": "corporation_data",
"foreignField": "_id",
"from": "corporation",
"localField": "corporation",
},
},
{"$unwind": "$corporation_data"},
{
"$lookup": {
"as": "alliance_data",
"foreignField": "_id",
"from": "alliance",
"localField": "corporation_data.alliance",
},
},
{"$unwind": "$alliance_data"},
{
"$match": {
"$or": [
{"alliance_data._id": {"$in": alliance_ids}},
{"corporation_data._id": {"$in": corporation_ids}},
],
"clearance_level": {"$gte": 0},
}
},
{
"$set": {
"character_name_lower": {"$toLower": "$character_name"}
}
},
{"$sort": {"character_name_lower": 1}},
{"$project": {"_id": True}},
]
)
for item in result:
yield User.objects(pk=item["_id"]).get()
class Group(me.Document):
"""
Group model. A group is simply a collection of users.
"""
SCHEMA_VERSION = 4
"""Latest schema version for this collection"""
_version = me.IntField(default=SCHEMA_VERSION)
"""Schema version of this document"""
authorized_to_login = me.BooleanField(default=None, null=True)
"""Wether the members of this alliance are allowed to login to SNI. See :meth:`sni.uac.uac.is_authorized_to_login`."""
created_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the creation of this document"""
discord_role_id = me.IntField(null=True)
"""Id of the corresponding discord role"""
description = me.StringField(default=str)
"""Self explanatory"""
is_autogroup = me.BooleanField(default=False, required=True)
"""Wether this group was created automatically by SNI (e.g. group of a corporation)"""
map_to_discord = me.BooleanField(default=True, required=True)
"""Wether this group should be mapped as a Discord role"""
map_to_teamspeak = me.BooleanField(default=True, required=True)
"""Wether this group should be mapped as a Teamspeak group"""
members = me.ListField(me.ReferenceField("User"), default=list)
"""Member list"""
group_name = me.StringField(required=True, unique=True)
"""Name of the group"""
owner = me.ReferenceField("User", null=True)
"""Owner of the group. Can be ``None``."""
teamspeak_sgid = me.IntField(null=True)
"""Teamspeak group id, if applicable"""
updated_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the last update of this document"""
meta = {"indexes": ["group_name",]}
def __repr__(self) -> str:
return f"<Group: {self.group_name}>"
class User(me.Document):
"""
User model.
A user corresponds to a single EVE character.
"""
SCHEMA_VERSION = 3
"""Latest schema version for this collection"""
_version = me.IntField(default=SCHEMA_VERSION)
"""Schema version of this document"""
authorized_to_login = me.BooleanField(default=None, null=True)
"""Wether the members of this alliance are allowed to login to SNI. See :meth:`sni.uac.uac.is_authorized_to_login`."""
character_id = me.IntField(unique=True)
"""Character id (according to the ESI)"""
character_name = me.StringField(required=True)
"""Character name"""
clearance_level = me.IntField(default=0, required=True)
"""Clearance level of this user. See :mod:`sni.uac.clearance`."""
corporation = me.ReferenceField(Corporation, default=None, null=True)
"""Corporation this character belongs to, if applicable"""
created_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the creation of this document"""
discord_user_id = me.IntField(default=None, null=True)
"""Discord user id associated to this user, if applicable"""
teamspeak_cldbid = me.IntField(default=None, null=True)
"""Teamspeak user id associated to this user, if applicable"""
updated_on = me.DateTimeField(default=utils.now, required=True)
"""Timestamp of the last update of this document"""
meta = {"indexes": ["character_id", "character_name",]}
def __repr__(self) -> str:
return f"<User: {self.character_id} {self.character_name}>"
@property
def alliance(self) -> Optional[Alliance]:
"""
Returns the alliance the user is part of, if any
"""
if self.corporation is not None:
return self.corporation.alliance
return None
def cumulated_mandatory_esi_scopes(self) -> Set[EsiScope]:
"""
Returns the list (although it really is a set) of all the ESI scopes
required by the corporation, alliance, and all the coalitions the user
is part of.
"""
if self.corporation is not None:
return self.corporation.cumulated_mandatory_esi_scopes()
return set()
def coalitions(self) -> List[Coalition]:
"""
Returns the list of coalition this user is part of.
"""
if self.corporation is not None:
return self.corporation.coalitions()
return []
def is_ceo_of_alliance(self) -> bool:
"""
        Tells whether the user is the CEO of its alliance, i.e. the CEO of the alliance's executor corporation.
"""
return (
self.is_ceo_of_corporation()
and self.corporation.alliance is not None
and self.corporation.alliance.executor_corporation_id
== self.corporation.corporation_id
)
def is_ceo_of_corporation(self) -> bool:
"""
        Tells whether the user is the CEO of its corporation.
"""
return (
self.corporation is not None
and self.corporation.ceo_character_id == self.character_id
)
@property
def tickered_name(self) -> str:
"""
Returns the user's character name with its alliance ticker as a prefix.
If the user is not in an alliance, then the corporation's ticker is
        used instead. If the user is not in any corporation (e.g. root), then
there is no prefix.
"""
ticker = None
if self.corporation is not None:
if self.corporation.alliance is not None:
ticker = self.corporation.alliance.ticker
else:
ticker = self.corporation.ticker
if ticker is not None:
return f"[{ticker}] {self.character_name}"
return self.character_name
|
[
"mongoengine.ReferenceField",
"mongoengine.BooleanField",
"mongoengine.DateTimeField",
"mongoengine.IntField",
"mongoengine.StringField"
] |
[((325, 360), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'SCHEMA_VERSION'}), '(default=SCHEMA_VERSION)\n', (336, 360), True, 'import mongoengine as me\n'), ((422, 446), 'mongoengine.IntField', 'me.IntField', ([], {'unique': '(True)'}), '(unique=True)\n', (433, 446), True, 'import mongoengine as me\n'), ((513, 542), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)'}), '(required=True)\n', (527, 542), True, 'import mongoengine as me\n'), ((597, 637), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (612, 637), True, 'import mongoengine as me\n'), ((806, 832), 'mongoengine.IntField', 'me.IntField', ([], {'required': '(True)'}), '(required=True)\n', (817, 832), True, 'import mongoengine as me\n'), ((1060, 1089), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)'}), '(required=True)\n', (1074, 1089), True, 'import mongoengine as me\n'), ((1141, 1191), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (1157, 1191), True, 'import mongoengine as me\n'), ((4514, 4549), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'SCHEMA_VERSION'}), '(default=SCHEMA_VERSION)\n', (4525, 4549), True, 'import mongoengine as me\n'), ((4619, 4659), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (4634, 4659), True, 'import mongoengine as me\n'), ((4816, 4884), 'mongoengine.ReferenceField', 'me.ReferenceField', (['Alliance'], {'default': 'None', 'null': '(True)', 'required': '(False)'}), '(Alliance, default=None, null=True, required=False)\n', (4833, 4884), True, 'import mongoengine as me\n'), ((4996, 5022), 'mongoengine.IntField', 'me.IntField', ([], {'required': '(True)'}), '(required=True)\n', (5007, 5022), True, 'import mongoengine as me\n'), ((5153, 5177), 'mongoengine.IntField', 'me.IntField', ([], {'unique': '(True)'}), '(unique=True)\n', (5164, 5177), True, 'import mongoengine as me\n'), ((5257, 5286), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)'}), '(required=True)\n', (5271, 5286), True, 'import mongoengine as me\n'), ((5505, 5534), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)'}), '(required=True)\n', (5519, 5534), True, 'import mongoengine as me\n'), ((5589, 5639), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (5605, 5639), True, 'import mongoengine as me\n'), ((10306, 10341), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'SCHEMA_VERSION'}), '(default=SCHEMA_VERSION)\n', (10317, 10341), True, 'import mongoengine as me\n'), ((10411, 10451), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': '(True)', 'null': '(True)'}), '(default=True, null=True)\n', (10426, 10451), True, 'import mongoengine as me\n'), ((10608, 10658), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (10624, 10658), True, 'import mongoengine as me\n'), ((11382, 11424), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (11396, 11424), True, 'import mongoengine as me\n'), ((11471, 11498), 'mongoengine.StringField', 'me.StringField', ([], {'default': 'str'}), '(default=str)\n', (11485, 11498), True, 
'import mongoengine as me\n'), ((11551, 11601), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (11567, 11601), True, 'import mongoengine as me\n'), ((13938, 13973), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'SCHEMA_VERSION'}), '(default=SCHEMA_VERSION)\n', (13949, 13973), True, 'import mongoengine as me\n'), ((14043, 14083), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (14058, 14083), True, 'import mongoengine as me\n'), ((14225, 14275), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (14241, 14275), True, 'import mongoengine as me\n'), ((14352, 14374), 'mongoengine.IntField', 'me.IntField', ([], {'null': '(True)'}), '(null=True)\n', (14363, 14374), True, 'import mongoengine as me\n'), ((14441, 14468), 'mongoengine.StringField', 'me.StringField', ([], {'default': 'str'}), '(default=str)\n', (14455, 14468), True, 'import mongoengine as me\n'), ((14516, 14561), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': '(False)', 'required': '(True)'}), '(default=False, required=True)\n', (14531, 14561), True, 'import mongoengine as me\n'), ((14675, 14719), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': '(True)', 'required': '(True)'}), '(default=True, required=True)\n', (14690, 14719), True, 'import mongoengine as me\n'), ((14807, 14851), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': '(True)', 'required': '(True)'}), '(default=True, required=True)\n', (14822, 14851), True, 'import mongoengine as me\n'), ((15027, 15069), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (15041, 15069), True, 'import mongoengine as me\n'), ((15111, 15147), 'mongoengine.ReferenceField', 'me.ReferenceField', (['"""User"""'], {'null': '(True)'}), "('User', null=True)\n", (15128, 15147), True, 'import mongoengine as me\n'), ((15217, 15239), 'mongoengine.IntField', 'me.IntField', ([], {'null': '(True)'}), '(null=True)\n', (15228, 15239), True, 'import mongoengine as me\n'), ((15302, 15352), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (15318, 15352), True, 'import mongoengine as me\n'), ((15729, 15764), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'SCHEMA_VERSION'}), '(default=SCHEMA_VERSION)\n', (15740, 15764), True, 'import mongoengine as me\n'), ((15834, 15874), 'mongoengine.BooleanField', 'me.BooleanField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (15849, 15874), True, 'import mongoengine as me\n'), ((16018, 16042), 'mongoengine.IntField', 'me.IntField', ([], {'unique': '(True)'}), '(unique=True)\n', (16029, 16042), True, 'import mongoengine as me\n'), ((16111, 16140), 'mongoengine.StringField', 'me.StringField', ([], {'required': '(True)'}), '(required=True)\n', (16125, 16140), True, 'import mongoengine as me\n'), ((16189, 16226), 'mongoengine.IntField', 'me.IntField', ([], {'default': '(0)', 'required': '(True)'}), '(default=0, required=True)\n', (16200, 16226), True, 'import mongoengine as me\n'), ((16316, 16371), 'mongoengine.ReferenceField', 'me.ReferenceField', (['Corporation'], {'default': 'None', 'null': '(True)'}), '(Corporation, default=None, null=True)\n', (16333, 
16371), True, 'import mongoengine as me\n'), ((16453, 16503), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (16469, 16503), True, 'import mongoengine as me\n'), ((16580, 16616), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (16591, 16616), True, 'import mongoengine as me\n'), ((16706, 16742), 'mongoengine.IntField', 'me.IntField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (16717, 16742), True, 'import mongoengine as me\n'), ((16828, 16878), 'mongoengine.DateTimeField', 'me.DateTimeField', ([], {'default': 'utils.now', 'required': '(True)'}), '(default=utils.now, required=True)\n', (16844, 16878), True, 'import mongoengine as me\n'), ((929, 961), 'mongoengine.StringField', 'me.StringField', ([], {'choices': 'EsiScope'}), '(choices=EsiScope)\n', (943, 961), True, 'import mongoengine as me\n'), ((5371, 5403), 'mongoengine.StringField', 'me.StringField', ([], {'choices': 'EsiScope'}), '(choices=EsiScope)\n', (5385, 5403), True, 'import mongoengine as me\n'), ((10762, 10794), 'mongoengine.StringField', 'me.StringField', ([], {'choices': 'EsiScope'}), '(choices=EsiScope)\n', (10776, 10794), True, 'import mongoengine as me\n'), ((10917, 10944), 'mongoengine.ReferenceField', 'me.ReferenceField', (['Alliance'], {}), '(Alliance)\n', (10934, 10944), True, 'import mongoengine as me\n'), ((11199, 11229), 'mongoengine.ReferenceField', 'me.ReferenceField', (['Corporation'], {}), '(Corporation)\n', (11216, 11229), True, 'import mongoengine as me\n'), ((14946, 14971), 'mongoengine.ReferenceField', 'me.ReferenceField', (['"""User"""'], {}), "('User')\n", (14963, 14971), True, 'import mongoengine as me\n')]
|
# -*- coding: utf-8 -*-
# @Time    : 2018/8/21 4:25 PM
# @Author : <NAME>
# @Email : <EMAIL>
# @File : connect_database.py
# @Software: PyCharm
'''
For standardization, the initial design stage is split into three main modules for testing;
this is the first module, i.e. the functionality that interacts with the database.
Database name: fanuc
Table name: session
User name: fanuc
Password: <PASSWORD>
Please follow this standard as far as possible.
'''
import pymysql
#host=self.hostname, user=self.user,
# passwd=<PASSWORD>, db=self.db,
# port=self.port, connect_timeout=conf.timeout,
# use_unicode=True
# host = 'localhost'
# user = 'fanuc'
# passwd = '<PASSWORD>'
# DB = 'fanuc_test'
# port = 3306
config = {
'host': '127.0.0.1',
'port': 3306,
'db':'fanuc',
'user': 'fanuc',
'passwd': '<PASSWORD>',
'charset':'utf8',
'cursorclass':pymysql.cursors.DictCursor
}
def remakeResquest(request_data):
return '\"' + request_data + '\"'
# Test
# print(remakeResquest('dasaasdasad'))
def createSql(request,flag=1):
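    # flag == 1 looks the session up by request_data, flag == 2 by functions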
if flag == 1:
sql = 'select * from session WHERE request_data='
sql=sql+request+';'
if flag == 2:
sql = 'select * from session WHERE functions = '
sql = sql+request+';'
return sql
def connectDB(config):
return pymysql.connect(**config)
def searchData(db,sql_clause):
# db = connectDB(config)
cursor = db.cursor()
    # Escaping of special characters should be considered here
# request = remakeResquest(request_data)
# sql_clause = createSql(request)
cursor.execute(sql_clause)
results = cursor.fetchall()
return results
# db = connectDB(config)
# def connectDB(config,sql='select * from session;'):
# db = pymysql.connect(**config)
# cursor = db.cursor()
# cursor.execute(sql)
# results = cursor.fetchall()
# return db,cursor,results[0]['response_data']
'''
Test code
sql_test = createSql('\"a0a0a0a00001010100020001\"')
db = pymysql.connect(**config)
cursor = db.cursor()
cursor.execute(sql_test)
results = cursor.fetchall()
print(results[0]['response_data'])
'''
if __name__ =="__main__":
# db = pymysql.connect("loaclhost", "root",
# "lsj940411", "fanuc")
# sql_test = createSql('\"a0a0a0a00001010100020001\"')
# print(sql_test)
# db,cursor,response_data = connectDB(config,sql=sql_test)
# sql_test = createSql('\"a0a0a0a00001010100020001\"')
data = "a0a0a0a00001010100020001"
    # Step 1: build the SQL statement
request = remakeResquest(data)
sql_clause = createSql(request)
    # Step 2: connect to the database
db = connectDB(config)
# cursor = db.cursor()
# cursor.execute(sql_test)
# results = cursor.fetchall()
    # Step 3: query and return the data
results = searchData(db,sql_clause)
# print(results)
print(results[0]['response_data'])
# print(response_data)
#
# cursor = db.cursor()
# cursor.execute(sql_test)
# result = cursor.fetchall()
# response_data = result[0][0]
# print(response_data)
|
[
"pymysql.connect"
] |
[((1120, 1145), 'pymysql.connect', 'pymysql.connect', ([], {}), '(**config)\n', (1135, 1145), False, 'import pymysql\n')]
|
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import os
from materialize.mzcompose import (
Kafka,
Materialized,
SchemaRegistry,
Testdrive,
Workflow,
Zookeeper,
)
materialized = Materialized(
options="--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test"
)
mz_disable_user_indexes = Materialized(
name="mz_disable_user_indexes",
hostname="materialized",
options="--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test --disable-user-indexes",
)
# This instance of Mz is used for failpoint testing. By using --disable-persistent-system-tables-test
# we ensure that only testdrive-initiated actions cause I/O. The --workers 1 is used due to #8739
mz_without_system_tables = Materialized(
name="mz_without_system_tables",
hostname="materialized",
options="--persistent-user-tables --disable-persistent-system-tables-test --workers 1",
)
prerequisites = [Zookeeper(), Kafka(), SchemaRegistry()]
services = [
*prerequisites,
materialized,
mz_disable_user_indexes,
mz_without_system_tables,
Testdrive(no_reset=True, seed=1),
]
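# TD_TEST can be set in the environment to restrict which testdrive files run; it defaults to all of them.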
td_test = os.environ.pop("TD_TEST", "*")
def workflow_persistence(w: Workflow):
workflow_kafka_sources(w)
workflow_user_tables(w)
workflow_failpoints(w)
workflow_disable_user_indexes(w)
def workflow_kafka_sources(w: Workflow):
w.start_and_wait_for_tcp(services=prerequisites, timeout_secs=240)
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command=f"kafka-sources/*{td_test}*-before.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
# And restart again, for extra stress
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command=f"kafka-sources/*{td_test}*-after.td",
)
# Do one more restart, just in case and just confirm that Mz is able to come up
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.kill_services(services=["materialized"], signal="SIGKILL")
w.remove_services(services=["materialized", "testdrive-svc"], destroy_volumes=True)
w.remove_volumes(volumes=["mzdata"])
def workflow_user_tables(w: Workflow):
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command=f"user-tables/table-persistence-before-{td_test}.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.run_service(
service="testdrive-svc",
command=f"user-tables/table-persistence-after-{td_test}.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.remove_services(services=["materialized", "testdrive-svc"], destroy_volumes=True)
w.remove_volumes(volumes=["mzdata"])
def workflow_failpoints(w: Workflow):
w.start_services(services=["mz_without_system_tables"])
w.wait_for_mz(service="mz_without_system_tables")
w.run_service(service="testdrive-svc", command=f"failpoints/{td_test}.td")
w.kill_services(services=["mz_without_system_tables"], signal="SIGKILL")
w.remove_services(
services=["mz_without_system_tables", "testdrive-svc"], destroy_volumes=True
)
w.remove_volumes(volumes=["mzdata"])
def workflow_disable_user_indexes(w: Workflow):
w.start_and_wait_for_tcp(services=prerequisites)
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command="disable-user-indexes/before.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["mz_disable_user_indexes"])
w.wait_for_mz(service="mz_disable_user_indexes")
w.run_service(
service="testdrive-svc",
command="disable-user-indexes/after.td",
)
w.kill_services(services=["mz_disable_user_indexes"], signal="SIGKILL")
w.remove_services(
services=["materialized", "mz_disable_user_indexes", "testdrive-svc"],
destroy_volumes=True,
)
w.remove_volumes(volumes=["mzdata"])
|
[
"materialize.mzcompose.Materialized",
"materialize.mzcompose.SchemaRegistry",
"materialize.mzcompose.Kafka",
"os.environ.pop",
"materialize.mzcompose.Testdrive",
"materialize.mzcompose.Zookeeper"
] |
[((535, 666), 'materialize.mzcompose.Materialized', 'Materialized', ([], {'options': '"""--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test"""'}), "(options=\n '--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test'\n )\n", (547, 666), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n'), ((690, 905), 'materialize.mzcompose.Materialized', 'Materialized', ([], {'name': '"""mz_disable_user_indexes"""', 'hostname': '"""materialized"""', 'options': '"""--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test --disable-user-indexes"""'}), "(name='mz_disable_user_indexes', hostname='materialized',\n options=\n '--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test --disable-user-indexes'\n )\n", (702, 905), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n'), ((1136, 1308), 'materialize.mzcompose.Materialized', 'Materialized', ([], {'name': '"""mz_without_system_tables"""', 'hostname': '"""materialized"""', 'options': '"""--persistent-user-tables --disable-persistent-system-tables-test --workers 1"""'}), "(name='mz_without_system_tables', hostname='materialized',\n options=\n '--persistent-user-tables --disable-persistent-system-tables-test --workers 1'\n )\n", (1148, 1308), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n'), ((1529, 1559), 'os.environ.pop', 'os.environ.pop', (['"""TD_TEST"""', '"""*"""'], {}), "('TD_TEST', '*')\n", (1543, 1559), False, 'import os\n'), ((1328, 1339), 'materialize.mzcompose.Zookeeper', 'Zookeeper', ([], {}), '()\n', (1337, 1339), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n'), ((1341, 1348), 'materialize.mzcompose.Kafka', 'Kafka', ([], {}), '()\n', (1346, 1348), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n'), ((1350, 1366), 'materialize.mzcompose.SchemaRegistry', 'SchemaRegistry', ([], {}), '()\n', (1364, 1366), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n'), ((1482, 1514), 'materialize.mzcompose.Testdrive', 'Testdrive', ([], {'no_reset': '(True)', 'seed': '(1)'}), '(no_reset=True, seed=1)\n', (1491, 1514), False, 'from materialize.mzcompose import Kafka, Materialized, SchemaRegistry, Testdrive, Workflow, Zookeeper\n')]
|
#!/usr/bin/python
# Copyright (C) <NAME> 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
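# Tests that <conditional>@rule requirements are evaluated and that the
# properties returned by the named rules are added to the targets' build
# properties.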
import BoostBuild
t = BoostBuild.Tester()
t.write("jamroot.jam", """
exe a1 : a1.cpp : <conditional>@a1-rule ;
rule a1-rule ( properties * )
{
if <variant>debug in $(properties)
{
return <define>OK ;
}
}
exe a2 : a2.cpp : <conditional>@$(__name__).a2-rule
<variant>debug:<optimization>speed ;
rule a2-rule ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
exe a3 : a3.cpp : <conditional>@$(__name__).a3-rule-1
<conditional>@$(__name__).a3-rule-2 ;
rule a3-rule-1 ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
rule a3-rule-2 ( properties * )
{
if <variant>debug in $(properties)
{
return <optimization>speed ;
}
}
""")
t.write("a1.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.write("a2.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.write("a3.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a1.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a2.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a3.exe")
t.cleanup()
|
[
"BoostBuild.Tester"
] |
[((234, 253), 'BoostBuild.Tester', 'BoostBuild.Tester', ([], {}), '()\n', (251, 253), False, 'import BoostBuild\n')]
|
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Bangladesh"
def get_gen_file(self):
return "{}/bangladesh_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 8:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} Barisal
2 {} Chittagong
3 {} Dhaka
4 {} Khulna
5 {} Mymensingh
6 {} Rajshahi
7 {} Rangpur
8 {} Sylhet""".format(*values)
def expect_geojson_output(self):
return True
def csv_to_area_string_and_colors(self, csvfile):
return self.order_by_example(csv.reader(csvfile), "Division", 0, 1, 2, 3, ["Barisal","Chittagong","Dhaka","Khulna","Mymensingh","Rajshahi","Rangpur","Sylhet"], [0.0 for i in range(0,8)], {"Barisal":"1","Chittagong":"2","Dhaka":"3","Khulna":"4","Mymensingh":"5","Rajshahi":"6","Rangpur":"7","Sylhet":"8"})
|
[
"csv.reader"
] |
[((849, 868), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (859, 868), False, 'import csv\n')]
|
# Generated by Django 3.1 on 2020-08-28 01:34
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("automation", "0004_auto_20200617_0332"),
("agents", "0012_auto_20200810_0544"),
("winupdate", "0002_auto_20200715_0445"),
]
operations = [
migrations.AddField(
model_name="winupdatepolicy",
name="policy",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="winupdatepolicy",
to="automation.policy",
),
),
migrations.AlterField(
model_name="winupdatepolicy",
name="agent",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="winupdatepolicy",
to="agents.agent",
),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((474, 620), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""winupdatepolicy"""', 'to': '"""automation.policy"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='winupdatepolicy', to='automation.policy')\n", (491, 620), False, 'from django.db import migrations, models\n'), ((840, 981), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""winupdatepolicy"""', 'to': '"""agents.agent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='winupdatepolicy', to='agents.agent')\n", (857, 981), False, 'from django.db import migrations, models\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import json
import math
from ConfigReader import configuration
import mysql.connector
from common import constants as constant
from mysql.connector import errorcode
from datetime import datetime
import pandas as pd
import jaydebeapi
class source(object):
def __init__(self):
logging.debug("Initiating schemaReader.source()")
def removeNewLine(self, _data):
if _data == None:
return None
else:
return _data
def readTableColumns(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableColumns()")
query = None
result_df = pd.DataFrame()
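        # Each supported server type builds its own metadata query below; the
        # rows are then normalised into a common DataFrame layout.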
if serverType == constant.MSSQL:
query = "select "
query += " SchemaName = CAST((TBL.TABLE_SCHEMA) AS NVARCHAR(4000)), "
query += " TableName = CAST((TBL.TABLE_NAME) AS NVARCHAR(4000)), "
query += " TableDescription = CAST((tableProp.value) AS NVARCHAR(4000)), "
query += " ColumnName = CAST((COL.COLUMN_NAME) AS NVARCHAR(4000)), "
query += " ColumnDataType = CAST((COL.DATA_TYPE) AS NVARCHAR(4000)), "
query += " ColumnLength = COL.CHARACTER_MAXIMUM_LENGTH, "
query += " ColumnDescription = CAST((colDesc.ColumnDescription) AS NVARCHAR(4000)), "
query += " ColumnPrecision = CAST((COL.numeric_precision) AS NVARCHAR(128)), "
query += " ColumnScale = COL.numeric_scale, "
query += " IsNullable = CAST((COL.Is_Nullable) AS NVARCHAR(128)), "
query += " TableType = CAST((TBL.TABLE_TYPE) AS NVARCHAR(4000)), "
query += " CreateDate = sysTables.create_date "
query += "FROM INFORMATION_SCHEMA.TABLES TBL "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS COL "
query += " ON COL.TABLE_NAME = TBL.TABLE_NAME "
query += " AND COL.TABLE_SCHEMA = TBL.TABLE_SCHEMA "
query += "LEFT JOIN sys.tables sysTables "
query += " ON sysTables.object_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += "LEFT JOIN sys.extended_properties tableProp "
query += " ON tableProp.major_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += " AND tableProp.minor_id = 0 "
query += " AND tableProp.name = 'MS_Description' "
query += "LEFT JOIN ( "
query += " SELECT "
query += " sc.object_id, "
query += " sc.column_id, "
query += " sc.name, "
query += " colProp.[value] AS ColumnDescription "
query += " FROM sys.columns sc "
query += " INNER JOIN sys.extended_properties colProp "
query += " ON colProp.major_id = sc.object_id "
query += " AND colProp.minor_id = sc.column_id "
query += " AND colProp.name = 'MS_Description' "
query += " ) colDesc "
query += " ON colDesc.object_id = object_id(TBL.TABLE_SCHEMA + '.' + TBL.TABLE_NAME) "
query += " AND colDesc.name = COL.COLUMN_NAME "
query += "WHERE lower(TBL.TABLE_TYPE) in ('base table','view') "
query += " AND COL.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " AND COL.TABLE_NAME = '%s' "%(table)
query += "ORDER BY TBL.TABLE_SCHEMA, TBL.TABLE_NAME,COL.ordinal_position"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] in ("numeric", "decimal"):
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4],row[7], row[8] )
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
elif row[4] in ("geometry", "image", "ntext", "text", "xml"):
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
elif row[4] == "varbinary":
if row[7] != None and row[7] > -1:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4],row[7], row[8] )
else:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[9]
line_dict["TABLE_TYPE"] = row[10]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
# First determine if column ORIGIN_CON_ID exists in ALL_TAB_COMMENTS. If it does, we need to take that into consideration
oracle_OriginConId_exists = True
oracle_OriginConId = None
# query = "SELECT ORIGIN_CON_ID FROM ALL_TAB_COMMENTS WHERE 1 = 0"
query = "SELECT ORIGIN_CON_ID FROM ALL_TAB_COMMENTS "
query += "WHERE OWNER = '%s' "%(schema)
if table != None:
query += " AND TABLE_NAME = '%s' "%(table)
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
if "invalid identifier" in str(errMsg):
oracle_OriginConId_exists = False
else:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
if oracle_OriginConId_exists == True:
rowCount = 0
for row in JDBCCursor.fetchall():
oracle_OriginConId = row[0]
rowCount += 1
if rowCount != 1:
# If there are more than one originConId, it's impossible to determine what we will use. So then we go to default
oracle_OriginConId = None
query = "SELECT "
query += " ALL_TAB_COLUMNS.OWNER SCHEMA_NAME, "
query += " ALL_TAB_COLUMNS.TABLE_NAME, "
query += " ALL_TAB_COMMENTS.COMMENTS TABLE_COMMENT, "
query += " ALL_TAB_COLUMNS.COLUMN_NAME, "
query += " ALL_TAB_COLUMNS.DATA_TYPE, "
query += " ALL_TAB_COLUMNS.DATA_LENGTH, "
query += " ALL_COL_COMMENTS.COMMENTS COLUMN_COMMENT, "
query += " ALL_TAB_COLUMNS.CHAR_LENGTH, "
query += " ALL_TAB_COLUMNS.DATA_PRECISION, "
query += " ALL_TAB_COLUMNS.DATA_SCALE, "
query += " ALL_TAB_COLUMNS.NULLABLE, "
query += " ALL_OBJECTS.OBJECT_TYPE, "
query += " ALL_OBJECTS.CREATED "
query += "FROM ALL_TAB_COLUMNS ALL_TAB_COLUMNS "
query += "LEFT JOIN ALL_TAB_COMMENTS ALL_TAB_COMMENTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_TAB_COMMENTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_TAB_COMMENTS.TABLE_NAME "
if oracle_OriginConId_exists == True:
if oracle_OriginConId == None:
query += " AND ALL_TAB_COMMENTS.ORIGIN_CON_ID <= 1 "
else:
query += " AND ALL_TAB_COMMENTS.ORIGIN_CON_ID = %s "%(oracle_OriginConId)
query += "LEFT JOIN ALL_COL_COMMENTS ALL_COL_COMMENTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_COL_COMMENTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_COL_COMMENTS.TABLE_NAME "
query += " AND ALL_TAB_COLUMNS.COLUMN_NAME = ALL_COL_COMMENTS.COLUMN_NAME "
if oracle_OriginConId_exists == True:
if oracle_OriginConId == None:
query += " AND ALL_COL_COMMENTS.ORIGIN_CON_ID <= 1 "
else:
query += " AND ALL_COL_COMMENTS.ORIGIN_CON_ID = %s "%(oracle_OriginConId)
query += "LEFT JOIN ALL_OBJECTS ALL_OBJECTS "
query += " ON ALL_TAB_COLUMNS.OWNER = ALL_OBJECTS.OWNER "
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = ALL_OBJECTS.OBJECT_NAME "
query += " AND ALL_OBJECTS.OBJECT_TYPE IN ('TABLE', 'VIEW') "
query += "WHERE ALL_TAB_COLUMNS.OWNER = '%s' "%(schema)
if table != None:
query += " AND ALL_TAB_COLUMNS.TABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, ALL_TAB_COLUMNS.TABLE_NAME, ALL_TAB_COLUMNS.COLUMN_ID"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
else:
if re.search('TIMESTAMP', row[4]) or row[4] in ("CLOB", "DATE", "LONG", "BLOB", "NCLOB", "LONG RAW"):
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
elif row[4] in ("VARCHAR", "VARCHAR2", "CHAR", "NCHAR", "NVARCHAR2"):
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[7]))
elif row[4] in ("NUMBER", "FLOAT", "BINARY_FLOAT", "BINARY_DOUBLE"):
if row[8] == None:
line_dict["SOURCE_COLUMN_TYPE"] = row[4]
elif row[8] == 0: #("DATA_PRECISION") == 0) then use char_length
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[7]))
elif row[9]== None or row[9] == 0:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[8]))
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], int(row[8]), int(row[9]))
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], int(row[5]))
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[10]
line_dict["TABLE_TYPE"] = row[11]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[12], '%Y-%m-%d %H:%M:%S')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.MYSQL:
query = "select "
query += " c.table_schema as table_schema, "
query += " c.table_name, "
query += " t.table_comment, "
query += " c.column_name, "
query += " c.data_type, "
query += " c.character_maximum_length, "
query += " c.column_comment, "
query += " c.is_nullable, "
query += " c.numeric_precision, "
query += " c.numeric_scale, "
query += " t.table_type, "
query += " t.create_time "
query += "from information_schema.columns c "
query += "left join information_schema.tables t "
query += " on c.table_schema = t.table_schema and c.table_name = t.table_name "
query += "where c.table_schema = '%s' "%(database)
if table != None:
query += " and c.table_name = '%s' "%(table)
query += "order by c.table_schema,c.table_name, c.ordinal_position "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "decimal":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(self.removeNewLine(row[4]), row[8], row[9])
elif row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == None or row[6] == "":
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[7]
line_dict["TABLE_TYPE"] = row[10]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_UDB:
query = "SELECT "
query += " TRIM(ST.CREATOR) as SCHEMA_NAME, "
query += " TRIM(ST.NAME) as TABLE_NAME, "
query += " TRIM(ST.REMARKS) as TABLE_COMMENT, "
query += " TRIM(SC.NAME) as SOURCE_COLUMN_NAME, "
query += " TRIM(SC.COLTYPE) SOURCE_COLUMN_TYPE, "
query += " SC.LENGTH as SOURCE_COLUMN_LENGTH, "
query += " SC.SCALE as SOURCE_COLUMN_SCALE, "
query += " TRIM(SC.REMARKS) as SOURCE_COLUMN_COMMENT, "
query += " SC.NULLS as IS_NULLABLE, "
query += " ST.TYPE as TABLE_TYPE, "
query += " ST.CTIME as CREATE_TIME "
query += "FROM SYSIBM.SYSTABLES ST "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON ST.NAME = SC.TBNAME "
query += " AND ST.CREATOR = SC.TBCREATOR "
query += "WHERE "
query += " ST.CREATOR = '%s' "%(schema)
if table != None:
query += " AND ST.NAME = '%s' "%(table)
query += "ORDER BY ST.CREATOR, ST.NAME"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "DECIMAL":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
elif row[4] in ("DOUBLE", "REAL", "SMALLINT", "DATE", "BLOB", "INTEGER", "TIMESTMP", "BIGINT", "CLOB"):
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[7] == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[10], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_AS400:
query = "SELECT "
query += " TRIM(ST.TABLE_SCHEMA) as SCHEMA_NAME, "
query += " TRIM(ST.TABLE_NAME) as TABLE_NAME, "
query += " ST.LONG_COMMENT as TABLE_COMMENT, "
query += " TRIM(SC.COLUMN_NAME) as SOURCE_COLUMN_NAME, "
query += " SC.TYPE_NAME as SOURCE_COLUMN_TYPE, "
query += " SC.COLUMN_SIZE as SOURCE_COLUMN_LENGTH, "
query += " SC.DECIMAL_DIGITS as SOURCE_COLUMN_SCALE, "
query += " SC.REMARKS as SOURCE_COLUMN_COMMENT, "
query += " SC.IS_NULLABLE, "
query += " ST.TABLE_TYPE, "
# ST.LAST_ALTERED_TIMESTAMP is not really correct, but it's the best we got
# https://www.ibm.com/support/knowledgecenter/SSAE4W_9.6.0/db2/rbafzcatsystbls.htm
query += " ST.LAST_ALTERED_TIMESTAMP "
query += "FROM QSYS2.SYSTABLES ST "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON ST.TABLE_SCHEMA = SC.TABLE_SCHEM "
query += " AND ST.TABLE_NAME= SC.TABLE_NAME "
query += "WHERE "
query += " ST.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " AND SC.TABLE_NAME = '%s' "%(table)
query += "ORDER BY ST.TABLE_SCHEMA, SC.TABLE_NAME, SC.ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] == "DECIMAL":
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
elif row[4] in ("DOUBLE", "REAL", "SMALLINT", "DATE", "BLOB", "INTEGER", "TIMESTMP", "BIGINT", "CLOB"):
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if self.removeNewLine(row[7]) == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
try:
line_dict["TABLE_CREATE_TIME"] = datetime.strptime(row[10], '%Y-%m-%d %H:%M:%S.%f')
except:
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.POSTGRESQL:
query = "SELECT "
query += " tab_columns.table_schema, "
query += " tab_columns.table_name, "
query += " pg_catalog.col_description(c.oid, 0::int) as table_comment, "
query += " tab_columns.column_name, "
query += " data_type, "
query += " character_maximum_length, "
query += " pg_catalog.col_description(c.oid, tab_columns.ordinal_position::int) as column_comment, "
query += " is_nullable, "
query += " tab_tables.table_type "
query += "FROM information_schema.columns AS tab_columns "
query += "LEFT JOIN pg_catalog.pg_class c "
query += " ON c.relname = tab_columns.table_name "
query += "LEFT JOIN information_schema.tables AS tab_tables "
query += " ON tab_tables.table_catalog = tab_columns.table_catalog "
query += " AND tab_tables.table_schema = tab_columns.table_schema "
query += " AND tab_tables.table_name = tab_columns.table_name "
query += "WHERE tab_columns.table_catalog = '%s' "%(database)
query += " AND tab_columns.table_schema ='%s' "%(schema)
if table != None:
query += " AND tab_columns.table_name = '%s' "%(table)
query += "ORDER BY table_schema, table_name"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = self.removeNewLine(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(self.removeNewLine(row[4]), row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if row[6] == "" or row[6] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[6]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["IS_NULLABLE"] = row[7]
line_dict["TABLE_TYPE"] = row[8]
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.PROGRESS:
query = "SELECT "
query += " tab_tables.OWNER, "
query += " tab_tables.TBL, "
query += " tab_tables.DESCRIPTION AS TBL_Commnets, "
query += " COL, "
query += " COLTYPE, "
query += " WIDTH, "
query += " SCALE, "
query += " tab_columns.DESCRIPTION, "
query += " tab_columns.NULLFLAG, "
query += " tab_tables.TBLTYPE "
query += "FROM sysprogress.SYSCOLUMNS_FULL tab_columns "
query += "LEFT JOIN SYSPROGRESS.SYSTABLES_FULL tab_tables "
query += " ON tab_tables.TBL = tab_columns.TBL "
query += " AND tab_tables.OWNER = tab_columns.OWNER "
query += "WHERE "
query += " tab_columns.OWNER = '%s' "%(schema)
if table != None:
query += " AND tab_columns.TBL = '%s' "%(table)
query += "ORDER BY tab_tables.OWNER, tab_tables.TBL"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = self.removeNewLine(row[0])
line_dict["TABLE_NAME"] = self.removeNewLine(row[1])
if row[2] == "" or row[2] == None:
line_dict["TABLE_COMMENT"] = None
else:
line_dict["TABLE_COMMENT"] = self.removeNewLine(row[2]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
line_dict["SOURCE_COLUMN_NAME"] = self.removeNewLine(row[3])
if row[4] in ("decimal", "numeric"):
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s,%s)"%(row[4], row[5], row[6])
else:
if row[5] == None:
line_dict["SOURCE_COLUMN_TYPE"] = "%s"%(row[4])
else:
line_dict["SOURCE_COLUMN_TYPE"] = "%s(%s)"%(row[4], row[5])
line_dict["SOURCE_COLUMN_LENGTH"] = row[5]
if self.removeNewLine(row[7]) == "" or row[7] == None:
line_dict["SOURCE_COLUMN_COMMENT"] = None
else:
try:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7]).encode('ascii', 'ignore').decode('unicode_escape', 'ignore')
except UnicodeDecodeError:
line_dict["SOURCE_COLUMN_COMMENT"] = self.removeNewLine(row[7])
line_dict["IS_NULLABLE"] = row[8]
line_dict["TABLE_TYPE"] = row[9]
line_dict["TABLE_CREATE_TIME"] = None
line_dict["DEFAULT_VALUE"] = None
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
logging.debug(result_df)
logging.debug("Executing schemaReader.readTable() - Finished")
return result_df
def readTableKeys(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableKeys()")
query = None
result_df = pd.DataFrame()
if serverType == constant.MSSQL:
query = "SELECT "
query += " CAST(oParentColDtl.TABLE_SCHEMA AS VARCHAR(4000)) as SCHEMA_NAME, "
query += " CAST(PKnUTable.name AS VARCHAR(4000)) as TABLE_NAME, "
query += " CAST(PKnUKEY.name AS VARCHAR(4000)) as CONSTRAINT_NAME, "
# query += " CAST(PKnUKEY.type_desc AS VARCHAR(4000)) as CONSTRAINT_TYPE, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " CAST(PKnUKEYCol.name AS VARCHAR(4000)) as COL_NAME, "
query += " oParentColDtl.DATA_TYPE as COL_DATA_TYPE, "
query += " oParentColDtl.CHARACTER_MAXIMUM_LENGTH as COL_LENGTH, "
query += " '' as REFERENCE_SCHEMA_NAME, "
query += " '' as REFERENCE_TABLE_NAME, "
query += " '' as REFERENCE_COL_NAME, "
query += " PKnUColIdx.key_ordinal as ORDINAL_POSITION "
query += "FROM sys.key_constraints as PKnUKEY "
query += "INNER JOIN sys.tables as PKnUTable "
query += " ON PKnUTable.object_id = PKnUKEY.parent_object_id "
query += "INNER JOIN sys.index_columns as PKnUColIdx "
query += " ON PKnUColIdx.object_id = PKnUTable.object_id "
query += " AND PKnUColIdx.index_id = PKnUKEY.unique_index_id "
query += "INNER JOIN sys.columns as PKnUKEYCol "
query += " ON PKnUKEYCol.object_id = PKnUTable.object_id "
query += " AND PKnUKEYCol.column_id = PKnUColIdx.column_id "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS oParentColDtl "
query += " ON oParentColDtl.TABLE_NAME=PKnUTable.name "
query += " AND oParentColDtl.COLUMN_NAME=PKnUKEYCol.name "
query += "WHERE oParentColDtl.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " and PKnUTable.name = '%s' "%(table)
query += " and PKnUKEY.type_desc = 'PRIMARY_KEY_CONSTRAINT' "
query += "UNION ALL "
query += "SELECT "
query += " CAST(oParentColDtl.TABLE_SCHEMA AS VARCHAR(4000)) as SCHEMA_NAME, "
query += " CAST(oParent.name AS VARCHAR(4000)) as TABLE_NAME, "
query += " CAST(oConstraint.name AS VARCHAR(4000)) as CONSTRAINT_NAME, "
# query += " CONSTRAINT_TYPE = 'FK', "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " CAST(oParentCol.name AS VARCHAR(4000)) as COL_NAME, "
query += " oParentColDtl.DATA_TYPE as COL_NAME_DATA_TYPE, "
query += " oParentColDtl.CHARACTER_MAXIMUM_LENGTH as COL_LENGTH, "
query += " CAST(OBJECT_SCHEMA_NAME(T.[object_id],DB_ID()) AS VARCHAR(4000)) as REFERENCE_SCHEMA_NAME, "
query += " CAST(oReference.name AS VARCHAR(4000)) as REFERENCE_TABLE_NAME, "
query += " CAST(oReferenceCol.name AS VARCHAR(4000)) as REFERENCE_COL_NAME, "
query += " '' as ORDINAL_POSITION "
query += "FROM sys.foreign_key_columns FKC "
query += "INNER JOIN sys.sysobjects oConstraint "
query += " ON FKC.constraint_object_id=oConstraint.id "
query += "INNER JOIN sys.sysobjects oParent "
query += " ON FKC.parent_object_id=oParent.id "
query += "INNER JOIN sys.all_columns oParentCol "
query += " ON FKC.parent_object_id=oParentCol.object_id "
query += " AND FKC.parent_column_id=oParentCol.column_id "
query += "INNER JOIN sys.sysobjects oReference "
query += " ON FKC.referenced_object_id=oReference.id "
query += "INNER JOIN INFORMATION_SCHEMA.COLUMNS oParentColDtl "
query += " ON oParentColDtl.TABLE_NAME=oParent.name "
query += " AND oParentColDtl.COLUMN_NAME=oParentCol.name "
query += "INNER JOIN sys.all_columns oReferenceCol "
query += " ON FKC.referenced_object_id=oReferenceCol.object_id "
query += " AND FKC.referenced_column_id=oReferenceCol.column_id "
query += "INNER JOIN sys.[tables] AS T ON T.[object_id] = oReferenceCol.[object_id] "
query += "WHERE oParentColDtl.TABLE_SCHEMA = '%s' "%(schema)
if table != None:
query += " and oParent.name = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME, CONSTRAINT_TYPE, ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[7]
line_dict["REFERENCE_TABLE_NAME"] = row[8]
line_dict["REFERENCE_COL_NAME"] = row[9]
line_dict["COL_KEY_POSITION"] = row[10]
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
query = "SELECT "
query += " DISTINCT CAST (acc.OWNER AS VARCHAR(4000)) AS SCHEMA_NAME, "
query += " CAST (acc.TABLE_NAME AS VARCHAR(4000)) AS TABLE_NAME, "
query += " CAST(ac.CONSTRAINT_NAME AS VARCHAR(4000)) AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " CAST ( acc.COLUMN_NAME AS VARCHAR(4000)) AS COL_NAME, "
query += " CAST(atc.data_type AS VARCHAR(4000)) AS COL_NAME_DATA_TYPE, "
query += " atc.DATA_LENGTH, "
query += " '' AS REFERENCE_OWNER_NAME, "
query += " '' AS REFERENCE_TABLE_NAME, "
query += " '' AS REFERENCE_COL_NAME, "
query += " acc.POSITION AS COL_KEY_POSITION, "
query += " atc.DATA_PRECISION, "
query += " atc.CHAR_LENGTH "
query += "FROM ALL_CONSTRAINTS ac "
query += "JOIN ALL_CONS_COLUMNS acc "
query += " ON ac.CONSTRAINT_NAME = acc.CONSTRAINT_NAME "
query += "JOIN all_tab_cols atc "
query += " ON ac.owner = atc.owner "
query += " AND ac.table_name = atc.TABLE_NAME "
query += " AND acc.COLUMN_NAME = atc.COLUMN_NAME "
query += "WHERE ac.CONSTRAINT_TYPE = 'P' "
query += " AND acc.OWNER = '%s' "%(schema)
if table != None:
query += " AND acc.TABLE_NAME = '%s' "%(table)
query += "UNION ALL "
query += "select "
query += " b.owner AS SCHEMA_NAME, "
query += " b.table_name AS TABLE_NAME, "
query += " a.constraint_name AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " b.column_name AS COL_NAME , "
query += " atc.data_type AS COL_NAME_DATA_TYPE, "
query += " atc.DATA_LENGTH, "
query += " c.owner AS REFERENCE_SCHEMA_NAME, "
query += " c.table_name AS REFERENCE_TABLE_NAME, "
query += " c.column_name AS REFERENCE_COL_NAME, "
query += " b.position AS COL_KEY_POSITION, "
query += " atc.DATA_PRECISION, "
query += " atc.CHAR_LENGTH "
query += "from all_cons_columns b "
query += "left join all_cons_columns c "
query += " on b.position = c.position "
query += "left join all_constraints a "
query += " on b.constraint_name = a.constraint_name "
query += " AND a.owner = b.owner "
query += " AND c.constraint_name = a.r_constraint_name "
query += " AND c.owner = a.r_owner "
query += "left join all_tab_cols atc "
query += " on b.owner = atc.owner "
query += " AND b.table_name = atc.table_name "
query += " AND b.column_name = atc.column_name "
query += "where "
query += " a.constraint_type = 'R' "
query += " AND b.OWNER = '%s' "%(schema)
if table != None:
query += " AND b.TABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME,CONSTRAINT_TYPE,CONSTRAINT_NAME,COL_KEY_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[7]
line_dict["REFERENCE_TABLE_NAME"] = row[8]
line_dict["REFERENCE_COL_NAME"] = row[9]
line_dict["COL_KEY_POSITION"] = int(row[10])
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.MYSQL:
query = "SELECT kcu.CONSTRAINT_SCHEMA AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " kcu.constraint_name AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " kcu.column_name AS COL_NAME, "
query += " cols.data_type AS COL_DATA_TYPE, "
query += " cols.character_maximum_length AS COL_MAX_LENGTH, "
query += " kcu.referenced_table_schema AS REFERENCE_TABLE_SCHEMA, "
query += " kcu.referenced_table_name AS REFERENCE_TABLE_NAME, "
query += " kcu.referenced_column_name AS REFERENCE_COL_NAME, "
query += " kcu.ORDINAL_POSITION AS COL_KEY_POSITION "
query += "FROM information_schema.key_column_usage kcu "
query += "left join information_schema.columns cols "
query += " on kcu.table_name = cols.table_name and kcu.column_name = cols.column_name "
query += "WHERE "
query += " kcu.referenced_table_name IS NULL "
query += " AND (CONSTRAINT_NAME='PRIMARY' OR CONSTRAINT_NAME='UNIQUE') "
query += " AND kcu.CONSTRAINT_SCHEMA = '%s' "%(database)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "UNION "
query += "SELECT "
query += " kcu.CONSTRAINT_SCHEMA AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " kcu.constraint_name AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " kcu.column_name AS COL_NAME, "
query += " cols.data_type AS COL_DATA_TYPE, "
query += " cols.character_maximum_length AS COL_MAX_LENGTH, "
query += " kcu.referenced_table_schema AS REFERENCE_TABLE_SCHEMA, "
query += " kcu.referenced_table_name AS REFERENCE_TABLE_NAME, "
query += " kcu.referenced_column_name AS REFERENCE_COL_NAME, "
query += " kcu.ORDINAL_POSITION AS COL_KEY_POSITION "
query += "FROM information_schema.key_column_usage kcu "
query += "left join information_schema.columns cols "
query += " on kcu.referenced_table_name = cols.table_name and referenced_column_name = cols.column_name "
query += "WHERE "
query += " kcu.referenced_table_name IS NOT NULL "
query += " AND kcu.CONSTRAINT_SCHEMA = '%s' "%(database)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "order by schema_name, table_name, CONSTRAINT_TYPE, COL_KEY_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[7]
line_dict["REFERENCE_TABLE_NAME"] = row[8]
line_dict["REFERENCE_COL_NAME"] = row[9]
line_dict["COL_KEY_POSITION"] = row[10]
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_UDB:
query = "select "
query += " TRIM(SI.TBCREATOR) as SCHEMA_NAME, "
query += " TRIM(SI.TBNAME) as TABLE_NAME, "
query += " TRIM(SI.NAME) as CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " TRIM(SC.NAME) as COL_NAME, "
query += " TRIM(SC.COLTYPE) as COL_DATA_TYPE, "
query += " SC.LENGTH as COL_DATA_LENGTH, "
query += " SC.SCALE as COL_DATA_SCALE, "
query += " '' as REFERENCE_SCHEMA_NAME, "
query += " '' as REFERENCE_TABLE_NAME, "
query += " '' as REFERENCE_COL_NAME, "
query += " SI.COLCOUNT as ORDINAL_POSITION "
query += "FROM SYSIBM.SYSINDEXES SI "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON SI.TBCREATOR = SC.TBCREATOR "
query += " AND SI.TBNAME = SC.TBNAME "
query += "WHERE "
query += " SI.COLNAMES = CONCAT('+',SC.NAME) "
query += " AND SI.uniquerule = 'P'"
query += " AND SI.TBCREATOR = '%s' "%(schema)
if table != None:
query += " AND SI.TBNAME = '%s' "%(table)
query += "UNION ALL "
query = "SELECT "
query += " TRIM(R.tabschema) as SCHEMA_NAME, "
query += " TRIM(R.tabname) as TABLE_NAME, "
query += " TRIM(R.constname) as CONSTRAINT_NAME, "
query += " 'F' AS CONSTRAINT_TYPE, "
query += " TRIM(C.COLNAME) as COL_NAME, "
query += " SC.COLTYPE as COL_DATA_TYPE, "
query += " SC.LENGTH as COL_DATA_LENGTH, "
query += " SC.SCALE as COL_DATA_SCALE, "
query += " TRIM(R.reftabschema) as REFERENCE_SCHEMA_NAME, "
query += " TRIM(R.reftabname) as REFERENCE_TABLE_NAME, "
query += " TRIM(Cref.COLNAME) as REFERENCE_COL_NAME, "
query += " C.COLSEQ as ORDINAL_POSITION "
query += "FROM syscat.references R "
query += "LEFT JOIN syscat.keycoluse C "
query += " ON R.constname = C.constname "
query += "LEFT JOIN syscat.keycoluse Cref "
query += " ON R.refkeyname = Cref.constname "
query += " AND C.COLSEQ = Cref.COLSEQ "
query += "LEFT JOIN SYSIBM.SYSCOLUMNS SC "
query += " ON R.tabschema = SC.TBCREATOR "
query += " AND R.tabname = SC.TBNAME "
query += " AND TRIM(SC.NAME)= TRIM(R.FK_COLNAMES) "
query += "WHERE "
query += " R.tabschema = '%s' "%(schema)
if table != None:
query += " AND R.tabname = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME, CONSTRAINT_TYPE, ORDINAL_POSITION "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[8]
line_dict["REFERENCE_TABLE_NAME"] = row[9]
line_dict["REFERENCE_COL_NAME"] = row[10]
line_dict["COL_KEY_POSITION"] = int(row[11])
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_AS400:
query = "SELECT "
query += " TRIM(SPK.TABLE_SCHEM) as SCHEMA_NAME, "
query += " TRIM(SPK.TABLE_NAME) as TABLE_NAME, "
query += " TRIM(SPK.PK_NAME) as CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " TRIM(SC.COLUMN_NAME) as COL_NAME, "
query += " SC.TYPE_NAME as COL_DATA_TYPE, "
query += " SC.COLUMN_SIZE as COL_DATA_LENGTH, "
query += " SC.DECIMAL_DIGITS as COL_DATA_SCALE, "
query += " '' as REFERENCE_SCHEMA_NAME, "
query += " '' as REFERENCE_TABLE_NAME, "
query += " '' as REFERENCE_COL_NAME, "
query += " SPK.KEY_SEQ as ORDINAL_POSITION "
query += "FROM SYSIBM.SQLPRIMARYKEYS SPK "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON SPK.TABLE_CAT = SC.TABLE_CAT "
query += " AND SPK.TABLE_SCHEM = SC.TABLE_SCHEM "
query += " AND SPK.TABLE_NAME = SC.TABLE_NAME "
query += " AND SPK.COLUMN_NAME=SC.COLUMN_NAME "
query += "WHERE "
query += " SPK.TABLE_SCHEM = '%s' "%(schema)
if table != None:
query += " AND SPK.TABLE_NAME = '%s' "%(table)
query += "UNION ALL "
query += "SELECT "
query += " TRIM(SFK.FKTABLE_SCHEM) as SCHEMA_NAME, "
query += " TRIM(SFK.FKTABLE_NAME) as TABLE_NAME, "
query += " TRIM(SFK.FK_NAME) as CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " TRIM(SFK.FKCOLUMN_NAME) as COL_NAME, "
query += " SC.TYPE_NAME as COL_DATA_TYPE, "
query += " SC.COLUMN_SIZE as COL_DATA_LENGTH, "
query += " SC.DECIMAL_DIGITS as COL_DATA_SCALE, "
query += " SFK.PKTABLE_SCHEM as REFERENCE_SCHEMA_NAME, "
query += " SFK.PKTABLE_NAME as REFERENCE_TABLE_NAME, "
query += " SFK.PKCOLUMN_NAME as REFERENCE_COL_NAME, "
query += " SFK.KEY_SEQ as ORDINAL_POSITION "
query += "FROM SYSIBM.SQLFOREIGNKEYS SFK "
query += "LEFT JOIN SYSIBM.SQLCOLUMNS SC "
query += " ON SFK.FKTABLE_CAT = SC.TABLE_CAT "
query += " AND SFK.FKTABLE_SCHEM = SC.TABLE_SCHEM "
query += " AND SFK.FKTABLE_NAME = SC.TABLE_NAME "
query += " AND SFK.FKCOLUMN_NAME = SC.COLUMN_NAME "
query += "WHERE "
query += " SFK.FKTABLE_SCHEM = '%s' "%(schema)
if table != None:
query += " AND SFK.FKTABLE_NAME = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME, CONSTRAINT_TYPE, ORDINAL_POSITION"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
if table == None:
line_dict["SCHEMA_NAME"] = row[0]
line_dict["TABLE_NAME"] = row[1]
line_dict["CONSTRAINT_NAME"] = row[2]
line_dict["CONSTRAINT_TYPE"] = row[3]
line_dict["COL_NAME"] = row[4]
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = row[8]
line_dict["REFERENCE_TABLE_NAME"] = row[9]
line_dict["REFERENCE_COL_NAME"] = row[10]
line_dict["COL_KEY_POSITION"] = int(row[11])
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.POSTGRESQL:
query = "SELECT "
query += " distinct kcu.constraint_schema AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " c.conname AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.PRIMARY_KEY)
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'PRIMARY KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), 14, position(')' in pg_get_constraintdef(c.oid))-14) "
query += " END AS COL_NAME, "
query += " '' AS REFERENCE_SCHEMA_NAME, "
query += " '' AS REFERENCE_TABLE_NAME, "
query += " '' AS REFERENCE_COL_NAME "
query += "FROM pg_catalog.pg_constraint c "
query += "LEFT JOIN information_schema.key_column_usage kcu "
query += " ON c.conname = kcu.constraint_name "
query += "LEFT JOIN information_schema.tables ist "
query += " ON ist.table_schema = kcu.constraint_schema "
query += " AND ist.table_name = kcu.table_name "
query += "WHERE "
query += " c.contype = 'p' "
query += " AND pg_get_constraintdef(c.oid) LIKE 'PRIMARY KEY %' "
query += " AND ist.table_catalog = '%s' "%(database)
query += " AND kcu.constraint_schema ='%s' "%(schema)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "UNION "
query += "SELECT "
query += " kcu.constraint_schema AS SCHEMA_NAME, "
query += " kcu.table_name AS TABLE_NAME, "
query += " c.conname AS CONSTRAINT_NAME, "
query += " '%s' AS CONSTRAINT_TYPE, "%(constant.FOREIGN_KEY)
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), 14, position(')' in pg_get_constraintdef(c.oid))-14) "
query += " END AS COL_NAME, "
query += " '' AS REFERENCE_SCHEMA_NAME,"
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), position(' REFERENCES ' in pg_get_constraintdef(c.oid))+12, position('(' in substring(pg_get_constraintdef(c.oid), 14))-position(' REFERENCES ' in pg_get_constraintdef(c.oid))+1) "
query += " END AS REFERENCE_TABLE_NAME, "
query += " CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " THEN substring(pg_get_constraintdef(c.oid), position('(' in substring(pg_get_constraintdef(c.oid), 14))+14, position(')' in substring(pg_get_constraintdef(c.oid), position('(' in substring(pg_get_constraintdef(c.oid), 14))+14))-1) "
query += " END AS REFERENCE_COL_NAME "
query += "FROM pg_catalog.pg_constraint c "
query += "LEFT JOIN information_schema.key_column_usage kcu "
query += " ON c.conname = kcu.constraint_name "
query += "LEFT JOIN information_schema.tables ist "
query += " ON ist.table_schema=kcu.constraint_schema "
query += " AND ist.table_name=kcu.table_name "
query += "WHERE "
query += " c.contype = 'f' AND contype IN ('f', 'p') "
query += " AND pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %' "
query += " AND ist.table_catalog = '%s' "%(database)
query += " AND kcu.constraint_schema ='%s' "%(schema)
if table != None:
query += " AND kcu.table_name = '%s' "%(table)
query += "ORDER BY SCHEMA_NAME, TABLE_NAME,CONSTRAINT_TYPE "
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
schemaName = row[0]
tableName = row[1]
constraintName = row[2]
constraintType = row[3]
colName = row[4].strip('"')
refSchemaName = row[5]
refTableName = row[6].strip('"')
refColName = row[7].strip('"')
colKeyPosition = 1
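            # The parsed constraint definition may hold a schema-qualified reference table and a
            # comma-separated list of key columns; default the reference schema to "public" when missing.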
if constraintType == constant.FOREIGN_KEY:
if refSchemaName == "" and "." in refTableName:
refArray = refTableName.split(".")
refSchemaName = refArray[0]
refTableName = refArray[1]
if refSchemaName == "":
refSchemaName = "public"
colNameList = colName.split(",")
refColNameList = refColName.split(",")
            for i, column in enumerate(colNameList):
                # Use a fresh dict for every key column so composite keys produce one distinct row per column
                line_dict = {}
                colName = colNameList[i]
                refColName = refColNameList[i]
if table == None:
line_dict["SCHEMA_NAME"] = schemaName
line_dict["TABLE_NAME"] = tableName
line_dict["CONSTRAINT_NAME"] = constraintName
line_dict["CONSTRAINT_TYPE"] = constraintType
line_dict["COL_NAME"] = colName
# line_dict["COL_DATA_TYPE"] = line.split('|')[5]
line_dict["REFERENCE_SCHEMA_NAME"] = refSchemaName
line_dict["REFERENCE_TABLE_NAME"] = refTableName
line_dict["REFERENCE_COL_NAME"] = refColName
line_dict["COL_KEY_POSITION"] = colKeyPosition
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
# In some cases, we get duplicate Foreign Keys. This removes all duplicate entries
result_df.drop_duplicates(keep="first", inplace=True)
logging.debug(result_df)
logging.debug("Executing schemaReader.readKeys() - Finished")
return result_df
def readTableIndex(self, JDBCCursor, serverType = None, database = None, schema = None, table = None):
logging.debug("Executing schemaReader.readTableColumns()")
query = None
result_df = pd.DataFrame()
if serverType == constant.MSSQL:
query = ""
query += "select i.name,"
query += " i.type, "
query += " i.is_unique, "
query += " col.name, "
query += " ic.index_column_id, "
query += " col.is_nullable "
query += "from sys.objects t "
query += " inner join sys.indexes i "
query += " on t.object_id = i.object_id "
query += " inner join sys.index_columns ic "
query += " on ic.object_id = t.object_id "
query += " and ic.index_id = i.index_id "
query += " inner join sys.columns col "
query += " on col.object_id = t.object_id "
query += " and col.column_id = ic.column_id "
query += "where schema_name(t.schema_id) = '%s' "%(schema)
query += "and t.name = '%s' "%(table)
query += "order by i.object_id, i.index_id"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
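            # Translate the numeric uniqueness flag and index type code from sys.indexes into readable strings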
uniqueDict = { 0: "Not unique", 1: "Unique" }
indexTypeDict = {
1: "Clustered index",
2: "Nonclustered unique index",
3: "XML index",
4: "Spatial index",
5: "Clustered columnstore index",
6: "Nonclustered columnstore index",
7: "Nonclustered hash index"
}
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
line_dict["Name"] = row[0]
line_dict["Type"] = indexTypeDict.get(row[1], row[1])
line_dict["Unique"] = uniqueDict.get(int(row[2]), int(row[2]))
line_dict["Column"] = row[3]
line_dict["ColumnOrder"] = row[4]
line_dict["IsNullable"] = row[5]
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.ORACLE:
query = ""
query += "SELECT "
query += " ai.index_name, "
query += " ai.index_type, "
query += " ai.uniqueness, "
query += " aic.column_name, "
query += " aic.column_position, "
query += " atc.nullable "
query += "FROM all_indexes ai "
query += "INNER JOIN all_ind_columns aic "
query += " ON ai.owner = aic.index_owner "
query += " AND ai.index_name = aic.index_name "
query += "INNER JOIN all_tab_columns atc "
query += " ON ai.owner = atc.owner "
query += " AND ai.table_name = atc.table_name "
query += " AND aic.column_name = atc.column_name "
query += "WHERE ai.owner = UPPER('%s') "%(schema)
query += " AND ai.table_name = UPPER('%s') "%(table)
query += "ORDER BY aic.column_position"
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
line_dict["Name"] = row[0]
line_dict["Type"] = row[1].capitalize()
if row[2] == "NONUNIQUE":
line_dict["Unique"] = "Not unique"
else:
line_dict["Unique"] = row[2].capitalize()
line_dict["Column"] = row[3]
line_dict["ColumnOrder"] = row[4]
if row[5] == "N":
line_dict["IsNullable"] = 0
else:
line_dict["IsNullable"] = 1
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.MYSQL:
query = "SHOW INDEX FROM `%s`.`%s`"%(database, table)
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
            # Order of columns from "SHOW INDEX" is fixed. If MySQL changes the standard, we need to change it here as well
line_dict["Name"] = row[2]
line_dict["Type"] = row[10].capitalize()
if row[1] == "1":
line_dict["Unique"] = "Not unique"
else:
line_dict["Unique"] = "Unique"
line_dict["Column"] = row[4]
line_dict["ColumnOrder"] = row[3]
if row[9] == "YES":
line_dict["IsNullable"] = 1
else:
line_dict["IsNullable"] = 0
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_UDB:
query = "select I.INDNAME, I.INDEXTYPE, I.UNIQUERULE, IC.COLNAME, IC.COLSEQ, C.NULLS "
query += "from SYSCAT.INDEXES I "
query += "left join SYSCAT.INDEXCOLUSE IC "
query += " on I.INDSCHEMA = IC.INDSCHEMA "
query += " and I.INDNAME = IC.INDNAME "
query += "left join SYSCAT.COLUMNS C "
query += " on I.TABNAME = C.TABNAME "
query += " and I.TABSCHEMA = C.TABSCHEMA "
query += " and IC.COLNAME = C.COLNAME "
query += "where I.TABNAME = '%s' "%(table)
query += " and I.TABSCHEMA = '%s' "%(schema)
logging.debug("SQL Statement executed: %s" % (query) )
try:
JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
logging.error("Failure when communicating with JDBC database. %s"%(errMsg))
return result_df
uniqueDict = { "D": "Not unique", "U": "Unique", "P": "Unique - PrimaryKey" }
indexTypeDict = {
"BLOK": "Block index",
"CLUS": "Clustering index",
"DIM": "Dimension block index",
"REG": "Regular index",
"XPTH": "XML path index",
"XRGN": "XML region index",
"XVIL": "Index over XML column (logical)",
"XVIP": "Index over XML column (physical)"
}
rows_list = []
for row in JDBCCursor.fetchall():
logging.debug(row)
line_dict = {}
line_dict["Name"] = row[0]
line_dict["Type"] = indexTypeDict.get(row[1].strip(), row[1].strip())
line_dict["Unique"] = uniqueDict.get(row[2].strip(), row[2].strip())
line_dict["Column"] = row[3]
line_dict["ColumnOrder"] = row[4]
if row[5] == "N":
line_dict["IsNullable"] = 0
else:
line_dict["IsNullable"] = 1
rows_list.append(line_dict)
result_df = pd.DataFrame(rows_list)
if serverType == constant.DB2_AS400:
logging.warning("Reading Index information from DB2AS400 connections is not supported. Please contact developer if this is required")
if serverType == constant.POSTGRESQL:
logging.warning("Reading Index information from PostgreSQL connections is not supported. Please contact developer if this is required")
return result_df
def getJDBCtablesAndViews(self, JDBCCursor, serverType, database=None, schemaFilter=None, tableFilter=None):
logging.debug("Executing schemaReader.getJDBCtablesAndViews()")
if schemaFilter != None:
schemaFilter = schemaFilter.replace('*', '%')
if tableFilter != None:
tableFilter = tableFilter.replace('*', '%')
if serverType == constant.MSSQL:
query = "select TABLE_SCHEMA, TABLE_NAME from INFORMATION_SCHEMA.TABLES "
if schemaFilter != None:
query += "where TABLE_SCHEMA like '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "and TABLE_NAME like '%s' "%(tableFilter)
else:
query += "where TABLE_NAME like '%s' "%(tableFilter)
query += "order by TABLE_SCHEMA, TABLE_NAME"
if serverType == constant.ORACLE:
query = "select OWNER, TABLE_NAME as NAME from all_tables "
if schemaFilter != None:
query += "where OWNER like '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "and TABLE_NAME like '%s' "%(tableFilter)
else:
query += "where TABLE_NAME like '%s' "%(tableFilter)
query += "union all "
query += "select OWNER, VIEW_NAME as NAME from all_views "
if schemaFilter != None:
query += "where OWNER like '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "and VIEW_NAME like '%s' "%(tableFilter)
else:
query += "where VIEW_NAME like '%s' "%(tableFilter)
query += "order by OWNER, NAME "
if serverType == constant.MYSQL:
# query = "select '-', table_name from INFORMATION_SCHEMA.tables where table_schema = '%s' "%(self.jdbc_database)
query = "select '-', table_name from INFORMATION_SCHEMA.tables where table_schema = '%s' "%(database)
if tableFilter != None:
query += "and table_name like '%s' "%(tableFilter)
query += "order by table_name"
if serverType == constant.POSTGRESQL:
query = "select TABLE_SCHEMA, TABLE_NAME from INFORMATION_SCHEMA.TABLES "
if schemaFilter != None:
query += "where TABLE_SCHEMA like '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "and TABLE_NAME like '%s' "%(tableFilter)
else:
query += "where TABLE_NAME like '%s' "%(tableFilter)
query += "order by TABLE_SCHEMA, TABLE_NAME"
if serverType == constant.PROGRESS:
query = "select \"_Owner\", \"_File-Name\" from PUB.\"_File\" "
if schemaFilter != None:
query += "WHERE \"_Owner\" LIKE '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "AND \"_File-Name\" LIKE '%s' "%(tableFilter)
else:
query += "WHERE \"_File-Name\" LIKE '%s' "%(tableFilter)
query += "ORDER BY \"_Owner\", \"_File-Name\""
if serverType == constant.DB2_UDB:
query = "SELECT CREATOR, NAME FROM SYSIBM.SYSTABLES "
if schemaFilter != None:
query += "WHERE CREATOR LIKE '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "AND NAME LIKE '%s' "%(tableFilter)
else:
query += "WHERE NAME LIKE '%s' "%(tableFilter)
query += "ORDER BY CREATOR, NAME"
if serverType == constant.DB2_AS400:
query = "SELECT TABLE_SCHEM, TABLE_NAME FROM SYSIBM.SQLTABLES "
if schemaFilter != None:
query += "WHERE TABLE_SCHEM LIKE '%s' "%(schemaFilter)
if tableFilter != None:
if schemaFilter != None:
query += "AND TABLE_NAME LIKE '%s' "%(tableFilter)
else:
query += "WHERE TABLE_NAME LIKE '%s' "%(tableFilter)
query += "ORDER BY TABLE_SCHEM, TABLE_NAME"
logging.debug("SQL Statement executed: %s" % (query) )
JDBCCursor.execute(query)
result_df = pd.DataFrame(JDBCCursor.fetchall())
if len(result_df) > 0:
result_df.columns = ['schema', 'table']
else:
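            # No tables or views matched the filters; return an empty DataFrame with the expected columns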
result_df = pd.DataFrame(columns=['schema', 'table'])
logging.debug("Executing schemaReader.getJDBCtablesAndViews() - Finished")
return result_df
def getJdbcTableType(self, serverType, tableTypeFromSource):
""" Returns the table type of the table """
logging.debug("Executing schemaReader.getJdbcTableType()")
# if self.source_columns_df.empty == True:
if tableTypeFromSource == None:
logging.warning("No metadata for tableType sent to getJdbcTableType()")
return None
# tableTypeFromSource = self.source_columns_df.iloc[0]["TABLE_TYPE"]
tableType = None
if serverType == constant.MSSQL:
# BASE TABLE, VIEW
if tableTypeFromSource == "VIEW": tableType = "view"
else: tableType = "table"
elif serverType == constant.ORACLE:
# TABLE, VIEW
if tableTypeFromSource == "VIEW": tableType = "view"
else: tableType = "table"
elif serverType == constant.MYSQL:
# BASE TABLE, VIEW, SYSTEM VIEW (for an INFORMATION_SCHEMA table)
if tableTypeFromSource == "VIEW": tableType = "view"
else: tableType = "table"
elif serverType == constant.POSTGRESQL:
# BASE TABLE, VIEW, FOREIGN TABLE, LOCAL TEMPORARY
if tableTypeFromSource == "VIEW": tableType = "view"
if tableTypeFromSource == "LOCAL TEMPORARY": tableType = "temporary"
else: tableType = "table"
elif serverType == constant.PROGRESS:
            # Unsure. Can't find documentation.
# Verified T=Table
# We assume V=View
if tableTypeFromSource == "V": tableType = "view"
else: tableType = "table"
elif serverType == constant.DB2_UDB or serverType == constant.DB2_AS400:
# A = Alias
# C = Clone Table
# D = Accelerator-only table
# G = Global temporary table
# H = History Table
# M = Materialized query table
# P = Table that was implicitly created for XML columns
# R = Archive table
# T = Table
# V = View
# X = Auxiliary table
if tableTypeFromSource == "A": tableType = "view"
if tableTypeFromSource == "V": tableType = "view"
else: tableType = "table"
logging.debug("Executing schemaReader.getJdbcTableType() - Finished")
return tableType
|
[
"pandas.DataFrame",
"logging.error",
"logging.debug",
"logging.warning",
"datetime.datetime.strptime",
"re.search"
] |
[((1101, 1150), 'logging.debug', 'logging.debug', (['"""Initiating schemaReader.source()"""'], {}), "('Initiating schemaReader.source()')\n", (1114, 1150), False, 'import logging\n'), ((1357, 1415), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.readTableColumns()"""'], {}), "('Executing schemaReader.readTableColumns()')\n", (1370, 1415), False, 'import logging\n'), ((1445, 1459), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1457, 1459), True, 'import pandas as pd\n'), ((24901, 24925), 'logging.debug', 'logging.debug', (['result_df'], {}), '(result_df)\n', (24914, 24925), False, 'import logging\n'), ((24928, 24990), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.readTable() - Finished"""'], {}), "('Executing schemaReader.readTable() - Finished')\n", (24941, 24990), False, 'import logging\n'), ((25116, 25171), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.readTableKeys()"""'], {}), "('Executing schemaReader.readTableKeys()')\n", (25129, 25171), False, 'import logging\n'), ((25201, 25215), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (25213, 25215), True, 'import pandas as pd\n'), ((48443, 48467), 'logging.debug', 'logging.debug', (['result_df'], {}), '(result_df)\n', (48456, 48467), False, 'import logging\n'), ((48470, 48531), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.readKeys() - Finished"""'], {}), "('Executing schemaReader.readKeys() - Finished')\n", (48483, 48531), False, 'import logging\n'), ((48658, 48716), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.readTableColumns()"""'], {}), "('Executing schemaReader.readTableColumns()')\n", (48671, 48716), False, 'import logging\n'), ((48746, 48760), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (48758, 48760), True, 'import pandas as pd\n'), ((55362, 55425), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.getJDBCtablesAndViews()"""'], {}), "('Executing schemaReader.getJDBCtablesAndViews()')\n", (55375, 55425), False, 'import logging\n'), ((58798, 58849), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (58811, 58849), False, 'import logging\n'), ((59068, 59142), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.getJDBCtablesAndViews() - Finished"""'], {}), "('Executing schemaReader.getJDBCtablesAndViews() - Finished')\n", (59081, 59142), False, 'import logging\n'), ((59275, 59333), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.getJdbcTableType()"""'], {}), "('Executing schemaReader.getJdbcTableType()')\n", (59288, 59333), False, 'import logging\n'), ((61039, 61108), 'logging.debug', 'logging.debug', (['"""Executing schemaReader.getJdbcTableType() - Finished"""'], {}), "('Executing schemaReader.getJdbcTableType() - Finished')\n", (61052, 61108), False, 'import logging\n'), ((3860, 3911), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (3873, 3911), False, 'import logging\n'), ((5910, 5933), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (5922, 5933), True, 'import pandas as pd\n'), ((9137, 9188), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (9150, 9188), False, 'import logging\n'), ((11444, 11467), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (11456, 11467), True, 'import pandas as pd\n'), 
((12357, 12408), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (12370, 12408), False, 'import logging\n'), ((14010, 14033), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (14022, 14033), True, 'import pandas as pd\n'), ((14976, 15027), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (14989, 15027), False, 'import logging\n'), ((16700, 16723), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (16712, 16723), True, 'import pandas as pd\n'), ((17904, 17955), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (17917, 17955), False, 'import logging\n'), ((19643, 19666), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (19655, 19666), True, 'import pandas as pd\n'), ((20868, 20919), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (20881, 20919), False, 'import logging\n'), ((22285, 22308), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (22297, 22308), True, 'import pandas as pd\n'), ((23136, 23187), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (23149, 23187), False, 'import logging\n'), ((24874, 24897), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (24886, 24897), True, 'import pandas as pd\n'), ((29119, 29170), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (29132, 29170), False, 'import logging\n'), ((29959, 29982), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (29971, 29982), True, 'import pandas as pd\n'), ((32786, 32837), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (32799, 32837), False, 'import logging\n'), ((33631, 33654), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (33643, 33654), True, 'import pandas as pd\n'), ((36032, 36083), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (36045, 36083), False, 'import logging\n'), ((36872, 36895), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (36884, 36895), True, 'import pandas as pd\n'), ((39288, 39339), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (39301, 39339), False, 'import logging\n'), ((40134, 40157), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (40146, 40157), True, 'import pandas as pd\n'), ((42519, 42570), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (42532, 42570), False, 'import logging\n'), ((43365, 43388), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (43377, 43388), True, 'import pandas as pd\n'), ((46652, 46703), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (46665, 46703), False, 'import logging\n'), ((48274, 48297), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), 
'(rows_list)\n', (48286, 48297), True, 'import pandas as pd\n'), ((49598, 49649), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (49611, 49649), False, 'import logging\n'), ((50542, 50565), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (50554, 50565), True, 'import pandas as pd\n'), ((51390, 51441), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (51403, 51441), False, 'import logging\n'), ((52147, 52170), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (52159, 52170), True, 'import pandas as pd\n'), ((52268, 52319), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (52281, 52319), False, 'import logging\n'), ((53120, 53143), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (53132, 53143), True, 'import pandas as pd\n'), ((53728, 53779), 'logging.debug', 'logging.debug', (["('SQL Statement executed: %s' % query)"], {}), "('SQL Statement executed: %s' % query)\n", (53741, 53779), False, 'import logging\n'), ((54848, 54871), 'pandas.DataFrame', 'pd.DataFrame', (['rows_list'], {}), '(rows_list)\n', (54860, 54871), True, 'import pandas as pd\n'), ((54915, 55058), 'logging.warning', 'logging.warning', (['"""Reading Index information from DB2AS400 connections is not supported. Please contact developer if this is required"""'], {}), "(\n 'Reading Index information from DB2AS400 connections is not supported. Please contact developer if this is required'\n )\n", (54930, 55058), False, 'import logging\n'), ((55093, 55238), 'logging.warning', 'logging.warning', (['"""Reading Index information from PostgreSQL connections is not supported. Please contact developer if this is required"""'], {}), "(\n 'Reading Index information from PostgreSQL connections is not supported. 
Please contact developer if this is required'\n )\n", (55108, 55238), False, 'import logging\n'), ((59023, 59064), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['schema', 'table']"}), "(columns=['schema', 'table'])\n", (59035, 59064), True, 'import pandas as pd\n'), ((59416, 59487), 'logging.warning', 'logging.warning', (['"""No metadata for tableType sent to getJdbcTableType()"""'], {}), "('No metadata for tableType sent to getJdbcTableType()')\n", (59431, 59487), False, 'import logging\n'), ((4160, 4178), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (4173, 4178), False, 'import logging\n'), ((9437, 9455), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (9450, 9455), False, 'import logging\n'), ((12657, 12675), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (12670, 12675), False, 'import logging\n'), ((15281, 15299), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (15294, 15299), False, 'import logging\n'), ((18204, 18222), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (18217, 18222), False, 'import logging\n'), ((21168, 21186), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (21181, 21186), False, 'import logging\n'), ((23436, 23454), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (23449, 23454), False, 'import logging\n'), ((29419, 29437), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (29432, 29437), False, 'import logging\n'), ((33086, 33104), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (33099, 33104), False, 'import logging\n'), ((36332, 36350), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (36345, 36350), False, 'import logging\n'), ((39588, 39606), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (39601, 39606), False, 'import logging\n'), ((42819, 42837), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (42832, 42837), False, 'import logging\n'), ((46952, 46970), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (46965, 46970), False, 'import logging\n'), ((50193, 50211), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (50206, 50211), False, 'import logging\n'), ((51690, 51708), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (51703, 51708), False, 'import logging\n'), ((52568, 52586), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (52581, 52586), False, 'import logging\n'), ((54416, 54434), 'logging.debug', 'logging.debug', (['row'], {}), '(row)\n', (54429, 54434), False, 'import logging\n'), ((4003, 4078), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (4016, 4078), False, 'import logging\n'), ((5718, 5768), 'datetime.datetime.strptime', 'datetime.strptime', (['row[11]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(row[11], '%Y-%m-%d %H:%M:%S.%f')\n", (5735, 5768), False, 'from datetime import datetime\n'), ((9280, 9355), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (9293, 9355), False, 'import logging\n'), ((11255, 11302), 'datetime.datetime.strptime', 'datetime.strptime', (['row[12]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row[12], '%Y-%m-%d %H:%M:%S')\n", (11272, 11302), False, 'from datetime import datetime\n'), ((12500, 12575), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. 
%s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (12513, 12575), False, 'import logging\n'), ((13821, 13868), 'datetime.datetime.strptime', 'datetime.strptime', (['row[11]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row[11], '%Y-%m-%d %H:%M:%S')\n", (13838, 13868), False, 'from datetime import datetime\n'), ((15119, 15194), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (15132, 15194), False, 'import logging\n'), ((16508, 16558), 'datetime.datetime.strptime', 'datetime.strptime', (['row[10]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(row[10], '%Y-%m-%d %H:%M:%S.%f')\n", (16525, 16558), False, 'from datetime import datetime\n'), ((18047, 18122), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (18060, 18122), False, 'import logging\n'), ((19451, 19501), 'datetime.datetime.strptime', 'datetime.strptime', (['row[10]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(row[10], '%Y-%m-%d %H:%M:%S.%f')\n", (19468, 19501), False, 'from datetime import datetime\n'), ((21011, 21086), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (21024, 21086), False, 'import logging\n'), ((23279, 23354), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (23292, 23354), False, 'import logging\n'), ((29262, 29337), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (29275, 29337), False, 'import logging\n'), ((32929, 33004), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (32942, 33004), False, 'import logging\n'), ((36175, 36250), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (36188, 36250), False, 'import logging\n'), ((39431, 39506), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (39444, 39506), False, 'import logging\n'), ((42662, 42737), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (42675, 42737), False, 'import logging\n'), ((46795, 46870), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (46808, 46870), False, 'import logging\n'), ((49741, 49816), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (49754, 49816), False, 'import logging\n'), ((51533, 51608), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. 
%s' % errMsg)\n", (51546, 51608), False, 'import logging\n'), ((52411, 52486), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (52424, 52486), False, 'import logging\n'), ((53871, 53946), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (53884, 53946), False, 'import logging\n'), ((6582, 6657), 'logging.error', 'logging.error', (["('Failure when communicating with JDBC database. %s' % errMsg)"], {}), "('Failure when communicating with JDBC database. %s' % errMsg)\n", (6595, 6657), False, 'import logging\n'), ((9977, 10007), 're.search', 're.search', (['"""TIMESTAMP"""', 'row[4]'], {}), "('TIMESTAMP', row[4])\n", (9986, 10007), False, 'import re\n')]
|
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines TensorFlow model.
Defines features and classification model.
Typical usage example:
model.create_classifier(config, parameters)
"""
import math
import tensorflow as tf
import tensorflow_hub as hub
BEE_SUBSPECIES = ['Other', 'Carniolan', 'Italian', 'Russian']
def _estimator_metrics(labels, predictions):
"""Creates metrics for Estimator.
  Metrics defined here can be used to evaluate the model (on evaluation
  data) and can also be maximized or minimized during hyper-parameter
  tuning.
Args:
labels: Evaluation true labels.
predictions: Evaluation model predictions.
Returns:
A dictionary with the evaluation metrics
"""
pred_logistic = predictions['logistic']
pred_class = predictions['class_ids']
return {
'accuracy': tf.metrics.accuracy(labels, pred_class),
'auc_roc': tf.metrics.auc(labels, pred_logistic),
'auc_pr': tf.metrics.auc(labels, pred_logistic, curve='PR'),
'precision': tf.metrics.precision(labels, pred_class),
'recall': tf.metrics.recall(labels, pred_class)}
def create_classifier(config, parameters):
"""Creates a DNN classifier.
Defines features and builds an 'Estimator' with them.
Args:
config: `RunConfig` object to configure the runtime of the `Estimator`.
parameters: Parameters passed to the job.
Returns:
`tf.estimator.DNNClassifier` with specified features and architecture.
"""
# Columns to be used as features.
subspecies = tf.feature_column.categorical_column_with_vocabulary_list(
'subspecies',
vocabulary_list=BEE_SUBSPECIES,
default_value=0)
subspecies = tf.feature_column.embedding_column(
subspecies, dimension=parameters.subspecies_embedding)
image = hub.image_embedding_column('image', parameters.tf_hub_module)
feature_cols = [subspecies, image]
layer = parameters.first_layer_size
lfrac = parameters.layer_reduction_fraction
nlayers = parameters.number_layers
h_units = [layer]
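  # Shrink each successive hidden layer by the configured reduction fraction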
for _ in range(nlayers - 1):
h_units.append(math.ceil(layer * lfrac))
layer = h_units[-1]
estimator = tf.estimator.DNNClassifier(
feature_columns=feature_cols,
hidden_units=h_units,
optimizer=tf.train.AdagradOptimizer(
learning_rate=parameters.learning_rate),
dropout=parameters.dropout, config=config)
estimator = tf.contrib.estimator.add_metrics(
estimator, _estimator_metrics)
estimator = tf.contrib.estimator.forward_features(estimator, 'img_file')
return estimator
|
[
"tensorflow_hub.image_embedding_column",
"tensorflow.feature_column.categorical_column_with_vocabulary_list",
"tensorflow.metrics.accuracy",
"tensorflow.feature_column.embedding_column",
"math.ceil",
"tensorflow.contrib.estimator.add_metrics",
"tensorflow.metrics.recall",
"tensorflow.train.AdagradOptimizer",
"tensorflow.metrics.precision",
"tensorflow.metrics.auc",
"tensorflow.contrib.estimator.forward_features"
] |
[((2232, 2356), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'tf.feature_column.categorical_column_with_vocabulary_list', (['"""subspecies"""'], {'vocabulary_list': 'BEE_SUBSPECIES', 'default_value': '(0)'}), "('subspecies',\n vocabulary_list=BEE_SUBSPECIES, default_value=0)\n", (2289, 2356), True, 'import tensorflow as tf\n'), ((2395, 2489), 'tensorflow.feature_column.embedding_column', 'tf.feature_column.embedding_column', (['subspecies'], {'dimension': 'parameters.subspecies_embedding'}), '(subspecies, dimension=parameters.\n subspecies_embedding)\n', (2429, 2489), True, 'import tensorflow as tf\n'), ((2507, 2568), 'tensorflow_hub.image_embedding_column', 'hub.image_embedding_column', (['"""image"""', 'parameters.tf_hub_module'], {}), "('image', parameters.tf_hub_module)\n", (2533, 2568), True, 'import tensorflow_hub as hub\n'), ((3147, 3210), 'tensorflow.contrib.estimator.add_metrics', 'tf.contrib.estimator.add_metrics', (['estimator', '_estimator_metrics'], {}), '(estimator, _estimator_metrics)\n', (3179, 3210), True, 'import tensorflow as tf\n'), ((3236, 3296), 'tensorflow.contrib.estimator.forward_features', 'tf.contrib.estimator.forward_features', (['estimator', '"""img_file"""'], {}), "(estimator, 'img_file')\n", (3273, 3296), True, 'import tensorflow as tf\n'), ((1510, 1549), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['labels', 'pred_class'], {}), '(labels, pred_class)\n', (1529, 1549), True, 'import tensorflow as tf\n'), ((1570, 1607), 'tensorflow.metrics.auc', 'tf.metrics.auc', (['labels', 'pred_logistic'], {}), '(labels, pred_logistic)\n', (1584, 1607), True, 'import tensorflow as tf\n'), ((1627, 1676), 'tensorflow.metrics.auc', 'tf.metrics.auc', (['labels', 'pred_logistic'], {'curve': '"""PR"""'}), "(labels, pred_logistic, curve='PR')\n", (1641, 1676), True, 'import tensorflow as tf\n'), ((1699, 1739), 'tensorflow.metrics.precision', 'tf.metrics.precision', (['labels', 'pred_class'], {}), '(labels, pred_class)\n', (1719, 1739), True, 'import tensorflow as tf\n'), ((1759, 1796), 'tensorflow.metrics.recall', 'tf.metrics.recall', (['labels', 'pred_class'], {}), '(labels, pred_class)\n', (1776, 1796), True, 'import tensorflow as tf\n'), ((2815, 2839), 'math.ceil', 'math.ceil', (['(layer * lfrac)'], {}), '(layer * lfrac)\n', (2824, 2839), False, 'import math\n'), ((3000, 3065), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': 'parameters.learning_rate'}), '(learning_rate=parameters.learning_rate)\n', (3025, 3065), True, 'import tensorflow as tf\n')]
|
"""Data Processor.
Crunches the numbers and calculates the time left, as well as writes the status
file so we only need to code that once.
Author:
<NAME> <<EMAIL>>
"""
import datetime
from utils.slacker import Slacker
def process_data(step, epoch, accuracy, loss, rate, status_file_path,
validation, max_step, max_epoch, validation_steps):
"""Processes the time left and writes the status file.
Returns:
str: Time left as a human readable string.
float: Running step count.
float: Total number of steps the trainer will iterate through.
"""
# Calculate time left
steps_total = 1
running_step_count = 0
finished = False
if rate == 0:
time_left = "NaN"
finish_at = "NaN"
else:
steps_total = float((max_step * max_epoch))
# Add the validation steps
steps_total += float(validation_steps * max_epoch)
        # If we're in validation, all max_step training steps of this epoch are
        # already behind us on top of the validation steps counted by `step`,
        # so we account for them by adding validation * max_step
steps_done_this_epoch = float(step + 1
+ (validation * max_step))
steps_times_epochs_done = float(max_step * (epoch - 1)
+ validation_steps * (epoch - 1))
running_step_count = steps_done_this_epoch + steps_times_epochs_done
steps_left = (steps_total - running_step_count)
time_left = int(steps_left / rate)
time_left = datetime.timedelta(seconds=time_left)
finish_at = datetime.datetime.now() + time_left
finish_at = finish_at.strftime("%a, %d %b, %I:%M:%S %p")
max_step = validation_steps if validation else max_step
# Now write the status file
if (step % 10 == 0 or (step == validation_steps
and epoch == max_epoch
and validation))\
and not status_file_path == '':
with open(status_file_path, 'w') as status_file:
lines = ["Step: {}/{}\n".format(step, max_step),
"Epoch: {}/{}\n".format(epoch, max_epoch),
"Accuracy: {:.3f}%, {:.3f}%, {:.3f}%\n".format(
accuracy[0] * 100., accuracy[1] * 100.,
accuracy[2] * 100.),
"Loss: {:.3f}\n".format(loss),
"Rate: {:.3f} steps/s\n".format(rate),
"Time left: {}\n".format(str(time_left)),
"Finishes at: {}\n".format(finish_at)
]
if step == validation_steps and epoch == max_epoch and validation:
finish_at = datetime.datetime.now()
finish_at = finish_at.strftime("%a, %d %b, %I:%M:%S %p")
lines[5] = "Time left: -\n"
lines[6] = "Finished at: {}".format(finish_at)
lines.append("\nFinished training.\n")
message = "".join(lines)
Slacker.send_message(message, "Finished Training")
finished = True
status_file.writelines(lines)
return time_left, running_step_count, steps_total
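# Worked example of the arithmetic above (all numbers are hypothetical and only
# illustrate how running_step_count and time_left are derived):
#   max_step=100, max_epoch=3, validation_steps=10, step=49, epoch=2,
#   validation=0, rate=5 steps/s
#     steps_total             = 100 * 3 + 10 * 3             = 330
#     steps_done_this_epoch   = 49 + 1 + 0 * 100             = 50
#     steps_times_epochs_done = 100 * (2 - 1) + 10 * (2 - 1) = 110
#     running_step_count      = 50 + 110                     = 160
#     time_left               = (330 - 160) / 5              = 34 seconds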
|
[
"utils.slacker.Slacker.send_message",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((1535, 1572), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'time_left'}), '(seconds=time_left)\n', (1553, 1572), False, 'import datetime\n'), ((1593, 1616), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1614, 1616), False, 'import datetime\n'), ((2710, 2733), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2731, 2733), False, 'import datetime\n'), ((3027, 3077), 'utils.slacker.Slacker.send_message', 'Slacker.send_message', (['message', '"""Finished Training"""'], {}), "(message, 'Finished Training')\n", (3047, 3077), False, 'from utils.slacker import Slacker\n')]
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import spi, pn532
from esphome.const import CONF_ID
AUTO_LOAD = ["pn532"]
CODEOWNERS = ["@OttoWinter", "@jesserockz"]
DEPENDENCIES = ["spi"]
MULTI_CONF = True
pn532_spi_ns = cg.esphome_ns.namespace("pn532_spi")
PN532Spi = pn532_spi_ns.class_("PN532Spi", pn532.PN532, spi.SPIDevice)
CONFIG_SCHEMA = cv.All(
pn532.PN532_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(PN532Spi),
}
).extend(spi.spi_device_schema(cs_pin_required=True))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await pn532.setup_pn532(var, config)
await spi.register_spi_device(var, config)
|
[
"esphome.components.pn532.setup_pn532",
"esphome.components.spi.spi_device_schema",
"esphome.components.spi.register_spi_device",
"esphome.codegen.new_Pvariable",
"esphome.config_validation.GenerateID",
"esphome.config_validation.declare_id",
"esphome.codegen.esphome_ns.namespace"
] |
[((268, 304), 'esphome.codegen.esphome_ns.namespace', 'cg.esphome_ns.namespace', (['"""pn532_spi"""'], {}), "('pn532_spi')\n", (291, 304), True, 'import esphome.codegen as cg\n'), ((605, 638), 'esphome.codegen.new_Pvariable', 'cg.new_Pvariable', (['config[CONF_ID]'], {}), '(config[CONF_ID])\n', (621, 638), True, 'import esphome.codegen as cg\n'), ((519, 562), 'esphome.components.spi.spi_device_schema', 'spi.spi_device_schema', ([], {'cs_pin_required': '(True)'}), '(cs_pin_required=True)\n', (540, 562), False, 'from esphome.components import spi, pn532\n'), ((649, 679), 'esphome.components.pn532.setup_pn532', 'pn532.setup_pn532', (['var', 'config'], {}), '(var, config)\n', (666, 679), False, 'from esphome.components import spi, pn532\n'), ((690, 726), 'esphome.components.spi.register_spi_device', 'spi.register_spi_device', (['var', 'config'], {}), '(var, config)\n', (713, 726), False, 'from esphome.components import spi, pn532\n'), ((454, 469), 'esphome.config_validation.GenerateID', 'cv.GenerateID', ([], {}), '()\n', (467, 469), True, 'import esphome.config_validation as cv\n'), ((471, 494), 'esphome.config_validation.declare_id', 'cv.declare_id', (['PN532Spi'], {}), '(PN532Spi)\n', (484, 494), True, 'import esphome.config_validation as cv\n')]
|
import unittest
import numpy as np
from collections import namedtuple
from pyrostest import RosTest, with_launch_file, launch_node
from process.bearing import calculate_directions
from sensor_msgs.msg import NavSatFix
from std_msgs.msg import Float64
fix = namedtuple('fix', ['latitude', 'longitude'])
class TestBearing(unittest.TestCase):
def test_distance(self):
fix1 = fix(33.636700, -84.427863)
fix2 = fix(39.029128, -111.838257)
assert np.isclose(calculate_directions.get_distance(fix1, fix2), 2517000, rtol=.01)
class TestBearingNode(RosTest):
@with_launch_file('buzzmobile', 'test_params.launch')
@launch_node('buzzmobile', 'bearing.py')
def test_bearing_node(self):
with self.mock_pub('/fix', NavSatFix, queue_size=0) as fix_node:
with self.check_topic('/buzzmobile/bearing', Float64) as ct:
# send mock data
fix_node.send(NavSatFix(None, None, 33.636700, -84.427863, None, None, None))
fix_node.send(NavSatFix(None, None, 39.029128, -111.838257, None, None, None))
# check the output from the node
assert np.isclose(ct.message.data, 1.19212)
|
[
"process.bearing.calculate_directions.get_distance",
"numpy.isclose",
"pyrostest.with_launch_file",
"collections.namedtuple",
"pyrostest.launch_node",
"sensor_msgs.msg.NavSatFix"
] |
[((259, 303), 'collections.namedtuple', 'namedtuple', (['"""fix"""', "['latitude', 'longitude']"], {}), "('fix', ['latitude', 'longitude'])\n", (269, 303), False, 'from collections import namedtuple\n'), ((589, 641), 'pyrostest.with_launch_file', 'with_launch_file', (['"""buzzmobile"""', '"""test_params.launch"""'], {}), "('buzzmobile', 'test_params.launch')\n", (605, 641), False, 'from pyrostest import RosTest, with_launch_file, launch_node\n'), ((647, 686), 'pyrostest.launch_node', 'launch_node', (['"""buzzmobile"""', '"""bearing.py"""'], {}), "('buzzmobile', 'bearing.py')\n", (658, 686), False, 'from pyrostest import RosTest, with_launch_file, launch_node\n'), ((484, 529), 'process.bearing.calculate_directions.get_distance', 'calculate_directions.get_distance', (['fix1', 'fix2'], {}), '(fix1, fix2)\n', (517, 529), False, 'from process.bearing import calculate_directions\n'), ((1161, 1197), 'numpy.isclose', 'np.isclose', (['ct.message.data', '(1.19212)'], {}), '(ct.message.data, 1.19212)\n', (1171, 1197), True, 'import numpy as np\n'), ((929, 989), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', (['None', 'None', '(33.6367)', '(-84.427863)', 'None', 'None', 'None'], {}), '(None, None, 33.6367, -84.427863, None, None, None)\n', (938, 989), False, 'from sensor_msgs.msg import NavSatFix\n'), ((1023, 1086), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', (['None', 'None', '(39.029128)', '(-111.838257)', 'None', 'None', 'None'], {}), '(None, None, 39.029128, -111.838257, None, None, None)\n', (1032, 1086), False, 'from sensor_msgs.msg import NavSatFix\n')]
|
import requests
def do_post(url, input, headers=None, quiet=False, json=False, debug=False):
# NOTE: Using python to do this is slow compared with running curl
# directly on the command line (or some other purpose-built tool).
# As a result this mustn't be used for performance testing
if debug:
print("POST URL : {}".format(url))
print("POST Headers: {}".format(headers))
print("POST JSON : {}".format(json))
print("POST Data : {}".format(input))
if json:
response = requests.post(url, json=input, headers=headers)
else:
response = requests.post(url, data=input, headers=headers)
if response.status_code >= 400:
print("Request failed: status = {}".format(response.status_code))
elif response.text and not quiet:
print(response.text)
elif not quiet:
print("Empty response")
return response.text
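# Minimal illustrative call (URL and payload are hypothetical, not part of the
# original module). With json=True the payload is sent as a JSON body via
# requests.post(json=...), otherwise it goes through the data= argument.
#   do_post("http://localhost:8080/api", {"key": "value"},
#           headers={"Accept": "application/json"}, json=True)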
|
[
"requests.post"
] |
[((538, 585), 'requests.post', 'requests.post', (['url'], {'json': 'input', 'headers': 'headers'}), '(url, json=input, headers=headers)\n', (551, 585), False, 'import requests\n'), ((615, 662), 'requests.post', 'requests.post', (['url'], {'data': 'input', 'headers': 'headers'}), '(url, data=input, headers=headers)\n', (628, 662), False, 'import requests\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
def bilinear_interp_mkldnn_np(input,
out_h,
out_w,
out_size=None,
actual_shape=None,
data_layout='NCHW'):
"""bilinear interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
batch_size, channel, in_h, in_w = input.shape
out = np.zeros((batch_size, channel, out_h, out_w))
for oh in range(out_h):
h0 = int(math.floor((oh + 0.5) * in_h / out_h - 0.5))
h1 = int(math.ceil((oh + 0.5) * in_h / out_h - 0.5))
h0 = max(h0, 0)
h1 = min(h1, in_h - 1)
Wh = (oh + 0.5) * in_h / out_h - 0.5 - h0
for ow in range(out_w):
w0 = int(math.floor((ow + 0.5) * in_w / out_w - 0.5))
w1 = int(math.ceil((ow + 0.5) * in_w / out_w - 0.5))
w0 = max(w0, 0)
w1 = min(w1, in_w - 1)
Ww = (ow + 0.5) * in_w / out_w - 0.5 - w0
input_h0_w0 = input[:, :, h0, w0]
input_h1_w0 = input[:, :, h1, w0]
input_h0_w1 = input[:, :, h0, w1]
input_h1_w1 = input[:, :, h1, w1]
out[:, :, oh,
ow] = input_h0_w0 * (1 - Wh) * (1 - Ww) + input_h1_w0 * Wh * (
1 - Ww) + input_h0_w1 * (1 -
Wh) * Ww + input_h1_w1 * Wh * Ww
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(input.dtype)
@skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.")
class TestBilinearInterpMKLDNNOp(OpTest):
def init_test_case(self):
pass
def setUp(self):
self.op_type = "bilinear_interp_v2"
self.interp_method = 'bilinear'
self._cpu_only = True
self.use_mkldnn = True
self.input_shape = [1, 1, 2, 2]
self.data_layout = 'NCHW'
# priority: actual_shape > out_size > scale > out_h & out_w
self.out_h = 1
self.out_w = 1
self.scale = 2.0
self.out_size = None
self.actual_shape = None
self.init_test_case()
input_np = np.random.random(self.input_shape).astype("float32")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
scale_h = float(self.scale)
scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = self.scale[0]
scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
if scale_h > 0 and scale_w > 0:
out_h = int(in_h * scale_h)
out_w = int(in_w * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_mkldnn_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.data_layout)
if isinstance(self.scale, float):
self.scale = [self.scale, self.scale]
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'interp_method': self.interp_method,
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'data_layout': self.data_layout,
'use_mkldnn': self.use_mkldnn
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 2, 32, 16]
self.out_h = 27
self.out_w = 49
self.scale = [2.0, 3.0]
self.data_layout = 'NHWC'
class TestBilinearNeighborInterpMKLDNNCase2(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
class TestBilinearNeighborInterpCase3(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 128
self.scale = [0.1, 0.05]
class TestBilinearNeighborInterpCase4(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = [13.0, 15.0]
self.out_size = np.array([65, 129]).astype("int32")
class TestBilinearNeighborInterpCase5(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 9, 6]
self.out_h = 12
self.out_w = 12
self.out_size = np.array([13, 13]).astype("int32")
class TestBilinearNeighborInterpCase6(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 1.0
self.out_size = np.array([65, 129]).astype("int32")
class TestBilinearNeighborInterpSame(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 2.0
self.out_size = np.array([65, 129]).astype("int32")
if __name__ == "__main__":
from paddle import enable_static
enable_static()
unittest.main()
|
[
"unittest.main",
"paddle.fluid.tests.unittests.op_test.skip_check_grad_ci",
"math.ceil",
"paddle.enable_static",
"numpy.zeros",
"numpy.transpose",
"math.floor",
"numpy.random.random",
"numpy.array"
] |
[((2692, 2765), 'paddle.fluid.tests.unittests.op_test.skip_check_grad_ci', 'skip_check_grad_ci', ([], {'reason': '"""Haven not implement interpolate grad kernel."""'}), "(reason='Haven not implement interpolate grad kernel.')\n", (2710, 2765), False, 'from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci\n'), ((1561, 1606), 'numpy.zeros', 'np.zeros', (['(batch_size, channel, out_h, out_w)'], {}), '((batch_size, channel, out_h, out_w))\n', (1569, 1606), True, 'import numpy as np\n'), ((7082, 7097), 'paddle.enable_static', 'enable_static', ([], {}), '()\n', (7095, 7097), False, 'from paddle import enable_static\n'), ((7102, 7117), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7115, 7117), False, 'import unittest\n'), ((1268, 1301), 'numpy.transpose', 'np.transpose', (['input', '(0, 3, 1, 2)'], {}), '(input, (0, 3, 1, 2))\n', (1280, 1301), True, 'import numpy as np\n'), ((2605, 2636), 'numpy.transpose', 'np.transpose', (['out', '(0, 2, 3, 1)'], {}), '(out, (0, 2, 3, 1))\n', (2617, 2636), True, 'import numpy as np\n'), ((1653, 1696), 'math.floor', 'math.floor', (['((oh + 0.5) * in_h / out_h - 0.5)'], {}), '((oh + 0.5) * in_h / out_h - 0.5)\n', (1663, 1696), False, 'import math\n'), ((1715, 1757), 'math.ceil', 'math.ceil', (['((oh + 0.5) * in_h / out_h - 0.5)'], {}), '((oh + 0.5) * in_h / out_h - 0.5)\n', (1724, 1757), False, 'import math\n'), ((1917, 1960), 'math.floor', 'math.floor', (['((ow + 0.5) * in_w / out_w - 0.5)'], {}), '((ow + 0.5) * in_w / out_w - 0.5)\n', (1927, 1960), False, 'import math\n'), ((1983, 2025), 'math.ceil', 'math.ceil', (['((ow + 0.5) * in_w / out_w - 0.5)'], {}), '((ow + 0.5) * in_w / out_w - 0.5)\n', (1992, 2025), False, 'import math\n'), ((3345, 3379), 'numpy.random.random', 'np.random.random', (['self.input_shape'], {}), '(self.input_shape)\n', (3361, 3379), True, 'import numpy as np\n'), ((6180, 6199), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6188, 6199), True, 'import numpy as np\n'), ((6428, 6446), 'numpy.array', 'np.array', (['[13, 13]'], {}), '([13, 13])\n', (6436, 6446), True, 'import numpy as np\n'), ((6702, 6721), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6710, 6721), True, 'import numpy as np\n'), ((6976, 6995), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6984, 6995), True, 'import numpy as np\n')]
|
from typing import AnyStr, ByteString, Callable, List, Sequence, TypeVar, Union
import numpy as np
ReturnType = TypeVar("ReturnType")
dtype_dict = {
"float": "FLOAT",
"double": "DOUBLE",
"float32": "FLOAT",
"float64": "DOUBLE",
"int8": "INT8",
"int16": "INT16",
"int32": "INT32",
"int64": "INT64",
"uint8": "UINT8",
"uint16": "UINT16",
"uint32": "UINT32",
"uint64": "UINT64",
"bool": "BOOL",
"str": "STRING",
}
allowed_devices = {"CPU", "GPU"}
allowed_backends = {"TF", "TFLITE", "TORCH", "ONNX"}
def numpy2blob(tensor: np.ndarray) -> tuple:
"""Convert the numpy input from user to `Tensor`."""
try:
if tensor.dtype.num == np.dtype("str").num:
dtype = dtype_dict["str"]
blob = "".join([string + "\0" for string in tensor.flat])
else:
dtype = dtype_dict[str(tensor.dtype)]
blob = tensor.tobytes()
except KeyError:
raise TypeError(f"RedisAI doesn't support tensors of type {tensor.dtype}")
shape = tensor.shape
return dtype, shape, blob
def blob2numpy(
value: ByteString, shape: Union[list, tuple], dtype: str, mutable: bool
) -> np.ndarray:
"""Convert `BLOB` result from RedisAI to `np.ndarray`."""
mm = {"FLOAT": "float32", "DOUBLE": "float64"}
dtype = mm.get(dtype, dtype.lower())
if dtype == 'string':
a = np.array(value.decode().split('\0')[:-1], dtype='str')
elif mutable:
a = np.fromstring(value, dtype=dtype)
else:
a = np.frombuffer(value, dtype=dtype)
return a.reshape(shape)
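# Sketch of a round trip between numpy2blob and blob2numpy (assumes a plain
# float32 tensor; the values are only illustrative):
#   dtype, shape, blob = numpy2blob(np.arange(6, dtype='float32').reshape(2, 3))
#   # dtype == 'FLOAT', shape == (2, 3)
#   restored = blob2numpy(blob, shape, dtype, mutable=False)
#   # np.array_equal(restored, np.arange(6, dtype='float32').reshape(2, 3)) -> True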
def list2dict(lst):
"""Convert the list from RedisAI to a dict."""
if len(lst) % 2 != 0:
raise RuntimeError("Can't unpack the list: {}".format(lst))
out = {}
for i in range(0, len(lst), 2):
key = lst[i].decode().lower()
val = lst[i + 1]
if key != "blob" and isinstance(val, bytes):
val = val.decode()
out[key] = val
return out
def recursive_bytetransform(arr: List[AnyStr], target: Callable[..., ReturnType]) -> List[ReturnType]:
    """
    Recurse through `arr`, replacing each bytes element with `target(element)`.
    Returns the same list after the in-place operation that updates `arr`.
    """
for ix in range(len(arr)):
obj = arr[ix]
if isinstance(obj, list):
recursive_bytetransform(obj, target)
else:
arr[ix] = target(obj)
return arr
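# Illustrative call with hypothetical data: decoding a nested list of byte
# strings in place.
#   recursive_bytetransform([b'1', [b'2', b'3']], target=int)
#   # -> [1, [2, 3]] (the input list is mutated as well)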
def listify(inp: Union[str, Sequence[str]]) -> Sequence[str]:
"""Wrap the ``inp`` with a list if it's not a list already."""
return (inp,) if not isinstance(inp, (list, tuple)) else inp
|
[
"typing.TypeVar",
"numpy.dtype",
"numpy.frombuffer",
"numpy.fromstring"
] |
[((114, 135), 'typing.TypeVar', 'TypeVar', (['"""ReturnType"""'], {}), "('ReturnType')\n", (121, 135), False, 'from typing import AnyStr, ByteString, Callable, List, Sequence, TypeVar, Union\n'), ((1477, 1510), 'numpy.fromstring', 'np.fromstring', (['value'], {'dtype': 'dtype'}), '(value, dtype=dtype)\n', (1490, 1510), True, 'import numpy as np\n'), ((1533, 1566), 'numpy.frombuffer', 'np.frombuffer', (['value'], {'dtype': 'dtype'}), '(value, dtype=dtype)\n', (1546, 1566), True, 'import numpy as np\n'), ((701, 716), 'numpy.dtype', 'np.dtype', (['"""str"""'], {}), "('str')\n", (709, 716), True, 'import numpy as np\n')]
|
import numpy as np
from path import Path
import scipy.misc
from collections import Counter
import matplotlib.pyplot as plt
class KittiRawLoader(object):
def __init__(self,
dataset_dir,
static_frames_file=None,
img_height=128,
img_width=416,
min_speed=2,
get_gt=False):
dir_path = Path(__file__).realpath().dirname()
test_scene_file = dir_path/'test_scenes.txt'
self.from_speed = static_frames_file is None
if static_frames_file is not None:
static_frames_file = Path(static_frames_file)
self.collect_static_frames(static_frames_file)
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = Path(dataset_dir)
self.img_height = img_height
self.img_width = img_width
self.cam_ids = ['02', '03']
#self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30']
self.min_speed = min_speed
self.get_gt = get_gt
self.collect_train_folders()#make self.scenes
#public
    def collect_scenes(self, drive):  # drive: current drive path
train_scenes = []
for c in self.cam_ids:
oxts = sorted((drive/'oxts'/'data').files('*.txt'))#list
scene_data = {'cid': c, 'dir': drive, 'speed': [], 'frame_id': [], 'rel_path': drive.name + '_' + c}
for n, f in enumerate(oxts):
metadata = np.genfromtxt(f)
speed = metadata[8:11]
scene_data['speed'].append(speed)
scene_data['frame_id'].append('{:010d}'.format(n))
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2])
scene_data['intrinsics'] = scene_data['P_rect'][:,:3]
train_scenes.append(scene_data)
return train_scenes
#public
#generator using yield rather than return
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i, frame_id):
sample = [self.load_image(scene_data, i)[0], frame_id]
if self.get_gt:
sample.append(self.generate_depth_map(scene_data, i))
return sample
if self.from_speed:
cum_speed = np.zeros(3)
for i, speed in enumerate(scene_data['speed']):
cum_speed += speed
speed_mag = np.linalg.norm(cum_speed)
if speed_mag > self.min_speed:
frame_id = scene_data['frame_id'][i]
yield construct_sample(scene_data, i, frame_id)
cum_speed *= 0
else: # from static frame file
drive = str(scene_data['dir'].name)
for (i,frame_id) in enumerate(scene_data['frame_id']):
if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]):
yield construct_sample(scene_data, i, frame_id)
#private
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = {}
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
curr_fid = '%.10d' % (np.int(frame_id[:-1]))
if drive not in self.static_frames.keys():
self.static_frames[drive] = []
self.static_frames[drive].append(curr_fid)
#private
def collect_train_folders(self):
self.scenes = []
for date in self.date_list:
drive_set = (self.dataset_dir/date).dirs()
for dr in drive_set:
if dr.name[:-5] not in self.test_scenes:
self.scenes.append(dr)
#private
def get_P_rect(self, scene_data, zoom_x, zoom_y):
#print(zoom_x, zoom_y)
calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
filedata = self.read_raw_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4))
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
return P_rect
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'
if not img_file.isfile():
return None
img = scipy.misc.imread(img_file)
zoom_y = self.img_height/img.shape[0]
zoom_x = self.img_width/img.shape[1]
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
return img, zoom_x, zoom_y
def read_raw_calib_file(self, filepath):
# From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
"""Read in a calibration file and parse into a dictionary."""
data = {}
with open(filepath, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
#called by get_scene_imgs
def generate_depth_map(self, scene_data, tgt_idx):
# compute projection matrix velodyne->image plane
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n-1) + colSub - 1
R_cam2rect = np.eye(4)
calib_dir = scene_data['dir'].parent
cam2cam = self.read_raw_calib_file(calib_dir/'calib_cam_to_cam.txt')
velo2cam = self.read_raw_calib_file(calib_dir/'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3,3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
P_rect = scene_data['P_rect']
R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3,3)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
velo_file_name = scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = np.fromfile(velo_file_name, dtype=np.float32).reshape(-1, 4)
velo[:,3] = 1
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:,:2] / velo_pts_im[:,-1:]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:,0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:,1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:,0] < self.img_width) & (velo_pts_im[:,1] < self.img_height)
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros((self.img_height, self.img_width)).astype(np.float32)
depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
return depth
|
[
"numpy.eye",
"numpy.fromfile",
"collections.Counter",
"numpy.zeros",
"numpy.genfromtxt",
"path.Path",
"numpy.linalg.norm",
"numpy.reshape",
"numpy.int",
"numpy.array",
"numpy.where",
"numpy.dot",
"numpy.round"
] |
[((869, 886), 'path.Path', 'Path', (['dataset_dir'], {}), '(dataset_dir)\n', (873, 886), False, 'from path import Path\n'), ((4336, 4395), 'numpy.reshape', 'np.reshape', (["filedata['P_rect_' + scene_data['cid']]", '(3, 4)'], {}), "(filedata['P_rect_' + scene_data['cid']], (3, 4))\n", (4346, 4395), True, 'import numpy as np\n'), ((5884, 5893), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5890, 5893), True, 'import numpy as np\n'), ((614, 638), 'path.Path', 'Path', (['static_frames_file'], {}), '(static_frames_file)\n', (618, 638), False, 'from path import Path\n'), ((2567, 2578), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2575, 2578), True, 'import numpy as np\n'), ((6382, 6408), 'numpy.dot', 'np.dot', (['P_rect', 'R_cam2rect'], {}), '(P_rect, R_cam2rect)\n', (6388, 6408), True, 'import numpy as np\n'), ((6898, 6923), 'numpy.dot', 'np.dot', (['P_velo2im', 'velo.T'], {}), '(P_velo2im, velo.T)\n', (6904, 6923), True, 'import numpy as np\n'), ((7123, 7150), 'numpy.round', 'np.round', (['velo_pts_im[:, 0]'], {}), '(velo_pts_im[:, 0])\n', (7131, 7150), True, 'import numpy as np\n'), ((7182, 7209), 'numpy.round', 'np.round', (['velo_pts_im[:, 1]'], {}), '(velo_pts_im[:, 1])\n', (7190, 7209), True, 'import numpy as np\n'), ((1662, 1678), 'numpy.genfromtxt', 'np.genfromtxt', (['f'], {}), '(f)\n', (1675, 1678), True, 'import numpy as np\n'), ((2702, 2727), 'numpy.linalg.norm', 'np.linalg.norm', (['cum_speed'], {}), '(cum_speed)\n', (2716, 2727), True, 'import numpy as np\n'), ((3616, 3637), 'numpy.int', 'np.int', (['frame_id[:-1]'], {}), '(frame_id[:-1])\n', (3622, 3637), True, 'import numpy as np\n'), ((6227, 6251), 'numpy.array', 'np.array', (['[0, 0, 0, 1.0]'], {}), '([0, 0, 0, 1.0])\n', (6235, 6251), True, 'import numpy as np\n'), ((6709, 6754), 'numpy.fromfile', 'np.fromfile', (['velo_file_name'], {'dtype': 'np.float32'}), '(velo_file_name, dtype=np.float32)\n', (6720, 6754), True, 'import numpy as np\n'), ((7481, 7524), 'numpy.zeros', 'np.zeros', (['(self.img_height, self.img_width)'], {}), '((self.img_height, self.img_width))\n', (7489, 7524), True, 'import numpy as np\n'), ((7914, 7934), 'numpy.where', 'np.where', (['(inds == dd)'], {}), '(inds == dd)\n', (7922, 7934), True, 'import numpy as np\n'), ((395, 409), 'path.Path', 'Path', (['__file__'], {}), '(__file__)\n', (399, 409), False, 'from path import Path\n'), ((7831, 7844), 'collections.Counter', 'Counter', (['inds'], {}), '(inds)\n', (7838, 7844), False, 'from collections import Counter\n')]
|
import json
import logging
import numpy as np
import pathlib
import pickle
import random
from sklearn.preprocessing import LabelEncoder
import torch
import torch.utils.data
class TranscribedDataset():
le = None
sos = '<sos>'
eos = '<eos>'
pad = '<pad>'
unk = '<unk>'
@classmethod
def init_vocabulary(cls, transcriptions):
cls.le = LabelEncoder()
tokens = [cls.sos, cls.eos, cls.unk, cls.pad] + \
[c for t in transcriptions for c in t]
cls.le.fit(tokens)
@classmethod
def get_label_encoder(cls):
if cls.le is None:
raise ValueError('Vocabulary not initialized.')
return cls.le
@classmethod
def get_token_id(cls, token):
return cls.get_label_encoder().transform([token])[0]
@classmethod
def vocabulary_size(cls):
return len(cls.get_label_encoder().classes_)
@classmethod
def caption2tensor(cls, capt):
le = cls.get_label_encoder()
capt = [c if c in le.classes_ else cls.unk for c in capt]
capt = [cls.sos] + capt + [cls.eos]
return torch.Tensor(le.transform(capt))
class Flickr8KData(torch.utils.data.Dataset, TranscribedDataset):
@classmethod
def init_vocabulary(cls, dataset):
transcriptions = [sd[2] for sd in dataset.split_data]
TranscribedDataset.init_vocabulary(transcriptions)
def __init__(self, root, feature_fname, meta_fname, split='train', language='en',
downsampling_factor=None):
self.root = root
self.split = split
self.feature_fname = feature_fname
self.language = language
if language == 'en':
self.text_key = 'raw'
elif language == 'jp':
self.text_key = 'raw_jp'
else:
raise ValueError('Language {} not supported.'.format(language))
self.root = root
self.split = split
self.language = language
root_path = pathlib.Path(root)
# Loading label encoder
module_path = pathlib.Path(__file__).parent
with open(module_path / 'label_encoders.pkl', 'rb') as f:
self.__class__.le = pickle.load(f)[language]
# Loading metadata
with open(root_path / meta_fname) as fmeta:
metadata = json.load(fmeta)['images']
# Loading mapping from image id to list of caption id
self.image_captions = {}
with open(root_path / 'flickr_audio' / 'wav2capt.txt') as fwav2capt:
for line in fwav2capt:
audio_id, image_id, text_id = line.split()
text_id = int(text_id[1:])
self.image_captions[image_id] = self.image_captions.get(image_id, []) + [(text_id, audio_id)]
# Creating image, caption pairs
self.split_data = []
for image in metadata:
if image['split'] == self.split:
fname = image['filename']
for text_id, audio_id in self.image_captions[fname]:
# In the reduced dataset containing only sentences with
# translations, removed sentences are replaced by 'None' to
# keep the index of the sentence fixed, so that we can
# still retrieve them based on text_id.
# TODO: find a nicer way to handle this
if image['sentences'][text_id] is not None:
if self.text_key in image['sentences'][text_id]:
self.split_data.append((
fname,
audio_id,
image['sentences'][text_id][self.text_key]))
# Downsampling
if downsampling_factor is not None:
num_examples = int(len(self.split_data) // downsampling_factor)
self.split_data = random.sample(self.split_data, num_examples)
# image and audio feature data
image = torch.load(root_path / 'resnet_features.pt')
self.image = dict(zip(image['filenames'], image['features']))
audio = torch.load(root_path / feature_fname)
self.audio = dict(zip(audio['filenames'], audio['features']))
def __getitem__(self, index):
sd = self.split_data[index]
image = self.image[sd[0]]
audio = self.audio[sd[1]]
text = self.caption2tensor(sd[2])
return dict(image_id=sd[0],
audio_id=sd[1],
image=image,
text=text,
audio=audio,
gloss=sd[2])
def __len__(self):
return len(self.split_data)
def get_config(self):
return dict(feature_fname=self.feature_fname,
label_encoder=self.get_label_encoder(),
language=self.language)
def evaluation(self):
"""Returns image features, audio features, caption features, and a
boolean array specifying whether a caption goes with an image."""
audio = []
text = []
image = []
matches = []
image2idx = {}
for sd in self.split_data:
# Add image
if sd[0] in image2idx:
image_idx = image2idx[sd[0]]
else:
image_idx = len(image)
image2idx[sd[0]] = image_idx
image.append(self.image[sd[0]])
# Add audio and text
audio.append(self.audio[sd[1]])
text.append(sd[2])
matches.append((len(audio) - 1, image_idx))
correct = torch.zeros(len(audio), len(image)).bool()
for i, j in matches:
correct[i, j] = True
return dict(image=image, audio=audio, text=text, correct=correct)
def is_slt(self):
return self.language != 'en'
def split_sentences(self, sentences):
if self.language == 'jp':
return sentences
else:
return [s.split() for s in sentences]
class LibriSpeechData(torch.utils.data.Dataset, TranscribedDataset):
@classmethod
def init_vocabulary(cls, dataset):
transcriptions = [m['trn'] for m in dataset.metadata]
TranscribedDataset.init_vocabulary(transcriptions)
def __init__(self, root, feature_fname, meta_fname, split='train',
downsampling_factor=None):
# 'val' set in flickr8k corresponds to 'dev' in librispeech
if split == 'val':
split = 'dev'
self.root = root
self.split = split
self.feature_fname = feature_fname
root_path = pathlib.Path(root)
with open(root_path / meta_fname) as fmeta:
self.metadata = json.load(fmeta)
self.num_lines = self.metadata[-1]['audio_end']
if downsampling_factor is not None:
num_examples = len(self.metadata) // downsampling_factor
self.metadata = random.sample(self.metadata, num_examples)
# filter examples based on split
meta = []
for ex in self.metadata:
if ex['split'] == self.split:
meta.append(ex)
self.metadata = meta
# load audio features
self.audio = np.memmap(root_path / feature_fname, dtype='float64',
mode='r', shape=(self.num_lines, 39))
def __getitem__(self, index):
sd = self.metadata[index]
audio = torch.from_numpy(self.audio[sd['audio_start']:sd['audio_end']])
text = self.caption2tensor(sd['trn'])
return dict(audio_id=sd['fileid'], text=text, audio=audio)
def __len__(self):
return len(self.metadata)
def get_config(self):
return dict(feature_fname=self.feature_fname,
label_encoder=self.get_label_encoder())
def evaluation(self):
"""Returns audio features with corresponding caption"""
audio = []
text = []
for ex in self.metadata:
text.append(ex['trn'])
a = torch.from_numpy(self.audio[ex['audio_start']:ex['audio_end']])
audio.append(a)
return dict(audio=audio, text=text)
def batch_audio(audios, max_frames=2048):
"""Merge audio captions. Truncate to max_frames. Pad with 0s."""
mfcc_lengths = [len(cap[:max_frames, :]) for cap in audios]
mfcc = torch.zeros(len(audios), max(mfcc_lengths), audios[0].size(1))
for i, cap in enumerate(audios):
end = mfcc_lengths[i]
mfcc[i, :end] = cap[:end]
return mfcc.permute(0, 2, 1), torch.tensor(mfcc_lengths)
def batch_text(texts):
"""Merge captions (from tuple of 1D tensor to 2D tensor). Pad with
pad token."""
char_lengths = [len(cap) for cap in texts]
chars = torch.Tensor(len(texts), max(char_lengths)).long()
chars.fill_(Flickr8KData.get_token_id(Flickr8KData.pad))
for i, cap in enumerate(texts):
end = char_lengths[i]
chars[i, :end] = cap[:end]
return chars, torch.tensor(char_lengths)
def batch_image(images):
return torch.stack(images, 0)
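# Shape-only sketch of what the batching helpers above produce (sizes are
# hypothetical): given two MFCC captions a, b of shapes (500, 39) and (700, 39),
#   mfcc, lens = batch_audio([a, b])    # mfcc.shape == (2, 39, 700), lens == [500, 700]
# and two encoded captions t1, t2 of lengths 12 and 20,
#   chars, lens = batch_text([t1, t2])  # chars.shape == (2, 20), the shorter one padded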
def collate_fn(data, max_frames=2048):
images, texts, audios = zip(* [(datum['image'],
datum['text'],
datum['audio']) for datum in data])
# Merge images (from tuple of 3D tensor to 4D tensor).
images = batch_image(images)
mfcc, mfcc_lengths = batch_audio(audios, max_frames=max_frames)
chars, char_lengths = batch_text(texts)
return dict(image=images, audio=mfcc, text=chars, audio_len=mfcc_lengths,
text_len=char_lengths)
def collate_fn_speech(data, max_frames=2048):
texts, audios = zip(* [(datum['text'],
datum['audio']) for datum in data])
mfcc, mfcc_lengths = batch_audio(audios, max_frames=max_frames)
chars, char_lengths = batch_text(texts)
return dict(audio=mfcc, text=chars, audio_len=mfcc_lengths,
text_len=char_lengths)
def flickr8k_loader(root, meta_fname, language, feature_fname,
split='train', batch_size=32, shuffle=False,
max_frames=2048,
downsampling_factor=None):
return torch.utils.data.DataLoader(
dataset=Flickr8KData(root=root,
feature_fname=feature_fname,
meta_fname=meta_fname,
split=split,
language=language,
downsampling_factor=downsampling_factor),
batch_size=batch_size,
shuffle=shuffle,
num_workers=0,
collate_fn=lambda x: collate_fn(x, max_frames=max_frames))
def librispeech_loader(root, meta_fname, feature_fname,
split='train', batch_size=32, shuffle=False,
max_frames=2048,
downsampling_factor=None):
return torch.utils.data.DataLoader(
dataset=LibriSpeechData(root=root,
feature_fname=feature_fname,
meta_fname=meta_fname,
split=split,
downsampling_factor=downsampling_factor),
batch_size=batch_size,
shuffle=shuffle,
num_workers=0,
collate_fn=lambda x: collate_fn_speech(x, max_frames=max_frames))
|
[
"json.load",
"torch.stack",
"random.sample",
"torch.load",
"sklearn.preprocessing.LabelEncoder",
"pathlib.Path",
"pickle.load",
"numpy.memmap",
"torch.tensor",
"torch.from_numpy"
] |
[((9024, 9046), 'torch.stack', 'torch.stack', (['images', '(0)'], {}), '(images, 0)\n', (9035, 9046), False, 'import torch\n'), ((370, 384), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (382, 384), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1977, 1995), 'pathlib.Path', 'pathlib.Path', (['root'], {}), '(root)\n', (1989, 1995), False, 'import pathlib\n'), ((3981, 4025), 'torch.load', 'torch.load', (["(root_path / 'resnet_features.pt')"], {}), "(root_path / 'resnet_features.pt')\n", (3991, 4025), False, 'import torch\n'), ((4112, 4149), 'torch.load', 'torch.load', (['(root_path / feature_fname)'], {}), '(root_path / feature_fname)\n', (4122, 4149), False, 'import torch\n'), ((6604, 6622), 'pathlib.Path', 'pathlib.Path', (['root'], {}), '(root)\n', (6616, 6622), False, 'import pathlib\n'), ((7210, 7306), 'numpy.memmap', 'np.memmap', (['(root_path / feature_fname)'], {'dtype': '"""float64"""', 'mode': '"""r"""', 'shape': '(self.num_lines, 39)'}), "(root_path / feature_fname, dtype='float64', mode='r', shape=(self\n .num_lines, 39))\n", (7219, 7306), True, 'import numpy as np\n'), ((7418, 7481), 'torch.from_numpy', 'torch.from_numpy', (["self.audio[sd['audio_start']:sd['audio_end']]"], {}), "(self.audio[sd['audio_start']:sd['audio_end']])\n", (7434, 7481), False, 'import torch\n'), ((8528, 8554), 'torch.tensor', 'torch.tensor', (['mfcc_lengths'], {}), '(mfcc_lengths)\n', (8540, 8554), False, 'import torch\n'), ((8959, 8985), 'torch.tensor', 'torch.tensor', (['char_lengths'], {}), '(char_lengths)\n', (8971, 8985), False, 'import torch\n'), ((2050, 2072), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (2062, 2072), False, 'import pathlib\n'), ((3880, 3924), 'random.sample', 'random.sample', (['self.split_data', 'num_examples'], {}), '(self.split_data, num_examples)\n', (3893, 3924), False, 'import random\n'), ((6703, 6719), 'json.load', 'json.load', (['fmeta'], {}), '(fmeta)\n', (6712, 6719), False, 'import json\n'), ((6921, 6963), 'random.sample', 'random.sample', (['self.metadata', 'num_examples'], {}), '(self.metadata, num_examples)\n', (6934, 6963), False, 'import random\n'), ((8006, 8069), 'torch.from_numpy', 'torch.from_numpy', (["self.audio[ex['audio_start']:ex['audio_end']]"], {}), "(self.audio[ex['audio_start']:ex['audio_end']])\n", (8022, 8069), False, 'import torch\n'), ((2178, 2192), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2189, 2192), False, 'import pickle\n'), ((2305, 2321), 'json.load', 'json.load', (['fmeta'], {}), '(fmeta)\n', (2314, 2321), False, 'import json\n')]
|
# Clyde 'Thluffy' Sinclair
# SoftDev -- Rona Ed.
# Oct 2020
from flask import Flask
app = Flask(__name__) # create instance of class Flask
@app.route("/") # assign fxn to route
def hello_world():
print("the __name__ of this module is... ")
# Still should print '__main__' in `python app.py`
# and with `flask run` it prints out `app`
print(__name__)
return "No hablo queso!"
if __name__ == "__main__": # true if this file NOT imported
# now running with `flask run` allows all changes you would expect with
# `python app.py` in v3 but it now prints `app` for name of the module
    # this does not work as expected unless you run `flask run --reload`
app.debug = True # enable auto-reload upon code change
app.run()
|
[
"flask.Flask"
] |
[((91, 106), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (96, 106), False, 'from flask import Flask\n')]
|
# Capstone Python bindings, by <NAME> <<EMAIL>>
import sys
from platform import system
_python2 = sys.version_info[0] < 3
if _python2:
range = xrange
__all__ = [
'Cs',
'CsInsn',
'cs_disasm_quick',
'cs_disasm_lite',
'cs_version',
'cs_support',
'version_bind',
'debug',
'CS_API_MAJOR',
'CS_API_MINOR',
'CS_VERSION_MAJOR',
'CS_VERSION_MINOR',
'CS_VERSION_EXTRA',
'CS_ARCH_ARM',
'CS_ARCH_ARM64',
'CS_ARCH_MIPS',
'CS_ARCH_X86',
'CS_ARCH_PPC',
'CS_ARCH_SPARC',
'CS_ARCH_SYSZ',
'CS_ARCH_XCORE',
'CS_ARCH_ALL',
'CS_MODE_LITTLE_ENDIAN',
'CS_MODE_BIG_ENDIAN',
'CS_MODE_16',
'CS_MODE_32',
'CS_MODE_64',
'CS_MODE_ARM',
'CS_MODE_THUMB',
'CS_MODE_MCLASS',
'CS_MODE_MICRO',
'CS_MODE_MIPS3',
'CS_MODE_MIPS32R6',
'CS_MODE_MIPSGP64',
'CS_MODE_V8',
'CS_MODE_V9',
'CS_MODE_MIPS32',
'CS_MODE_MIPS64',
'CS_OPT_SYNTAX',
'CS_OPT_SYNTAX_DEFAULT',
'CS_OPT_SYNTAX_INTEL',
'CS_OPT_SYNTAX_ATT',
'CS_OPT_SYNTAX_NOREGNAME',
'CS_OPT_DETAIL',
'CS_OPT_MODE',
'CS_OPT_ON',
'CS_OPT_OFF',
'CS_ERR_OK',
'CS_ERR_MEM',
'CS_ERR_ARCH',
'CS_ERR_HANDLE',
'CS_ERR_CSH',
'CS_ERR_MODE',
'CS_ERR_OPTION',
'CS_ERR_DETAIL',
'CS_ERR_VERSION',
'CS_ERR_MEMSETUP',
'CS_ERR_DIET',
'CS_ERR_SKIPDATA',
'CS_ERR_X86_ATT',
'CS_ERR_X86_INTEL',
'CS_SUPPORT_DIET',
'CS_SUPPORT_X86_REDUCE',
'CS_SKIPDATA_CALLBACK',
'CS_OP_INVALID',
'CS_OP_REG',
'CS_OP_IMM',
'CS_OP_MEM',
'CS_OP_FP',
'CS_GRP_INVALID',
'CS_GRP_JUMP',
'CS_GRP_CALL',
'CS_GRP_RET',
'CS_GRP_INT',
'CS_GRP_IRET',
'CsError',
'__version__',
]
# Capstone C interface
# API version
CS_API_MAJOR = 3
CS_API_MINOR = 0
# Package version
CS_VERSION_MAJOR = CS_API_MAJOR
CS_VERSION_MINOR = CS_API_MINOR
CS_VERSION_EXTRA = 5
__version__ = "%u.%u.%u" %(CS_VERSION_MAJOR, CS_VERSION_MINOR, CS_VERSION_EXTRA)
# architectures
CS_ARCH_ARM = 0
CS_ARCH_ARM64 = 1
CS_ARCH_MIPS = 2
CS_ARCH_X86 = 3
CS_ARCH_PPC = 4
CS_ARCH_SPARC = 5
CS_ARCH_SYSZ = 6
CS_ARCH_XCORE = 7
CS_ARCH_MAX = 8
CS_ARCH_ALL = 0xFFFF
# disasm mode
CS_MODE_LITTLE_ENDIAN = 0 # little-endian mode (default mode)
CS_MODE_ARM = 0 # ARM mode
CS_MODE_16 = (1 << 1) # 16-bit mode (for X86)
CS_MODE_32 = (1 << 2) # 32-bit mode (for X86)
CS_MODE_64 = (1 << 3) # 64-bit mode (for X86, PPC)
CS_MODE_THUMB = (1 << 4) # ARM's Thumb mode, including Thumb-2
CS_MODE_MCLASS = (1 << 5) # ARM's Cortex-M series
CS_MODE_V8 = (1 << 6) # ARMv8 A32 encodings for ARM
CS_MODE_MICRO = (1 << 4) # MicroMips mode (MIPS architecture)
CS_MODE_MIPS3 = (1 << 5) # Mips III ISA
CS_MODE_MIPS32R6 = (1 << 6) # Mips32r6 ISA
CS_MODE_MIPSGP64 = (1 << 7) # General Purpose Registers are 64-bit wide (MIPS arch)
CS_MODE_V9 = (1 << 4) # Sparc V9 mode (for Sparc)
CS_MODE_BIG_ENDIAN = (1 << 31) # big-endian mode
CS_MODE_MIPS32 = CS_MODE_32 # Mips32 ISA
CS_MODE_MIPS64 = CS_MODE_64 # Mips64 ISA
# Capstone option type
CS_OPT_SYNTAX = 1 # Intel X86 asm syntax (CS_ARCH_X86 arch)
CS_OPT_DETAIL = 2 # Break down instruction structure into details
CS_OPT_MODE = 3 # Change engine's mode at run-time
CS_OPT_MEM = 4 # User-defined dynamic memory related functions
CS_OPT_SKIPDATA = 5 # Skip data when disassembling
CS_OPT_SKIPDATA_SETUP = 6 # Setup user-defined function for SKIPDATA option
# Capstone option value
CS_OPT_OFF = 0 # Turn OFF an option - default option of CS_OPT_DETAIL
CS_OPT_ON = 3 # Turn ON an option (CS_OPT_DETAIL)
# Common instruction operand types - to be consistent across all architectures.
CS_OP_INVALID = 0
CS_OP_REG = 1
CS_OP_IMM = 2
CS_OP_MEM = 3
CS_OP_FP = 4
# Common instruction groups - to be consistent across all architectures.
CS_GRP_INVALID = 0 # uninitialized/invalid group.
CS_GRP_JUMP = 1 # all jump instructions (conditional+direct+indirect jumps)
CS_GRP_CALL = 2 # all call instructions
CS_GRP_RET = 3 # all return instructions
CS_GRP_INT = 4 # all interrupt instructions (int+syscall)
CS_GRP_IRET = 5 # all interrupt return instructions
# Capstone syntax value
CS_OPT_SYNTAX_DEFAULT = 0 # Default assembly syntax of all platforms (CS_OPT_SYNTAX)
CS_OPT_SYNTAX_INTEL = 1 # Intel X86 asm syntax - default syntax on X86 (CS_OPT_SYNTAX, CS_ARCH_X86)
CS_OPT_SYNTAX_ATT = 2 # ATT asm syntax (CS_OPT_SYNTAX, CS_ARCH_X86)
CS_OPT_SYNTAX_NOREGNAME = 3 # Asm syntax prints register name with only number - (CS_OPT_SYNTAX, CS_ARCH_PPC, CS_ARCH_ARM)
# Capstone error type
CS_ERR_OK = 0 # No error: everything was fine
CS_ERR_MEM = 1 # Out-Of-Memory error: cs_open(), cs_disasm()
CS_ERR_ARCH = 2 # Unsupported architecture: cs_open()
CS_ERR_HANDLE = 3 # Invalid handle: cs_op_count(), cs_op_index()
CS_ERR_CSH = 4 # Invalid csh argument: cs_close(), cs_errno(), cs_option()
CS_ERR_MODE = 5 # Invalid/unsupported mode: cs_open()
CS_ERR_OPTION = 6 # Invalid/unsupported option: cs_option()
CS_ERR_DETAIL = 7 # Information is unavailable because detail option is OFF
CS_ERR_MEMSETUP = 8 # Dynamic memory management uninitialized (see CS_OPT_MEM)
CS_ERR_VERSION = 9 # Unsupported version (bindings)
CS_ERR_DIET = 10 # Information irrelevant in diet engine
CS_ERR_SKIPDATA = 11 # Access irrelevant data for "data" instruction in SKIPDATA mode
CS_ERR_X86_ATT = 12 # X86 AT&T syntax is unsupported (opt-out at compile time)
CS_ERR_X86_INTEL = 13 # X86 Intel syntax is unsupported (opt-out at compile time)
# query id for cs_support()
CS_SUPPORT_DIET = CS_ARCH_ALL + 1
CS_SUPPORT_X86_REDUCE = CS_ARCH_ALL+2
import ctypes, ctypes.util
from os.path import split, join, dirname
import distutils.sysconfig
import pkg_resources
import inspect
if not hasattr(sys.modules[__name__], '__file__'):
__file__ = inspect.getfile(inspect.currentframe())
if sys.platform == 'darwin':
_lib = "libcapstone.dylib"
elif sys.platform in ('win32', 'cygwin'):
_lib = "capstone.dll"
else:
_lib = "libcapstone.so"
_found = False
def _load_lib(path):
lib_file = join(path, _lib)
try:
return ctypes.cdll.LoadLibrary(lib_file)
except OSError:
# if we're on linux, try again with .so.3 extension
if lib_file.endswith('.so'):
try:
return ctypes.cdll.LoadLibrary(lib_file + '.3')
except OSError:
return None
return None
_cs = None
# Loading attempts, in order
# - pkg_resources can get us the path to the local libraries
# - we can get the path to the local libraries by parsing our filename
# - global load
# - python's lib directory
# - last-gasp attempt at some hardcoded paths on darwin and linux
_path_list = [pkg_resources.resource_filename(__name__, 'lib'),
join(split(__file__)[0], 'lib'),
'',
distutils.sysconfig.get_python_lib(),
"/usr/local/lib/" if sys.platform == 'darwin' else '/usr/lib64']
for _path in _path_list:
_cs = _load_lib(_path)
if _cs is not None: break
else:
raise ImportError("ERROR: fail to load the dynamic library.")
# low-level structure for C code
def copy_ctypes(src):
"""Returns a new ctypes object which is a bitwise copy of an existing one"""
dst = type(src)()
ctypes.memmove(ctypes.byref(dst), ctypes.byref(src), ctypes.sizeof(type(src)))
return dst
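# Tiny illustration with a hypothetical value: the copy is independent of its source.
#   src = ctypes.c_uint(7)
#   dst = copy_ctypes(src)
#   src.value = 9
#   # dst.value is still 7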
def copy_ctypes_list(src):
return [copy_ctypes(n) for n in src]
# Weird import placement because these modules are needed by the below code but need the above functions
from . import arm, arm64, mips, ppc, sparc, systemz, x86, xcore
class _cs_arch(ctypes.Union):
_fields_ = (
('arm64', arm64.CsArm64),
('arm', arm.CsArm),
('mips', mips.CsMips),
('x86', x86.CsX86),
('ppc', ppc.CsPpc),
('sparc', sparc.CsSparc),
('sysz', systemz.CsSysz),
('xcore', xcore.CsXcore),
)
class _cs_detail(ctypes.Structure):
_fields_ = (
('regs_read', ctypes.c_ubyte * 12),
('regs_read_count', ctypes.c_ubyte),
('regs_write', ctypes.c_ubyte * 20),
('regs_write_count', ctypes.c_ubyte),
('groups', ctypes.c_ubyte * 8),
('groups_count', ctypes.c_ubyte),
('arch', _cs_arch),
)
class _cs_insn(ctypes.Structure):
_fields_ = (
('id', ctypes.c_uint),
('address', ctypes.c_uint64),
('size', ctypes.c_uint16),
('bytes', ctypes.c_ubyte * 16),
('mnemonic', ctypes.c_char * 32),
('op_str', ctypes.c_char * 160),
('detail', ctypes.POINTER(_cs_detail)),
)
# callback for SKIPDATA option
CS_SKIPDATA_CALLBACK = ctypes.CFUNCTYPE(ctypes.c_size_t, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t, ctypes.c_size_t, ctypes.c_void_p)
class _cs_opt_skipdata(ctypes.Structure):
_fields_ = (
('mnemonic', ctypes.c_char_p),
('callback', CS_SKIPDATA_CALLBACK),
('user_data', ctypes.c_void_p),
)
# setup all the function prototype
def _setup_prototype(lib, fname, restype, *argtypes):
getattr(lib, fname).restype = restype
getattr(lib, fname).argtypes = argtypes
_setup_prototype(_cs, "cs_open", ctypes.c_int, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_size_t))
_setup_prototype(_cs, "cs_disasm", ctypes.c_size_t, ctypes.c_size_t, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t, \
ctypes.c_uint64, ctypes.c_size_t, ctypes.POINTER(ctypes.POINTER(_cs_insn)))
_setup_prototype(_cs, "cs_free", None, ctypes.c_void_p, ctypes.c_size_t)
_setup_prototype(_cs, "cs_close", ctypes.c_int, ctypes.POINTER(ctypes.c_size_t))
_setup_prototype(_cs, "cs_reg_name", ctypes.c_char_p, ctypes.c_size_t, ctypes.c_uint)
_setup_prototype(_cs, "cs_insn_name", ctypes.c_char_p, ctypes.c_size_t, ctypes.c_uint)
_setup_prototype(_cs, "cs_group_name", ctypes.c_char_p, ctypes.c_size_t, ctypes.c_uint)
_setup_prototype(_cs, "cs_op_count", ctypes.c_int, ctypes.c_size_t, ctypes.POINTER(_cs_insn), ctypes.c_uint)
_setup_prototype(_cs, "cs_op_index", ctypes.c_int, ctypes.c_size_t, ctypes.POINTER(_cs_insn), ctypes.c_uint, ctypes.c_uint)
_setup_prototype(_cs, "cs_errno", ctypes.c_int, ctypes.c_size_t)
_setup_prototype(_cs, "cs_option", ctypes.c_int, ctypes.c_size_t, ctypes.c_int, ctypes.c_void_p)
_setup_prototype(_cs, "cs_version", ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
_setup_prototype(_cs, "cs_support", ctypes.c_bool, ctypes.c_int)
_setup_prototype(_cs, "cs_strerror", ctypes.c_char_p, ctypes.c_int)
# access to error code via @errno of CsError
class CsError(Exception):
def __init__(self, errno):
self.errno = errno
if _python2:
def __str__(self):
return _cs.cs_strerror(self.errno)
else:
def __str__(self):
return _cs.cs_strerror(self.errno).decode()
# return the core's version
def cs_version():
major = ctypes.c_int()
minor = ctypes.c_int()
combined = _cs.cs_version(ctypes.byref(major), ctypes.byref(minor))
return (major.value, minor.value, combined)
# return the binding's version
def version_bind():
return (CS_API_MAJOR, CS_API_MINOR, (CS_API_MAJOR << 8) + CS_API_MINOR)
def cs_support(query):
return _cs.cs_support(query)
# dummy class resembling Cs class, just for cs_disasm_quick()
# this class only need to be referenced to via 2 fields: @csh & @arch
class _dummy_cs(object):
def __init__(self, csh, arch):
self.csh = csh
self.arch = arch
self._detail = False
# Quick & dirty Python function to disasm raw binary code
# This function return CsInsn objects
# NOTE: you might want to use more efficient Cs class & its methods.
def cs_disasm_quick(arch, mode, code, offset, count=0):
# verify version compatibility with the core before doing anything
(major, minor, _combined) = cs_version()
if major != CS_API_MAJOR or minor != CS_API_MINOR:
# our binding version is different from the core's API version
raise CsError(CS_ERR_VERSION)
csh = ctypes.c_size_t()
status = _cs.cs_open(arch, mode, ctypes.byref(csh))
if status != CS_ERR_OK:
raise CsError(status)
all_insn = ctypes.POINTER(_cs_insn)()
res = _cs.cs_disasm(csh, code, len(code), offset, count, ctypes.byref(all_insn))
if res > 0:
try:
for i in range(res):
yield CsInsn(_dummy_cs(csh, arch), all_insn[i])
finally:
_cs.cs_free(all_insn, res)
else:
status = _cs.cs_errno(csh)
if status != CS_ERR_OK:
raise CsError(status)
return
yield
status = _cs.cs_close(ctypes.byref(csh))
if status != CS_ERR_OK:
raise CsError(status)
# Another quick, but lighter function to disasm raw binary code.
# This function is faster than cs_disasm_quick() around 20% because
# cs_disasm_lite() only return tuples of (address, size, mnemonic, op_str),
# rather than CsInsn objects.
# NOTE: you might want to use more efficient Cs class & its methods.
def cs_disasm_lite(arch, mode, code, offset, count=0):
# verify version compatibility with the core before doing anything
(major, minor, _combined) = cs_version()
if major != CS_API_MAJOR or minor != CS_API_MINOR:
# our binding version is different from the core's API version
raise CsError(CS_ERR_VERSION)
if cs_support(CS_SUPPORT_DIET):
# Diet engine cannot provide @mnemonic & @op_str
raise CsError(CS_ERR_DIET)
csh = ctypes.c_size_t()
status = _cs.cs_open(arch, mode, ctypes.byref(csh))
if status != CS_ERR_OK:
raise CsError(status)
all_insn = ctypes.POINTER(_cs_insn)()
res = _cs.cs_disasm(csh, code, len(code), offset, count, ctypes.byref(all_insn))
if res > 0:
try:
for i in range(res):
insn = all_insn[i]
yield (insn.address, insn.size, insn.mnemonic.decode('ascii'), insn.op_str.decode('ascii'))
finally:
_cs.cs_free(all_insn, res)
else:
status = _cs.cs_errno(csh)
if status != CS_ERR_OK:
raise CsError(status)
return
yield
status = _cs.cs_close(ctypes.byref(csh))
if status != CS_ERR_OK:
raise CsError(status)
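# Illustrative use of the helper above (the byte string is just an example:
# x86-64 encoding of "push rbp; mov rbp, rsp"):
#   CODE = b"\x55\x48\x89\xe5"
#   for (address, size, mnemonic, op_str) in cs_disasm_lite(CS_ARCH_X86, CS_MODE_64, CODE, 0x1000):
#       print("0x%x:\t%s\t%s" % (address, mnemonic, op_str))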
# Python-style class to disasm code
class CsInsn(object):
def __init__(self, cs, all_info):
self._raw = copy_ctypes(all_info)
self._cs = cs
if self._cs._detail and self._raw.id != 0:
# save detail
self._detail = copy_ctypes(self._raw.detail.contents)
# return instruction's ID.
@property
def id(self):
return self._raw.id
# return instruction's address.
@property
def address(self):
return self._raw.address
# return instruction's size.
@property
def size(self):
return self._raw.size
# return instruction's machine bytes (which should have @size bytes).
@property
def bytes(self):
return bytearray(self._raw.bytes)[:self._raw.size]
# return instruction's mnemonic.
@property
def mnemonic(self):
if self._cs._diet:
# Diet engine cannot provide @mnemonic.
raise CsError(CS_ERR_DIET)
return self._raw.mnemonic.decode('ascii')
# return instruction's operands (in string).
@property
def op_str(self):
if self._cs._diet:
# Diet engine cannot provide @op_str.
raise CsError(CS_ERR_DIET)
return self._raw.op_str.decode('ascii')
# return list of all implicit registers being read.
@property
def regs_read(self):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide @regs_read.
raise CsError(CS_ERR_DIET)
if self._cs._detail:
return self._detail.regs_read[:self._detail.regs_read_count]
raise CsError(CS_ERR_DETAIL)
# return list of all implicit registers being modified
@property
def regs_write(self):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide @regs_write
raise CsError(CS_ERR_DIET)
if self._cs._detail:
return self._detail.regs_write[:self._detail.regs_write_count]
raise CsError(CS_ERR_DETAIL)
# return list of semantic groups this instruction belongs to.
@property
def groups(self):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide @groups
raise CsError(CS_ERR_DIET)
if self._cs._detail:
return self._detail.groups[:self._detail.groups_count]
raise CsError(CS_ERR_DETAIL)
def __gen_detail(self):
arch = self._cs.arch
if arch == CS_ARCH_ARM:
(self.usermode, self.vector_size, self.vector_data, self.cps_mode, self.cps_flag, self.cc, self.update_flags, \
self.writeback, self.mem_barrier, self.operands) = arm.get_arch_info(self._detail.arch.arm)
elif arch == CS_ARCH_ARM64:
(self.cc, self.update_flags, self.writeback, self.operands) = \
arm64.get_arch_info(self._detail.arch.arm64)
elif arch == CS_ARCH_X86:
(self.prefix, self.opcode, self.rex, self.addr_size, \
self.modrm, self.sib, self.disp, \
self.sib_index, self.sib_scale, self.sib_base, self.sse_cc, \
self.avx_cc, self.avx_sae, self.avx_rm, self.operands) = x86.get_arch_info(self._detail.arch.x86)
elif arch == CS_ARCH_MIPS:
self.operands = mips.get_arch_info(self._detail.arch.mips)
elif arch == CS_ARCH_PPC:
(self.bc, self.bh, self.update_cr0, self.operands) = \
ppc.get_arch_info(self._detail.arch.ppc)
elif arch == CS_ARCH_SPARC:
(self.cc, self.hint, self.operands) = sparc.get_arch_info(self._detail.arch.sparc)
elif arch == CS_ARCH_SYSZ:
(self.cc, self.operands) = systemz.get_arch_info(self._detail.arch.sysz)
elif arch == CS_ARCH_XCORE:
(self.operands) = xcore.get_arch_info(self._detail.arch.xcore)
def __getattr__(self, name):
if not self._cs._detail:
raise CsError(CS_ERR_DETAIL)
attr = object.__getattribute__
if not attr(self, '_cs')._detail:
raise AttributeError(name)
_dict = attr(self, '__dict__')
if 'operands' not in _dict:
self.__gen_detail()
if name not in _dict:
raise AttributeError(name)
return _dict[name]
# get the last error code
def errno(self):
return _cs.cs_errno(self._cs.csh)
# get the register name, given the register ID
def reg_name(self, reg_id):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide register name
raise CsError(CS_ERR_DIET)
if reg_id == 0:
return "(invalid)"
return _cs.cs_reg_name(self._cs.csh, reg_id).decode('ascii')
# get the instruction name
def insn_name(self):
if self._cs._diet:
# Diet engine cannot provide instruction name
raise CsError(CS_ERR_DIET)
if self._raw.id == 0:
return "(invalid)"
return _cs.cs_insn_name(self._cs.csh, self.id).decode('ascii')
# get the group name
def group_name(self, group_id):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide register name
raise CsError(CS_ERR_DIET)
if group_id == 0:
return "(invalid)"
return _cs.cs_group_name(self._cs.csh, group_id).decode('ascii')
# verify if this insn belongs to the group with id @group_id
def group(self, group_id):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide group information
raise CsError(CS_ERR_DIET)
return group_id in self.groups
# verify if this instruction implicitly reads register @reg_id
def reg_read(self, reg_id):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide regs_read information
raise CsError(CS_ERR_DIET)
return reg_id in self.regs_read
# verify if this instruction implicitly modifies register @reg_id
def reg_write(self, reg_id):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
if self._cs._diet:
# Diet engine cannot provide regs_write information
raise CsError(CS_ERR_DIET)
return reg_id in self.regs_write
# return the number of operands having the same operand type @op_type
def op_count(self, op_type):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
c = 0
for op in self.operands:
if op.type == op_type:
c += 1
return c
# get the operand at position @position of all operands having the same type @op_type
def op_find(self, op_type, position):
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
c = 0
for op in self.operands:
if op.type == op_type:
c += 1
if c == position:
return op
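# Illustrative sketch (not part of the original binding): how CsInsn objects are
# usually obtained and inspected.  It relies on the Cs class defined just below,
# and CS_ARCH_X86/CS_MODE_64 are assumed to be defined earlier in this module.
def _example_csinsn_inspection():
    md = Cs(CS_ARCH_X86, CS_MODE_64)
    md.detail = True  # required before touching regs_read/regs_write/groups/operands
    for insn in md.disasm(b"\x55\x48\x8b\x05\xb8\x13\x00\x00", 0x1000):
        print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
        # these detail-only attributes raise CsError(CS_ERR_DETAIL) when detail is off
        print("reads: %s  writes: %s  groups: %s" % (insn.regs_read, insn.regs_write, insn.groups))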
class Cs(object):
def __init__(self, arch, mode):
# verify version compatibility with the core before doing anything
(major, minor, _combined) = cs_version()
if major != CS_API_MAJOR or minor != CS_API_MINOR:
self.csh = None
# our binding version is different from the core's API version
raise CsError(CS_ERR_VERSION)
self.arch, self._mode = arch, mode
self.csh = ctypes.c_size_t()
status = _cs.cs_open(arch, mode, ctypes.byref(self.csh))
if status != CS_ERR_OK:
self.csh = None
raise CsError(status)
try:
import ccapstone
# rewire disasm to use the faster version
self.disasm = ccapstone.Cs(self).disasm
except:
pass
if arch == CS_ARCH_X86:
# Intel syntax is default for X86
self._syntax = CS_OPT_SYNTAX_INTEL
else:
self._syntax = None
self._detail = False # by default, do not produce instruction details
self._diet = cs_support(CS_SUPPORT_DIET)
self._x86reduce = cs_support(CS_SUPPORT_X86_REDUCE)
# default mnemonic for SKIPDATA
self._skipdata_mnem = ".byte"
self._skipdata = False
# destructor to be called automatically when object is destroyed.
def __del__(self):
if self.csh:
try:
status = _cs.cs_close(ctypes.byref(self.csh))
if status != CS_ERR_OK:
raise CsError(status)
except: # _cs might be pulled from under our feet
pass
# def option(self, opt_type, opt_value):
# return _cs.cs_option(self.csh, opt_type, opt_value)
# is this a diet engine?
@property
def diet(self):
return self._diet
# is this engine compiled with X86-reduce option?
@property
def x86_reduce(self):
return self._x86reduce
# return assembly syntax.
@property
def syntax(self):
return self._syntax
# syntax setter: modify assembly syntax.
@syntax.setter
def syntax(self, style):
status = _cs.cs_option(self.csh, CS_OPT_SYNTAX, style)
if status != CS_ERR_OK:
raise CsError(status)
# save syntax
self._syntax = style
# return current skipdata status
@property
def skipdata(self):
return self._skipdata
# setter: modify skipdata status
@skipdata.setter
def skipdata(self, opt):
if opt == False:
status = _cs.cs_option(self.csh, CS_OPT_SKIPDATA, CS_OPT_OFF)
else:
status = _cs.cs_option(self.csh, CS_OPT_SKIPDATA, CS_OPT_ON)
if status != CS_ERR_OK:
raise CsError(status)
# save this option
self._skipdata = opt
def skipdata_setup(self, opt):
_skipdata_opt = _cs_opt_skipdata()
_mnem, _cb, _ud = opt
_skipdata_opt.mnemonic = _mnem.encode()
_skipdata_opt.callback = CS_SKIPDATA_CALLBACK(_cb)
_skipdata_opt.user_data = ctypes.cast(_ud, ctypes.c_void_p)
status = _cs.cs_option(self.csh, CS_OPT_SKIPDATA_SETUP, ctypes.cast(ctypes.byref(_skipdata_opt), ctypes.c_void_p))
if status != CS_ERR_OK:
raise CsError(status)
self._skipdata_opt = _skipdata_opt
# check to see if this engine supports a particular arch,
# or diet mode (depending on @query).
def support(self, query):
return cs_support(query)
# is detail mode enabled?
@property
def detail(self):
return self._detail
# modify detail mode.
@detail.setter
def detail(self, opt): # opt is boolean type, so must be either 'True' or 'False'
if opt == False:
status = _cs.cs_option(self.csh, CS_OPT_DETAIL, CS_OPT_OFF)
else:
status = _cs.cs_option(self.csh, CS_OPT_DETAIL, CS_OPT_ON)
if status != CS_ERR_OK:
raise CsError(status)
# save detail
self._detail = opt
# return disassembly mode of this engine.
@property
def mode(self):
return self._mode
# modify engine's mode at run-time.
@mode.setter
def mode(self, opt): # opt is new disasm mode, of int type
status = _cs.cs_option(self.csh, CS_OPT_MODE, opt)
if status != CS_ERR_OK:
raise CsError(status)
# save mode
self._mode = opt
# Disassemble binary & return disassembled instructions in CsInsn objects
def disasm(self, code, offset, count=0):
all_insn = ctypes.POINTER(_cs_insn)()
'''if not _python2:
print(code)
code = code.encode()
print(code)'''
# Hack, unicorn's memory accessors give you back bytearrays, but they
# cause TypeErrors when you hand them into Capstone.
if isinstance(code, bytearray):
code = bytes(code)
res = _cs.cs_disasm(self.csh, code, len(code), offset, count, ctypes.byref(all_insn))
if res > 0:
try:
for i in range(res):
yield CsInsn(self, all_insn[i])
finally:
_cs.cs_free(all_insn, res)
else:
status = _cs.cs_errno(self.csh)
if status != CS_ERR_OK:
raise CsError(status)
return
yield
# Light function to disassemble binary. This is about 20% faster than disasm() because
# unlike disasm(), disasm_lite() only returns tuples of (address, size, mnemonic, op_str),
# rather than CsInsn objects.
def disasm_lite(self, code, offset, count=0):
if self._diet:
# Diet engine cannot provide @mnemonic & @op_str
raise CsError(CS_ERR_DIET)
all_insn = ctypes.POINTER(_cs_insn)()
res = _cs.cs_disasm(self.csh, code, len(code), offset, count, ctypes.byref(all_insn))
if res > 0:
try:
for i in range(res):
insn = all_insn[i]
yield (insn.address, insn.size, insn.mnemonic.decode('ascii'), insn.op_str.decode('ascii'))
finally:
_cs.cs_free(all_insn, res)
else:
status = _cs.cs_errno(self.csh)
if status != CS_ERR_OK:
raise CsError(status)
return
yield
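# Illustrative sketch (not part of the original binding): a slightly fuller tour
# of the Cs class above -- switching assembly syntax, enabling SKIPDATA so the
# iterator keeps going over undecodable bytes, and using the lighter
# disasm_lite() path.  CS_ARCH_X86, CS_MODE_64 and CS_OPT_SYNTAX_ATT are assumed
# to be defined earlier in this module.
def _example_cs_class():
    md = Cs(CS_ARCH_X86, CS_MODE_64)
    md.syntax = CS_OPT_SYNTAX_ATT  # routed through cs_option(CS_OPT_SYNTAX, ...)
    md.skipdata = True             # undecodable bytes come back as ".byte" pseudo-instructions
    for (address, size, mnemonic, op_str) in md.disasm_lite(b"\x90\xff\xff\x90", 0x1000):
        print("0x%x (%u bytes): %s %s" % (address, size, mnemonic, op_str))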
# print out debugging info
def debug():
# is Cython there?
try:
from . import ccapstone
return ccapstone.debug()
except:
# no Cython, fallback to Python code below
pass
if cs_support(CS_SUPPORT_DIET):
diet = "diet"
else:
diet = "standard"
archs = { "arm": CS_ARCH_ARM, "arm64": CS_ARCH_ARM64, \
"mips": CS_ARCH_MIPS, "ppc": CS_ARCH_PPC, "sparc": CS_ARCH_SPARC, \
"sysz": CS_ARCH_SYSZ, 'xcore': CS_ARCH_XCORE }
all_archs = ""
keys = archs.keys()
for k in sorted(keys):
if cs_support(archs[k]):
all_archs += "-%s" % k
if cs_support(CS_ARCH_X86):
all_archs += "-x86"
if cs_support(CS_SUPPORT_X86_REDUCE):
all_archs += "_reduce"
(major, minor, _combined) = cs_version()
return "python-%s%s-c%u.%u-b%u.%u" % (diet, all_archs, major, minor, CS_API_MAJOR, CS_API_MINOR)