hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf226702a58cd66b8218eb1a113b8f6fd669441 | 12,784 | py | Python | python/vsi/utils/image_utils.py | cabdiweli1/vsi_common | 179a607fe7834bcd5ca35804c05c3d9909269b94 | [
"MIT"
] | 7 | 2017-09-04T01:53:43.000Z | 2020-12-20T20:21:35.000Z | python/vsi/utils/image_utils.py | cabdiweli1/vsi_common | 179a607fe7834bcd5ca35804c05c3d9909269b94 | [
"MIT"
] | 165 | 2017-07-10T21:56:07.000Z | 2022-03-22T12:20:56.000Z | python/vsi/utils/image_utils.py | cabdiweli1/vsi_common | 179a607fe7834bcd5ca35804c05c3d9909269b94 | [
"MIT"
] | 8 | 2017-06-23T20:37:16.000Z | 2020-09-08T21:00:26.000Z | """ A collection of utility functions related to Image data """
import numpy as np
import PIL.Image as Image
try:
from itertools import izip
except ImportError:
izip = zip
import scipy.ndimage.filters
import skimage.transform
def sk_resize(img, nsize=None, nscale=None, **kwargs):
    """ make skimage.transform.resize() behave in a sane way

    Resizes while preserving the image's dtype and value range:
    skimage.transform.resize() rescales/clips images depending on dtype,
    so the data is normalized to [0, 1] before resizing and restored
    afterwards.

    Parameters
    ----------
    img : array_like
        The image
    nsize : array_like, optional
        nsize=[nr, nc] : resize each channel of img
    nscale : array_like, optional
        per-axis scale factors [sr, sc]; used when nsize is None
    **kwargs
        arbitrary keyword arguments, forwarded to skimage.transform.resize()

    Returns
    -------
    numpy.array
        The image scaled

    Raises
    ------
    Exception
        Where there is an unexpected nsize
    """
    assert nsize is not None or nscale is not None, \
        'either nsize or nscale must be set'
    if nsize is None:
        nrows, ncols = img.shape[0:2]
        # int() because round() can return a float, and resize() expects an
        # integral output shape
        nsize = int(round(nscale[0]*nrows)), int(round(nscale[1]*ncols))

    if (len(nsize) != 2) and (len(nsize) != len(img.shape)):
        raise Exception('Unexpected nsize ' + str(nsize))

    # no need to resize
    # return a copy since that is the normal behavior of this function
    if np.all(np.array(img.shape[0:2]) == nsize):
        return img.copy()

    # resize() rescales pos integer images to between 0 and 1, however, it
    # clips neg values at 0
    in_type = img.dtype
    if issubclass(in_type.type, np.integer):
        # REVIEW dont always need to use double here...
        img = img.astype(dtype=np.double)  # copies array

    # resize() expects floating-point images to be scaled between 0 and 1
    # (otherwise it clips image!!). scale and rescale to prevent
    min_val = np.nanmin(img)
    max_val = np.nanmax(img) - min_val
    if max_val == 0:
        # constant-valued image: avoid a divide-by-zero; any nonzero scale
        # works because (img - min_val) is all zeros
        max_val = 1.0
    img = (img - min_val) / max_val

    # WARNING this is not equivilent to PIL.resize(), which at least
    # downsamples by selecting entire rows/cols when order=0
    # (nearest-neighbor interp)
    img_scaled = skimage.transform.resize(img, nsize, **kwargs)
    img_scaled = img_scaled * max_val + min_val

    if issubclass(in_type.type, np.integer):
        img_scaled = np.round(img_scaled)
        img_scaled = img_scaled.astype(dtype=in_type)  # will copy array

    return img_scaled
def rgb2gray(rgb):
    """ convert an rgb image stored as a numpy array to grayscale

    Uses the ITU-R BT.601 luma weights: 0.299 R + 0.587 G + 0.114 B.

    Parameters
    ----------
    rgb : array_like
        The RGB image (channels last; any channels beyond the first
        three are ignored)

    Returns
    -------
    numpy.array
        A grayscale image with the same dtype as the input
    """
    # FIXED: the blue coefficient was previously 0.144; the correct
    # BT.601 weight is 0.114.  Also use the builtin float: np.float is a
    # deprecated alias removed in modern numpy.
    gr = np.dot(rgb[..., :3].astype(float), [0.299, 0.587, 0.114])
    if rgb.dtype == np.uint8:
        # clamp before casting back so uint8 conversion cannot wrap around
        gr[gr > 255] = 255
        gr[gr < 0] = 0
    return gr.astype(rgb.dtype)
def weighted_smooth(image, weights, pyramid_min_dim=50, convergence_thresh=0.01, max_its=1000):
    """ smooth the values in image using a multi-scale regularization.

    Pixels with weight near 1 are held close to their original values,
    while pixels with weight near 0 are free to be smoothed over.  The
    iteration runs coarse-to-fine over an image pyramid so that large
    smooth regions converge quickly.

    Parameters
    ----------
    image : array_like
        The image
    weights : float
        weights should be the same dimensions as image, with values in range (0,1)
    pyramid_min_dim : array_like, optional
        stop building pyramid levels once either dimension would drop to
        this size or below
    convergence_thresh : float, optional
        The convergence threshold: max absolute per-pixel change between
        successive iterations at which a level is considered converged
    max_its : int, optional
        maximum smoothing iterations per pyramid level

    Returns
    -------
    numpy.array
        The smooth image
    """
    # create image pyramids; level 0 is full resolution, each following
    # level halves both dimensions
    image_py = [Image.fromarray(image),]
    weight_py = [Image.fromarray(weights),]
    new_dims = (int(image_py[-1].size[0] / 2), int(image_py[-1].size[1] / 2))
    while np.min(new_dims) > pyramid_min_dim:
        image_level = image_py[-1].resize(new_dims, Image.BILINEAR)
        weight_level = weight_py[-1].resize(new_dims, Image.BILINEAR)
        image_py.append(image_level)
        weight_py.append(weight_level)
        new_dims = (int(image_py[-1].size[0] / 2), int(image_py[-1].size[1] / 2))
    num_levels = len(image_py)
    # initialize with top (coarsest level) of pyramid
    image_smooth_prev = np.array(image_py[num_levels-1])
    # traverse all levels of pyramid, coarsest to finest
    num_its = 0
    for l in reversed(range(num_levels)):
        weights_np = np.array(weight_py[l])
        for i in range(max_its):
            num_its = i+1
            # smooth image
            image_smooth = scipy.ndimage.filters.gaussian_filter(image_smooth_prev, sigma=1.0, mode='nearest')
            # compute weighted sum of smoothed image and original
            # (numpy coerces the PIL image in image_py[l] to an array here)
            image_smooth = weights_np*image_py[l] + (1.0 - weights_np)*image_smooth
            # test for convergence
            maxdiff = np.abs(image_smooth - image_smooth_prev).max()
            if maxdiff <= convergence_thresh:
                break
            image_smooth_prev = image_smooth
        print('level %d: %d iterations' % (l,num_its))
        # initialize next level with output from previous level, upsampled
        # to the next-finer level's dimensions
        if l > 0:
            image_smooth_prev_pil = Image.fromarray(image_smooth_prev)
            image_smooth_prev = np.array(image_smooth_prev_pil.resize(image_py[l-1].size))
    return image_smooth
def mutual_information(img1, img2, min_val, max_val, nbins):
    """ compute mutual information of img1 and img2

    Builds a joint histogram of the two (flattened) images over nbins
    uniform bins spanning [min_val, max_val], and returns the mutual
    information of the resulting joint distribution, normalized by
    log(nbins) so the result lies in [0, 1].

    Parameters
    ----------
    img1 : array_like
        The first image
    img2 : array_like
        The second image
    min_val : float
        The minimum of the value range used for binning
    max_val : float
        The maximum of the value range used for binning
    nbins : int
        number of histogram bins per image

    Returns
    -------
    float
        Normalized mutual information of img1 and img2
    """
    i1 = np.array(img1).ravel().astype('float')
    i2 = np.array(img2).ravel().astype('float')
    val_range = max_val - min_val
    # map each value to a bin index.
    # FIXED: values exactly equal to max_val used to produce index nbins
    # (IndexError), and values below min_val produced negative indices that
    # silently wrapped; clip into [0, nbins-1].
    b1 = np.clip(((i1 - min_val) / val_range * nbins).astype('int'),
                 0, nbins - 1)
    b2 = np.clip(((i2 - min_val) / val_range * nbins).astype('int'),
                 0, nbins - 1)
    # fill in counts; np.add.at accumulates correctly for repeated index
    # pairs (a vectorized replacement for the previous python loop)
    counts = np.zeros((nbins, nbins))
    np.add.at(counts, (b1, b2), 1)
    total = float(len(i1))
    # marginal and joint probabilities
    p0 = counts.sum(axis=0) / total
    p1 = counts.sum(axis=1) / total
    p01 = counts / total
    ind_prob = np.tile(p0.reshape(1, nbins), (nbins, 1)) * \
               np.tile(p1.reshape(nbins, 1), (1, nbins))
    # avoid divide by zero: cells with zero joint probability contribute 0
    # to the sum, so substitute 1s there before taking the log
    zero_mask = (p01 == 0).astype('float')
    p01_clean = p01 + zero_mask
    ind_prob_clean = ind_prob + zero_mask
    mi_matrix = p01 * np.log(p01_clean / ind_prob_clean)
    # normalize by log(nbins) (the maximum possible MI)
    return mi_matrix.sum() / np.log(nbins)
def normalized_cross_correlation(img1, img2):
    """ compute the normalized cross correlation of img1 and img2

    Parameters
    ----------
    img1 : array_like
        The first image
    img2 : array_like
        The second image

    Returns
    -------
    numpy.array
        The normalized cross correlation of image 1 and image 2,
        in [-1, 1] for non-constant inputs
    """
    def _center_unit(img):
        # flatten, subtract the mean, and scale to unit L2 norm
        # (constant images have zero norm and are left as zeros)
        v = np.array(img, 'float').ravel()
        v -= v.mean()
        magnitude = np.sqrt(np.sum(v * v))
        if magnitude > 0:
            v /= magnitude
        return v

    # NCC is the dot product of the two normalized vectors
    return np.dot(_center_unit(img1), _center_unit(img2))
def sample_point(image, pt):
    """ return the pixel value, or None if the point is outside image bounds

    Parameters
    ----------
    image : array_like
        The image (PIL-style: has .size and .getpixel())
    pt : array_like
        The point as (x, y)

    Returns
    -------
    array_like
        The pixel value (None if the point is outside the image bounds)
    """
    x, y = pt[0], pt[1]
    width, height = image.size[0], image.size[1]
    if 0 <= x < width and 0 <= y < height:
        return image.getpixel(tuple(pt))
    return None
def sample_patch(image, corners, patch_size, check_bounds=True):
    """ return an Image of size patch_size, or None if the patch is outside
    image bounds

    Parameters
    ----------
    image : array_like
        The image (PIL Image)
    corners : array_like
        The four corners of the quad to sample, in image coordinates
    patch_size : array_like
        The patch size
    check_bounds : bool, optional
        When True, return None if any corner falls outside the image.

    Returns
    -------
    array_like
        The transformed image of size patch_size (nearest-neighbor sampled)
    """
    if check_bounds:
        width, height = image.size[0], image.size[1]
        for c in corners:
            if not (0 <= c[0] < width and 0 <= c[1] < height):
                return None
    # flatten the four corners into the 8-tuple PIL expects for QUAD
    quad = (corners[0][0], corners[0][1],
            corners[1][0], corners[1][1],
            corners[2][0], corners[2][1],
            corners[3][0], corners[3][1])
    # transform is counting on patch_size to be a tuple, not numpy array
    out_size = (patch_size[0], patch_size[1])
    return image.transform(out_size, Image.QUAD, quad, Image.NEAREST)
def sample_plane_inverse(planar_patch, plane_origin, plane_x, plane_y, img_shape, camera):
    """ return the projection of a planar patch into the image

    Parameters
    ----------
    planar_patch : array_like
        The planar patch to project into the image (as a 2-d numpy array)
    plane_origin : array_like
        3-d point corresponding to the upper left of the patch
    plane_x : array_like
        3-d vector from origin to extent of patch in the "x" direction
    plane_y : array_like
        3-d vector from origin to extent of patch in the "y" direction:
        assumed perpendicular to plane_x
    img_shape : array_like
        the dimensions (rows, cols) of the image to project into
    camera : array_like
        a PinholeCamera

    Returns
    -------
    numpy.array
        The projection of a planar patch into the image
    """
    xlen = np.linalg.norm(plane_x)
    ylen = np.linalg.norm(plane_y)
    rows, cols = planar_patch.shape[0], planar_patch.shape[1]
    # pixels-per-world-unit scaling from plane coordinates to patch pixels
    plane2patch = np.array(((cols / xlen, 0, 0),
                            (0, rows / ylen, 0),
                            (0, 0, 1)))
    # compose: image coords -> plane coords -> patch pixel coords
    image2plane = camera.image2plane(plane_origin, plane_x, plane_y)
    img2patch = np.dot(plane2patch, image2plane)
    return sample_patch_projective(planar_patch, img2patch, img_shape)
def sample_plane(image, camera, plane_origin, plane_x, plane_y, patch_shape):
    """ return a sampled patch based on the 3-d plane defined by plane_origin,
    plane_x, and plane_y

    Parameters
    ----------
    image : array_like
        The image
    camera : array_like
        A PinholeCamera
    plane_origin : array_like
        3-d point corresponding to the upper left of the patch
    plane_x : array_like
        3-d vector from origin to extent of patch in the "x" direction
    plane_y : array_like
        3-d vector from origin to extent of patch in the "y" direction:
        assumed perpendicular to plane_x
    patch_shape : array_like
        The patch shape (rows, cols)

    Returns
    -------
    numpy.array
        A sampled patch
    """
    xlen = np.linalg.norm(plane_x)
    ylen = np.linalg.norm(plane_y)
    rows, cols = patch_shape[0], patch_shape[1]
    # world-units-per-pixel scaling from patch pixels to plane coordinates
    patch2plane = np.array(((xlen / cols, 0, 0),
                            (0, ylen / rows, 0),
                            (0, 0, 1)))
    # compose: patch pixel coords -> plane coords -> image coords
    plane2image = camera.plane2image(plane_origin, plane_x, plane_y)
    patch2img = np.dot(plane2image, patch2plane)
    return sample_patch_projective(image, patch2img, patch_shape)
def sample_patch_projective(image, inv_xform_3x3, patch_shape):
    """ return a warped image as a numpy array with dtype float64 of size
    patch_size. if input image is not already of type float64, it will be
    converted

    Parameters
    ----------
    image : array_like
        The image
    inv_xform_3x3 : array_like
        Homogeneous transformation matrix (the inverse map expected by
        skimage.transform.warp: patch coordinates -> image coordinates)
    patch_shape : array_like
        The patch shape (rows, cols)

    Returns
    -------
    numpy.array
        The warped image of size patch_shape; pixels that sample outside
        the source image are NaN (cval)
    """
    P = skimage.transform.ProjectiveTransform(inv_xform_3x3)
    # skimage clips values to range [0,1] for floating point images.
    # do scale and unscale here.
    do_scale = False
    og_dtype = image.dtype
    # FIXED: this previously tested membership in
    # (np.float32, np.float64, np.float128, np.float16); np.float128 does
    # not exist on all platforms (e.g. Windows), which made the tuple
    # construction itself raise AttributeError.  np.issubdtype covers all
    # float widths portably.
    if np.issubdtype(og_dtype, np.floating):
        minval = np.nanmin(image)
        maxval = np.nanmax(image)
        # if range is good, no need to rescale
        if minval < 0.0 or maxval > 1.0:
            do_scale = True
            # make a copy of image so as not to corrupt original data
            image = image.copy()
            scale_factor = maxval - minval
            image -= minval
            if scale_factor != 0:
                image /= scale_factor
    # do the warping
    patch = skimage.transform.warp(image, P, output_shape=patch_shape,
                                   mode='constant', cval=np.nan)
    # revert to original type if necessary
    if og_dtype != patch.dtype:
        if og_dtype == np.uint8:
            patch = skimage.img_as_ubyte(patch)
        elif og_dtype == np.bool_:
            # np.bool_ rather than np.bool: the latter is a deprecated
            # alias removed in modern numpy
            patch = skimage.img_as_bool(patch)
        elif og_dtype == np.uint16:
            patch = skimage.img_as_uint(patch)
        elif og_dtype == np.int16:
            patch = skimage.img_as_int(patch)
        else:
            # just do a straight cast, hope for the best
            patch_out = np.zeros(patch.shape, og_dtype)
            np.copyto(patch_out, patch)
            patch = patch_out
    # unscale if necessary
    if do_scale:
        patch *= scale_factor
        patch += minval
    return patch
def sample_patch_perspective(image, inv_xform_3x3, patch_size):
    """ return an Image of size patch_size, along with a validity mask

    Parameters
    ----------
    image : array_like
        The image (PIL Image)
    inv_xform_3x3 : array_like
        Homogeneous transformation matrix
    patch_size : array_like
        The patch size

    Returns
    -------
    tuple
        (patch, mask): the warped image of size patch_size, and a mask
        image that is nonzero where the patch sampled inside the source
    """
    out_size = (patch_size[0], patch_size[1])
    # normalize so the homogeneous scale term is 1, flattened into the
    # 9-element coefficient form PIL expects
    coeffs = inv_xform_3x3.reshape(9,) / inv_xform_3x3[2, 2]
    warped = image.transform(out_size, Image.PERSPECTIVE, coeffs,
                             Image.NEAREST)
    # warp an all-255 image through the same transform: zero pixels in the
    # result mark samples that fell outside the source image
    white = Image.new('L', image.size, 255)
    valid_mask = white.transform(out_size, Image.PERSPECTIVE, coeffs,
                                 Image.NEAREST)
    return warped, valid_mask
| 30.00939 | 117 | 0.678191 |
acf226b71872f5de098c00d8e94a8757e76c7150 | 13,867 | py | Python | X3_Customizer/Make_Executable.py | bvbohnen/X3_Customizer | 7e8808ee33c463348a3d12959b9311fff17e224d | [
"MIT"
] | 4 | 2018-01-17T16:49:05.000Z | 2018-05-16T20:41:02.000Z | X3_Customizer/Make_Executable.py | bvbohnen/X3_Customizer | 7e8808ee33c463348a3d12959b9311fff17e224d | [
"MIT"
] | 1 | 2021-07-23T00:50:08.000Z | 2021-07-23T00:50:08.000Z | X3_Customizer/Make_Executable.py | bvbohnen/X3_Customizer | 7e8808ee33c463348a3d12959b9311fff17e224d | [
"MIT"
] | 1 | 2018-10-13T18:19:02.000Z | 2018-10-13T18:19:02.000Z | '''
Use PyInstaller to bundle the python code into a standalone executable.
While it is unclear on how to use PyInstaller directly as a python
package, a specifications file can be generated here, and pyinstaller
called as a subprocess to run the compilation, and then the
expected output folders checked for files and manipulated as needed.
Note:
Pyinstaller has two main modes for generated code:
1) Multiple files in a single folder, with dlls, python bytecode, etc.
broken out, and the exe acting as a booststrap.
This creates an ugly folder with a bunch of files lumped in
with the exe.
2) Single exe which contains the above files in a packed form, and
unpacks them to a temp directory when executed, then acting similar
to (1).
This is stated as being slower to start, and not cleaning up after
itself if the program terminates early, which could be bad since
this customizer is expected to crash when users provide unsupported
arguments and such (plus it has its own potential bugs to worry
about).
Option (1) should be a lot safer, and its ugliness has a couple possible
workarounds:
1) Create a shortcut in a higher level folder which links to the exe,
so the user can just use the shortcut.
This is going to be windows-specific, but most or all expected
users are likely on windows, so this should be okay.
In testing, this seems to have problems with handling command
line args properly.
2) Put misc files into a subfolder, and add that subfolder to the
system search path using a runtime hook, essentially python code
that runs at startup before other modules are loaded.
See https://stackoverflow.com/questions/19579979/pyinstaller-changing-dll-and-pyd-output-location
for an example.
In practice, any subfolder seems to work without needing the hook,
though a couple files still need to be kept with the exe to
the basic bootstrapping.
3) Make a bat file to launch the app, which can more easily pass
command line args.
TODO:
Consider bundling with documentation into a zip file automatically,
include all misc files from the source and patches folders.
'''
import argparse
import sys
import os
import shutil
# Conditional import of pyinstaller, checking if it is available.
# Exit early with a clear message instead of failing later inside the
# subprocess call.
try:
    import PyInstaller
except ImportError:
    # FIXED: was a bare "except:", which would also swallow
    # KeyboardInterrupt/SystemExit; only a failed import should trigger
    # this message.
    print('Error: PyInstaller not found.')
    sys.exit()
import subprocess

# Absolute path of the directory holding this file, so the build works
# regardless of the caller's current working directory.
This_dir = os.path.normpath(os.path.join(os.path.dirname(__file__)))
def Make(*args):
    """
    Build the X3_Customizer executable using pyinstaller.

    Parameters
    ----------
    *args
        Command-line style string arguments; see the argparse setup below
        for supported flags (-preclean, -postclean, -o/-O, -oo/-OO).

    Side effects: writes a .spec and runtime-hook file next to this
    script (cleaned up at the end), deletes and repopulates the ../bin
    output folder, invokes pyinstaller as a subprocess, and writes
    launcher .bat files one directory up.
    """
    # Scrap the first arg if it is a .py file; this is mostly because
    # visual studio is set to insert it for normal customizer
    # calls, and it is bothersome to remove that when testing
    # this script.
    try:
        if args[0].endswith('.py'):
            args = args[1:]
    except:
        pass

    # Set up command line arguments.
    argparser = argparse.ArgumentParser(
        description='Generate an executable from the X3 Customizer source'
                    ' python code, using pyinstaller.',
        )

    #-Removed; single file support is maybe a bad idea, as noted
    # in comments above.
    #argparser.add_argument(
    #    '-single_file',
    #    action='store_true',
    #    help = 'Tells pyinstaller to create a single exe file, instead'
    #           ' of bundling multiple files in a folder.')

    argparser.add_argument(
        '-preclean',
        action='store_true',
        help = 'Force pyinstaller to do a fresh compile, ignoring any'
               ' work from a prior build.')

    argparser.add_argument(
        '-postclean',
        action='store_true',
        help = 'Delete the pyinstaller work folder when done, though this'
               ' will slow down rebuilds.')

    argparser.add_argument(
        '-o', '-O',
        action='store_true',
        help = 'Compile with basic python optimization, removing assertions'
               ' and perhaps some debug checks.')

    argparser.add_argument(
        '-oo', '-OO',
        action='store_true',
        help = 'Compile with more python optimization, similar to -O'
               ' but also trimming docstrings to reduce code size.')

    # Run the parser on the input args.
    parsed_args = argparser.parse_args(args)

    # Set the output folder names.
    # Note: changing the pyinstaller build and dist folder names is
    # possible, but not through the spec file (at least not easily),
    # so do it through the command line call.
    build_folder = os.path.join('..','pyinstaller_build_files')

    # Pick the final location to place the exe and support files.
    # This should have the same relative path to reach any common
    # files in the source and patches folders, which can be
    # moved up under the main level (so the x3_customizer exe
    # can be down one folder, and the python down another folder).
    dist_folder = os.path.normpath(os.path.join(This_dir, '..', 'bin'))

    # Subfolder to shove misc exe support files into.
    # Update: the new pyinstaller with python 3.7 doesn't like moving
    # these files away from the exe.
    exe_support_folder = os.path.join(dist_folder)#, 'support')

    program_name = 'X3_Customizer'

    # Note: it would be nice to put the spec file in a subfolder, but
    # pyinstaller messes up (seems to change its directory to wherever
    # the spec file is) and can't find the source python, so the spec
    # file needs to be kept in the main dir and cleaned up at the end.
    spec_file_path = 'X3_Customizer.spec'

    # Hook file probably works like the spec file.
    hook_file_path = 'pyinstaller_x3c_hook.py'

    # Change the working directory to here.
    # May not be necessary, but pyinstaller not tested for running
    # from other directories, and this just makes things easier
    # in general.
    original_cwd = os.getcwd()
    os.chdir(This_dir)

    # Generate lines for a hook file.
    # With the packaging of X3_Customizer, this doesn't appears to
    # be needed anymore.
    # TODO: maybe remove entirely.
    hook_lines = []

    # Prepare the specification file expected by pyinstaller.
    spec_lines = []

    # Note: most places where paths may be used should be set as
    # raw strings, so the os.path forward slashes will get treated
    # as escapes when python parses them again otherwise.

    # Analysis block specifies details of the source files to process.
    spec_lines += [
        'a = Analysis(',
        ' [',
        # Files to include.
        # It seems like only the main x3_customizer is needed, everything
        # else getting recognized correctly.
        ' "Main.py",',
        ' ],',
        # Relative path to work in; just use here.
        ' pathex = [r"{}"],'.format(This_dir),
        # Misc external binaries; unnecessary.
        ' binaries = [],',
        # Misc data files. While the source/patches folders could be
        # included, it makes more sense to somehow launch the generated
        # exe from the expected location so those folders are
        # seen as normal.
        ' datas = [],',
        # Misc imports pyinstaller didn't see.
        # It seems to catch everything.
        ' hiddenimports = [],',
        ' hookspath = [],',
        # Extra python files to run when the exe starts up.
        ' runtime_hooks = [',
        ' "{}",'.format(hook_file_path),
        ' ],',
        # Exclude scipy, since it adds 500 MB to the 12 MB compile.
        # Code which uses scipy should have an appropriate backup.
        # Also skip numpy and matplotlib, which are only present for
        # some optional scaling equation verification.
        ' excludes = [',
        ' r"scipy",',
        ' r"numpy",',
        ' r"matplotlib",',
        ' ],',
        ' win_no_prefer_redirects = False,',
        ' win_private_assemblies = False,',
        ' cipher = None,',
        ')',
        '',
        ]

    spec_lines += [
        'pyz = PYZ(a.pure, a.zipped_data,',
        ' cipher = None,',
        ')',
        '',
        ]

    spec_lines += [
        'exe = EXE(pyz,',
        ' a.scripts,',
        ' exclude_binaries = True,',
        ' name = "{}",'.format(program_name),
        ' debug = False,',
        ' strip = False,',
        ' upx = True,',
        ' console = True,',
        ')',
        '',
        ]

    spec_lines += [
        'coll = COLLECT(exe,',
        ' a.binaries,',
        ' a.zipfiles,',
        ' a.datas,',
        ' strip = False,',
        ' upx = True,',
        ' name = "{}",'.format(program_name),
        ')',
        '',
        ]

    # Write the spec and hook files to the build folder, creating it
    # if needed.
    if not os.path.exists(build_folder):
        os.mkdir(build_folder)
    with open(spec_file_path, 'w') as file:
        file.write('\n'.join(spec_lines))
    with open(hook_file_path, 'w') as file:
        file.write('\n'.join(hook_lines))

    # Delete the existing dist directory; pyinstaller has trouble with
    # this for some reason (maybe using os.remove, which also seemed
    # to have the same permission error pyinstaller gets).
    if os.path.exists(dist_folder):
        # Use shutil for this; os.remove is for a file, os.rmdir is
        # for an empty directory, but shutil rmtree will handle
        # the whole thing.
        shutil.rmtree(dist_folder)

    # Run pyinstaller.
    # This can call "pyinstaller" directly, assuming it is registered
    # on the command line, but it may be more robust to run python
    # and target the PyInstaller package.
    # By going through python, it is also possible to set optimization
    # mode that will be applied to the compiled code.
    # TODO: add optimization flag.
    pyinstaller_call_args = [
        'python',
        '-m', 'PyInstaller',
        spec_file_path,
        '--distpath', dist_folder,
        '--workpath', build_folder,
        ]

    # Set a clean flag if requested, making pyinstaller do a fresh
    # run. Alternatively, could just delete the work folder.
    if parsed_args.preclean:
        pyinstaller_call_args.append('--clean')

    # Add the optimization flag, OO taking precedence.
    # Put this flag before the -m and script name, else it gets fed
    # to the script.
    # (In practice, these seem to make little to no difference, but are
    # kinda neat to have anyway.)
    if parsed_args.oo:
        pyinstaller_call_args.insert(1, '-OO')
    elif parsed_args.o:
        pyinstaller_call_args.insert(1, '-O')

    subprocess.run(pyinstaller_call_args)

    # Check if the exe was created.
    exe_path = os.path.join(dist_folder, program_name, program_name + '.exe')
    if not os.path.exists(exe_path):
        # It wasn't found; quit early.
        print('Executable not created.')
        return

    # Move most files to a folder under the exe.
    # Create the folder to move to first.
    if not os.path.exists(exe_support_folder):
        os.mkdir(exe_support_folder)

    # Traverse the folder with the files; this was collected under
    # another folder with the name of the program.
    path_to_exe_files = os.path.join(dist_folder, program_name)
    for file_name in os.listdir(path_to_exe_files):

        # These select names will be kept together and moved to the
        # bin folder.
        # NOTE(review): 'python36.dll' looks stale relative to the
        # python-3.7 comment above -- verify against the actual
        # pyinstaller output.
        if file_name in [
            program_name + '.exe',
            'base_library.zip',
            'pyexpat.pyd',
            'python36.dll',
            ]:
            # Move the file up one level to the dist folder.
            shutil.move(
                os.path.join(path_to_exe_files, file_name),
                os.path.join(dist_folder, file_name),
                )
        else:
            # Move the file up one level, and down to the support folder.
            shutil.move(
                os.path.join(path_to_exe_files, file_name),
                os.path.join(exe_support_folder, file_name),
                )

    # Clean out the now empty folder in the dist directory.
    os.rmdir(path_to_exe_files)

    # Clean up the spec and hook files.
    os.remove(spec_file_path)
    os.remove(hook_file_path)

    # Delete the pyinstaller work folder, if requested.
    if parsed_args.postclean:
        if os.path.exists(build_folder):
            shutil.rmtree(build_folder)

    # Create a bat file for launching the exe from the top
    # level directory.
    with open(os.path.join(This_dir, '..', 'Launch_X3_Customizer.bat'), 'w') as file:
        file.write('\n'.join([
            # Disable the echo of the command.
            '@echo off',
            # Use '%*' to pass all command line args.
            # Add the '-default_script' arg, in case this is launched without
            # a specified user script.
            os.path.join('bin', program_name + '.exe') + ' %* -default_script',
            # Wait for user input, so they can read messages.
            'pause',
            ]))

    # Create an alternate version for cleaning out changes by
    # setting the -clean flag.
    with open(os.path.join(This_dir, '..', 'Clean_X3_Customizer.bat'), 'w') as file:
        file.write('\n'.join([
            # Disable the echo of the command.
            '@echo off',
            # Use '%*' to pass all command line args.
            os.path.join('bin', program_name + '.exe') + ' %* -default_script -clean',
            # Wait for user input, so they can read messages.
            'pause',
            ]))

    # Restory any original workind directory, in case this function
    # was called from somewhere else.
    os.chdir(original_cwd)
# Script entry point: build the executable directly when this file is run.
if __name__ == '__main__':
    # Feed all args except the first (which is the file name).
    Make(*sys.argv[1:])
| 35.92487 | 101 | 0.623495 |
acf226d242b59bbd43ca7a96bb98cf6e48ed3906 | 25,577 | py | Python | controllers/assess.py | unimauro/eden | b739d334e6828d0db14b3790f2f5e2666fc83576 | [
"MIT"
] | null | null | null | controllers/assess.py | unimauro/eden | b739d334e6828d0db14b3790f2f5e2666fc83576 | [
"MIT"
] | null | null | null | controllers/assess.py | unimauro/eden | b739d334e6828d0db14b3790f2f5e2666fc83576 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Assessments - Controller
"""
module = request.controller
resourcename = request.function
if module not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % module)
# =============================================================================
def index():
    """ Module's Home Page """
    # Human-readable module name from the deployment configuration
    nice_name = deployment_settings.modules[module].name_nice
    response.title = nice_name
    # The view expects the "module_name" key
    return dict(module_name=nice_name)
# -----------------------------------------------------------------------------
def create():
    """ Redirect to assess/create """
    target = URL(f="assess", args="create")
    redirect(target)
# =============================================================================
# UN Common Operational Datasets
# =============================================================================
def population():
    """ RESTful controller """
    # Delegate entirely to the standard S3 REST handler
    return s3_rest_controller()
# =============================================================================
# Rapid Assessments
# =============================================================================
def rat():
    """ Rapid Assessments, RESTful controller

    Configures form subheadings for each assessment section (the
    subheadings dict maps a translated heading label to the name of the
    first field in that group), installs pre/post processors, and hands
    off to the standard S3 REST handler.
    """

    # Load Models
    s3mgr.load("assess_rat")
    tablename = "%s_%s" % (module, resourcename)
    table = db[tablename]

    # Villages only
    #table.location_id.requires = IS_NULL_OR(IS_ONE_OF(db(db.gis_location.level == "L5"),
    #                                                  "gis_location.id",
    #                                                  repr_select, sort=True))

    # Subheadings in forms:
    s3mgr.configure("assess_section2",
        subheadings = {
            T("Population and number of households"): "population_total",
            T("Fatalities"): "dead_women",
            T("Casualties"): "injured_women",
            T("Missing Persons"): "missing_women",
            T("General information on demographics"): "household_head_elderly",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section3",
        subheadings = {
            T("Access to Shelter"): "houses_total",
            T("Water storage containers in households"): "water_containers_available",
            T("Other non-food items"): "cooking_equipment_available",
            T("Shelter/NFI Assistance"): "nfi_assistance_available",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section4",
        subheadings = {
            T("Water supply"): "water_source_pre_disaster_type",
            T("Water collection"): "water_coll_time",
            T("Places for defecation"): "defec_place_type",
            T("Environment"): "close_industry",
            T("Latrines"): "latrines_number",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section5",
        subheadings = {
            T("Health services status"): "health_services_pre_disaster",
            T("Current health problems"): "health_problems_adults",
            T("Nutrition problems"): "malnutrition_present_pre_disaster",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section6",
        subheadings = {
            T("Existing food stocks"): "food_stocks_main_dishes",
            # FIXED: this entry previously had the label and field name
            # swapped (T("food_sources"): "Food sources"), so the
            # subheading could never render.
            # NOTE(review): field name assumed to be "food_sources" --
            # verify against the assess_section6 table definition.
            T("Food sources"): "food_sources",
            T("Food assistance"): "food_assistance_available",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section7",
        subheadings = {
            "%s / %s" % (T("Sources of income"),
                         T("Major expenses")): "income_sources_pre_disaster",
            # FIXED: this entry previously had the label and field name
            # swapped (T("business_damaged"): "Access to cash").
            # NOTE(review): field name assumed to be "business_damaged" --
            # verify against the assess_section7 table definition.
            T("Access to cash"): "business_damaged",
            T("Current community priorities"): "rank_reconstruction_assistance",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section8",
        subheadings = {
            T("Access to education services"): "schools_total",
            T("Alternative places for studying"): "alternative_study_places_available",
            T("School activities"): "schools_open_pre_disaster",
            T("School attendance"): "children_0612_female",
            T("School assistance"): "school_assistance_available",
            T("Comments"): "comments"})

    s3mgr.configure("assess_section9",
        subheadings = {
            T("Physical Safety"): "vulnerable_groups_safe_env",
            T("Separated children, caregiving arrangements"): "children_separated",
            T("Persons in institutions"): "children_in_disabled_homes",
            T("Activities of children"): "child_activities_u12f_pre_disaster",
            T("Coping Activities"): "coping_activities_elderly",
            T("Current general needs"): "current_general_needs",
            T("Comments"): "comments"})

    # @ToDo Generalize this and make it available as a function that other
    # component prep methods can call to set the default for a join field.
    def prep(r):
        if r.interactive:
            # Pre-populate staff ID
            staff_id = auth.s3_logged_in_human_resource()
            if staff_id:
                r.table.staff_id.default = staff_id
            if r.method == "create":
                # If this assessment is being created as a component of a
                # shelter, it will have the shelter id in its vars.
                shelter_id = r.get_vars.get("rat.shelter_id", None)
                if shelter_id:
                    try:
                        shelter_id = int(shelter_id)
                    except ValueError:
                        pass
                    else:
                        r.table.shelter_id.default = shelter_id
        return True
    response.s3.prep = prep

    # Post-processor
    def postp(r, output):
        s3_action_buttons(r, deletable=False)
        # Redirect to update view to open tabs
        if r.representation == "html" and r.method == "create":
            r.next = r.url(method="",
                           id=s3mgr.get_session("assess", "rat"))
        return output
    response.s3.postp = postp

    # Over-ride the listadd since we're not a component here
    s3mgr.configure(tablename, create_next="", listadd=True)

    tabs = [(T("Identification"), None),
            (T("Demographic"), "section2"),
            (T("Shelter & Essential NFIs"), "section3"),
            (T("WatSan"), "section4"),
            (T("Health"), "section5"),
            (T("Nutrition"), "section6"),
            (T("Livelihood"), "section7"),
            (T("Education"), "section8"),
            (T("Protection"), "section9")]

    rheader = lambda r: rat_rheader(r, tabs)

    output = s3_rest_controller(rheader=rheader,
                                s3ocr_config={"tabs": tabs})

    response.s3.stylesheets.append("S3/rat.css")
    return output
# -----------------------------------------------------------------------------
def rat_rheader(r, tabs=None):
    """Build the resource header (rheader) for Rapid Assessment records.

    Shown above the tab navigation on interactive (HTML) pages: the
    assessment's location, date and the organisation(s) of up to two
    assessing staff members.

    @param r: the S3Request being processed
    @param tabs: list of (label, component) tuples for the rheader tabs
    @return: the rheader DIV, or None for non-HTML representations or
             when there is no record yet (e.g. on create forms)
    """
    if tabs is None:
        # avoid the mutable-default-argument pitfall of the original
        tabs = []
    if r.representation != "html" or r.name != "rat":
        return None
    report = r.record
    if not report:
        return None

    htable = db.hrm_human_resource
    rheader_tabs = s3_rheader_tabs(r, tabs, paging=True)

    location = report.location_id
    if location:
        location = gis_location_represent(location)

    organisation_represent = s3db.org_organisation_represent

    def _staff_organisation_id(staff):
        """Organisation id of the given hrm_human_resource id (or None)."""
        row = db(htable.id == staff).select(htable.organisation_id,
                                            limitby=(0, 1)).first()
        return row.organisation_id if row else None

    # Organisation of the primary assessor (may be absent)
    organisation_id = None
    organisation = None
    staff = report.staff_id
    if staff:
        organisation_id = _staff_organisation_id(staff)
        organisation = organisation_represent(organisation_id)

    # Organisation of the secondary assessor, suppressed when identical
    organisation2 = None
    staff = report.staff2_id
    if staff:
        organisation2_id = _staff_organisation_id(staff)
        if organisation2_id != organisation_id:
            # Fixed: the original represented organisation_id again here,
            # so the second organisation was never displayed correctly.
            # Initialising organisation_id above also fixes a NameError
            # when only staff2_id was set.
            organisation2 = organisation_represent(organisation2_id)

    if organisation2:
        orgs = "%s, %s" % (organisation, organisation2)
    else:
        orgs = organisation

    rheader = DIV(TABLE(
                    TR(
                        TH("%s: " % T("Location")), location,
                        TH("%s: " % T("Date")), report.date
                       ),
                    TR(
                        TH("%s: " % T("Organizations")), orgs,
                       )
                    ),
                  rheader_tabs)
    return rheader
# =============================================================================
# Flexible Impact Assessments
# =============================================================================
def assess_rheader(r, tabs=[]):
    """Resource header for Flexible Impact Assessments.

    Returns a DIV with date/time, location and assessor above the rheader
    tabs for HTML requests that carry a record; otherwise None.
    """
    if r.representation != "html":
        return None
    # Tabs and the person represent are prepared for every HTML request,
    # matching the behaviour of the other rheaders in this controller
    rheader_tabs = s3_rheader_tabs(r, tabs)
    assess = r.record
    person_represent = s3db.pr_person_represent
    if not assess:
        return None
    summary_row = TR(TH("%s: " % T("Date & Time")),
                     assess.datetime,
                     TH("%s: " % T("Location")),
                     gis_location_represent(assess.location_id),
                     TH("%s: " % T("Assessor")),
                     person_represent(assess.assessor_person_id),
                     )
    return DIV(TABLE(summary_row),
               rheader_tabs)
# -----------------------------------------------------------------------------
def assess():
    """RESTful CRUD controller for flexible impact assessments."""
    # Load Models
    s3mgr.load("assess_assess")
    s3mgr.load("impact_impact")
    tablename = "%s_%s" % (module, resourcename)
    table = db[tablename]  # touch the table so the DAL defines it

    # Pre-processor: mobile clients are redirected to the short form
    def prep(r):
        if session.s3.mobile and r.method == "create" and r.interactive:
            redirect(URL(f="assess_short_mobile"))
        return True
    response.s3.prep = prep

    tabs = [
        (T("Edit Details"), None),
        (T("Baselines"), "baseline"),
        (T("Impacts"), "impact"),
        (T("Summary"), "summary"),
        #(T("Requested"), "ritem"),
    ]
    return s3_rest_controller(rheader=lambda r: assess_rheader(r, tabs))
# -----------------------------------------------------------------------------
def impact_type():
    """RESTful CRUD controller for impact types."""
    # Load Models
    s3mgr.load("impact_impact")
    # This resource lives in the "impact" module rather than the module
    # this controller file belongs to, so prefix and name are explicit
    return s3_rest_controller("impact", "type")
# -----------------------------------------------------------------------------
def baseline_type():
    """RESTful CRUD controller for baseline types."""
    s3mgr.load("assess_assess")  # Load Models
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def baseline():
    """RESTful CRUD controller for baselines."""
    s3mgr.load("assess_assess")  # Load Models
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def summary():
    """RESTful CRUD controller for assessment summaries."""
    s3mgr.load("assess_assess")  # Load Models
    return s3_rest_controller()
# =============================================================================
def basic_assess():
    """ Custom page to hide the complexity of the Assessments/Impacts/Summary model: PC Browser version """
    # Assessments may only be submitted by authenticated users
    if not auth.is_logged_in():
        session.error = T("Need to be logged-in to be able to submit assessments")
        redirect(URL(c="default", f="user", args=["login"]))
    # Load Models
    s3mgr.load("assess_assess")
    s3mgr.load("impact_impact")
    # See if we've been created from an Incident
    ireport_id = request.vars.get("ireport_id")
    if ireport_id:
        # Location is the same as the calling Incident
        irs_location_id = s3_get_db_field_value(tablename = "irs_ireport",
                                                fieldname = "location_id",
                                                look_up_value = ireport_id)
        location = gis_location_represent(irs_location_id)
        # ("impact", n) tuples reference impact_type record ids - see
        # custom_assess() for how each tuple becomes a form row
        custom_assess_fields = (
            ("impact", 1),
            ("impact", 2),
            ("impact", 3),
            ("impact", 4),
            ("impact", 5),
            ("impact", 6),
            ("impact", 7),
            ("assess", "comments"),
            )
        form, form_accepted, assess_id = custom_assess(custom_assess_fields,
                                                       location_id=irs_location_id)
    else:
        location = None
        # No Incident context: let the user pick the location via a selector
        custom_assess_fields = (
            ("assess", "location_id", "selector"),
            ("impact", 1),
            ("impact", 2),
            ("impact", 3),
            ("impact", 4),
            ("impact", 5),
            ("impact", 6),
            ("impact", 7),
            ("assess", "comments"),
            )
        form, form_accepted, assess_id = custom_assess(custom_assess_fields)
    if form_accepted:
        session.confirmation = T("Basic Assessment Reported")
        # Open the newly created assessment on its Impacts tab
        redirect(URL(f="assess", args=[assess_id, "impact"]))
    return dict(title = T("Basic Assessment"),
                location = location,
                form = form)
# -----------------------------------------------------------------------------
def mobile_basic_assess():
    """ Custom page to hide the complexity of the Assessments/Impacts/Summary model: Mobile device version """
    # Mobile users must be logged in; silently bounce to the homepage
    if not auth.is_logged_in():
        redirect(URL(c="default", f="index"))
    # Load Models
    s3mgr.load("assess_assess")
    s3mgr.load("impact_impact")
    # "auto" => location comes from HTML5 geolocation fields, not a selector
    custom_assess_fields = (
        ("assess", "location_id", "auto"),
        ("impact", 1),
        ("impact", 2),
        ("impact", 3),
        ("impact", 4),
        ("impact", 5),
        ("impact", 6),
        ("impact", 7),
        ("assess", "comments"),
        )
    form, form_accepted, assess_id = custom_assess(custom_assess_fields)
    if form_accepted:
        # Replace the input form with a simple confirmation page that
        # links back to this controller for the next report
        form = FORM(H1(deployment_settings.get_system_name_short()),
                    H2(T("Short Assessment")),
                    P(T("Assessment Reported")),
                    A(T("Report Another Assessment..."),
                      _href = URL(r=request)
                      ),
                    _class = "mobile",
                    )
    return dict(form = form)
# -----------------------------------------------------------------------------
def color_code_severity_widget(widget, name=None):
    """Colour-code the severity options of a radio widget in place.

    Pairs the widget's options with green/yellow/orange/red (extra options
    beyond four are left untouched by zip) and sets each option's
    background colour. When *name* is given, the inner input's radio-group
    name is set as well.

    @param widget: sequence of options; option[0] and option[0][0] must
                   support item assignment (DOM-helper style)
    @param name: optional radio-group name; default None keeps existing
                 names. (The original required this argument, which made
                 the one-argument call in custom_assess() raise TypeError.)
    @return: the same *widget* object, mutated in place
    """
    for option, color in zip(widget, ["green", "yellow", "orange", "red"]):
        option[0]["_style"] = "background-color:%s;" % color
        if name is not None:
            option[0][0]["_name"] = name
    return widget
# -----------------------------------------------------------------------------
def custom_assess(custom_assess_fields, location_id=None):
    """
    Build a custom page to hide the complexity of the
    Assessments/Impacts/Summary model

    Each entry of custom_assess_fields is a tuple:
      ("assess",   <fieldname>[, "auto"|"selector"])  -> assess_assess field
      ("baseline", <baseline_type_id>)                -> baseline value input
      ("impact",   <impact_type_id>)                  -> value + severity
      ("summary",  <subsector_id>)                    -> severity widget
      ("title",    <text>)                            -> section heading

    Returns (form, form_accepted, assess_id); assess_id is None until the
    form has been accepted and the assess_assess record inserted.

    @ToDo: Improved validation
    - the existing .double JS isn't 100% reliable & this currently crashes
    the back-end upon submission if bad data slips through
    """
    # Load Models
    s3mgr.load("assess_assess")
    s3mgr.load("impact_impact")
    form_rows = []
    comment = ""
    # ------------------------------------------------------------------
    # Phase 1: build one labelled form row per requested field
    # ------------------------------------------------------------------
    for field in custom_assess_fields:
        name = "custom_%s_%s" % (field[0], field[1])
        if field[0] == "assess":
            if field[1] == "comments":
                label = "%s:" % db.assess_assess[ field[1] ].label
                #widget = db.assess_assess[ field[1] ].widget
                widget = TEXTAREA(_name = name,
                                  _class = "double",
                                  _type = "text")
            elif field[1] == "location_id":
                if field[2] == "auto":
                    # HTML5 Geolocate: hidden lat/lon filled client-side
                    label = "%s:" % T("Location")
                    #widget = db.assess_assess[ field[1] ].widget
                    widget = DIV(INPUT(_name = name,
                                       _type = "text"),
                                 INPUT(_name = "gis_location_lat",
                                       _id = "gis_location_lat",
                                       _type = "text"),
                                 INPUT(_name = "gis_location_lon",
                                       _id = "gis_location_lon",
                                       _type = "text"))
                else:
                    # Location Selector
                    label = "%s:" % T("Location")
                    #widget = SELECT(_id = name,
                    #                _class = "reference gis_location",
                    #                _name = "location_id")
                    #response.s3.gis.location_id = "custom_assess_location_id"
                    widget = db.assess_assess.location_id.widget(field=db.assess_assess.location_id,
                                                                 value="")
        elif field[0] == "baseline":
            # Label is the (translated) baseline type name
            label = s3_get_db_field_value(tablename = "assess_baseline_type",
                                          fieldname = "name",
                                          look_up_value = field[1])
            label = T(label)
            widget = INPUT(_name = name,
                           _class = "double",
                           _type = "text")
        elif field[0] == "impact":
            # Impact rows have a free-text value plus a colour-coded
            # severity radio group named "<name>_severity"
            label = "%s:" % T(s3_get_db_field_value(tablename = "impact_type",
                                                    fieldname = "name",
                                                    look_up_value = field[1]))
            value_widget = INPUT(_name = name,
                                 _class = "double",
                                 _type = "text")
            severity_widget = db.assess_summary.value.widget(db.impact_impact.severity,
                                                             0,
                                                             _name = "%s_severity" % name
                                                             )
            severity_widget = color_code_severity_widget(severity_widget,
                                                         "%s_severity" % name)
            widget = DIV(value_widget,
                         DIV("%s:" % T("Severity")),
                         severity_widget,
                         XML(" "))
        elif field[0] == "summary":
            label = "%s:" % T(org_subsector_represent(field[1]))
            widget = db.assess_summary.value.widget(db.assess_summary.value,
                                                    0, _name = name)
            # NOTE(review): color_code_severity_widget() is called without
            # its "name" argument here, unlike the impact branch - confirm
            # this branch is exercised / the argument is really optional
            widget = color_code_severity_widget(widget)
        # Add the field components to the form_rows
        if field[0] == "title":
            form_rows.append(TR(H3( field[1] )))
        else:
            form_rows = form_rows + list(s3_formstyle("%s__row" % name,
                                                      label,
                                                      widget,
                                                      comment))
    form = FORM(TABLE(*form_rows),
                INPUT(_value = T("Save"), _type = "submit"))
    assess_id = None
    form_accepted = form.accepts(request.vars, session)
    # ------------------------------------------------------------------
    # Phase 2: on submission, write assess + component records
    # ------------------------------------------------------------------
    if form_accepted:
        record_dict = {"organisation_id" : session.s3.organisation_id}
        # Collect the plain assess_assess fields from the request vars
        for field in custom_assess_fields:
            # NOTE(review): field[1] is "location_id" when built above, so
            # the "location" comparison never matches - confirm intent
            if field[0] != "assess" or field[1] == "location":
                continue
            # NOTE(review): double underscore ("custom__assess_") does not
            # match the "custom_assess_..." names used when building the
            # form above - confirm these values are actually picked up
            name = "custom__assess_%s" % field[1]
            if name in request.vars:
                record_dict[field[1]] = request.vars[name]
        # Add Location (must happen first)
        if "custom_assess_location_id" in request.vars:
            # Auto (HTML5 geolocation): create a gis_location record
            location_dict = {}
            if "gis_location_lat" in request.vars:
                location_dict["lat"] = request.vars["gis_location_lat"]
            if "gis_location_lon" in request.vars:
                location_dict["lon"] = request.vars["gis_location_lon"]
            location_dict["name"] = request.vars["custom_assess_location_id"]
            record_dict["location_id"] = s3db.gis_location.insert(**location_dict)
        if "location_id" in request.vars:
            # Location Selector
            record_dict["location_id"] = request.vars["location_id"]
        if location_id:
            # Location_id was passed to function (takes precedence)
            record_dict["location_id"] = location_id
        # Add Assessment
        assess_id = db.assess_assess.insert(**record_dict)
        # Maps from field kind to the FK column / component table used
        fk_dict = dict(baseline = "baseline_type_id",
                       impact = "impact_type_id",
                       summary = "subsector_id"
                       )
        component_dict = dict(baseline = "assess_baseline",
                              impact = "impact_impact",
                              summary = "assess_summary"
                              )
        # Add Assessment Components
        sector_summary = {}
        for field in custom_assess_fields:
            if field[0] == "assess":
                continue
            record_dict = {}
            name = "custom_%s_%s" % (field[0], field[1])
            if name in request.vars:
                record_dict["assess_id"] = assess_id
                record_dict[fk_dict[ field[0] ] ] = field[1]
                record_dict["value"] = request.vars[name]
                if field[0] == "impact":
                    severity = int(request.vars[name + "_severity"])
                    record_dict["severity"] = severity
                    if not record_dict["value"] and not record_dict["severity"]:
                        # Do not record impact if there is no data for it.
                        # Should we still average severity though? Now not doing this
                        continue
                    # Record the Severity per sector for the summary below
                    sector_id = \
                        s3_get_db_field_value(tablename = "impact_type",
                                              fieldname = "sector_id",
                                              look_up_value = field[1])
                    if sector_id in sector_summary.keys():
                        sector_summary[sector_id].append(severity)
                    elif sector_id:
                        sector_summary[sector_id] = [severity]
                db[component_dict[ field[0] ] ].insert(**record_dict)
        # Add Cluster summaries (average severity per sector)
        # @ToDo: make sure that this doesn't happen if there are sectors in the assess
        for sector_id in sector_summary.keys():
            severity_values = sector_summary[sector_id]
            db.assess_summary.insert(assess_id = assess_id,
                                     sector_id = sector_id,
                                     # Average severity
                                     value = sum(severity_values) / len(severity_values)
                                     )
        # Send Out Notification SMS
        #message = "Sahana: " + T("New Assessment reported from") + " %s by %s %s" % ( location_dict["name"],
        #                                                                              session.auth.user.first_name,
        #                                                                              session.auth.user.last_name
        #                                                                              )
        # Hard coded notification message for Demo
        #msg.send_by_pe_id( 3,
        #                   subject="",
        #                   message=message,
        #                   sender_pe_id = None,
        #                   pr_message_method = 2,
        #                   sender="",
        #                   fromaddress="")
    return form, form_accepted, assess_id
# END =========================================================================
| 40.662957 | 130 | 0.458967 |
acf22a7c30ca4b8f2a4b283fe1f935d525b9ecae | 9,389 | py | Python | dash_database/__init__.py | ThibTrip/dash_database | 138f2a69e64835cdacffe4320946bf71a8b96179 | [
"Unlicense"
] | 8 | 2019-09-09T17:37:27.000Z | 2022-01-29T18:18:53.000Z | dash_database/__init__.py | ThibTrip/dash_database | 138f2a69e64835cdacffe4320946bf71a8b96179 | [
"Unlicense"
] | 1 | 2019-09-08T14:24:56.000Z | 2019-09-08T14:24:56.000Z | dash_database/__init__.py | ThibTrip/dash_database | 138f2a69e64835cdacffe4320946bf71a8b96179 | [
"Unlicense"
] | 1 | 2021-03-24T17:02:10.000Z | 2021-03-24T17:02:10.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
"""Initializes the dash_database library.
Imports the DashDatabase class which is the main object of the dash_database library.
"""
import sqlitedict
import re
import typing
# # DashDatabase
# In[2]:
class DashDatabase():
    """The DashDatabase class manages the values of the users of a dash app.

    It creates a sqlitedict database (sqlitedict.SqliteDict) in autocommit mode which works more or
    less like a python dictionnary (sqlitedict_db['key'] = value) but the values are stored within a sqlite file.
    It is thread safe so you can use it in Dash like you would use redis for instance.
    More information on the sqlitedict GitHub repository.

    In each method of DashDatabase there is an argument "user_id".
    With DashDatabase you can use the same key names for all users and DashDatabase will internally prepend the user_id
    internally so that user A and user B have their own data under the same key.
    Under the hood the full key names look like this:

    123_password (user_id 123, key name password)
    456_password (user_id 456, key name password)

    The user id can be anything you want as long as it is a str or an int (it can be for instance a session id as well) but
    you need to make sure that the data is assigned to the correct user of your dash app! You should definitely see
    "Example integration within dash app" (below Usage in this docstring) to handle this.

    Args:
        db_location: ':memory:', None or a filepath
            if ':memory:' (default) the database exists in memory until the class instance expires
            This offers the best performance (see ./dash_database/tests/performance_tests.ipynb)
            if None the database exists in a temporary file path and gets deleted after the class instance expires
            if a filepath the database exists in given filepath and stays after the class instance expires

    Usage:
        >>> from dash_database import DashDatabase
        >>> # create an instance of DashDatabase
        >>> dash_db = DashDatabase(db_location = None) # if None it is created in a temp folder and deleted after use
        >>> # save values for user 123
        >>> dash_db.store_user_value(user_id = 123, key_name = 'account_id', value = 46887)
        >>> dash_db.store_user_value(user_id = 123, key_name = 'favorite_animal', value = 'monkey')
        >>> dash_db.list_stored_user_keys(123) # list the names of the keys used by the user
        ['account_id', 'favorite_animal']
        >>> # save values for user 456
        >>> dash_db.store_user_value(user_id = 456, key_name = 'account_id', value = 87874)
        >>> dash_db.store_user_value(456, 'favorite_color', 'green')
        >>> dash_db.list_stored_user_keys(456) # list the names of the keys used by the user
        ['account_id', 'favorite_color']
        >>> # get the value behind a user key
        >>> dash_db.get_user_value(user_id = 123, key_name = 'favorite_animal')
        'monkey'
        >>> # delete a key and its value for a user
        >>> dash_db.delete_user_value(user_id = 123, key_name = 'favorite_animal',
        ...                           # when if_not_exists is equal to "raise" you get an error if a key does not exist
        ...                           # if it is equal to "ignore" nothing happens if it does not exist (default)
        ...                           if_not_exists = 'ignore')
        >>> # delete all keys and their values for a user
        >>> dash_db.delete_all_user_values(456)
        >>> # list all keys of the users again for testing purposes
        >>> dash_db.list_stored_user_keys(123)
        ['account_id']
        >>> dash_db.list_stored_user_keys(456)
        []

    Example integration within dash app:
        See ./dash_database/README.MD
    """

    def __init__(self, db_location = ':memory:'):
        """Creates an instance of the DashDatabase class."""
        self.db = sqlitedict.SqliteDict(filename = db_location, autocommit = True)

        def _create_unique_key_name(user_id, key_name):
            """Creates a unique key name in sqlitedict by using the user id (see class docstring).

            Args:
                user_id: unique identifier (str or int)
                key_name: non unique identifier (str or int, gets attached to the user_id internaly)
            """
            return f'{user_id}_{key_name}'

        def _verify_types(user_id, key_name = None):
            """Raises TypeError when user_id/key_name are not str or int."""
            tested_types_user_id = tuple([str, int])
            if not isinstance(user_id, tested_types_user_id):
                raise TypeError(f'the type user_id can only be one of {tested_types_user_id} (for stability reasons as other types were not tested)')
            # key_name is not always used so check if it is not None first
            tested_types_key_name = tuple([str, int])
            if key_name is not None:
                if not isinstance(key_name, tested_types_key_name):
                    raise TypeError(f'the type key_name can only be one of {tested_types_key_name} (for stability reasons as other types were not tested)')

        # Plain helper functions (no self) stored on the instance; calling
        # them via self works because instance attributes are not bound
        self._create_unique_key_name = _create_unique_key_name
        self._verify_types = _verify_types

    def list_stored_user_keys(self, user_id):
        """Lists all keys in use from given user_id.

        Args:
            user_id: unique identifier (str or int)

        Usage:
            see docsting of DashDatabase
        """
        self._verify_types(user_id)
        # Match the full "<user_id>_" prefix and strip it exactly once.
        # This fixes two bugs of the original implementation:
        # 1) startswith(str(user_id)) also matched other users whose id
        #    merely started with the same characters (user 12 saw keys
        #    belonging to user 123)
        # 2) re.sub removed *every* occurrence of the prefix in the key,
        #    corrupting key names that happened to contain it again
        prefix = f'{user_id}_'
        return [key[len(prefix):] for key in self.db.keys()
                if key.startswith(prefix)]

    def store_user_value(self, user_id, key_name, value):
        """Store a value for given user id with given key name.

        Args:
            user_id: unique identifier (str or int)
            key_name: non unique identifier (str or int, gets attached to the user_id internally)
            value: can be any picklable object.

        Usage:
            see docsting of DashDatabase
        """
        self._verify_types(user_id, key_name)
        unique_key_name = self._create_unique_key_name(user_id, key_name)
        self.db[unique_key_name] = value

    def get_user_value(self, user_id, key_name):
        """Gets a value for given user id from given key name.
        Returns None if the key does not exist for given user id.

        Args:
            user_id: unique identifier (str or int)
            key_name: non unique identifier (str or int, attached to the user_id)

        Usage:
            see docsting of DashDatabase
        """
        self._verify_types(user_id, key_name)
        unique_key_name = self._create_unique_key_name(user_id, key_name)
        return self.db.get(unique_key_name)

    def delete_user_value(self, user_id, key_name, if_not_exists = 'ignore'):
        """Deletes a value and the corresponding key for given user id.

        Args:
            user_id: unique identifier (str or int)
            key_name: non unique identifier (str or int, attached to the user_id)
            if_not_exists: only relevant if the key does not exist, in this case:
                if 'ignore' (default) nothing happens
                if 'raise' raises a KeyError

        Usage:
            see docsting of DashDatabase
        """
        self._verify_types(user_id, key_name)
        unique_key_name = self._create_unique_key_name(user_id, key_name)
        if if_not_exists == 'ignore':
            try:
                self.db.pop(unique_key_name)
            except KeyError:
                pass
        elif if_not_exists == 'raise':
            self.db.pop(unique_key_name)
        else:
            raise ValueError('if_not_exists must be either "ignore" or "raise"')

    def delete_all_user_values(self, user_id):
        """Deletes all values and their corresponding keys for given user id.

        Args:
            user_id: unique identifier (str or int)

        Usage:
            see docsting of DashDatabase
        """
        self._verify_types(user_id)
        user_keys = self.list_stored_user_keys(user_id = user_id)
        for key in user_keys:
            self.delete_user_value(user_id = user_id, key_name = key)

    def __repr__(self):
        """Create representation for DashDabase (show location of database)."""
        repr_str = f'DashDatabase at "{self.db.filename}"'
        return repr_str
# Run the doctests embedded in this module when executed as a script;
# raise_on_error makes the first failing example abort the run.
if __name__ == "__main__":
    import doctest
    doctest.testmod(raise_on_error = True, verbose = True)
acf22a8a8ff293c77e43ba08dbe6623418739308 | 334 | py | Python | bifrost_crypto_twitter/src/post_twitter.py | lejcruz/bifrost_crypto_twitter | 1d28e3bdce04f70f549d1bdefdbdd2098f41e2fb | [
"MIT"
] | null | null | null | bifrost_crypto_twitter/src/post_twitter.py | lejcruz/bifrost_crypto_twitter | 1d28e3bdce04f70f549d1bdefdbdd2098f41e2fb | [
"MIT"
] | null | null | null | bifrost_crypto_twitter/src/post_twitter.py | lejcruz/bifrost_crypto_twitter | 1d28e3bdce04f70f549d1bdefdbdd2098f41e2fb | [
"MIT"
] | null | null | null | import tweepy
def post_twitter(text, consumer_key, consumer_secret, access_token, access_token_secret):
    """Post *text* as a tweet via the Twitter API v2 (tweepy.Client).

    Uses OAuth 1.0a user-context credentials (the four key/token arguments
    of a Twitter developer app). Returns nothing; tweepy errors propagate
    to the caller.
    """
    client = tweepy.Client(
        consumer_key=consumer_key, consumer_secret=consumer_secret,
        access_token=access_token, access_token_secret=access_token_secret
    )
    client.create_tweet(
        text=text
    )
| 23.857143 | 89 | 0.745509 |
acf22aa54136944b6cc709b8b47fb59193b41f5d | 726 | py | Python | webvirtcloud/wsgi_custom.py | NGXTDN/webvirtcloud | 82ded115b95fd5c24bdec8d38533fb0e20945276 | [
"Apache-2.0"
] | 1 | 2020-12-09T19:07:40.000Z | 2020-12-09T19:07:40.000Z | webvirtcloud/wsgi_custom.py | NGXTDN/webvirtcloud | 82ded115b95fd5c24bdec8d38533fb0e20945276 | [
"Apache-2.0"
] | 1 | 2021-05-11T11:50:09.000Z | 2021-05-11T11:50:09.000Z | webvirtcloud/wsgi_custom.py | NGXTDN/webvirtcloud | 82ded115b95fd5c24bdec8d38533fb0e20945276 | [
"Apache-2.0"
] | 1 | 2021-05-20T08:47:14.000Z | 2021-05-20T08:47:14.000Z | """
WSGI config for webvirtcloud project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# execfile('/srv/webvirtcloud/venv/bin/activate_this.py', dict(__file__='/srv/webvirtcloud/venv/bin/activate_this.py'))
exec(
compile(
open("/srv/webvirtcloud/venv/bin/activate", "rb").read(),
"/srv/webvirtcloud/venv/bin/activate",
"exec",
)
)
sys.path.append("/srv/webvirtcloud")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webvirtcloud.settings")
application = get_wsgi_application()
| 25.928571 | 119 | 0.73416 |
acf22bc8bc9a1a62cd0b498f056edd5d80e6bafe | 4,634 | py | Python | examples/convert_mne_sample.py | kingjr/mne-bids | 3a4543076912cebbc89a5f0b9433cda1b9e288b8 | [
"BSD-3-Clause"
] | null | null | null | examples/convert_mne_sample.py | kingjr/mne-bids | 3a4543076912cebbc89a5f0b9433cda1b9e288b8 | [
"BSD-3-Clause"
] | null | null | null | examples/convert_mne_sample.py | kingjr/mne-bids | 3a4543076912cebbc89a5f0b9433cda1b9e288b8 | [
"BSD-3-Clause"
] | null | null | null | """
.. currentmodule:: mne_bids
.. _ex-convert-mne-sample:
==========================================
02. Convert MNE sample data to BIDS format
==========================================
In this example we will use MNE-BIDS to organize the MNE sample data according
to the BIDS standard.
In a second step we will read the organized dataset using MNE-BIDS.
"""
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD (3-clause)
# %%
# First we import some basic Python libraries, followed by MNE-Python and its
# sample data, and then finally the MNE-BIDS functions we need for this example
import os.path as op
import shutil
import mne
from mne.datasets import sample
from mne_bids import (write_raw_bids, read_raw_bids, write_meg_calibration,
write_meg_crosstalk, BIDSPath, print_dir_tree)
from mne_bids.stats import count_events
# %%
# Now we can read the MNE sample data. We define an `event_id` based on our
# knowledge of the data, to give meaning to events in the data.
#
# With `raw_fname` and `events_data`, we determine where to get the sample data
# from. `output_path` determines where we will write the BIDS conversion to.
data_path = sample.data_path()
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
events_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
output_path = op.join(data_path, '..', 'MNE-sample-data-bids')
# %%
# To ensure the output path doesn't contain any leftover files from previous
# tests and example runs, we simply delete it.
#
# .. warning:: Do not delete directories that may contain important data!
#
if op.exists(output_path):
shutil.rmtree(output_path)
# %%
#
# .. note::
#
# ``mne-bids`` will try to infer as much information from the data as
# possible to then save this data in BIDS-specific "sidecar" files. For
# example the manufacturer information, which is inferred from the data file
# extension. However, sometimes inferring is ambiguous (e.g., if your file
# format is non-standard for the manufacturer). In these cases, MNE-BIDS does
# *not* guess and you will have to update your BIDS fields manually.
#
# Based on our path definitions above, we read the raw data file, define
# a new BIDS name for it, and then run the automatic BIDS conversion.
raw = mne.io.read_raw_fif(raw_fname)
raw.info['line_freq'] = 60 # specify power line frequency as required by BIDS
bids_path = BIDSPath(subject='01', session='01',
task='audiovisual', run='01', root=output_path)
write_raw_bids(raw, bids_path, events_data=events_data,
event_id=event_id, overwrite=True)
# %%
# The sample MEG dataset comes with fine-calibration and crosstalk files that
# are required when processing Elekta/Neuromag/MEGIN data using MaxFilter®.
# Let's store these data in appropriate places, too.
cal_fname = op.join(data_path, 'SSS', 'sss_cal_mgh.dat')
ct_fname = op.join(data_path, 'SSS', 'ct_sparse_mgh.fif')
write_meg_calibration(cal_fname, bids_path)
write_meg_crosstalk(ct_fname, bids_path)
# %%
# Now let's see the structure of the BIDS folder we created.
print_dir_tree(output_path)
# %%
# Now let's get an overview of the events on the whole dataset
counts = count_events(output_path)
counts
# %%
# A big advantage of having data organized according to BIDS is that software
# packages can automate your workflow. For example, reading the data back
# into MNE-Python can easily be done using :func:`read_raw_bids`.
raw = read_raw_bids(bids_path=bids_path)
# %%
# The resulting data is already in a convenient form to create epochs and
# evoked data.
events, event_id = mne.events_from_annotations(raw)
epochs = mne.Epochs(raw, events, event_id)
epochs['Auditory'].average().plot()
# %%
# It is trivial to retrieve the path of the fine-calibration and crosstalk
# files, too.
print(bids_path.meg_calibration_fpath)
print(bids_path.meg_crosstalk_fpath)
# %%
# The README created by :func:`write_raw_bids` also takes care of the citation
# for mne-bids. If you are preparing a manuscript, please make sure to also
# cite MNE-BIDS there.
readme = op.join(output_path, 'README')
with open(readme, 'r', encoding='utf-8-sig') as fid:
text = fid.read()
print(text)
| 33.824818 | 79 | 0.719681 |
acf22c566a6c34bdcab4a01fb54567e95fd009bc | 22,358 | py | Python | edk2basetools/UPT/Xml/PcdXml.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | null | null | null | edk2basetools/UPT/Xml/PcdXml.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | 1 | 2020-04-14T22:23:01.000Z | 2020-04-15T06:47:53.000Z | edk2basetools/UPT/Xml/PcdXml.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | null | null | null | ## @file
# This file is used to parse a PCD file of .PKG file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
PcdXml
'''
##
# Import Modules
#
from edk2basetools.Library.Xml.XmlRoutines import XmlElement
from edk2basetools.Library.Xml.XmlRoutines import XmlAttribute
from edk2basetools.Library.Xml.XmlRoutines import XmlNode
from edk2basetools.Library.Xml.XmlRoutines import CreateXmlElement
from edk2basetools.Library.Xml.XmlRoutines import XmlList
from edk2basetools.Library.StringUtils import GetStringOfList
from edk2basetools.Library.StringUtils import ConvertNEToNOTEQ
from edk2basetools.Library.StringUtils import ConvertNOTEQToNE
from edk2basetools.Library import GlobalData
from Object.POM.CommonObject import PcdObject
from Object.POM.CommonObject import PcdErrorObject
from Xml.CommonXml import HelpTextXml
from Xml.CommonXml import PromptXml
from Xml.CommonXml import CommonDefinesXml
from Xml.XmlParserMisc import GetHelpTextList
from Xml.XmlParserMisc import GetPromptList
import re
##
# PcdErrorXml
#
class PcdErrorXml(object):
    def __init__(self):
        # Fields mirror the sub-elements of a <PcdError> node in a UPT
        # .PKG XML file; all are filled by FromXml() and read by ToXml().
        self.ValidValueList = ''
        self.ValidValueListLang = ''
        self.ValidValueRange = ''
        self.Expression = ''
        self.ErrorNumber = ''
        self.ErrorMessage = []
    def FromXml(self, Item, Key):
        """Parse a <PcdError> XML node and return a PcdErrorObject.

        @param Item: DOM node containing the PcdError sub-elements
        @param Key:  XPath-style prefix of the PcdError element
        @return:     a populated PcdErrorObject
        """
        self.ValidValueList = XmlElement(Item, '%s/ValidValueList' % Key)
        self.ValidValueListLang = \
            XmlAttribute(XmlNode(Item, '%s/ValidValueList' % Key), 'Lang')
        # The XML stores an expression; internally the range form is kept
        # and converted back to an expression in ToXml()
        self.ValidValueRange = self.TransferValidEpxr2ValidRange(XmlElement(Item, '%s/ValidValueRange' % Key))
        self.Expression = XmlElement(Item, '%s/Expression' % Key)
        self.ErrorNumber = XmlElement(Item, '%s/ErrorNumber' % Key)
        # ErrorMessage may appear multiple times, once per language
        for ErrMsg in XmlList(Item, '%s/ErrorMessage' % Key):
            ErrorMessageString = XmlElement(ErrMsg, 'ErrorMessage')
            ErrorMessageLang = \
                XmlAttribute(XmlNode(ErrMsg, 'ErrorMessage'), 'Lang')
            self.ErrorMessage.append((ErrorMessageLang, ErrorMessageString))
        Error = PcdErrorObject()
        Error.SetValidValue(self.ValidValueList)
        Error.SetValidValueLang(self.ValidValueListLang)
        Error.SetValidValueRange(self.ValidValueRange)
        Error.SetExpression(self.Expression)
        Error.SetErrorNumber(self.ErrorNumber)
        Error.SetErrorMessageList(self.ErrorMessage)
        return Error
    def ToXml(self, PcdError, Key):
        """Serialize a PcdErrorObject back into a <PcdError> XML element.

        @param PcdError: the PcdErrorObject to serialize
        @param Key:      tag name for the created root element
        @return:         the created XML element
        """
        if self.Expression:
            # no-op; presumably kept so the method uses an instance
            # attribute (silences "could be a function" lint) - confirm
            pass
        AttributeList = []
        NodeList = []
        if PcdError.GetValidValue():
            Element1 = \
                CreateXmlElement('ValidValueList', PcdError.GetValidValue(), [], \
                                 [['Lang', PcdError.GetValidValueLang()]])
            NodeList.append(Element1)
        if PcdError.GetValidValueRange():
            # The internal range form is converted back to expression form
            TansferedRangeStr = self.TransferValidRange2Expr(PcdError.GetTokenSpaceGuidCName(),
                                                             PcdError.GetCName(),
                                                             PcdError.GetValidValueRange())
            Element1 = \
                CreateXmlElement('ValidValueRange', \
                                 TansferedRangeStr, [], [])
            NodeList.append(Element1)
        if PcdError.GetExpression():
            NodeList.append(['Expression', PcdError.GetExpression()])
        if PcdError.GetErrorNumber():
            NodeList.append(['ErrorNumber', PcdError.GetErrorNumber()])
        # One ErrorMessage element per (language, message) pair
        for Item in PcdError.GetErrorMessageList():
            Element = \
                CreateXmlElement('ErrorMessage', Item[1], [], [['Lang', Item[0]]])
            NodeList.append(Element)
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    def TransferValidRange2Expr(self, TokenSpaceGuidCName, CName, ValidRange):
        """Convert a valid-value range string into expression form.

        Rewrites "x - y" ranges as "(<Pcd> GE x) AND (<Pcd> LE y)" and
        prefixes bare relational forms ("GE 5" etc.) with the full PCD
        name "<TokenSpaceGuidCName>.<CName>".

        NOTE: the four conversions are applied in a fixed order (HEX2,
        INT2, HEX1, INT1) so that the relational forms are rewritten
        before the dash-ranges; do not reorder.
        """
        if self.Expression:
            # no-op; presumably kept to reference an instance attribute
            # (silences "could be a function" lint) - confirm
            pass
        INT_RANGE_PATTERN1 = '[\t\s]*[0-9]+[\t\s]*-[\t\s]*[0-9]+'
        INT_RANGE_PATTERN2 = '[\t\s]*(LT|GT|LE|GE|XOR|EQ)[\t\s]+\d+[\t\s]*'
        HEX_RANGE_PATTERN1 = \
            '[\t\s]*0[xX][a-fA-F0-9]+[\t\s]*-[\t\s]*0[xX][a-fA-F0-9]+'
        HEX_RANGE_PATTERN2 = '[\t\s]*(LT|GT|LE|GE|XOR|EQ)[\t\s]+0[xX][a-fA-F0-9]+[\t\s]*'
        IntMatch1 = re.compile(INT_RANGE_PATTERN1)
        IntMatch2 = re.compile(INT_RANGE_PATTERN2)
        HexMatch1 = re.compile(HEX_RANGE_PATTERN1)
        HexMatch2 = re.compile(HEX_RANGE_PATTERN2)
        PcdName = '.'.join([TokenSpaceGuidCName, CName])
        HexMatchedList = []
        IntMatchedList = []
        #
        # Convert HEX2 format range (relational operator + hex literal)
        #
        if HexMatch2:
            for MatchObj in HexMatch2.finditer(ValidRange):
                MatchStr = MatchObj.group()
                TransferedRangeStr = ' '.join(['', PcdName, MatchStr.strip()])
                ValidRange = ValidRange.replace(MatchStr, TransferedRangeStr)
        #
        # Convert INT2 format range (relational operator + decimal literal)
        #
        if IntMatch2:
            for MatchObj in IntMatch2.finditer(ValidRange):
                MatchStr = MatchObj.group()
                TransferedRangeStr = ' '.join(['', PcdName, MatchStr.strip()])
                ValidRange = ValidRange.replace(MatchStr, TransferedRangeStr)
        #
        # Convert HEX1 format range ("0xA - 0xF" dash ranges)
        #
        if HexMatch1:
            HexMatchedList += HexMatch1.findall(ValidRange)
        for MatchStr in HexMatchedList:
            RangeItemList = MatchStr.strip().split('-')
            TransferedRangeStr = '(%s GE %s) AND (%s LE %s)' % \
                (PcdName, RangeItemList[0].strip(), PcdName, RangeItemList[1].strip())
            ValidRange = ValidRange.replace(MatchStr, TransferedRangeStr)
        #
        # Convert INT1 format range ("1 - 5" dash ranges)
        #
        if IntMatch1:
            IntMatchedList += IntMatch1.findall(ValidRange)
        for MatchStr in IntMatchedList:
            RangeItemList = MatchStr.strip().split('-')
            TransferedRangeStr = '(%s GE %s) AND (%s LE %s)' % \
                (PcdName, RangeItemList[0].strip(), PcdName, RangeItemList[1].strip())
            ValidRange = ValidRange.replace(MatchStr, TransferedRangeStr)
        return ValidRange
def TransferValidEpxr2ValidRange(self, ValidRangeExpr):
    """Rewrite a PcdError expression back into ValidRange notation.

    Inverse of TransferValidRange2Expr: forms such as
    ``(<Pcd> GE 1) AND (<Pcd> LE 5)`` collapse to ``1 - 5`` (smaller
    bound first), and ``<Pcd> LT 5`` drops the PCD name prefix.
    (Note: the method name typo 'Epxr' is part of the public API and is
    kept for compatibility.)

    @param ValidRangeExpr: expression text to convert
    @return: the converted ValidRange string
    """
    # Historical no-op kept from the original code.
    if self.Expression:
        pass
    # Raw strings so that '\s', '\d' and '\.' reach the regex engine
    # verbatim instead of being (invalid) Python string escapes.
    PCD_PATTERN = \
        r'[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*[\t\s]*\.[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*[\t\s]*'
    IntPattern1 = \
        r'[\t\s]*\([\t\s]*' + PCD_PATTERN + r'[\t\s]+GE[\t\s]+\d+[\t\s]*\)[\t\s]+AND[\t\s]+\([\t\s]*' + \
        PCD_PATTERN + r'[\t\s]+LE[\t\s]+\d+[\t\s]*\)'
    # Defensive no-op in practice (the pattern contains no literal spaces).
    IntPattern1 = IntPattern1.replace(' ', '')
    IntPattern2 = r'[\t\s]*' + PCD_PATTERN + r'[\t\s]+(LT|GT|LE|GE|XOR|EQ)[\t\s]+\d+[\t\s]*'
    HexPattern1 = \
        r'[\t\s]*\([\t\s]*' + PCD_PATTERN + r'[\t\s]+GE[\t\s]+0[xX][0-9a-fA-F]+[\t\s]*\)[\t\s]+AND[\t\s]+\([\t\s]*' + \
        PCD_PATTERN + r'[\t\s]+LE[\t\s]+0[xX][0-9a-fA-F]+[\t\s]*\)'
    HexPattern1 = HexPattern1.replace(' ', '')
    # NOTE(review): '[0-9a-zA-Z]' (not '[0-9a-fA-F]') is kept from the
    # original — it accepts non-hex letters after '0x'; confirm intent.
    HexPattern2 = r'[\t\s]*' + PCD_PATTERN + r'[\t\s]+(LT|GT|LE|GE|XOR|EQ)[\t\s]+0[xX][0-9a-zA-Z]+[\t\s]*'
    #
    # Do the Hex1 conversion: '(<Pcd> GE 0xA) AND (<Pcd> LE 0xB)' -> '0xA - 0xB'
    #
    HexMatchedList = re.compile(HexPattern1).findall(ValidRangeExpr)
    HexRangeDict = {}
    for HexMatchedItem in HexMatchedList:
        #
        # To match items on both sides of '-'
        #
        RangeItemList = re.compile(r'[\t\s]*0[xX][0-9a-fA-F]+[\t\s]*').findall(HexMatchedItem)
        if RangeItemList and len(RangeItemList) == 2:
            HexRangeDict[HexMatchedItem] = RangeItemList
    for Key in HexRangeDict.keys():
        # Emit the smaller bound first regardless of the order in the
        # expression.
        MaxItem = MixItem = ''
        if int(HexRangeDict[Key][0], 16) > int(HexRangeDict[Key][1], 16):
            MaxItem = HexRangeDict[Key][0]
            MixItem = HexRangeDict[Key][1]
        else:
            MaxItem = HexRangeDict[Key][1]
            MixItem = HexRangeDict[Key][0]
        Range = ' %s - %s' % (MixItem.strip(), MaxItem.strip())
        ValidRangeExpr = ValidRangeExpr.replace(Key, Range)
    #
    # Do the INT1 conversion: '(<Pcd> GE 1) AND (<Pcd> LE 5)' -> '1 - 5'
    #
    IntRangeDict = {}
    IntMatchList = re.compile(IntPattern1).findall(ValidRangeExpr)
    for MatchedItem in IntMatchList:
        #
        # To match items on both sides of '-'
        #
        RangeItemList = re.compile(r'[\t\s]*\d+[\t\s]*').findall(MatchedItem)
        if RangeItemList and len(RangeItemList) == 2:
            IntRangeDict[MatchedItem] = RangeItemList
    for Key in IntRangeDict.keys():
        MaxItem = MixItem = ''
        if int(IntRangeDict[Key][0]) > int(IntRangeDict[Key][1]):
            MaxItem = IntRangeDict[Key][0]
            MixItem = IntRangeDict[Key][1]
        else:
            MaxItem = IntRangeDict[Key][1]
            MixItem = IntRangeDict[Key][0]
        Range = ' %s - %s' % (MixItem.strip(), MaxItem.strip())
        ValidRangeExpr = ValidRangeExpr.replace(Key, Range)
    #
    # Do the HEX2 conversion: strip the PCD name from '<Pcd> GE 0xA'
    #
    for MatchObj in re.compile(HexPattern2).finditer(ValidRangeExpr):
        MatchStr = MatchObj.group()
        Range = re.compile(PCD_PATTERN).sub(' ', MatchStr)
        ValidRangeExpr = ValidRangeExpr.replace(MatchStr, Range)
    #
    # Do the INT2 conversion: strip the PCD name from '<Pcd> GE 5'
    #
    for MatchObj in re.compile(IntPattern2).finditer(ValidRangeExpr):
        MatchStr = MatchObj.group()
        Range = re.compile(PCD_PATTERN).sub(' ', MatchStr)
        ValidRangeExpr = ValidRangeExpr.replace(MatchStr, Range)
    return ValidRangeExpr
def __str__(self):
    """Debug representation listing every PcdError field."""
    field_values = (self.ValidValueList, self.ValidValueListLang,
                    self.ValidValueRange, self.Expression,
                    self.ErrorNumber, self.ErrorMessage)
    template = ("ValidValueList = %s ValidValueListLang = %s "
                "ValidValueRange = %s Expression = %s "
                "ErrorNumber = %s %s")
    return template % field_values
##
# PcdEntryXml
#
class PcdEntryXml(object):
    """XML (de)serializer for a single PCD entry.

    Converts between an XML DOM node and a ``PcdObject``.  Several
    FromXml*/ToXml* variants exist because distribution packages,
    modules and as-built modules each use a slightly different XML
    layout for PCDs (see the ``##`` comments before each method).

    NOTE(review): some schemas spell the tag 'TokenSpaceGuidCname'
    (lowercase 'n') and others 'TokenSpaceGuidCName' — this looks
    intentional per-schema, but confirm against the XSD.
    """
    def __init__(self):
        # All fields default to empty strings/lists and are populated by
        # one of the FromXml* methods.
        self.PcdItemType = ''
        self.PcdUsage = ''
        self.TokenSpaceGuidCName = ''
        self.TokenSpaceGuidValue = ''
        self.Token = ''
        self.CName = ''
        self.PcdCName = ''
        self.DatumType = ''
        self.ValidUsage = ''
        self.DefaultValue = ''
        self.MaxDatumSize = ''
        self.Value = ''
        self.Offset = ''
        self.CommonDefines = CommonDefinesXml()
        self.Prompt = []
        self.HelpText = []
        self.PcdError = []
    ##
    # AsBuilt will use FromXml
    #
    def FromXml(self, Item, Key):
        """Parse an as-built PCD entry from DOM node *Item* under tag *Key*.

        Returns a populated PcdObject.
        """
        self.PcdItemType = \
            XmlAttribute(XmlNode(Item, '%s' % Key), 'PcdItemType')
        self.PcdUsage = XmlAttribute(XmlNode(Item, '%s' % Key), 'PcdUsage')
        self.TokenSpaceGuidCName = \
            XmlElement(Item, '%s/TokenSpaceGuidCname' % Key)
        self.TokenSpaceGuidValue = \
            XmlElement(Item, '%s/TokenSpaceGuidValue' % Key)
        self.Token = XmlElement(Item, '%s/Token' % Key)
        self.CName = XmlElement(Item, '%s/CName' % Key)
        self.PcdCName = XmlElement(Item, '%s/PcdCName' % Key)
        self.DatumType = XmlElement(Item, '%s/DatumType' % Key)
        self.ValidUsage = XmlElement(Item, '%s/ValidUsage' % Key)
        # Binary INFs carry the value in a <Value> element instead of
        # <DefaultValue>.
        if not GlobalData.gIS_BINARY_INF:
            self.DefaultValue = XmlElement(Item, '%s/DefaultValue' % Key)
        else:
            self.DefaultValue = XmlElement(Item, '%s/Value' % Key)
        self.MaxDatumSize = XmlElement(Item, '%s/MaxDatumSize' % Key)
        self.Value = XmlElement(Item, '%s/Value' % Key)
        self.Offset = XmlElement(Item, '%s/Offset' % Key)
        self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
        for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
            HelpTextObj = HelpTextXml()
            HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
            self.HelpText.append(HelpTextObj)
        for PcdErrorItem in XmlList(Item, '%s/PcdError' % Key):
            PcdErrorObjXml = PcdErrorXml()
            PcdErrorObj = PcdErrorObjXml.FromXml(PcdErrorItem, 'PcdError')
            self.PcdError.append(PcdErrorObj)
        # 'NOT EQ' in XML becomes 'NE' in the in-memory representation.
        self.DefaultValue = ConvertNOTEQToNE(self.DefaultValue)
        PcdEntry = PcdObject()
        PcdEntry.SetSupArchList(self.CommonDefines.SupArchList)
        PcdEntry.SetTokenSpaceGuidCName(self.TokenSpaceGuidCName)
        PcdEntry.SetTokenSpaceGuidValue(self.TokenSpaceGuidValue)
        PcdEntry.SetToken(self.Token)
        PcdEntry.SetOffset(self.Offset)
        PcdEntry.SetCName(self.CName)
        PcdEntry.SetPcdCName(self.PcdCName)
        PcdEntry.SetDatumType(self.DatumType)
        PcdEntry.SetValidUsage(self.ValidUsage)
        PcdEntry.SetDefaultValue(self.DefaultValue)
        PcdEntry.SetMaxDatumSize(self.MaxDatumSize)
        PcdEntry.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
        PcdEntry.SetItemType(self.PcdItemType)
        PcdEntry.SetHelpTextList(GetHelpTextList(self.HelpText))
        PcdEntry.SetPcdErrorsList(self.PcdError)
        return PcdEntry
    ##
    # Package will use FromXml2
    #
    def FromXml2(self, Item, Key):
        """Parse a package-level PCD declaration (includes Prompt elements).

        Returns a populated PcdObject.
        """
        self.TokenSpaceGuidCName = \
            XmlElement(Item, '%s/TokenSpaceGuidCname' % Key)
        self.Token = XmlElement(Item, '%s/Token' % Key)
        self.CName = XmlElement(Item, '%s/CName' % Key)
        self.DatumType = XmlElement(Item, '%s/DatumType' % Key)
        self.ValidUsage = XmlElement(Item, '%s/ValidUsage' % Key)
        self.DefaultValue = XmlElement(Item, '%s/DefaultValue' % Key)
        self.MaxDatumSize = XmlElement(Item, '%s/MaxDatumSize' % Key)
        self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
        for PromptItem in XmlList(Item, '%s/Prompt' % Key):
            PromptObj = PromptXml()
            PromptObj.FromXml(PromptItem, '%s/Prompt' % Key)
            self.Prompt.append(PromptObj)
        for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
            HelpTextObj = HelpTextXml()
            HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
            self.HelpText.append(HelpTextObj)
        for PcdErrorItem in XmlList(Item, '%s/PcdError' % Key):
            PcdErrorObjXml = PcdErrorXml()
            PcdErrorObj = PcdErrorObjXml.FromXml(PcdErrorItem, 'PcdError')
            self.PcdError.append(PcdErrorObj)
        self.DefaultValue = ConvertNOTEQToNE(self.DefaultValue)
        PcdEntry = PcdObject()
        PcdEntry.SetSupArchList(self.CommonDefines.SupArchList)
        PcdEntry.SetSupModuleList(self.CommonDefines.SupModList)
        PcdEntry.SetTokenSpaceGuidCName(self.TokenSpaceGuidCName)
        PcdEntry.SetToken(self.Token)
        PcdEntry.SetCName(self.CName)
        PcdEntry.SetDatumType(self.DatumType)
        PcdEntry.SetValidUsage(self.ValidUsage)
        PcdEntry.SetDefaultValue(self.DefaultValue)
        PcdEntry.SetMaxDatumSize(self.MaxDatumSize)
        PcdEntry.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
        PcdEntry.SetPromptList(GetPromptList(self.Prompt))
        PcdEntry.SetHelpTextList(GetHelpTextList(self.HelpText))
        PcdEntry.SetPcdErrorsList(self.PcdError)
        return PcdEntry
    ##
    # Module will use FromXml3
    #
    def FromXml3(self, Item, Key):
        """Parse a module-level PCD reference (minimal field set).

        Returns a populated PcdObject.
        """
        self.PcdItemType = \
            XmlAttribute(XmlNode(Item, '%s' % Key), 'PcdItemType')
        self.PcdUsage = XmlAttribute(XmlNode(Item, '%s' % Key), 'PcdUsage')
        self.TokenSpaceGuidCName = \
            XmlElement(Item, '%s/TokenSpaceGuidCName' % Key)
        self.CName = XmlElement(Item, '%s/CName' % Key)
        self.DefaultValue = XmlElement(Item, '%s/DefaultValue' % Key)
        self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
        for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
            HelpTextObj = HelpTextXml()
            HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
            self.HelpText.append(HelpTextObj)
        for PcdErrorItem in XmlList(Item, '%s/PcdError' % Key):
            PcdErrorObj = PcdErrorXml()
            PcdErrorObj.FromXml(PcdErrorItem, 'PcdError')
            self.PcdError.append(PcdErrorObj)
        self.DefaultValue = ConvertNOTEQToNE(self.DefaultValue)
        PcdEntry = PcdObject()
        PcdEntry.SetSupArchList(self.CommonDefines.SupArchList)
        PcdEntry.SetTokenSpaceGuidCName(self.TokenSpaceGuidCName)
        PcdEntry.SetCName(self.CName)
        PcdEntry.SetValidUsage(self.PcdUsage)
        PcdEntry.SetDefaultValue(self.DefaultValue)
        PcdEntry.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
        PcdEntry.SetItemType(self.PcdItemType)
        PcdEntry.SetHelpTextList(GetHelpTextList(self.HelpText))
        PcdEntry.SetPcdErrorsList(self.PcdError)
        return PcdEntry
    def ToXml(self, PcdEntry, Key):
        """Serialize *PcdEntry* to a DOM element named *Key* (full layout)."""
        # Historical no-op kept from the original code.
        if self.PcdCName:
            pass
        # 'NE' in memory is written back as 'NOT EQ' in XML.
        DefaultValue = ConvertNEToNOTEQ(PcdEntry.GetDefaultValue())
        AttributeList = \
            [['SupArchList', GetStringOfList(PcdEntry.GetSupArchList())], \
             ['PcdUsage', PcdEntry.GetValidUsage()], \
             ['PcdItemType', PcdEntry.GetItemType()], \
             ['FeatureFlag', PcdEntry.GetFeatureFlag()],
            ]
        NodeList = [['TokenSpaceGuidCname', PcdEntry.GetTokenSpaceGuidCName()],
                    ['TokenSpaceGuidValue', PcdEntry.GetTokenSpaceGuidValue()],
                    ['Token', PcdEntry.GetToken()],
                    ['CName', PcdEntry.GetCName()],
                    ['DatumType', PcdEntry.GetDatumType()],
                    ['ValidUsage', GetStringOfList(PcdEntry.GetValidUsage())],
                    ['DefaultValue', DefaultValue],
                    ['MaxDatumSize', PcdEntry.GetMaxDatumSize()],
                    ['Offset', PcdEntry.GetOffset()],
                   ]
        for Item in PcdEntry.GetHelpTextList():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Item))
        for Item in PcdEntry.GetPcdErrorsList():
            Tmp = PcdErrorXml()
            NodeList.append(Tmp.ToXml(Item, 'PcdError'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    ##
    # Package will use ToXml2
    #
    def ToXml2(self, PcdEntry, Key):
        """Serialize *PcdEntry* for a package (includes Prompt elements)."""
        # Historical no-op kept from the original code.
        if self.PcdCName:
            pass
        DefaultValue = ConvertNEToNOTEQ(PcdEntry.GetDefaultValue())
        AttributeList = \
            [['SupArchList', GetStringOfList(PcdEntry.GetSupArchList())], \
             ['SupModList', GetStringOfList(PcdEntry.GetSupModuleList())]
            ]
        NodeList = [['TokenSpaceGuidCname', PcdEntry.GetTokenSpaceGuidCName()],
                    ['Token', PcdEntry.GetToken()],
                    ['CName', PcdEntry.GetCName()],
                    ['DatumType', PcdEntry.GetDatumType()],
                    ['ValidUsage', GetStringOfList(PcdEntry.GetValidUsage())],
                    ['DefaultValue', DefaultValue],
                    ['MaxDatumSize', PcdEntry.GetMaxDatumSize()],
                   ]
        for Item in PcdEntry.GetPromptList():
            Tmp = PromptXml()
            NodeList.append(Tmp.ToXml(Item))
        for Item in PcdEntry.GetHelpTextList():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Item))
        for Item in PcdEntry.GetPcdErrorsList():
            Tmp = PcdErrorXml()
            NodeList.append(Tmp.ToXml(Item, 'PcdError'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    ##
    # Module will use ToXml3
    #
    def ToXml3(self, PcdEntry, Key):
        """Serialize *PcdEntry* for a module (minimal field set)."""
        # Historical no-op kept from the original code.
        if self.PcdCName:
            pass
        DefaultValue = ConvertNEToNOTEQ(PcdEntry.GetDefaultValue())
        AttributeList = \
            [['SupArchList', GetStringOfList(PcdEntry.GetSupArchList())], \
             ['PcdUsage', PcdEntry.GetValidUsage()], \
             ['PcdItemType', PcdEntry.GetItemType()], \
             ['FeatureFlag', ConvertNEToNOTEQ(PcdEntry.GetFeatureFlag())],
            ]
        NodeList = [['CName', PcdEntry.GetCName()],
                    ['TokenSpaceGuidCName', PcdEntry.GetTokenSpaceGuidCName()],
                    ['DefaultValue', DefaultValue],
                   ]
        for Item in PcdEntry.GetHelpTextList():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Item))
        for Item in PcdEntry.GetPcdErrorsList():
            Tmp = PcdErrorXml()
            NodeList.append(Tmp.ToXml(Item, 'PcdError'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    ##
    # AsBuild Module will use ToXml4
    #
    def ToXml4(self, PcdEntry, Key):
        """Serialize *PcdEntry* for an as-built module (no attributes)."""
        # Historical no-op kept from the original code.
        if self.PcdCName:
            pass
        DefaultValue = ConvertNEToNOTEQ(PcdEntry.GetDefaultValue())
        AttributeList = []
        NodeList = [
                    ['TokenSpaceGuidValue', PcdEntry.GetTokenSpaceGuidValue()],
                    ['PcdCName', PcdEntry.GetCName()],
                    ['Token', PcdEntry.GetToken()],
                    ['DatumType', PcdEntry.GetDatumType()],
                    ['MaxDatumSize', PcdEntry.GetMaxDatumSize()],
                    ['Value', DefaultValue],
                    ['Offset', PcdEntry.GetOffset()]
                   ]
        for Item in PcdEntry.GetHelpTextList():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Item))
        for Item in PcdEntry.GetPcdErrorsList():
            Tmp = PcdErrorXml()
            NodeList.append(Tmp.ToXml(Item, 'PcdError'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    def __str__(self):
        """Debug representation listing all parsed PCD fields."""
        Str = \
        ('PcdItemType = %s PcdUsage = %s TokenSpaceGuidCName = %s \
TokenSpaceGuidValue = %s Token = %s CName = %s PcdCName = %s \
DatumType = %s ValidUsage = %s DefaultValue = %s MaxDatumSize = %s \
Value = %s Offset = %s %s') % \
        (self.PcdItemType, self.PcdUsage, self.TokenSpaceGuidCName, \
         self.TokenSpaceGuidValue, self.Token, self.CName, self.PcdCName, \
         self.DatumType, self.ValidUsage, self.DefaultValue, \
         self.MaxDatumSize, self.Value, self.Offset, self.CommonDefines)
        for Item in self.HelpText:
            Str = Str + "\n\t" + str(Item)
        for Item in self.PcdError:
            Str = Str + "\n\tPcdError:" + str(Item)
        return Str
| 40.21223 | 111 | 0.600188 |
acf22d31d75b8095056cff14cf913de4d4d8590e | 592 | py | Python | pascals_triangle.py | canberkeh/Algorithms | 5d4ac443a76e492332ccefa69b71bea62fe83aa1 | [
"Unlicense"
] | null | null | null | pascals_triangle.py | canberkeh/Algorithms | 5d4ac443a76e492332ccefa69b71bea62fe83aa1 | [
"Unlicense"
] | null | null | null | pascals_triangle.py | canberkeh/Algorithms | 5d4ac443a76e492332ccefa69b71bea62fe83aa1 | [
"Unlicense"
] | null | null | null | def pascal(num):
if num > 1:
p = [[1], [1, 1]] #ilk iki eleman belli olduğu için direkt yazıyoruz
for i in range(2, num): #ilk iki eleman belli olduğu için direkt yazıyoruz.
a = [1] # baş ve sona hep 1 geldiği için
for j in range(1, i): # baş ve son 1 olacak, bunun içerisine yeni sayılar eklenecek
a.append(p[i-1][j-1] + p[i-1][j]) # her seferinde döngüdeki bir önceki elemanı ekleyecek
a.append(1) # sonuna 1 ekliyor
p.append(a)
return p
elif num == 1:return [[1]]
print(pascal(5)) | 49.333333 | 104 | 0.565878 |
acf22d9608bd4e05d8b169b6169cde1fb46d0567 | 1,873 | py | Python | python/EastTextDetection/tests/__init__.py | openmpf/openmpf-components | acf012aeda0bac902e4678a97338b0aa5ffe38bf | [
"Apache-2.0"
] | 5 | 2017-10-19T12:14:09.000Z | 2022-02-11T15:16:48.000Z | python/EastTextDetection/tests/__init__.py | openmpf/openmpf-components | acf012aeda0bac902e4678a97338b0aa5ffe38bf | [
"Apache-2.0"
] | 126 | 2017-05-05T19:40:32.000Z | 2022-03-08T19:14:00.000Z | python/EastTextDetection/tests/__init__.py | openmpf/openmpf-components | acf012aeda0bac902e4678a97338b0aa5ffe38bf | [
"Apache-2.0"
] | 2 | 2018-06-08T18:32:55.000Z | 2020-08-27T21:25:07.000Z | #############################################################################
# NOTICE #
# #
# This software (or technical data) was produced for the U.S. Government #
# under contract, and is subject to the Rights in Data-General Clause #
# 52.227-14, Alt. IV (DEC 2007). #
# #
# Copyright 2021 The MITRE Corporation. All Rights Reserved. #
#############################################################################
#############################################################################
# Copyright 2021 The MITRE Corporation #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#############################################################################
| 72.038462 | 77 | 0.33796 |
acf22d97d462634c799229e2577703dd9c82228b | 535 | py | Python | crescent/resources/api_gateway_v2/integration/__init__.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | 1 | 2020-03-26T19:20:03.000Z | 2020-03-26T19:20:03.000Z | crescent/resources/api_gateway_v2/integration/__init__.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | crescent/resources/api_gateway_v2/integration/__init__.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | from .tls_config import TlsConfig
from .integration import Integration
from .constants import ConnectionType, ContentHandlingStrategy, IntegrationType, PassthroughBehavior
class IntegrationFactory:
ConnectionType = ConnectionType
ContentHandlingStrategy = ContentHandlingStrategy
IntegrationType = IntegrationType
PassthroughBehavior = PassthroughBehavior
@staticmethod
def Create(id: str): return Integration(id)
@staticmethod
def TlsConfig(): return TlsConfig()
__all__ = ["IntegrationFactory"]
| 26.75 | 100 | 0.794393 |
acf22dcaae5d90921f6d9f524c8abb0ef9eb0799 | 934 | py | Python | day-3-pizza-order/main.py | hadi-learn/CoursePython100DaysOfCode | 92a84d4802931d0d0924eeb58422efdc045de1ce | [
"MIT"
] | 1 | 2021-10-21T16:23:35.000Z | 2021-10-21T16:23:35.000Z | day-3-pizza-order/main.py | hadi-learn/CoursePython100DaysOfCode | 92a84d4802931d0d0924eeb58422efdc045de1ce | [
"MIT"
] | null | null | null | day-3-pizza-order/main.py | hadi-learn/CoursePython100DaysOfCode | 92a84d4802931d0d0924eeb58422efdc045de1ce | [
"MIT"
] | null | null | null | print("Welcome to the Python pizza deliveries!")
size = input("What size pizza do you want? S, M, or L? ").lower()
add_pepperoni = input("Do you want pepperoni? Y or N? ").lower()
extra_cheese = input("Do you want extra cheese? Y or N? ").lower()
pizza_price = 0
pepperoni_price = 0
cheese_price = 0
if size == "s":
pizza_price = 15
if add_pepperoni == "y":
pepperoni_price = 2
elif add_pepperoni == "n":
pepperoni_price = 0
elif size == "m":
pizza_price = 20
if add_pepperoni == "y":
pepperoni_price = 3
elif add_pepperoni == "n":
pepperoni_price = 0
elif size == "l":
pizza_price = 25
if add_pepperoni == "y":
pepperoni_price = 3
elif add_pepperoni == "n":
pepperoni_price = 0
if extra_cheese == "y":
cheese_price = 1
elif extra_cheese == "n":
cheese_price = 0
print(f"Your final bill is: ${pizza_price + pepperoni_price + cheese_price}")
| 29.1875 | 77 | 0.631692 |
acf22e4810dae27eb92fb0741e596d3b4b5371d1 | 230 | py | Python | examples/4/size.py | ekohilas/comp2041_plpy | 54ab21eaba1b11814cd683ba4a5b18f329561cff | [
"MIT"
] | 2 | 2018-05-28T06:19:37.000Z | 2019-02-28T19:48:18.000Z | examples/4/size.py | ekohilas/comp2041_plpy | 54ab21eaba1b11814cd683ba4a5b18f329561cff | [
"MIT"
] | null | null | null | examples/4/size.py | ekohilas/comp2041_plpy | 54ab21eaba1b11814cd683ba4a5b18f329561cff | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3.5 -u
import sys
sys.stdout.write("Enter a number: ")
a = float(sys.stdin.readline())
if a < 0:
print("negative")
elif a == 0:
print("zero")
elif a < 10:
print("small")
else:
print("large")
| 17.692308 | 36 | 0.608696 |
acf22e63ee6f8f9255356047e82f13ace98e0168 | 10,145 | py | Python | src/common/MouseControl.py | achimmihca/DedoMouse | 5b8e9a87afd17d9537bcdc9e8cb62b65e9a59ed2 | [
"MIT"
] | null | null | null | src/common/MouseControl.py | achimmihca/DedoMouse | 5b8e9a87afd17d9537bcdc9e8cb62b65e9a59ed2 | [
"MIT"
] | 6 | 2021-08-23T19:28:36.000Z | 2021-10-01T18:52:36.000Z | src/common/MouseControl.py | achimmihca/DedoMouse | 5b8e9a87afd17d9537bcdc9e8cb62b65e9a59ed2 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Optional
import threading
import time
import pyautogui # type: ignore
from math import sqrt
from enum import Enum
from rx.subject.subject import Subject
import common.AppContext as AppContext
from common.Config import MousePositioningMode
from .LogHolder import LogHolder
from .PidControl import PidControl
from .Vector import Vector
from .util import get_time_ms
from .util import limit_float
# Disable pyautogui's implicit delay after every action so cursor updates
# are not throttled (https://github.com/asweigart/pyautogui/issues/568)
pyautogui.PAUSE = 0
class MouseControl(LogHolder):
    """Drives the OS mouse cursor (move, click, drag, scroll) via pyautogui.

    Positions arrive from hand tracking; movement is smoothed either with
    a PID controller (absolute mode) or scaled deltas (relative mode).
    Every performed/ignored action is published on
    ``performed_action_desciption`` (note: the attribute-name typo is part
    of the existing API and is kept).
    """

    def __init__(self, app_context: AppContext.AppContext):
        super().__init__()
        self.app_context = app_context
        self.config = app_context.config
        # Configure PID values:
        # - start by setting i and d to zero and p to a small value.
        # - then configure p, i, d, in this order.
        # Proportional factor between 0 and 1.
        # Increase value if mouse is not moving fast enough to destination.
        # Reduce value if mouse is moving too fast or is jittering.
        p = self.config.mouse_position_pid_p.value
        # Integral factor.
        # Set to 0 if mouse is not moving smoothly. Then slowly increase value until ok.
        i = self.config.mouse_position_pid_i.value
        # Derivative factor.
        # Set to 0 if mouse is not moving smoothly. Then slowly increase value until ok.
        d = self.config.mouse_position_pid_d.value
        # Last detected hand position (relative mode only); None until the
        # first detection arrives.
        self.last_mouse_position: Optional[Vector] = None
        # Separate PID controllers for the x and y axes (absolute mode).
        self.mouse_x_pid_control = PidControl(p, i, d)
        self.mouse_y_pid_control = PidControl(p, i, d)
        self.last_mouse_position_time_ms = get_time_ms()
        self.is_drag_started = False
        self.last_single_left_click_time_ms = 0
        # Stream of human-readable action descriptions (rx Subject).
        self.performed_action_desciption = Subject()
        # Log all performed actions
        self.performed_action_desciption.subscribe(lambda new_value: self.log.info(new_value))

    def on_new_mouse_position_detected(self, new_mouse_px: Vector) -> None:
        """Dispatch a newly detected position to the configured positioning mode."""
        if self.config.mouse_positioning_mode.value == MousePositioningMode.RELATIVE:
            self._handle_new_mouse_position_via_relative_positioning(new_mouse_px)
        elif self.config.mouse_positioning_mode.value == MousePositioningMode.ABSOLUTE:
            self._handle_new_mouse_position_via_absolute_positioning(new_mouse_px)

    def _handle_new_mouse_position_via_absolute_positioning(self, new_mouse_px: Vector) -> None:
        """Move the cursor toward *new_mouse_px* using per-axis PID smoothing."""
        if self.config.is_control_mouse_position.value and not self.config.is_all_control_disabled.value:
            delta_time_seconds = (get_time_ms() - self.last_mouse_position_time_ms) / 1000
            current_pos = self._get_mouse_position()
            smooth_mouse_x = self.mouse_x_pid_control.get_next_value(current_pos.x, new_mouse_px.x, delta_time_seconds)
            smooth_mouse_y = self.mouse_y_pid_control.get_next_value(current_pos.y, new_mouse_px.y, delta_time_seconds)
            self._set_mouse_position(Vector(smooth_mouse_x, smooth_mouse_y))
            self.last_mouse_position_time_ms = get_time_ms()

    def _handle_new_mouse_position_via_relative_positioning(self, new_mouse_px: Vector) -> None:
        """Move the cursor by a scaled delta from the last detected position.

        Movements smaller than a configured fraction of the capture width
        are ignored to suppress jitter.
        """
        min_distance_px: float = self.app_context.webcam_control.actual_capture_size.x * self.config.min_mouse_position_difference_percent.value
        if self.config.is_control_mouse_position.value and not self.config.is_all_control_disabled.value:
            if not self.last_mouse_position:
                # First detection: nothing to diff against yet.
                self.last_mouse_position = new_mouse_px
                return
            mouse_position_difference = new_mouse_px.subtract(self.last_mouse_position)
            if mouse_position_difference.magnitude() <= min_distance_px:
                # Below the jitter threshold; ignore.
                return
            delta_time_seconds = (get_time_ms() - self.last_mouse_position_time_ms) / 1000
            reduced_mouse_position_difference = mouse_position_difference.scale_by_scalar(1 - self.config.min_mouse_position_difference_percent.value)
            # limit square root to avoid div by zero
            reduced_mouse_position_difference_magniture_square_root = limit_float(sqrt(reduced_mouse_position_difference.magnitude()), 0.000001, None)
            smooth_mouse_difference = (reduced_mouse_position_difference
                                       .scale_by_scalar(1 / reduced_mouse_position_difference_magniture_square_root)
                                       .scale_by_scalar(1 / delta_time_seconds)
                                       .scale_by_scalar(self.config.mouse_position_difference_sensitivity.value))
            current_pos = self._get_mouse_position()
            new_pos = current_pos.add(smooth_mouse_difference)
            self._set_mouse_position(new_pos)
            self.last_mouse_position = new_mouse_px
            self.last_mouse_position_time_ms = get_time_ms()

    def on_single_click_detected(self, mouse_button: MouseButton) -> None:
        """Perform a single click unless a drag is ongoing or clicks are disabled."""
        if self.is_drag_started:
            self.performed_action_desciption.on_next(f"{mouse_button.name.lower()} click, but ongoing drag")
            return

        if self.config.is_control_click.value and not self.config.is_all_control_disabled.value:
            if mouse_button == MouseButton.LEFT:
                # Remembered so a following double click can be spaced properly.
                self.last_single_left_click_time_ms = get_time_ms()
            self._do_click(mouse_button)
            self.performed_action_desciption.on_next(f"{mouse_button.name.lower()} click")
        else:
            self.performed_action_desciption.on_next(f"{mouse_button.name.lower()} click, but ignored")

    def on_double_left_click_detected(self) -> None:
        """Perform a double left click, delaying if a single click just happened."""
        if self.is_drag_started:
            self.performed_action_desciption.on_next("double click, but ongoing drag")
            return

        if self.config.is_control_click.value and not self.config.is_all_control_disabled.value:
            duration_since_last_single_click = get_time_ms() - self.last_single_left_click_time_ms
            if (duration_since_last_single_click < 500):
                # Wait a little bit such that the OS will take the following two clicks as a double click and not as a triple click.
                do_double_click_thread = threading.Thread(target=lambda: self.do_double_click_after_sleep_in_ms(500 - duration_since_last_single_click))
                do_double_click_thread.start()
            else:
                self._do_click(MouseButton.LEFT)
                self._do_click(MouseButton.LEFT)
            self.performed_action_desciption.on_next("double left click")
        else:
            self.performed_action_desciption.on_next("double left click, but ignored")

    def do_double_click_after_sleep_in_ms(self, sleep_time_ms: int) -> None:
        """Sleep for *sleep_time_ms*, then issue two left clicks (runs in a thread)."""
        time.sleep(sleep_time_ms / 1000)
        self._do_click(MouseButton.LEFT)
        self._do_click(MouseButton.LEFT)

    def on_begin_drag(self) -> None:
        """Press and hold the left button to start a drag (idempotent)."""
        if self.is_drag_started:
            self.performed_action_desciption.on_next("begin drag but drag already started, thus ignored")
            return

        # Flag is set even when control is disabled so begin/end stay paired.
        self.is_drag_started = True
        if self.config.is_control_click.value and not self.config.is_all_control_disabled.value:
            self._do_start_drag()
            self.performed_action_desciption.on_next("begin drag")
        else:
            self.performed_action_desciption.on_next("begin drag, but ignored")

    def on_end_drag(self) -> None:
        """Release the left button to finish a drag (idempotent)."""
        if not self.is_drag_started:
            self.performed_action_desciption.on_next("end drag but no drag started yet, thus ignored")
            return

        self.is_drag_started = False
        if self.config.is_control_click.value and not self.config.is_all_control_disabled.value:
            self._do_end_drag()
            self.performed_action_desciption.on_next("end drag")
        else:
            self.performed_action_desciption.on_next("end drag but ignored")

    def on_scroll(self, x: int, y: int) -> None:
        """Scroll by (x, y) steps; suppressed during drags or when disabled."""
        scroll_direction = self._get_scroll_direction(x, y)
        if self.is_drag_started:
            self.performed_action_desciption.on_next(f"scroll {scroll_direction}, but ongoing drag")
            return

        try:
            if self.config.is_control_scroll.value and not self.config.is_all_control_disabled.value:
                self._do_scroll(x, y)
                self.performed_action_desciption.on_next(f"scroll {scroll_direction}")
            else:
                self.performed_action_desciption.on_next(f"scroll {scroll_direction}, but ignored")
        except Exception as e:
            # Scrolling failures are reported on the action stream, not raised.
            self.performed_action_desciption.on_next(f"scrolling failed (horizontal:{x}, vertical:{y}): {str(e)}")

    def _do_click(self, mouse_button: MouseButton) -> None:
        """Issue a pyautogui click for the given button."""
        if mouse_button == MouseButton.LEFT:
            pyautogui.click(button=pyautogui.LEFT)
        if mouse_button == MouseButton.RIGHT:
            pyautogui.click(button=pyautogui.RIGHT)
        if mouse_button == MouseButton.MIDDLE:
            pyautogui.click(button=pyautogui.MIDDLE)

    def _do_start_drag(self) -> None:
        """Press the left button without releasing it."""
        pyautogui.mouseDown(button=pyautogui.LEFT)

    def _do_end_drag(self) -> None:
        """Release the left button."""
        pyautogui.mouseUp(button=pyautogui.LEFT)

    def _do_scroll(self, x: int, y: int) -> None:
        """Forward a scroll request to pyautogui."""
        if x != 0:
            # Horizontal scrolling not yet supported.
            pass

        if y != 0:
            # Scroll steps are tiny in PyAutoGui. Thus, multiply by some factor.
            pyautogui.scroll(y * self.config.pyautogui_scroll_factor.value)

    def _get_scroll_direction(self, x: int, y: int) -> str:
        """Describe the scroll vector for logging ('up', 'down', ... 'diagonal')."""
        if (x > 0 and y == 0):
            return "right"
        if (x < 0 and y == 0):
            return "left"
        if (x == 0 and y > 0):
            return "up"
        if (x == 0 and y < 0):
            return "down"
        return "diagonal"

    def _get_mouse_position(self) -> Vector:
        """Return the current OS cursor position as a Vector."""
        x, y = pyautogui.position()
        return Vector(x, y)

    def _set_mouse_position(self, new_pos: Vector) -> None:
        """Move the OS cursor to *new_pos* (truncated to int pixels)."""
        pyautogui.moveTo(int(new_pos.x), int(new_pos.y))
class MouseButton(Enum):
    """Identifies which physical mouse button an action targets."""
    LEFT = 1
    RIGHT = 2
    MIDDLE = 3
acf22eac252b9baf05e504bb792154e8236b48df | 6,210 | py | Python | goodman_pipeline/images/data_classifier.py | SunilSimha/goodman_pipeline | f99ebec0879a0e54aef6e8346bd1ba3cf46fe551 | [
"BSD-3-Clause"
] | 6 | 2017-05-30T02:48:35.000Z | 2018-04-10T02:11:52.000Z | goodman_pipeline/images/data_classifier.py | SunilSimha/goodman_pipeline | f99ebec0879a0e54aef6e8346bd1ba3cf46fe551 | [
"BSD-3-Clause"
] | 167 | 2017-04-24T21:12:53.000Z | 2018-10-01T14:14:11.000Z | goodman_pipeline/images/data_classifier.py | SunilSimha/goodman_pipeline | f99ebec0879a0e54aef6e8346bd1ba3cf46fe551 | [
"BSD-3-Clause"
] | 6 | 2019-06-22T13:15:51.000Z | 2022-03-25T18:10:07.000Z | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import logging
from astropy.io.fits.verify import VerifyError
from ccdproc import ImageFileCollection
from ..core import fix_keywords
class DataClassifier(object):
"""Classifies the data being presented to the pipeline.
Data classifier is intended to define the camera that is being used and the
technique in use. This will be used later to make important decisions
regarding the process to be used.
"""
def __init__(self):
"""Initialization method for the DataClassifier class
The general arguments of the program are parsed and become part of the
class attributes. The rest of attributes are initialized as None.
"""
self.log = logging.getLogger(__name__)
self.raw_path = None
self.nights_dict = None
self.instrument = None
self.image_collection = None
self.objects_collection = None
self.technique = None
def __repr__(self):
"""String representation of the information contained."""
return str("Raw Path: {:s}\n"
"Instrument: {:s} Camera\n"
"Observing Technique: {:s}".format(self.raw_path,
self.instrument,
self.technique))
def __call__(self, raw_path):
"""Call method for the DataClassifier class
This method call specific method that define all the attributes of the
class. The intention is to define the instrument and technique in use.
Args:
raw_path (str): Full Path to raw data
"""
self.raw_path = raw_path
# define the ImageFileCollection instance right away.
try:
ifc = ImageFileCollection(self.raw_path)
except VerifyError as error: # pragma: no cover
self.log.error("Raised VerifyError: {:}".format(error))
self.log.critical("Some keywords are not FITS compliant. Trying "
"to fix the headers.")
fix_keywords(path=self.raw_path)
self.log.info("Headers have been fixed, please rerun the pipeline!")
sys.exit()
self.image_collection = ifc.summary.to_pandas()
self.objects_collection = self.image_collection[
self.image_collection.obstype != 'BIAS']
self.nights_dict = {}
self.log.debug('Raw path: {:s}'.format(self.raw_path))
self._get_instrument()
if self.instrument is not None:
self.log.info('Instrument: {:s} Camera'.format(self.instrument))
else:
self.log.critical("Unable to determine which camera was used.")
self.log.info("Make sure you only have 'Blue' or 'Red' camera data "
"only, not both.")
sys.exit()
self._get_obs_technique()
if self.technique is not None:
self.log.info('Observing Technique: {:s}'.format(self.technique))
# else:
# self.log.critical("Unable to determine observing technique used.")
# sys.exit()
if self.instrument is not None and self.technique is not None:
# folder name is used as key for the dictionary
night = os.path.basename(self.raw_path)
self.nights_dict[night] = {'full_path': self.raw_path,
'instrument': self.instrument,
'technique': self.technique}
else:
self.log.error('Failed to determine Instrument or Technique '
'for the night: {:s}'.format(self.raw_path))
def _get_instrument(self):
"""Identify Goodman's Camera
The header keyword of the camera is `INSTCONF`.
Notes:
This methods no longer offers backwards compatibility.
"""
instconf = self.objects_collection.instconf.unique()
if len(instconf) > 1:
for _inst in instconf:
self.log.debug("INSTCONF = {:s} is present.".format(_inst))
self.log.warning("Camera changes are forbidden during the night")
elif len(instconf) == 1:
self.instrument = instconf[0]
self.log.debug("Detected {:s} camera.".format(self.instrument))
# else:
# self.log.error("Impossible to determine which camera was used.")
def _get_obs_technique(self):
"""Identify if the data is Imaging or Spectroscopy
For imaging data the keyword `WAVMODE` is `Imaging` therefore the logic
here is: If there is only one value for `WAVMODE` and it is `Imaging`
then the technique is `Imaging`. If `Imaging` is in the result along
with other then it will assume the technique is Spectroscopy and will
ignore all the Imaging data. If none of the conditions above are met it
will assume the technique is Spectroscopy.
The result is stored as an attribute of the class.
"""
# self.technique = identify_technique()
wavmodes = [str(w).upper() for w in self.objects_collection.wavmode.unique()]
if len(wavmodes) == 1 and wavmodes[0] == 'IMAGING':
self.technique = 'Imaging'
elif 'IMAGING' in wavmodes and len(wavmodes) > 1:
self.log.error('There seems to be Imaging and Spectroscopic '
'data. I will assume the Imaging data are '
'acquisition images therefore they will be '
'ignored.')
self.log.info("If you really have Imaging data, please process "
"them in a separated folder.")
self.technique = 'Spectroscopy'
else:
self.technique = 'Spectroscopy'
# inform the results, no need to return
self.log.info('Detected {:s} Data from {:s} '
'Camera'.format(self.technique, self.instrument))
if __name__ == '__main__':
    # Module is import-only; running it directly does nothing.
    pass
| 37.636364 | 85 | 0.592271 |
acf22ee97568ec221e90b0d288823f2a2e5c0a10 | 22,074 | py | Python | cloudmesh/configuration/Config.py | cloudmesh/cloudmesh-config | 9fd06731ecd822f2118cfff9a8fc3082cd2a824b | [
"Apache-2.0"
] | null | null | null | cloudmesh/configuration/Config.py | cloudmesh/cloudmesh-config | 9fd06731ecd822f2118cfff9a8fc3082cd2a824b | [
"Apache-2.0"
] | 6 | 2019-11-27T22:12:21.000Z | 2020-05-01T23:12:43.000Z | cloudmesh/configuration/Config.py | cloudmesh/cloudmesh-config | 9fd06731ecd822f2118cfff9a8fc3082cd2a824b | [
"Apache-2.0"
] | 8 | 2019-09-30T19:04:31.000Z | 2020-04-30T18:29:57.000Z | import os
import re
import shutil
import sys
from os import mkdir
from os.path import isfile, realpath, exists, dirname
from pathlib import Path
from shutil import copyfile
import munch
import oyaml as yaml
import requests
from cloudmesh.common.FlatDict import FlatDict
from cloudmesh.common.FlatDict import flatten
from cloudmesh.common.Shell import Shell
from cloudmesh.common.console import Console
from cloudmesh.common.dotdict import dotdict
from cloudmesh.common.util import backup_name
from cloudmesh.common.util import banner
from cloudmesh.common.util import path_expand
from cloudmesh.common.util import writefile
from cloudmesh.common.variables import Variables
from cloudmesh.configuration import __version__ as cloudmesh_yaml_version
from cloudmesh.common.location import Location
# see also https://github.com/cloudmesh/client/blob/main/cloudmesh_client/cloud/register.py
class Active(object):
    """Query helper over the cloudmesh configuration's active clouds."""

    def __init__(self):
        self.config = Config()

    def clouds(self):
        """Return the names of clouds flagged ``cm.active``, or None if none."""
        entries = self.config["cloudmesh"]["cloud"]
        names = [name for name in entries if entries[name]["cm"]["active"]]
        return names or None
class Config(object):
    """Borg-style wrapper around the ``~/.cloudmesh/cloudmesh.yaml`` file.

    All instances share ``__shared_state`` so the YAML file is parsed at
    most once per process. Keys are addressed with dotted paths such as
    ``cloudmesh.profile.user`` via ``config["a.b.c"]`` / ``config.set``.
    """

    # shared between all instances (Borg pattern)
    __shared_state = {}

    def __init__(self,
                 config_path='~/.cloudmesh/cloudmesh.yaml',
                 encrypted=False):
        """
        Initialize the Config class.

        :param config_path: A local file path to cloudmesh yaml config
                            with a root element `cloudmesh`.
                            Default: `~/.cloudmesh/cloudmesh.yaml`
        :param encrypted: unused; retained for backward compatibility.
        """
        self.__dict__ = self.__shared_state
        if "data" not in self.__dict__:
            if ".yaml" in config_path:
                p = os.path.dirname(config_path)
            else:
                p = config_path
            self.location = Location(directory=p)
            self.load(config_path=self.location.config())
            try:
                self.user = self["cloudmesh.profile.user"]
            except Exception:
                # the profile user is optional; ignore a missing key
                pass

    @staticmethod
    def version():
        """Return the version of the installed cloudmesh configuration."""
        return cloudmesh_yaml_version

    @staticmethod
    def secrets():
        """Attribute names whose values are masked when printing the config."""
        return [
            "AZURE_SUBSCRIPTION_ID",
            "AZURE_TENANT_ID",
            "AZURE_APPLICATION_ID",
            "AZURE_SECRET_KEY",
            "EC2_ACCESS_ID",
            "EC2_SECRET_KEY",
            "OS_PASSWORD",
            "OS_USERNAME",
            "OS_PROJECT_ID",
            "MONGO_PASSWORD",
            "MONGO_USERNAME",
            "password",
            "passwd",
            "project_id",
            "private_key_id",
            "private_key",
            "client_id",
            "client_email",
            "client_x509_cert_url",
            "auth__password",
            "auth.password"
        ]

    @staticmethod
    def exceptions():
        """Keys that are exempt from secret handling."""
        return [
            "cloudmesh.version",
            "cloudmesh.security.publickey",
            "cloudmesh.security.privatekey",
            "cloudmesh.security.secpath",
            "cloudmesh.security.secrets",
            "cloudmesh.security.exceptions",
            "cloudmesh.data.mongo.MONGO_PORT",
            "cloudmesh.data.mongo.MONGO_HOST",
            "cloudmesh.data.mongo.LOCAL",
            "cloudmesh.data.mongo.MODE",
            "cloudmesh.data.mongo.MONGO_DBNAME"
        ]

    def fetch(self,
              url=None,
              destination=None):
        """
        fetches the cloudmesh yaml file and places it in the given
        destination dir

        :param url: The url of the cloudmesh.yaml file from github
        :param destination: The destination file. If not specified it is the
                            home dir.
        """
        if url is None:
            url = "https://raw.githubusercontent.com/cloudmesh/" \
                  "cloudmesh-configuration/main/cloudmesh/configuration/etc/cloudmesh.yaml"
        if destination is None:
            destination = "~/.cloudmesh/cloudmesh.yaml"
        destination = path_expand(destination)
        Shell.mkdir("~/.cloudmesh")
        r = requests.get(url)
        content = r.text
        writefile(destination, content)

    def load(self, config_path=None):
        """
        loads a configuration file

        :param config_path: path of the yaml file; defaults to the
                            location's configuration file.
        """
        self.config_path = Path(
            path_expand(config_path or self.location.config())).resolve()
        self.config_folder = dirname(self.config_path)
        self.create(config_path=config_path)
        with open(self.config_path, "r") as stream:
            content = stream.read()
            content = self.spec_replace(content)
            self.data = yaml.load(content, Loader=yaml.SafeLoader)
        # self.data is loaded as nested OrderedDict; use __getitem__/set
        # rather than plain attribute access
        if self.data is None:
            raise EnvironmentError(
                "Failed to load configuration file cloudmesh.yaml, "
                "please check the path and file locally")
        #
        # populate default variables
        #
        self.variable_database = Variables(filename="~/.cloudmesh/variable.dat")
        self.set_debug_defaults()
        default = self.default()
        for name in self.default():
            if name not in self.variable_database:
                self.variable_database[name] = default[name]
        if "cloud" in default:
            self.cloud = default["cloud"]
        else:
            self.cloud = None

    def create(self, config_path=None):
        """
        creates the cloudmesh.yaml file in the specified location. The
        default is

            ~/.cloudmesh/cloudmesh.yaml

        If the file does not exist, it is initialized with a default. You
        still need to edit the file.

        :param config_path: The yaml file to create
        :type config_path: string
        """
        self.config_path = Path(
            path_expand(config_path or self.location.config())).resolve()
        self.config_folder = dirname(self.config_path)
        if not exists(self.config_folder):
            mkdir(self.config_folder)
        if not isfile(self.config_path):
            source = Path(dirname(realpath(__file__)) + "/etc/cloudmesh.yaml")
            copyfile(source.resolve(), self.config_path)
            # read defaults
            self.__init__()
            defaults = self["cloudmesh.default"]
            d = Variables()
            if defaults is not None:
                print("# Set default from yaml file:")
                for key in defaults:
                    value = defaults[key]
                    print("set {key}={value}".format(**locals()))
                    d[key] = defaults[key]

    def check(self, path=None):
        """Run sanity checks on the configuration file.

        Checks the CLOUDMESH_CONFIG_DIR override, compares the yaml file
        version with the installed version, looks for TAB characters and
        runs yamllint when it is installed.

        :param path: the yaml file to check; defaults to the standard
                     location.
        """
        path = path_expand(path or self.location.config())
        config = Config()
        banner("Check for CLOUDMESH_CONFIG_DIR")
        if os.path.isfile(path):
            print("Config found in:", path)
        if "CLOUDMESH_CONFIG_DIR" in os.environ:
            # bug fix: os.environ is a mapping and must be indexed, not called
            directory = os.environ["CLOUDMESH_CONFIG_DIR"]
            # bug fix: these messages were missing the f-prefix and printed
            # the literal placeholder text
            print(f"CLOUDMESH_CONFIG_DIR={directory}")
            config_path = str(Path(directory) / "cloudmesh.yaml")
            # bug fix: verify the overridden location, not the default path
            if os.path.isfile(config_path):
                print("Config found in:", config_path)
            else:
                Console.error(f"File {config_path} not found.")
            if path != config_path:
                Console.warning("You may have two cloudmesh.yaml file.")
                Console.warning(f"We use: {config_path}")
        banner("Check Version")
        dist_version = config.version()
        yaml_version = config["cloudmesh.version"]
        if dist_version == yaml_version:
            Console.ok(f"The version is {dist_version}")
        else:
            Console.error("Your versions do not match")
            print()
            print("Found ~/.cloudmesh/cloudmesh.yaml:", yaml_version)
            print("Please update to version :", dist_version)
            print("")
            print("See also: ")
            print()
            print(" https://github.com/cloudmesh/cloudmesh-configuration/"
                  "blob/main/cloudmesh/configuration/etc/cloudmesh.yaml")
        banner("Check for TAB Characters")
        error = Config.check_for_tabs(path)
        if not error:
            Console.ok("OK. No TABs found")
        banner("yamllint")
        try:
            import yamllint  # noqa: F401

            options = \
                '-f colored ' \
                '-d "{extends: relaxed, ""rules: {line-length: {max: 256}}}"'
            r = Shell.live('yamllint {options} {path}'.format(**locals()))
            if 'error' in r or 'warning' in r:
                print(70 * '-')
                print(" line:column description")
                print()
            else:
                Console.ok("OK. No issues found")
                print()
        except Exception:
            Console.error("Could not execute yamllint. Please add with")
            Console.error("pip install yamllint")

    @staticmethod
    def check_for_tabs(filename, verbose=True):
        """identifies if the file contains tabs and returns True if it
        does. It also prints the location of the lines and columns. If
        verbose is set to False, the location is not printed.

        :param verbose: if true prints issues
        :param filename: the filename
        :type filename: str
        :rtype: True if there are tabs in the file
        """
        filename = path_expand(filename)
        file_contains_tabs = False
        with open(filename, 'r') as f:
            lines = f.read().splitlines()
        line_no = 1
        for line in lines:
            if "\t" in line:
                file_contains_tabs = True
                location = [
                    i for i in range(len(line)) if line.startswith('\t', i)]
                if verbose:
                    Console.error(
                        f"Tab found in line {line_no} and column(s) {location}")
            line_no += 1
        return file_contains_tabs

    def save(self, path=None, backup=True):
        """
        saves the dict into the file. It also creates a backup if set to
        True. The backup filename appends a .bak.NO where NO is a number
        that is not yet used in the backup directory.

        :param path: path of the file to back up; defaults to the standard
                     location
        :param backup: whether a backup copy is made first
        """
        path = path_expand(path or self.location.config())
        if backup:
            destination = backup_name(path)
            shutil.copyfile(path, destination)
        yaml_file = self.data.copy()
        with open(self.config_path, "w") as stream:
            yaml.safe_dump(yaml_file, stream, default_flow_style=False)

    def spec_replace(self, spec):
        """Expand ``{a.b.c}`` references in *spec* with values taken from
        the document itself; resolution is repeated so chained references
        can be expanded.

        NOTE(security): this uses ``eval`` on the dotted names found in
        the file, so only trusted configuration files may be loaded.
        """
        variables = re.findall(r"\{\w.+\}", spec)
        for i in range(0, len(variables)):
            data = yaml.load(spec, Loader=yaml.SafeLoader)
            m = munch.DefaultMunch.fromDict(data)
            for variable in variables:
                text = variable
                variable = variable[1:-1]
                try:
                    value = eval("m.{variable}".format(**locals()))
                    if "{" not in value:
                        spec = spec.replace(text, value)
                except Exception:
                    value = variable
        return spec

    def credentials(self, kind, name):
        """
        Return the credentials block of a resource.

        :param kind: the first level of attributes after cloudmesh
        :param name: the name of the resource
        """
        return self.data["cloudmesh"][kind][name]["credentials"]

    # noinspection PyPep8Naming
    def check_for_TBD(self, kind, name):
        """Print the given section and flag every attribute still set to TBD."""
        configuration = Config()["cloudmesh.{kind}.{name}".format(**locals())]
        result = {"cloudmesh": {"cloud": {name: configuration}}}
        banner("checking cloudmesh.{kind}.{name} in "
               "~/.cloudmesh/cloudmesh.yaml file".format(**locals()))
        print(yaml.dump(result))
        flat = flatten(configuration, sep=".")
        for attribute in flat:
            if "TBD" in str(flat[attribute]):
                Console.error(
                    "~/.cloudmesh.yaml: Attribute cloudmesh.{name}.{attribute} contains TBD".format(
                        **locals()))

    def set_debug_defaults(self):
        """Seed the variable database with trace/debug flags set to False."""
        for name in ["trace", "debug"]:
            if name not in self.variable_database:
                self.variable_database[name] = str(False)

    def dict(self):
        """Return the raw configuration dictionary."""
        return self.data

    def __str__(self):
        return self.cat_dict(self.data)

    @staticmethod
    def cat_dict(d,
                 mask_secrets=True,
                 attributes=None,
                 color=None):
        """Render dict *d* as masked/colored YAML text."""
        dumped = yaml.dump(d, default_flow_style=False, indent=2)
        content = dumped.splitlines()
        # bug fix: attributes and color were previously dropped
        return Config.cat_lines(content,
                                mask_secrets=mask_secrets,
                                attributes=attributes,
                                color=color)

    @staticmethod
    def cat_lines(content,
                  mask_secrets=True,
                  attributes=None,
                  color=None):
        """Mask secret values and colorize placeholder values in *content*.

        :param content: list of yaml text lines
        :param mask_secrets: replace secret values with '********'
        :param attributes: extra attribute names to treat as secrets
        :param color: extra placeholder strings to colorize in red
        """
        colors = ['TBD', "xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "12345", "xxxx"]
        if color:
            colors = colors + color
        secrets = Config.secrets()
        if attributes:
            secrets = secrets + attributes
        lines = []
        for line in content:
            if "TBD" not in line:
                if mask_secrets:
                    for attribute in secrets:
                        if attribute + ":" in line:
                            line = line.split(":")[0] + Console.text(
                                message=": '********'", color='BLUE')
                            break
            for colorme in colors:
                if colorme in line:
                    attribute, value = line.split(":", 1)
                    line = attribute + ":" + Console.text(color='RED',
                                                          message=value)
            lines.append(line)
        return '\n'.join(lines)

    @staticmethod
    def cat(mask_secrets=True,
            attributes=None,
            path="~/.cloudmesh/cloudmesh.yaml",
            color=None):
        """Return the masked/colored content of the yaml file at *path*.

        bug fix: *path*, *attributes* and *color* were previously ignored.
        """
        _path = path_expand(path)
        with open(_path) as f:
            content = f.read().splitlines()
        return Config.cat_lines(content,
                                mask_secrets=mask_secrets,
                                attributes=attributes,
                                color=color)

    def get(self, key, default=None):
        """
        A helper function for reading values from the config without
        a chain of `get()` calls.

        Usage:
            mongo_conn = conf.get('db.mongo.MONGO_CONNECTION_STRING')
            default_db = conf.get('default.db')
            az_credentials = conf.get('data.service.azure.credentials')

        :param key: A string representing the value's path in the config.
        :param default: returned when the key is missing; when None, a
                        KeyError is raised instead.
        """
        try:
            return self.__getitem__(key)
        except KeyError:
            if default is None:
                path = self.config_path
                Console.warning(
                    "The key '{key}' could not be found in the yaml file "
                    "'{path}'".format(**locals()))
                raise KeyError(key)
            return default
        except Exception as e:
            print(e)
            sys.exit(1)

    def __setitem__(self, key, value):
        self.set(key, value)

    def set(self, key, value):
        """
        A helper function for setting values in the config without a
        chain of `set()` calls. The change is written back to the yaml
        file. Missing parent sections are created on the fly.

        Usage:
            conf.set('db.mongo.MONGO_CONNECTION_STRING',
                     "https://localhost:3232")

        :param key: A string representing the value's path in the config.
        :param value: value to be set.
        """
        # bug fix: only strings have .lower(); non-string values are
        # stored unchanged instead of raising AttributeError
        if isinstance(value, str) and value.lower() in ['true', 'false']:
            value = value.lower() == 'true'
        try:
            if "." in key:
                keys = key.split(".")
                #
                # create parents
                #
                parents = keys[:-1]
                location = self.data
                for parent in parents:
                    if parent not in location:
                        location[parent] = {}
                    location = location[parent]
                #
                # create entry
                #
                location[keys[-1]] = value
            else:
                self.data[key] = value
        except KeyError:
            path = self.config_path
            Console.error(
                "The key '{key}' could not be found in the yaml file "
                "'{path}'".format(**locals()))
            sys.exit(1)
        except Exception as e:
            print(e)
            sys.exit(1)
        yaml_file = self.data.copy()
        with open(self.config_path, "w") as stream:
            yaml.safe_dump(yaml_file, stream, default_flow_style=False)

    def set_cloud(self, key, value):
        """
        Set the default cloud and write the change back to the yaml file.

        :param key: unused; retained for backward compatibility.
        :param value: name of the cloud to make the default.
        """
        self.data['cloudmesh']['default']['cloud'] = value
        print("Setting env parameter cloud to: " +
              self.data['cloudmesh']['default']['cloud'])
        yaml_file = self.data.copy()
        with open(self.config_path, "w") as stream:
            print("Writing update to cloudmesh.yaml")
            yaml.safe_dump(yaml_file, stream, default_flow_style=False)

    def default(self):
        """Return the ``cloudmesh.default`` section as a dotdict."""
        return dotdict(self.data["cloudmesh"]["default"])

    def __getitem__(self, item):
        """
        gets an item from the dict. The key is . separated,
        use it as follows: config["a.b.c"]

        :param item: dotted key
        :raises KeyError: when the key is not present
        """
        try:
            if "." in item:
                keys = item.split(".")
            else:
                return self.data[item]
            element = self.data[keys[0]]
            for key in keys[1:]:
                element = element[key]
        except KeyError:
            path = self.config_path
            Console.warning(
                "The key '{item}' could not be found in the yaml file "
                "'{path}'".format(**locals()))
            raise KeyError(item)
        except Exception as e:
            print(e)
            sys.exit(1)
        return element

    def __delitem__(self, item):
        """
        deletes an item from the dict. The key is . separated,
        use it as follows: del config["a.b.c"]

        bug fix: the previous implementation (author-flagged as broken)
        only deleted a local reference and left the data untouched; it
        now removes the entry from the nested dictionary.

        :param item: dotted key
        """
        try:
            if "." in item:
                keys = item.split(".")
                element = self.data
                for key in keys[:-1]:
                    element = element[key]
                del element[keys[-1]]
            else:
                del self.data[item]
        except KeyError:
            path = self.config_path
            Console.error(
                "The key '{item}' could not be found in the yaml file "
                "'{path}'".format(**locals()))
            sys.exit(1)
        except Exception as e:
            print(e)
            sys.exit(1)

    def search(self, key, value=None):
        """
        search("cloudmesh.cloud.*.cm.active", True)

        :param key: dotted search pattern; '*' matches any component
        :param value: optional value the matched entries must have
        """
        flat = FlatDict(self.data, sep=".")
        return flat.search(key, value)

    def edit(self, attribute):
        """
        edits the dict specified by the attribute and fills out all TBD
        values interactively.

        :param attribute: dotted name of the section, e.g. cloudmesh.cloud.aws
        :type attribute: string
        """
        Console.ok("Filling out: {attribute}".format(attribute=attribute))
        try:
            config = Config()
            values = config[attribute]
            print("Editing the values for {attribute}"
                  .format(attribute=attribute))
            print("Current Values:")
            print(yaml.dump(values, indent=2))
            for key in values:
                if values[key] == "TBD":
                    result = input("Please enter new value for {key}: "
                                   .format(**locals()))
                    values[key] = result
            config.save()
        except Exception as e:
            print(e)
            Console.error(
                f"could not find the attribute '{attribute}' in the yaml file.")
| 31.266289 | 106 | 0.534294 |
acf22efadf749b0422ed182a04401aae6bf1fd21 | 13,620 | py | Python | app/core/views.py | walidham/iws2 | deb6fc22133d49ee955811060664267b615dd781 | [
"BSD-2-Clause"
] | 1 | 2018-03-26T18:35:03.000Z | 2018-03-26T18:35:03.000Z | app/core/views.py | walidham/iws2 | deb6fc22133d49ee955811060664267b615dd781 | [
"BSD-2-Clause"
] | null | null | null | app/core/views.py | walidham/iws2 | deb6fc22133d49ee955811060664267b615dd781 | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: Ling Thio <ling.thio@gmail.com>
from flask import redirect, render_template, render_template_string, Blueprint, jsonify
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted, roles_required
from app import app, db
from app.core.models import UserProfileForm, FeatureRequest, User, Product, UsersRoles, Role
from flask_wtf import csrf
from datetime import datetime
from sqlalchemy import func
# Blueprint grouping the core pages; mounted at the site root.
core_blueprint = Blueprint('core', __name__, url_prefix='/')
# The Home page is accessible to anyone
@core_blueprint.route('')
def home_page():
    """Render the guest page for anonymous visitors, the home page otherwise."""
    if current_user.is_authenticated:
        return render_template('core/home_page.html')
    return render_template('core/guest_page.html')
# The User page is accessible to authenticated users (users that have logged in)
@core_blueprint.route('user')
@login_required  # Limits access to authenticated users
def user_page():
    """Render the page shown to any logged-in user."""
    template = 'core/user_page.html'
    return render_template(template)
# The Admin page is accessible to users with the 'admin' role
@core_blueprint.route('admin')
@roles_accepted('admin')  # Limits access to users with the 'admin' role
def admin_page():
    """Render the administration landing page."""
    template = 'core/admin_page.html'
    return render_template(template)
@core_blueprint.route('user/profile', methods=['GET', 'POST'])
@login_required
def user_profile_page():
    """Display and process the profile form of the logged-in user."""
    form = UserProfileForm(request.form, current_user)
    valid_submission = request.method == 'POST' and form.validate()
    if not valid_submission:
        # GET request or invalid POST: (re)display the form
        return render_template('core/user_profile_page.html',
                               form=form)
    # Copy the submitted fields onto the user record and persist them
    form.populate_obj(current_user)
    db.session.commit()
    return redirect(url_for('core.home_page'))
# Register blueprint
# Attaches every route declared on core_blueprint to the application.
app.register_blueprint(core_blueprint)
# Feature Route
@app.route('/features')
@login_required
def feature_request():
    """List feature requests: clients see only their own, staff see all.

    Refactored to remove the duplicated render_template call in both
    role branches.
    """
    if current_user.roles[0].name == 'client':
        features = FeatureRequest.query.filter(
            FeatureRequest.user_id == current_user.id)
    else:
        features = FeatureRequest.query.all()
    return render_template('core/feature_requests.html',
                           features=features)
@app.route('/new_feature', methods=['POST'])
@login_required
def new_feature():
    """Create a FeatureRequest from the posted JSON and return its data.

    Clients create requests for themselves; IWS staff create them on
    behalf of the client given in ``client_id``. New requests are placed
    at the end of both the global and the owner's priority queue.

    Refactored: the two role branches were identical except for the
    owner of the request, so they are unified here.
    """
    if current_user.roles[0].name == 'client':
        owner_id = current_user.id
    else:
        owner_id = request.json['client_id']
    # global priority: append after all existing feature requests
    gp = db.session.query(func.count(FeatureRequest.id)).scalar() + 1
    # client priority: append after the owner's existing feature requests
    cp = db.session.query(func.count(FeatureRequest.id)).filter(
        FeatureRequest.user_id == owner_id).scalar() + 1
    date_object = datetime.strptime(request.json['target_date'], '%m-%d-%Y')
    feature = FeatureRequest(title=request.json['title'],
                             description=request.json['description'],
                             target_date=date_object,
                             ticket_url=request.json['ticket_url'],
                             user_id=owner_id,
                             product_id=request.json['product_id'],
                             global_priority=gp,
                             client_priority=cp)
    db.session.add(feature)
    db.session.commit()
    # NOTE(review): the response echoes request.json['client_id'] even for
    # client users (as the original code did) — confirm the front-end
    # always sends it.
    return jsonify({"title": request.json['title'],
                    "description": request.json['description'],
                    "client_priority": cp,
                    "global_priority": gp,
                    "target_date": request.json['target_date'],
                    "ticket_url": request.json['ticket_url'],
                    "client_id": request.json['client_id'],
                    "id": feature.id,
                    "product_id": request.json['product_id']})
@app.route('/save_priorities', methods=['POST'])
@login_required
def save_priorities():
    """Persist the client and global priority of one feature request.

    Refactored: both branches of the original role check were
    byte-identical, so the dead duplication was removed.
    """
    id_feature = request.json['id']
    client_priority = request.json['priority']
    global_pri = request.json['global_priority']
    fr = FeatureRequest.query.filter_by(id=id_feature).first()
    fr.global_priority = global_pri
    fr.client_priority = client_priority
    db.session.commit()
    # 'reponse' typo kept: it is part of the JSON API consumed by clients
    return jsonify(reponse=dict(result="ok"))
@app.route('/update_feature', methods=['POST'])
@login_required
def update_feature():
    """Update title, url, target date and product of an existing request."""
    feature_id = request.json['id']
    date_object = datetime.strptime(request.json['target_date'], '%m-%d-%Y')
    fr = FeatureRequest.query.filter_by(id=feature_id).first()
    if not fr:
        return jsonify(reponse=dict(result="error"))
    fr.title = request.json['title']
    fr.ticket_url = request.json['ticket_url']
    fr.target_date = date_object
    fr.product_id = request.json['product_id']
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/delete_feature', methods=['POST'])
@login_required
def delete_feature():
    """Delete the feature request named by the posted ``id``."""
    feature_id = request.json['id']
    fr = FeatureRequest.query.filter_by(id=feature_id).first()
    if not fr:
        return jsonify(reponse=dict(result="error"))
    FeatureRequest.query.filter_by(id=feature_id).delete()
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/features_list')
@login_required
def features_list():
    """Return feature requests as JSON.

    Clients get only their own requests ordered by client priority;
    staff get every request ordered by global priority. Refactored to
    remove the duplicated entries construction in both branches.
    """
    if current_user.roles[0].name == 'client':
        cur = FeatureRequest.query.filter(
            FeatureRequest.user_id == current_user.id).order_by(
            FeatureRequest.client_priority)
    else:
        cur = FeatureRequest.query.order_by(
            FeatureRequest.global_priority).all()
    entries = [dict(id=row.id, title=row.title,
                    target_date=row.target_date, description=row.description,
                    ticket_url=row.ticket_url,
                    client_priority=row.client_priority,
                    global_priority=row.global_priority,
                    client_id=row.user_id,
                    product_id=row.product_id) for row in cur]
    return jsonify(features=entries)
# Client Route
@app.route('/clients')
@roles_required('admin')
@login_required
def clients():
    """Render the admin page listing all users with the 'client' role."""
    client_users = User.query.join(User.roles).filter(
        Role.name == 'client').group_by(User).all()
    return render_template('core/clients.html',
                           clients=client_users)
@app.route('/clients_list')
@login_required
def clients_list():
    """Return all client accounts as JSON, ordered by id.

    NOTE(review): unlike /clients, this endpoint is not admin-only —
    confirm whether that is intended.
    """
    rows = User.query.join(User.roles).filter(
        Role.name == 'client').group_by(User).order_by(User.id).all()
    entries = [dict(company_name=row.company_name, email=row.email,
                    description="", id=row.id, last_name=row.last_name,
                    first_name=row.first_name, priority=row.priority)
               for row in rows]
    return jsonify(clients=entries)
@app.route('/new_client', methods=['POST'])
@roles_required('admin')
@login_required
def new_client():
    """Create a user with the 'client' role from the posted JSON.

    Fails with an error payload when the email is already registered.
    """
    email = request.json['email']
    if User.query.filter(User.email == email).first():
        return jsonify({"result": "Error", "msg": "email exist"})
    hashed = app.user_manager.hash_password(request.json['password'])
    user = User(email=email,
                first_name=request.json['first_name'],
                last_name=request.json['last_name'],
                password=hashed,
                company_name=request.json['company_name'],
                active=True,
                confirmed_at=datetime.utcnow())
    client_role = Role.query.filter(Role.name == 'client').first()
    user.roles.append(client_role)
    db.session.add(user)
    db.session.commit()
    return jsonify({"email": request.json['email'],
                    "first_name": request.json['first_name'],
                    "result": "OK",
                    "last_name": request.json['last_name'],
                    "company_name": request.json['company_name'],
                    })
# Product Route
@app.route('/products')
@roles_required('admin')
@login_required
def products():
    """Render the admin page listing all products."""
    all_products = Product.query.all()
    return render_template('core/products.html',
                           products=all_products)
@app.route('/products_list')
@login_required
def products_list():
    """Return every product as JSON."""
    rows = Product.query.all()
    entries = [dict(id=row.id, product_name=row.product_name,
                    description=row.description) for row in rows]
    return jsonify(products=entries)
@app.route('/new_product', methods=['POST'])
@roles_required('admin')
@login_required
def new_product():
    """Create a product from the posted JSON unless the name is taken."""
    name = request.json['product_name']
    if Product.query.filter(Product.product_name == name).first():
        return jsonify({"result": "Error", "msg": "product name exist"})
    product = Product(product_name=name,
                      description=request.json['description'])
    db.session.add(product)
    db.session.commit()
    return jsonify({"product_name": request.json['product_name'],
                    "description": request.json['description'],
                    "id": product.id,
                    "result": "OK"
                    })
# User Route
@app.route('/users')
@roles_required('admin')
@login_required
def users():
    """Render the admin page listing all users with the 'user' (staff) role."""
    staff = User.query.join(User.roles).filter(
        Role.name == 'user').group_by(User).all()
    return render_template('core/users.html',
                           users=staff)
@app.route('/users_list')
@roles_required('admin')
@login_required
def users_list():
    """Return all staff accounts as JSON, ordered by id."""
    rows = User.query.join(User.roles).filter(
        Role.name == 'user').group_by(User).order_by(User.id).all()
    entries = [dict(email=row.email, id=row.id, last_name=row.last_name,
                    first_name=row.first_name) for row in rows]
    return jsonify(users=entries)
@app.route('/new_user', methods=['POST'])
@roles_required('admin')
@login_required
def new_user():
    """Create a user with the 'user' (staff) role from the posted JSON.

    Fails with an error payload when the email is already registered.
    """
    email = request.json['email']
    if User.query.filter(User.email == email).first():
        return jsonify({"result": "Error", "msg": "email exist"})
    hashed = app.user_manager.hash_password(request.json['password'])
    user = User(email=email,
                first_name=request.json['first_name'],
                last_name=request.json['last_name'],
                password=hashed,
                active=True,
                confirmed_at=datetime.utcnow())
    staff_role = Role.query.filter(Role.name == 'user').first()
    user.roles.append(staff_role)
    db.session.add(user)
    db.session.commit()
    return jsonify({"email": request.json['email'],
                    "first_name": request.json['first_name'],
                    "result": "OK",
                    "last_name": request.json['last_name'],
                    "id": user.id
                    })
| 37.01087 | 137 | 0.620117 |
acf22ff1c506df5cf5c5e0a563bb46f3709ba5ff | 3,555 | py | Python | revlib/utils.py | AIshutin/revlib | 7dd203d5b709c6a23701b00e00c02f50f5c3de81 | [
"MIT"
] | null | null | null | revlib/utils.py | AIshutin/revlib | 7dd203d5b709c6a23701b00e00c02f50f5c3de81 | [
"MIT"
] | null | null | null | revlib/utils.py | AIshutin/revlib | 7dd203d5b709c6a23701b00e00c02f50f5c3de81 | [
"MIT"
] | null | null | null | import torch
from torch import nn
class IOdataset(torch.utils.data.Dataset):
    """Dataset pairing an input collection with an output collection.

    ``input`` and ``output`` must be of the same type and length; they
    may be tensors (first dimension is the sample axis) or sequences.
    """

    def __init__(self, input, output):
        super().__init__()
        assert(type(input) == type(output))
        if isinstance(input, torch.Tensor):
            self.len = input.shape[0]
            assert(self.len == output.shape[0])
        else:
            self.len = len(input)
            assert(self.len == len(output))
        self.input = input
        self.output = output

    def __len__(self):
        return self.len

    def __getitem__(self, ind):
        return (self.input[ind], self.output[ind])

    def apply_transform(self, transform, where="output"):
        """Apply *transform* element-wise, in place, to one side of the pair.

        :param transform: callable applied to each element
        :param where: "output" (default) transforms the outputs, anything
                      else transforms the inputs

        Bug fix: the previous version assigned to the undefined
        module-level names ``output``/``input`` instead of the instance
        attributes, raising NameError/TypeError when called.
        """
        if where == "output":
            for i in range(len(self.output)):
                self.output[i] = transform(self.output[i])
        else:
            for i in range(len(self.input)):
                self.input[i] = transform(self.input[i])
class NoLabelsDataset(torch.utils.data.Dataset):
    """Wrap a labeled dataset so indexing yields only the sample part.

    Each item of the wrapped dataset is expected to be indexable; item 0
    (the sample) is returned and the rest (labels) is discarded.
    """

    def __init__(self, dataset):
        super().__init__()
        self.data = dataset

    def __len__(self):
        return len(self.data)

    def __getitem__(self, ind):
        sample_with_label = self.data[ind]
        return sample_with_label[0]
def calc_parameters(lay):
    """Return the total number of scalar parameters of module *lay*.

    Objects without a ``parameters()`` method count as having 0
    parameters (the AttributeError is swallowed, as before).
    """
    try:
        # Tensor.numel() is the product of all shape dimensions, which is
        # what the previous manual loop computed.
        return sum(tensor.numel() for tensor in lay.parameters())
    except AttributeError:
        return 0
def extract_layers(net):
    """Recursively flatten *net* into a list of its primitive layers.

    ``nn.Sequential`` and ``LayBlock`` containers are expanded in place;
    every other child module is returned as-is, in traversal order.
    """
    flat = []
    for child in net.children():
        is_container = (isinstance(child, nn.Sequential)
                        or isinstance(child, LayBlock))
        if is_container:
            flat.extend(extract_layers(child))
        else:
            flat.append(child)
    return flat
# All torch.nn activation layer types recognised by check_if_activation().
ACTIVATIONS = [nn.ReLU, nn.ReLU6, nn.ELU, nn.SELU, nn.PReLU, nn.LeakyReLU,
               nn.Threshold, nn.Sigmoid, nn.Tanh, nn.LogSigmoid,
               nn.Softplus, nn.Softsign, nn.Softmin,
               nn.Softmax, nn.Softmax2d, nn.LogSoftmax]
def check_if_activation(lay):
    """Return True if *lay* is an instance of a known activation layer."""
    # isinstance accepts a tuple of classes, which replaces the manual
    # loop over ACTIVATIONS
    return isinstance(lay, tuple(ACTIVATIONS))
class LayBlock(torch.nn.Module):
    """A sequential group of layers that can be replaced by a reversed version.

    ``ind`` is an index of the block within a larger network;
    ``ishapes``/``oshapes`` hold the per-layer input/output shapes used to
    build the reversed layers.
    """
    def __init__(self, lays, ind, ishapes, oshapes):
        super().__init__()
        self.lays = nn.Sequential(*lays)
        self.ind = ind
        # set to True once revert() has run; revert is one-shot
        self.reverted = False
        self.ishapes = ishapes
        self.oshapes = oshapes
    def revert(self):
        """Replace the contained layers with their reversed counterparts.

        May only be called once (asserted). Non-activation layers are
        reversed via ``reverse_layers.get_reversed`` and their order is
        flipped; the type of the last activation found in the block is
        appended at the end. Returns self so calls can be chained.
        """
        assert(self.reverted is False)
        self.reverted = True
        # default activation when the block contains none
        actf = nn.ReLU
        layers = extract_layers(self.lays)
        # shift the block index to the position of its last contained layer
        self.ind += len(layers) - 1
        for lay in layers:
            if check_if_activation(lay):
                actf = type(lay)
        # project-local module; imported at call time, inside the method
        import reverse_layers as rl
        lays = []
        # reverse each non-activation layer, then flip the order
        lays = [rl.get_reversed(lay=layers[i], input_shape=self.ishapes[i], \
                output_shape=self.oshapes[i]) for i in range(len(layers)) \
                if not check_if_activation(layers[i])][::-1]
        lays.append(actf())
        self.lays = nn.Sequential(*lays)
        return self
    def forward(self, X):
        return self.lays(X)
    def __str__(self):
        return str(self.lays)
    def __repr__(self):
        return self.lays.__repr__()
def apply_net(net, data, device=torch.device('cpu'), batch_size=1):
    """Run *net* over *data* batch by batch and return a list of
    per-sample outputs (each with the batch dimension squeezed away).

    Args:
        net: the model to apply; its output is moved back to the CPU.
        data: a torch-compatible dataset / sequence of samples.
        device: device to run inference on (default CPU).
        batch_size: DataLoader batch size (default 1).
    """
    dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False)
    result = []
    for batch in dataloader:
        # Fix: Tensor.to() is NOT in-place — the original discarded its
        # return value, so batches silently stayed on their old device.
        # (Also renamed the loop variable, which shadowed ``data``.)
        batch = batch.to(device)
        res = net(batch).cpu()
        result += [el.squeeze(0) for el in res.chunk(res.shape[0], dim=0)]
    return result
| 27.992126 | 88 | 0.576371 |
acf23071332567b1e014d3625f7703989ac3f937 | 15,756 | py | Python | tests/providers/kubernetes/kubernetes_virtual_machine_test.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 3 | 2018-04-28T13:06:14.000Z | 2020-06-09T02:39:44.000Z | tests/providers/kubernetes/kubernetes_virtual_machine_test.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 1 | 2018-03-15T21:01:27.000Z | 2018-03-15T21:01:27.000Z | tests/providers/kubernetes/kubernetes_virtual_machine_test.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 6 | 2019-06-11T18:59:57.000Z | 2021-03-02T19:14:42.000Z | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for providers.kubernetes.kubernetes_virtual_machine."""
# pylint: disable=not-context-manager
import builtins
import json
import unittest
from absl import flags as flgs
import contextlib2
import mock
from perfkitbenchmarker import os_types
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.azure import util as azure_util
from perfkitbenchmarker.providers.kubernetes import kubernetes_pod_spec
from perfkitbenchmarker.providers.kubernetes import kubernetes_virtual_machine
from tests import pkb_common_test_case
FLAGS = flgs.FLAGS
# Disable anti-affinity so pod bodies in the expected-JSON fixtures stay small.
FLAGS.kubernetes_anti_affinity = False

# Shared fixture values used throughout the test cases below.
_COMPONENT = 'test_component'
_RUN_URI = 'fake_run_uri'
_NAME = 'fake_name'
_KUBECTL = 'fake_kubectl_path'
_KUBECONFIG = 'fake_kubeconfig_path'
_EXPECTED_CALL_BODY_WITHOUT_GPUS = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"workingDir": "/root",
"volumeMounts": [],
"image": "test_image",
"securityContext": {
"privileged": true
},
"command": ["tail", "-f", "/dev/null"]
}]
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
_EXPECTED_CALL_BODY_WITH_2_GPUS = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"volumeMounts": [],
"workingDir": "/root",
"image": "test_image",
"securityContext": {
"privileged": true
},
"resources" : {
"limits": {
"nvidia.com/gpu": "2"
},
"requests": {
"nvidia.com/gpu": "2"
}
},
"command": ["tail", "-f", "/dev/null"]
}]
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
_EXPECTED_CALL_BODY_WITH_NVIDIA_CUDA_IMAGE = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"volumeMounts": [],
"workingDir": "/root",
"image": "nvidia/cuda:9.0-devel-ubuntu16.04",
"securityContext": {
"privileged": true
},
"command": ["tail", "-f", "/dev/null"]
}]
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
_EXPECTED_CALL_BODY_WITH_VM_GROUP = """
{
"spec": {
"dnsPolicy":
"ClusterFirst",
"volumes": [],
"containers": [{
"name": "fake_name",
"volumeMounts": [],
"workingDir": "/root",
"image": "test_image",
"securityContext": {
"privileged": true
},
"command": ["tail", "-f", "/dev/null"]
}],
"nodeSelector": {
"pkb_nodepool": "my-vm-group"
}
},
"kind": "Pod",
"metadata": {
"name": "fake_name",
"labels": {
"pkb": "fake_name"
}
},
"apiVersion": "v1"
}
"""
def get_write_mock_from_temp_file_mock(temp_file_mock):
  """Return the write() mock from the NamedTemporaryFile mock.

  kubernetes_helper.py uses NamedTemporaryFile inside a context manager,
  so the instance whose write() calls we want to assert on is the object
  returned by __enter__() on the mocked temp file.

  Args:
    temp_file_mock: mock object of the NamedTemporaryFile() contextManager
  """
  temp_file_instance = temp_file_mock()
  entered_instance = temp_file_instance.__enter__()
  return entered_instance.write
@contextlib2.contextmanager
def patch_critical_objects(stdout='', stderr='', return_code=0, flags=FLAGS):
  """Patch the collaborators the Kubernetes VM code touches during tests.

  Sets the fake flag values, patches builtins.open / PrependTempDir /
  NamedTemporaryFile / IssueCommand, and yields the two mocks tests need:

  Yields:
    (issue_command, temp_file): the IssueCommand mock (pre-loaded to
    return (stdout, stderr, return_code)) and the NamedTemporaryFile
    mock, so callers can inspect issued commands and written pod bodies.
  """
  with contextlib2.ExitStack() as stack:
    retval = (stdout, stderr, return_code)
    flags.gcloud_path = 'gcloud'
    flags.run_uri = _RUN_URI
    flags.kubectl = _KUBECTL
    flags.kubeconfig = _KUBECONFIG
    stack.enter_context(mock.patch(builtins.__name__ + '.open'))
    stack.enter_context(mock.patch(vm_util.__name__ + '.PrependTempDir'))
    # Save and return the temp_file mock here so that we can access the write()
    # call on the instance that the mock returned. This allows us to verify
    # that the body of the file is what we expect it to be (useful for
    # verifying that the pod.yml body was written correctly).
    temp_file = stack.enter_context(
        mock.patch(vm_util.__name__ + '.NamedTemporaryFile'))
    issue_command = stack.enter_context(
        mock.patch(vm_util.__name__ + '.IssueCommand', return_value=retval))
    yield issue_command, temp_file
class TestKubernetesVirtualMachine(
    pkb_common_test_case.TestOsMixin,
    kubernetes_virtual_machine.KubernetesVirtualMachine):
  """KubernetesVirtualMachine combined with the test OS mixin,
  so the tests below can instantiate it directly from a pod spec.
  """
  pass
class BaseKubernetesVirtualMachineTestCase(
    pkb_common_test_case.PkbCommonTestCase):
  """Shared base class adding a JSON-aware equality assertion."""

  def assertJsonEqual(self, str1, str2):
    """Assert that two JSON strings encode the same document,
    ignoring key order and whitespace.
    """
    def canonicalize(raw):
      return json.dumps(json.loads(raw), sort_keys=True)
    self.assertEqual(canonicalize(str1), canonicalize(str2))
class KubernetesResourcesTestCase(
    BaseKubernetesVirtualMachineTestCase):
  """Tests for pod resource limits/requests and their exported metadata."""

  @staticmethod
  def create_virtual_machine_spec():
    """Build a pod spec with CPU/memory limits+requests and two k80 GPUs."""
    spec = kubernetes_pod_spec.KubernetesPodSpec(
        _COMPONENT,
        resource_limits={
            'cpus': 2,
            'memory': '5GiB'
        },
        resource_requests={
            'cpus': 1.5,
            'memory': '4GiB'
        },
        gpu_count=2,
        gpu_type='k80',
    )
    return spec

  def testPodResourceLimits(self):
    """Spec parsing converts the '5GiB' memory limit into 5120 MiB."""
    spec = self.create_virtual_machine_spec()

    self.assertEqual(spec.resource_limits.cpus, 2)
    self.assertEqual(spec.resource_limits.memory, 5120)

  def testCreatePodResourceBody(self):
    """The generated resource body carries cpu/memory/GPU for both
    limits and requests."""
    spec = self.create_virtual_machine_spec()
    with patch_critical_objects():
      kub_vm = TestKubernetesVirtualMachine(spec)

      expected = {
          'limits': {
              'cpu': '2',
              'memory': '5120Mi',
              'nvidia.com/gpu': '2'
          },
          'requests': {
              'cpu': '1.5',
              'memory': '4096Mi',
              'nvidia.com/gpu': '2'
          }
      }
      actual = kub_vm._BuildResourceBody()
      self.assertDictEqual(expected, actual)

  def testGetMetadata(self):
    """Resource metadata exposes the parsed limit/request numbers."""
    spec = self.create_virtual_machine_spec()
    with patch_critical_objects():
      kub_vm = TestKubernetesVirtualMachine(spec)
      subset_of_expected_metadata = {
          'pod_cpu_limit': 2,
          'pod_memory_limit_mb': 5120,
          'pod_cpu_request': 1.5,
          'pod_memory_request_mb': 4096,
      }
      actual = kub_vm.GetResourceMetadata()
      self.assertDictContainsSubset(subset_of_expected_metadata, actual)
class KubernetesVirtualMachineOsTypesTestCase(
    BaseKubernetesVirtualMachineTestCase):
  """Tests that an OS type maps to the expected container image."""

  @staticmethod
  def create_kubernetes_vm(os_type):
    """Create a Kubernetes VM of *os_type* (side effects captured by mocks)."""
    spec = kubernetes_pod_spec.KubernetesPodSpec(
        _COMPONENT)
    vm_class = virtual_machine.GetVmClass(providers.KUBERNETES,
                                          os_type)
    kub_vm = vm_class(spec)
    # Skip waiting on the (mocked) pod to come up.
    kub_vm._WaitForPodBootCompletion = lambda: None
    kub_vm._Create()

  def testCreateUbuntu1604(self):
    with patch_critical_objects() as (_, temp_file):
      self.create_kubernetes_vm(os_types.UBUNTU1604)

      write_mock = get_write_mock_from_temp_file_mock(temp_file)
      create_json = json.loads(write_mock.call_args[0][0])
      self.assertEqual(create_json['spec']['containers'][0]['image'],
                       'ubuntu:16.04')
class KubernetesVirtualMachineVmGroupAffinityTestCase(
    BaseKubernetesVirtualMachineTestCase):
  """Tests that setting vm_group adds a pkb_nodepool nodeSelector."""

  @staticmethod
  def create_kubernetes_vm():
    """Create a VM with vm_group set so the pod body gains a nodeSelector."""
    spec = kubernetes_pod_spec.KubernetesPodSpec(
        _COMPONENT,
        image='test_image',
        install_packages=False,
        machine_type='test_machine_type',
        zone='test_zone')
    kub_vm = TestKubernetesVirtualMachine(spec)
    kub_vm.name = _NAME
    kub_vm.vm_group = 'my_vm_group'
    kub_vm._WaitForPodBootCompletion = lambda: None  # pylint: disable=invalid-name
    kub_vm._Create()

  def testCreateVmGroupAffinity(self):
    with patch_critical_objects() as (_, temp_file):
      self.create_kubernetes_vm()

      write_mock = get_write_mock_from_temp_file_mock(temp_file)
      self.assertJsonEqual(
          write_mock.call_args[0][0],
          _EXPECTED_CALL_BODY_WITH_VM_GROUP
      )
class KubernetesVirtualMachineTestCase(
    BaseKubernetesVirtualMachineTestCase):
  """Tests for pod creation and preprovisioned-data downloads."""

  @staticmethod
  def create_virtual_machine_spec():
    """Build a minimal pod spec using the shared fake fixtures."""
    spec = kubernetes_pod_spec.KubernetesPodSpec(
        _COMPONENT,
        image='test_image',
        install_packages=False,
        machine_type='test_machine_type',
        zone='test_zone')
    return spec

  def testCreate(self):
    """_Create() issues exactly one 'kubectl ... create -f' command."""
    spec = self.create_virtual_machine_spec()
    with patch_critical_objects() as (issue_command, _):
      kub_vm = TestKubernetesVirtualMachine(spec)
      kub_vm._WaitForPodBootCompletion = lambda: None  # pylint: disable=invalid-name
      kub_vm._Create()
      command = issue_command.call_args[0][0]
      command_string = ' '.join(command[:4])

      self.assertEqual(issue_command.call_count, 1)
      self.assertIn('{0} --kubeconfig={1} create -f'.format(
          _KUBECTL, _KUBECONFIG), command_string)

  def testCreatePodBodyWrittenCorrectly(self):
    """The pod body written to the temp file matches the expected JSON."""
    spec = self.create_virtual_machine_spec()
    with patch_critical_objects() as (_, temp_file):
      kub_vm = TestKubernetesVirtualMachine(spec)
      # Need to set the name explicitly on the instance because the test
      # running is currently using a single PKB instance, so the BaseVm
      # instance counter is at an unpredictable number at this stage, and it is
      # used to set the name.
      kub_vm.name = _NAME
      kub_vm._WaitForPodBootCompletion = lambda: None
      kub_vm._Create()

      write_mock = get_write_mock_from_temp_file_mock(temp_file)
      self.assertJsonEqual(
          write_mock.call_args[0][0],
          _EXPECTED_CALL_BODY_WITHOUT_GPUS
      )

  def testDownloadPreprovisionedDataAws(self):
    """On AWS clusters preprovisioned data is pulled via s3."""
    spec = self.create_virtual_machine_spec()
    FLAGS.container_cluster_cloud = 'AWS'
    with patch_critical_objects(flags=FLAGS) as (issue_command, _):
      kub_vm = (
          kubernetes_virtual_machine.Ubuntu1604BasedKubernetesVirtualMachine(
              spec))
      kub_vm.DownloadPreprovisionedData('path', 'name', 'filename')

      command = issue_command.call_args[0][0]
      command_string = ' '.join(command)
      self.assertIn('s3', command_string)

  def testDownloadPreprovisionedDataAzure(self):
    """On Azure clusters the az CLI with a connection string is used."""
    azure_util.GetAzureStorageConnectionString = mock.Mock(return_value='')
    spec = self.create_virtual_machine_spec()
    FLAGS.container_cluster_cloud = 'Azure'
    with patch_critical_objects() as (issue_command, _):
      kub_vm = (
          kubernetes_virtual_machine.Ubuntu1604BasedKubernetesVirtualMachine(
              spec))
      kub_vm.DownloadPreprovisionedData('path', 'name', 'filename')

      command = issue_command.call_args[0][0]
      command_string = ' '.join(command)
      self.assertIn('az storage blob download', command_string)
      self.assertIn('--connection-string', command_string)

  def testDownloadPreprovisionedDataGcp(self):
    """On GCP clusters gsutil is used."""
    spec = self.create_virtual_machine_spec()
    FLAGS.container_cluster_cloud = 'GCP'
    with patch_critical_objects() as (issue_command, _):
      kub_vm = (
          kubernetes_virtual_machine.Ubuntu1604BasedKubernetesVirtualMachine(
              spec))
      kub_vm.DownloadPreprovisionedData('path', 'name', 'filename')

      command = issue_command.call_args[0][0]
      command_string = ' '.join(command)
      self.assertIn('gsutil', command_string)
class KubernetesVirtualMachineWithGpusTestCase(
    BaseKubernetesVirtualMachineTestCase):
  """Tests pod creation when the spec requests GPUs."""

  @staticmethod
  def create_virtual_machine_spec():
    """Build a pod spec requesting two k80 GPUs."""
    spec = kubernetes_pod_spec.KubernetesPodSpec(
        _COMPONENT,
        image='test_image',
        gpu_count=2,
        gpu_type='k80',
        install_packages=False,
        machine_type='test_machine_type',
        zone='test_zone')
    return spec

  def testCreate(self):
    """_Create() still issues a single 'kubectl ... create -f'."""
    spec = self.create_virtual_machine_spec()
    with patch_critical_objects() as (issue_command, _):
      kub_vm = TestKubernetesVirtualMachine(spec)
      kub_vm._WaitForPodBootCompletion = lambda: None
      kub_vm._Create()
      command = issue_command.call_args[0][0]
      command_string = ' '.join(command[:4])

      self.assertEqual(issue_command.call_count, 1)
      self.assertIn('{0} --kubeconfig={1} create -f'.format(
          _KUBECTL, _KUBECONFIG), command_string)

  def testCreatePodBodyWrittenCorrectly(self):
    """The pod body includes the nvidia.com/gpu limits and requests."""
    spec = self.create_virtual_machine_spec()
    with patch_critical_objects() as (_, temp_file):
      kub_vm = TestKubernetesVirtualMachine(spec)
      # Need to set the name explicitly on the instance because the test
      # running is currently using a single PKB instance, so the BaseVm
      # instance counter is at an unpredictable number at this stage, and it is
      # used to set the name.
      kub_vm.name = _NAME
      kub_vm._WaitForPodBootCompletion = lambda: None
      kub_vm._Create()

      write_mock = get_write_mock_from_temp_file_mock(temp_file)
      self.assertJsonEqual(
          write_mock.call_args[0][0],
          _EXPECTED_CALL_BODY_WITH_2_GPUS
      )
class KubernetesVirtualMachineWithNvidiaCudaImage(
    BaseKubernetesVirtualMachineTestCase):
  """Tests that the CUDA OS type selects the nvidia/cuda image."""

  @staticmethod
  def create_virtual_machine_spec():
    """Build a pod spec with no explicit image (OS type chooses it)."""
    spec = kubernetes_pod_spec.KubernetesPodSpec(
        _COMPONENT,
        install_packages=False,
        machine_type='test_machine_type',
        zone='test_zone')
    return spec

  def testCreatePodBodyWrittenCorrectly(self):
    spec = self.create_virtual_machine_spec()
    vm_class = virtual_machine.GetVmClass(providers.KUBERNETES,
                                          os_types.UBUNTU1604_CUDA9)
    with patch_critical_objects() as (_, temp_file):
      kub_vm = vm_class(spec)
      # Need to set the name explicitly on the instance because the test
      # running is currently using a single PKB instance, so the BaseVm
      # instance counter is at an unpredictable number at this stage, and it is
      # used to set the name.
      kub_vm.name = _NAME
      kub_vm._WaitForPodBootCompletion = lambda: None
      kub_vm._Create()

      write_mock = get_write_mock_from_temp_file_mock(temp_file)
      self.assertJsonEqual(
          write_mock.call_args[0][0],
          _EXPECTED_CALL_BODY_WITH_NVIDIA_CUDA_IMAGE
      )
if __name__ == '__main__':
unittest.main()
| 31.261905 | 85 | 0.653275 |
acf230a9df6fefe9bfdb10d82a3179058c056d69 | 6,683 | py | Python | loldib/getratings/models/NA/na_trundle/na_trundle_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_trundle/na_trundle_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_trundle/na_trundle_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
class NA_Trundle_Bot_Aatrox(Ratings):
pass
class NA_Trundle_Bot_Ahri(Ratings):
pass
class NA_Trundle_Bot_Akali(Ratings):
pass
class NA_Trundle_Bot_Alistar(Ratings):
pass
class NA_Trundle_Bot_Amumu(Ratings):
pass
class NA_Trundle_Bot_Anivia(Ratings):
pass
class NA_Trundle_Bot_Annie(Ratings):
pass
class NA_Trundle_Bot_Ashe(Ratings):
pass
class NA_Trundle_Bot_AurelionSol(Ratings):
pass
class NA_Trundle_Bot_Azir(Ratings):
pass
class NA_Trundle_Bot_Bard(Ratings):
pass
class NA_Trundle_Bot_Blitzcrank(Ratings):
pass
class NA_Trundle_Bot_Brand(Ratings):
pass
class NA_Trundle_Bot_Braum(Ratings):
pass
class NA_Trundle_Bot_Caitlyn(Ratings):
pass
class NA_Trundle_Bot_Camille(Ratings):
pass
class NA_Trundle_Bot_Cassiopeia(Ratings):
pass
class NA_Trundle_Bot_Chogath(Ratings):
pass
class NA_Trundle_Bot_Corki(Ratings):
pass
class NA_Trundle_Bot_Darius(Ratings):
pass
class NA_Trundle_Bot_Diana(Ratings):
pass
class NA_Trundle_Bot_Draven(Ratings):
pass
class NA_Trundle_Bot_DrMundo(Ratings):
pass
class NA_Trundle_Bot_Ekko(Ratings):
pass
class NA_Trundle_Bot_Elise(Ratings):
pass
class NA_Trundle_Bot_Evelynn(Ratings):
pass
class NA_Trundle_Bot_Ezreal(Ratings):
pass
class NA_Trundle_Bot_Fiddlesticks(Ratings):
pass
class NA_Trundle_Bot_Fiora(Ratings):
pass
class NA_Trundle_Bot_Fizz(Ratings):
pass
class NA_Trundle_Bot_Galio(Ratings):
pass
class NA_Trundle_Bot_Gangplank(Ratings):
pass
class NA_Trundle_Bot_Garen(Ratings):
pass
class NA_Trundle_Bot_Gnar(Ratings):
pass
class NA_Trundle_Bot_Gragas(Ratings):
pass
class NA_Trundle_Bot_Graves(Ratings):
pass
class NA_Trundle_Bot_Hecarim(Ratings):
pass
class NA_Trundle_Bot_Heimerdinger(Ratings):
pass
class NA_Trundle_Bot_Illaoi(Ratings):
pass
class NA_Trundle_Bot_Irelia(Ratings):
pass
class NA_Trundle_Bot_Ivern(Ratings):
pass
class NA_Trundle_Bot_Janna(Ratings):
pass
class NA_Trundle_Bot_JarvanIV(Ratings):
pass
class NA_Trundle_Bot_Jax(Ratings):
pass
class NA_Trundle_Bot_Jayce(Ratings):
pass
class NA_Trundle_Bot_Jhin(Ratings):
pass
class NA_Trundle_Bot_Jinx(Ratings):
pass
class NA_Trundle_Bot_Kalista(Ratings):
pass
class NA_Trundle_Bot_Karma(Ratings):
pass
class NA_Trundle_Bot_Karthus(Ratings):
pass
class NA_Trundle_Bot_Kassadin(Ratings):
pass
class NA_Trundle_Bot_Katarina(Ratings):
pass
class NA_Trundle_Bot_Kayle(Ratings):
pass
class NA_Trundle_Bot_Kayn(Ratings):
pass
class NA_Trundle_Bot_Kennen(Ratings):
pass
class NA_Trundle_Bot_Khazix(Ratings):
pass
class NA_Trundle_Bot_Kindred(Ratings):
pass
class NA_Trundle_Bot_Kled(Ratings):
pass
class NA_Trundle_Bot_KogMaw(Ratings):
pass
class NA_Trundle_Bot_Leblanc(Ratings):
pass
class NA_Trundle_Bot_LeeSin(Ratings):
pass
class NA_Trundle_Bot_Leona(Ratings):
pass
class NA_Trundle_Bot_Lissandra(Ratings):
pass
class NA_Trundle_Bot_Lucian(Ratings):
pass
class NA_Trundle_Bot_Lulu(Ratings):
pass
class NA_Trundle_Bot_Lux(Ratings):
pass
class NA_Trundle_Bot_Malphite(Ratings):
pass
class NA_Trundle_Bot_Malzahar(Ratings):
pass
class NA_Trundle_Bot_Maokai(Ratings):
pass
class NA_Trundle_Bot_MasterYi(Ratings):
pass
class NA_Trundle_Bot_MissFortune(Ratings):
pass
class NA_Trundle_Bot_MonkeyKing(Ratings):
pass
class NA_Trundle_Bot_Mordekaiser(Ratings):
pass
class NA_Trundle_Bot_Morgana(Ratings):
pass
class NA_Trundle_Bot_Nami(Ratings):
pass
class NA_Trundle_Bot_Nasus(Ratings):
pass
class NA_Trundle_Bot_Nautilus(Ratings):
pass
class NA_Trundle_Bot_Nidalee(Ratings):
pass
class NA_Trundle_Bot_Nocturne(Ratings):
pass
class NA_Trundle_Bot_Nunu(Ratings):
pass
class NA_Trundle_Bot_Olaf(Ratings):
pass
class NA_Trundle_Bot_Orianna(Ratings):
pass
class NA_Trundle_Bot_Ornn(Ratings):
pass
class NA_Trundle_Bot_Pantheon(Ratings):
pass
class NA_Trundle_Bot_Poppy(Ratings):
pass
class NA_Trundle_Bot_Quinn(Ratings):
pass
class NA_Trundle_Bot_Rakan(Ratings):
pass
class NA_Trundle_Bot_Rammus(Ratings):
pass
class NA_Trundle_Bot_RekSai(Ratings):
pass
class NA_Trundle_Bot_Renekton(Ratings):
pass
class NA_Trundle_Bot_Rengar(Ratings):
pass
class NA_Trundle_Bot_Riven(Ratings):
pass
class NA_Trundle_Bot_Rumble(Ratings):
pass
class NA_Trundle_Bot_Ryze(Ratings):
pass
class NA_Trundle_Bot_Sejuani(Ratings):
pass
class NA_Trundle_Bot_Shaco(Ratings):
pass
class NA_Trundle_Bot_Shen(Ratings):
pass
class NA_Trundle_Bot_Shyvana(Ratings):
pass
class NA_Trundle_Bot_Singed(Ratings):
pass
class NA_Trundle_Bot_Sion(Ratings):
pass
class NA_Trundle_Bot_Sivir(Ratings):
pass
class NA_Trundle_Bot_Skarner(Ratings):
pass
class NA_Trundle_Bot_Sona(Ratings):
pass
class NA_Trundle_Bot_Soraka(Ratings):
pass
class NA_Trundle_Bot_Swain(Ratings):
pass
class NA_Trundle_Bot_Syndra(Ratings):
pass
class NA_Trundle_Bot_TahmKench(Ratings):
pass
class NA_Trundle_Bot_Taliyah(Ratings):
pass
class NA_Trundle_Bot_Talon(Ratings):
pass
class NA_Trundle_Bot_Taric(Ratings):
pass
class NA_Trundle_Bot_Teemo(Ratings):
pass
class NA_Trundle_Bot_Thresh(Ratings):
pass
class NA_Trundle_Bot_Tristana(Ratings):
pass
class NA_Trundle_Bot_Trundle(Ratings):
pass
class NA_Trundle_Bot_Tryndamere(Ratings):
pass
class NA_Trundle_Bot_TwistedFate(Ratings):
pass
class NA_Trundle_Bot_Twitch(Ratings):
pass
class NA_Trundle_Bot_Udyr(Ratings):
pass
class NA_Trundle_Bot_Urgot(Ratings):
pass
class NA_Trundle_Bot_Varus(Ratings):
pass
class NA_Trundle_Bot_Vayne(Ratings):
pass
class NA_Trundle_Bot_Veigar(Ratings):
pass
class NA_Trundle_Bot_Velkoz(Ratings):
pass
class NA_Trundle_Bot_Vi(Ratings):
pass
class NA_Trundle_Bot_Viktor(Ratings):
pass
class NA_Trundle_Bot_Vladimir(Ratings):
pass
class NA_Trundle_Bot_Volibear(Ratings):
pass
class NA_Trundle_Bot_Warwick(Ratings):
pass
class NA_Trundle_Bot_Xayah(Ratings):
pass
class NA_Trundle_Bot_Xerath(Ratings):
pass
class NA_Trundle_Bot_XinZhao(Ratings):
pass
class NA_Trundle_Bot_Yasuo(Ratings):
pass
class NA_Trundle_Bot_Yorick(Ratings):
pass
class NA_Trundle_Bot_Zac(Ratings):
pass
class NA_Trundle_Bot_Zed(Ratings):
pass
class NA_Trundle_Bot_Ziggs(Ratings):
pass
class NA_Trundle_Bot_Zilean(Ratings):
pass
class NA_Trundle_Bot_Zyra(Ratings):
pass
| 16.026379 | 46 | 0.77151 |
acf23136850d3b22b78cde15a43c868cb703debe | 43,340 | py | Python | ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux platform implementation."""
from __future__ import division
import base64
import errno
import os
import re
import socket
import struct
import sys
import warnings
from psutil import _common
from psutil import _psposix
from psutil._common import (isfile_strict, usage_percent, deprecated)
from psutil._compat import PY3, xrange, namedtuple, wraps, b, defaultdict
import _psutil_linux as cext
import _psutil_posix
__extra__all__ = [
# io prio constants
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
# connection status constants
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
# other
"phymem_buffers", "cached_phymem"]
# --- constants
HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
# RLIMIT_* constants, not guaranteed to be present on all kernels
if HAS_PRLIMIT:
for name in dir(cext):
if name.startswith('RLIM'):
__extra__all__.append(name)
# Number of clock ticks per second
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
BOOT_TIME = None # set later
DEFAULT_ENCODING = sys.getdefaultencoding()
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
# taken from /fs/proc/array.c
PROC_STATUSES = {
"R": _common.STATUS_RUNNING,
"S": _common.STATUS_SLEEPING,
"D": _common.STATUS_DISK_SLEEP,
"T": _common.STATUS_STOPPED,
"t": _common.STATUS_TRACING_STOP,
"Z": _common.STATUS_ZOMBIE,
"X": _common.STATUS_DEAD,
"x": _common.STATUS_DEAD,
"K": _common.STATUS_WAKE_KILL,
"W": _common.STATUS_WAKING
}
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
TCP_STATUSES = {
"01": _common.CONN_ESTABLISHED,
"02": _common.CONN_SYN_SENT,
"03": _common.CONN_SYN_RECV,
"04": _common.CONN_FIN_WAIT1,
"05": _common.CONN_FIN_WAIT2,
"06": _common.CONN_TIME_WAIT,
"07": _common.CONN_CLOSE,
"08": _common.CONN_CLOSE_WAIT,
"09": _common.CONN_LAST_ACK,
"0A": _common.CONN_LISTEN,
"0B": _common.CONN_CLOSING
}
# set later from __init__.py
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
# --- named tuples
def _get_cputimes_fields():
    """Return the list of CPU-time field names this kernel exposes:
    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
    [guest_nice]]]) — trailing fields depend on the kernel version.
    """
    with open('/proc/stat', 'rb') as f:
        values = f.readline().split()[1:]
    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
    # The nested vlen checks of the original collapse to independent
    # threshold tests, since each threshold implies the previous one.
    if len(values) >= 8:
        # Linux >= 2.6.11
        fields.append('steal')
    if len(values) >= 9:
        # Linux >= 2.6.24
        fields.append('guest')
    if len(values) >= 10:
        # Linux >= 3.2.0
        fields.append('guest_nice')
    return fields
scputimes = namedtuple('scputimes', _get_cputimes_fields())
svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached'])
pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
pmmap_grouped = namedtuple(
'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
'shared_dirty', 'private_clean', 'private_dirty',
'referenced', 'anonymous', 'swap'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# --- system memory
def virtual_memory():
    """Return system virtual-memory statistics as an svmem namedtuple.

    Totals come from sysinfo(2) via the C extension; 'cached', 'active'
    and 'inactive' are parsed from /proc/meminfo.  "available" memory is
    approximated as free + buffers + cached.
    """
    total, free, buffers, shared, _, _ = cext.linux_sysinfo()
    cached = active = inactive = None
    f = open('/proc/meminfo', 'rb')
    CACHED, ACTIVE, INACTIVE = b("Cached:"), b("Active:"), b("Inactive:")
    try:
        for line in f:
            if line.startswith(CACHED):
                cached = int(line.split()[1]) * 1024
            elif line.startswith(ACTIVE):
                active = int(line.split()[1]) * 1024
            elif line.startswith(INACTIVE):
                inactive = int(line.split()[1]) * 1024
            # Stop scanning as soon as all three values were found.
            if (cached is not None
                    and active is not None
                    and inactive is not None):
                break
        else:
            # for/else: runs only if the loop finished WITHOUT break,
            # i.e. one of the stats was missing from /proc/meminfo.
            # we might get here when dealing with exotic Linux flavors, see:
            # http://code.google.com/p/psutil/issues/detail?id=313
            msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
                  "be determined and were set to 0"
            warnings.warn(msg, RuntimeWarning)
            cached = active = inactive = 0
    finally:
        f.close()
    avail = free + buffers + cached
    used = total - free
    percent = usage_percent((total - avail), total, _round=1)
    return svmem(total, avail, percent, used, free,
                 active, inactive, buffers, cached)
def swap_memory():
    """Return swap usage as an sswap namedtuple (total, used, free,
    percent, sin, sout).  sin/sout (bytes swapped in/out) are parsed
    from /proc/vmstat.
    """
    _, _, _, _, total, free = cext.linux_sysinfo()
    used = total - free
    percent = usage_percent(used, total, _round=1)
    # get pgin/pgouts
    f = open("/proc/vmstat", "rb")
    SIN, SOUT = b('pswpin'), b('pswpout')
    sin = sout = None
    try:
        for line in f:
            # values are expressed in 4 kilo bytes, we want bytes instead
            if line.startswith(SIN):
                sin = int(line.split(b(' '))[1]) * 4 * 1024
            elif line.startswith(SOUT):
                sout = int(line.split(b(' '))[1]) * 4 * 1024
            if sin is not None and sout is not None:
                break
        else:
            # for/else: only reached when the loop did not break, i.e.
            # one of the two counters was missing from /proc/vmstat.
            # we might get here when dealing with exotic Linux flavors, see:
            # http://code.google.com/p/psutil/issues/detail?id=313
            msg = "'sin' and 'sout' swap memory stats couldn't " \
                  "be determined and were set to 0"
            warnings.warn(msg, RuntimeWarning)
            sin = sout = 0
    finally:
        f.close()
    return _common.sswap(total, used, free, percent, sin, sout)
@deprecated(replacement='psutil.virtual_memory().cached')
def cached_phymem():
    """Deprecated backward-compat shim for virtual_memory().cached."""
    return virtual_memory().cached
@deprecated(replacement='psutil.virtual_memory().buffers')
def phymem_buffers():
    """Deprecated backward-compat shim for virtual_memory().buffers."""
    return virtual_memory().buffers
# --- CPUs
def cpu_times():
    """Return system-wide CPU times as a scputimes named tuple:
    (user, nice, system, idle, iowait, irq, softirq [, steal [, guest
    [, guest_nice]]]).  Trailing fields depend on the kernel version.
    """
    with open('/proc/stat', 'rb') as f:
        first_line = f.readline()
    raw = first_line.split()[1:len(scputimes._fields) + 1]
    return scputimes(*[float(v) / CLOCK_TICKS for v in raw])
def per_cpu_times():
    """Return a list of scputimes named tuples, one per logical CPU."""
    nfields = len(scputimes._fields)
    cpu_prefix = b('cpu')
    cpus = []
    with open('/proc/stat', 'rb') as f:
        # Discard the first line: it holds the system-wide aggregate.
        f.readline()
        for line in f:
            if not line.startswith(cpu_prefix):
                continue
            raw = line.split()[1:nfields + 1]
            cpus.append(scputimes(*[float(v) / CLOCK_TICKS for v in raw]))
    return cpus
def cpu_count_logical():
    """Return the number of logical CPUs in the system.

    Tries sysconf(3) first, then falls back to counting 'processor'
    entries in /proc/cpuinfo and, as a last resort, 'cpuN' lines in
    /proc/stat.  Returns None if the count cannot be determined
    (mimicking os.cpu_count()).
    """
    try:
        return os.sysconf("SC_NPROCESSORS_ONLN")
    except ValueError:
        # as a second fallback we try to parse /proc/cpuinfo
        num = 0
        f = open('/proc/cpuinfo', 'rb')
        try:
            lines = f.readlines()
        finally:
            f.close()
        PROCESSOR = b('processor')
        for line in lines:
            if line.lower().startswith(PROCESSOR):
                num += 1

        # unknown format (e.g. amrel/sparc architectures), see:
        # http://code.google.com/p/psutil/issues/detail?id=200
        # try to parse /proc/stat as a last resort
        if num == 0:
            f = open('/proc/stat', 'rt')
            try:
                lines = f.readlines()
            finally:
                f.close()
            # Fix: use a raw string -- '\d' in a plain literal is an
            # invalid escape sequence (DeprecationWarning on Python 3).
            search = re.compile(r'cpu\d')
            for line in lines:
                line = line.split(' ')[0]
                if search.match(line):
                    num += 1

        if num == 0:
            # mimic os.cpu_count()
            return None
        return num
def cpu_count_physical():
    """Return the number of physical CPUs in the system, or None
    (mimicking os.cpu_count()) if it cannot be determined.
    """
    with open('/proc/cpuinfo', 'rb') as f:
        lines = f.readlines()
    physical_ids = set()
    prefix = b('physical id')
    for line in lines:
        if line.lower().startswith(prefix):
            physical_ids.add(line.strip())
    if not physical_ids:
        return None
    return len(physical_ids)
# --- other system functions
def users():
    """Return currently connected users as a list of suser namedtuples."""
    retlist = []
    for user, tty, hostname, tstamp, user_process in cext.users():
        # The underlying C function also reports entries about system
        # boot, run level etc.; only keep real user processes.
        if not user_process:
            continue
        if hostname == ':0.0':
            hostname = 'localhost'
        retlist.append(_common.suser(user, tty or None, hostname, tstamp))
    return retlist
def boot_time():
    """Return the system boot time expressed in seconds since the epoch.

    Also caches the value in the module-level BOOT_TIME global.
    """
    global BOOT_TIME
    btime_prefix = b('btime')
    with open('/proc/stat', 'rb') as f:
        for line in f:
            if line.startswith(btime_prefix):
                BOOT_TIME = float(line.strip().split()[1])
                return BOOT_TIME
    raise RuntimeError("line 'btime' not found")
# --- processes
def pids():
    """Returns a list of PIDs currently running on the system."""
    procfs_entries = os.listdir(b('/proc'))
    return [int(entry) for entry in procfs_entries if entry.isdigit()]
def pid_exists(pid):
    """Check For the existence of a unix pid."""
    # Delegates to the shared POSIX helper.
    return _psposix.pid_exists(pid)
# --- network
class Connections:
"""A wrapper on top of /proc/net/* files, retrieving per-process
and system-wide open connections (TCP, UDP, UNIX) similarly to
"netstat -an".
Note: in case of UNIX sockets we're only able to determine the
local endpoint/path, not the one it's connected to.
According to [1] it would be possible but not easily.
[1] http://serverfault.com/a/417946
"""
    def __init__(self):
        """Build ``tmap``: a map from the user-facing connection 'kind'
        strings to the (proc file, address family, socket type)
        triplets that must be scanned for that kind.
        """
        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
        unix = ("unix", socket.AF_UNIX, None)
        self.tmap = {
            "all": (tcp4, tcp6, udp4, udp6, unix),
            "tcp": (tcp4, tcp6),
            "tcp4": (tcp4,),
            "tcp6": (tcp6,),
            "udp": (udp4, udp6),
            "udp4": (udp4,),
            "udp6": (udp6,),
            "unix": (unix,),
            "inet": (tcp4, tcp6, udp4, udp6),
            "inet4": (tcp4, udp4),
            "inet6": (tcp6, udp6),
        }
    def get_proc_inodes(self, pid):
        """Return a {inode: [(pid, fd), ...]} map for every socket fd
        opened by *pid*, gathered from the /proc/<pid>/fd/* symlinks.
        """
        inodes = defaultdict(list)
        for fd in os.listdir("/proc/%s/fd" % pid):
            try:
                inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
            except OSError:
                # The fd disappeared in the meantime or cannot be
                # readlink()ed; just skip it.
                continue
            else:
                if inode.startswith('socket:['):
                    # the process is using a socket
                    inode = inode[8:][:-1]
                    inodes[inode].append((pid, int(fd)))
        return inodes
    def get_all_inodes(self):
        """Return one {inode: [(pid, fd), ...]} map covering every PID on
        the system, merging the per-process maps; expected per-process
        access errors are tolerated.
        """
        inodes = {}
        for pid in pids():
            try:
                inodes.update(self.get_proc_inodes(pid))
            except OSError:
                # os.listdir() is gonna raise a lot of access denied
                # exceptions in case of unprivileged user; that's fine
                # as we'll just end up returning a connection with PID
                # and fd set to None anyway.
                # Both netstat -an and lsof does the same so it's
                # unlikely we can do any better.
                # ENOENT just means a PID disappeared on us.
                err = sys.exc_info()[1]
                if err.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES):
                    raise
        return inodes
def decode_address(self, addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
if PY3:
ip = ip.encode('ascii')
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if family == socket.AF_INET:
# see: http://code.google.com/p/psutil/issues/detail?id=201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
# ip = ip.decode('hex')
# return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: http://code.google.com/p/psutil/issues/detail?id=201
if sys.byteorder == 'little':
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port)
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith('6') and not os.path.exists(file):
# IPv6 not supported
return
f = open(file, 'rt')
try:
f.readline() # skip the first line
for line in f:
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
if inode in inodes:
# We assume inet sockets are unique, so we error
# out if there are multiple references to the
# same inode. We won't do this for UNIX sockets.
if len(inodes[inode]) > 1 and type_ != socket.AF_UNIX:
raise ValueError("ambiguos inode with multiple "
"PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
yield (fd, family, type_, laddr, raddr, status, pid)
finally:
f.close()
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
f = open(file, 'rt')
try:
f.readline() # skip the first line
for line in f:
tokens = line.split()
_, _, _, _, type_, _, inode = tokens[0:7]
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
finally:
f.close()
def retrieve(self, kind, pid=None):
if kind not in self.tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in self.tmap])))
if pid is not None:
inodes = self.get_proc_inodes(pid)
if not inodes:
# no connections for this process
return []
else:
inodes = self.get_all_inodes()
ret = []
for f, family, type_ in self.tmap[kind]:
if family in (socket.AF_INET, socket.AF_INET6):
ls = self.process_inet(
"/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
else:
ls = self.process_unix(
"/proc/net/%s" % f, family, inodes, filter_pid=pid)
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
if pid:
conn = _common.pconn(fd, family, type_, laddr, raddr,
status)
else:
conn = _common.sconn(fd, family, type_, laddr, raddr,
status, bound_pid)
ret.append(conn)
return ret
_connections = Connections()  # module-level singleton shared by net_connections() and Process.connections()
def net_connections(kind='inet'):
    """List every open connection on the system (no PID filtering)."""
    # No pid argument: retrieve() scans the fd tables of all processes.
    return _connections.retrieve(kind)
def net_io_counters():
    """Return per-NIC I/O statistics as a dict of raw tuples, parsed
    from /proc/net/dev:
    (bytes_sent, bytes_recv, packets_sent, packets_recv,
     errin, errout, dropin, dropout)
    """
    f = open("/proc/net/dev", "rt")
    try:
        # the first two lines of the file are column headers
        lines = f.readlines()[2:]
    finally:
        f.close()
    retdict = {}
    for line in lines:
        colon = line.rfind(':')
        assert colon > 0, repr(line)
        name = line[:colon].strip()
        fields = [int(x) for x in line[colon + 1:].strip().split()]
        # receive counters occupy indexes 0-3, transmit counters 8-11
        retdict[name] = (fields[8], fields[0], fields[9], fields[1],
                         fields[2], fields[10], fields[3], fields[11])
    return retdict
# --- disks
def disk_io_counters():
    """Return disk I/O statistics for every disk installed on the
    system as a dict of raw tuples:
    {name: (reads, writes, rbytes, wbytes, rtime, wtime)}
    """
    # man iostat states that sectors are equivalent with blocks and
    # have a size of 512 bytes since 2.4 kernels. This value is
    # needed to calculate the amount of disk I/O in bytes.
    SECTOR_SIZE = 512
    # determine partitions we want to look for
    partitions = []
    f = open("/proc/partitions", "rt")
    try:
        # skip the two header lines of /proc/partitions
        lines = f.readlines()[2:]
    finally:
        f.close()
    # iterate in reverse so a disk's partitions (e.g. 'sda1') are seen
    # before the disk entry itself ('sda')
    for line in reversed(lines):
        _, _, _, name = line.split()
        if name[-1].isdigit():
            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
            # also be around but we want to omit it
            partitions.append(name)
        else:
            if not partitions or not partitions[-1].startswith(name):
                # we're dealing with a disk entity for which no
                # partitions have been defined (e.g. 'sda' but
                # 'sda1' was not around), see:
                # http://code.google.com/p/psutil/issues/detail?id=338
                partitions.append(name)
    # collect per-device stats for the names selected above
    retdict = {}
    f = open("/proc/diskstats", "rt")
    try:
        lines = f.readlines()
    finally:
        f.close()
    for line in lines:
        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
            line.split()[:11]
        if name in partitions:
            # sector counts -> bytes
            rbytes = int(rbytes) * SECTOR_SIZE
            wbytes = int(wbytes) * SECTOR_SIZE
            reads = int(reads)
            writes = int(writes)
            rtime = int(rtime)
            wtime = int(wtime)
            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
    return retdict
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of namedtuples."""
    # filesystem types NOT flagged "nodev" in /proc/filesystems are the
    # ones backed by a physical device
    phydevs = []
    f = open("/proc/filesystems", "r")
    try:
        for line in f:
            if not line.startswith("nodev"):
                phydevs.append(line.strip())
    finally:
        f.close()
    retlist = []
    partitions = cext.disk_partitions()
    for partition in partitions:
        device, mountpoint, fstype, opts = partition
        if device == 'none':
            device = ''
        if not all:
            # skip pseudo/virtual filesystems unless explicitly requested
            if device == '' or fstype not in phydevs:
                continue
        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
        retlist.append(ntuple)
    return retlist
disk_usage = _psposix.disk_usage  # re-export the shared POSIX implementation
# --- decorators
def wrap_exceptions(fun):
    """Decorator turning OSError/IOError raised by a Process method
    into NoSuchProcess or AccessDenied, depending on errno.
    """
    @wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except EnvironmentError:
            # When this module is imported privately the translated
            # exception classes may be missing: re-raise as-is.
            if NoSuchProcess is None or AccessDenied is None:
                raise
            # ENOENT (no such file or directory) gets raised on open().
            # ESRCH (no such process) can get raised on read() if
            # process is gone in meantime.
            exc = sys.exc_info()[1]
            eno = exc.errno
            if eno in (errno.ENOENT, errno.ESRCH):
                raise NoSuchProcess(self.pid, self._name)
            elif eno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._name)
            raise
    return wrapper
class Process(object):
    """Linux process implementation.

    Most accessors read /proc/<pid>/* files; OSError/IOError raised by
    them is translated into NoSuchProcess/AccessDenied by the
    @wrap_exceptions decorator.
    """
    # keep instances lightweight: only the pid and the cached name
    __slots__ = ["pid", "_name"]
    def __init__(self, pid):
        self.pid = pid
        self._name = None
    @wrap_exceptions
    def name(self):
        fname = "/proc/%s/stat" % self.pid
        if PY3:
            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
        else:
            f = open(fname, "rt")
        try:
            # second field of /proc/<pid>/stat is "(comm)"; strip parens.
            # NOTE(review): a comm name containing a space would be
            # truncated by split(' ') -- confirm acceptable.
            name = f.read().split(' ')[1].replace('(', '').replace(')', '')
        finally:
            f.close()
        # XXX - gets changed later and probably needs refactoring
        return name
    # not decorated: performs its own errno handling (ENOENT can be
    # legitimate for low-pid system processes)
    def exe(self):
        try:
            exe = os.readlink("/proc/%s/exe" % self.pid)
        except (OSError, IOError):
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # no such file error; might be raised also if the
                # path actually exists for system processes with
                # low pids (about 0-20)
                if os.path.lexists("/proc/%s" % self.pid):
                    return ""
                else:
                    # ok, it is a process which has gone away
                    raise NoSuchProcess(self.pid, self._name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._name)
            raise
        # readlink() might return paths containing null bytes ('\x00').
        # Certain names have ' (deleted)' appended. Usually this is
        # bogus as the file actually exists. Either way that's not
        # important as we don't want to discriminate executables which
        # have been deleted.
        exe = exe.split('\x00')[0]
        if exe.endswith(' (deleted)') and not os.path.exists(exe):
            exe = exe[:-10]
        return exe
    @wrap_exceptions
    def cmdline(self):
        fname = "/proc/%s/cmdline" % self.pid
        if PY3:
            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
        else:
            f = open(fname, "rt")
        try:
            # return the args as a list; cmdline is NUL-separated
            return [x for x in f.read().split('\x00') if x]
        finally:
            f.close()
    @wrap_exceptions
    def terminal(self):
        tmap = _psposix._get_terminal_map()
        f = open("/proc/%s/stat" % self.pid, 'rb')
        try:
            # field 7 of /proc/<pid>/stat is the controlling tty number
            tty_nr = int(f.read().split(b(' '))[6])
        finally:
            f.close()
        try:
            return tmap[tty_nr]
        except KeyError:
            return None
    # /proc/<pid>/io only exists on kernels with I/O accounting; define
    # a stub raising NotImplementedError otherwise (see else branch).
    if os.path.exists('/proc/%s/io' % os.getpid()):
        @wrap_exceptions
        def io_counters(self):
            fname = "/proc/%s/io" % self.pid
            f = open(fname, 'rb')
            SYSCR, SYSCW = b("syscr"), b("syscw")
            READ_BYTES, WRITE_BYTES = b("read_bytes"), b("write_bytes")
            try:
                rcount = wcount = rbytes = wbytes = None
                for line in f:
                    if rcount is None and line.startswith(SYSCR):
                        rcount = int(line.split()[1])
                    elif wcount is None and line.startswith(SYSCW):
                        wcount = int(line.split()[1])
                    elif rbytes is None and line.startswith(READ_BYTES):
                        rbytes = int(line.split()[1])
                    elif wbytes is None and line.startswith(WRITE_BYTES):
                        wbytes = int(line.split()[1])
                for x in (rcount, wcount, rbytes, wbytes):
                    if x is None:
                        raise NotImplementedError(
                            "couldn't read all necessary info from %r" % fname)
                return _common.pio(rcount, wcount, rbytes, wbytes)
            finally:
                f.close()
    else:
        def io_counters(self):
            raise NotImplementedError("couldn't find /proc/%s/io (kernel "
                                      "too old?)" % self.pid)
    @wrap_exceptions
    def cpu_times(self):
        f = open("/proc/%s/stat" % self.pid, 'rb')
        try:
            st = f.read().strip()
        finally:
            f.close()
        # ignore the first two values ("pid (exe)")
        # NOTE(review): find() locates the *first* ')' while
        # create_time() uses rfind(); a comm name containing ')'
        # would be mis-split here -- confirm which is intended.
        st = st[st.find(b(')')) + 2:]
        values = st.split(b(' '))
        # fields 14/15 (utime/stime) counted from the full stat line
        utime = float(values[11]) / CLOCK_TICKS
        stime = float(values[12]) / CLOCK_TICKS
        return _common.pcputimes(utime, stime)
    @wrap_exceptions
    def wait(self, timeout=None):
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except _psposix.TimeoutExpired:
            # support for private module import
            if TimeoutExpired is None:
                raise
            raise TimeoutExpired(timeout, self.pid, self._name)
    @wrap_exceptions
    def create_time(self):
        f = open("/proc/%s/stat" % self.pid, 'rb')
        try:
            st = f.read().strip()
        finally:
            f.close()
        # ignore the first two values ("pid (exe)")
        st = st[st.rfind(b(')')) + 2:]
        values = st.split(b(' '))
        # According to documentation, starttime is in field 21 and the
        # unit is jiffies (clock ticks).
        # We first divide it for clock ticks and then add uptime returning
        # seconds since the epoch, in UTC.
        # Also use cached value if available.
        bt = BOOT_TIME or boot_time()
        return (float(values[19]) / CLOCK_TICKS) + bt
    @wrap_exceptions
    def memory_info(self):
        # /proc/<pid>/statm holds sizes in pages: total first, rss second
        f = open("/proc/%s/statm" % self.pid, 'rb')
        try:
            vms, rss = f.readline().split()[:2]
            return _common.pmem(int(rss) * PAGESIZE,
                                int(vms) * PAGESIZE)
        finally:
            f.close()
    @wrap_exceptions
    def memory_info_ex(self):
        #  ============================================================
        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
        #  ============================================================
        # | rss    | resident set size                   |      | RES  |
        # | vms    | total program size                  | size | VIRT |
        # | shared | shared pages (from shared mappings) |      | SHR  |
        # | text   | text ('code')                       | trs  | CODE |
        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
        # | data   | data + stack                        | drs  | DATA |
        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
        #  ============================================================
        f = open("/proc/%s/statm" % self.pid, "rb")
        try:
            vms, rss, shared, text, lib, data, dirty = \
                [int(x) * PAGESIZE for x in f.readline().split()[:7]]
        finally:
            f.close()
        return pextmem(rss, vms, shared, text, lib, data, dirty)
    # /proc/<pid>/smaps requires kernel >= 2.6.14 with CONFIG_MMU; a
    # stub raising NotImplementedError is defined otherwise.
    if os.path.exists('/proc/%s/smaps' % os.getpid()):
        # generator: yields one tuple per memory mapping
        def memory_maps(self):
            """Return process's mapped memory regions as a list of nameduples.
            Fields are explained in 'man proc'; here is an updated (Apr 2012)
            version: http://goo.gl/fmebo
            """
            f = None
            try:
                f = open("/proc/%s/smaps" % self.pid, "rt")
                first_line = f.readline()
                current_block = [first_line]
                def get_blocks():
                    # yields (header-line, {field: bytes}) per mapping
                    data = {}
                    for line in f:
                        fields = line.split(None, 5)
                        if not fields[0].endswith(':'):
                            # new block section
                            yield (current_block.pop(), data)
                            current_block.append(line)
                        else:
                            try:
                                data[fields[0]] = int(fields[1]) * 1024
                            except ValueError:
                                if fields[0].startswith('VmFlags:'):
                                    # see issue #369
                                    continue
                                else:
                                    raise ValueError("don't know how to inte"
                                                     "rpret line %r" % line)
                    yield (current_block.pop(), data)
                if first_line:  # smaps file can be empty
                    for header, data in get_blocks():
                        hfields = header.split(None, 5)
                        try:
                            addr, perms, offset, dev, inode, path = hfields
                        except ValueError:
                            # mapping without a path component
                            addr, perms, offset, dev, inode, path = \
                                hfields + ['']
                        if not path:
                            path = '[anon]'
                        else:
                            path = path.strip()
                        yield (addr, perms, path,
                               data['Rss:'],
                               data.get('Size:', 0),
                               data.get('Pss:', 0),
                               data.get('Shared_Clean:', 0),
                               data.get('Shared_Dirty:', 0),
                               data.get('Private_Clean:', 0),
                               data.get('Private_Dirty:', 0),
                               data.get('Referenced:', 0),
                               data.get('Anonymous:', 0),
                               data.get('Swap:', 0))
                f.close()
            except EnvironmentError:
                # XXX - Can't use wrap_exceptions decorator as we're
                # returning a generator; this probably needs some
                # refactoring in order to avoid this code duplication.
                if f is not None:
                    f.close()
                err = sys.exc_info()[1]
                if err.errno in (errno.ENOENT, errno.ESRCH):
                    raise NoSuchProcess(self.pid, self._name)
                if err.errno in (errno.EPERM, errno.EACCES):
                    raise AccessDenied(self.pid, self._name)
                raise
            except:
                if f is not None:
                    f.close()
                raise
            f.close()
    else:
        def memory_maps(self, ext):
            msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or " \
                  "CONFIG_MMU kernel configuration option is not enabled" \
                  % self.pid
            raise NotImplementedError(msg)
    @wrap_exceptions
    def cwd(self):
        # readlink() might return paths containing null bytes causing
        # problems when used with other fs-related functions (os.*,
        # open(), ...)
        path = os.readlink("/proc/%s/cwd" % self.pid)
        return path.replace('\x00', '')
    @wrap_exceptions
    def num_ctx_switches(self):
        vol = unvol = None
        f = open("/proc/%s/status" % self.pid, "rb")
        VOLUNTARY = b("voluntary_ctxt_switches")
        NON_VOLUNTARY = b("nonvoluntary_ctxt_switches")
        try:
            for line in f:
                if line.startswith(VOLUNTARY):
                    vol = int(line.split()[1])
                elif line.startswith(NON_VOLUNTARY):
                    unvol = int(line.split()[1])
                if vol is not None and unvol is not None:
                    return _common.pctxsw(vol, unvol)
            # NOTE(review): the concatenated message below lacks a space
            # between "...switches'" and "fields" -- confirm intended.
            raise NotImplementedError(
                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
                "fields were not found in /proc/%s/status; the kernel is "
                "probably older than 2.6.23" % self.pid)
        finally:
            f.close()
    @wrap_exceptions
    def num_threads(self):
        f = open("/proc/%s/status" % self.pid, "rb")
        try:
            THREADS = b("Threads:")
            for line in f:
                if line.startswith(THREADS):
                    return int(line.split()[1])
            raise NotImplementedError("line not found")
        finally:
            f.close()
    @wrap_exceptions
    def threads(self):
        # one sub-directory of /proc/<pid>/task per thread
        thread_ids = os.listdir("/proc/%s/task" % self.pid)
        thread_ids.sort()
        retlist = []
        hit_enoent = False
        for thread_id in thread_ids:
            try:
                f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id), 'rb')
            except EnvironmentError:
                err = sys.exc_info()[1]
                if err.errno == errno.ENOENT:
                    # no such file or directory; it means thread
                    # disappeared on us
                    hit_enoent = True
                    continue
                raise
            try:
                st = f.read().strip()
            finally:
                f.close()
            # ignore the first two values ("pid (exe)")
            st = st[st.find(b(')')) + 2:]
            values = st.split(b(' '))
            utime = float(values[11]) / CLOCK_TICKS
            stime = float(values[12]) / CLOCK_TICKS
            ntuple = _common.pthread(int(thread_id), utime, stime)
            retlist.append(ntuple)
        if hit_enoent:
            # raise NSP if the process disappeared on us
            os.stat('/proc/%s' % self.pid)
        return retlist
    @wrap_exceptions
    def nice_get(self):
        # f = open('/proc/%s/stat' % self.pid, 'r')
        # try:
        #   data = f.read()
        #   return int(data.split()[18])
        # finally:
        #   f.close()
        # Use C implementation
        return _psutil_posix.getpriority(self.pid)
    @wrap_exceptions
    def nice_set(self, value):
        return _psutil_posix.setpriority(self.pid, value)
    @wrap_exceptions
    def cpu_affinity_get(self):
        # expand the C-side bitmask into a list of CPU numbers
        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
        bitmask = cext.proc_cpu_affinity_get(self.pid)
        return from_bitmask(bitmask)
    @wrap_exceptions
    def cpu_affinity_set(self, cpus):
        try:
            cext.proc_cpu_affinity_set(self.pid, cpus)
        except OSError:
            err = sys.exc_info()[1]
            if err.errno == errno.EINVAL:
                # translate EINVAL into a friendlier ValueError when an
                # out-of-range CPU number was passed
                allcpus = tuple(range(len(per_cpu_times())))
                for cpu in cpus:
                    if cpu not in allcpus:
                        raise ValueError("invalid CPU #%i (choose between %s)"
                                         % (cpu, allcpus))
            raise
    # only starting from kernel 2.6.13
    if hasattr(cext, "proc_ioprio_get"):
        @wrap_exceptions
        def ionice_get(self):
            ioclass, value = cext.proc_ioprio_get(self.pid)
            return _common.pionice(ioclass, value)
        @wrap_exceptions
        def ionice_set(self, ioclass, value):
            # normalize/validate (ioclass, value) before handing to C
            if ioclass in (IOPRIO_CLASS_NONE, None):
                if value:
                    msg = "can't specify value with IOPRIO_CLASS_NONE"
                    raise ValueError(msg)
                ioclass = IOPRIO_CLASS_NONE
                value = 0
            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
                if value is None:
                    value = 4
            elif ioclass == IOPRIO_CLASS_IDLE:
                if value:
                    msg = "can't specify value with IOPRIO_CLASS_IDLE"
                    raise ValueError(msg)
                value = 0
            else:
                value = 0
            if not 0 <= value <= 8:
                raise ValueError(
                    "value argument range expected is between 0 and 8")
            return cext.proc_ioprio_set(self.pid, ioclass, value)
    # rlimit() is only defined when the prlimit() syscall is available
    if HAS_PRLIMIT:
        @wrap_exceptions
        def rlimit(self, resource, limits=None):
            # if pid is 0 prlimit() applies to the calling process and
            # we don't want that
            if self.pid == 0:
                raise ValueError("can't use prlimit() against PID 0 process")
            if limits is None:
                # get
                return cext.linux_prlimit(self.pid, resource)
            else:
                # set
                if len(limits) != 2:
                    raise ValueError(
                        "second argument must be a (soft, hard) tuple")
                soft, hard = limits
                cext.linux_prlimit(self.pid, resource, soft, hard)
    @wrap_exceptions
    def status(self):
        f = open("/proc/%s/status" % self.pid, 'rb')
        try:
            STATE = b("State:")
            for line in f:
                if line.startswith(STATE):
                    letter = line.split()[1]
                    if PY3:
                        letter = letter.decode()
                    # XXX is '?' legit? (we're not supposed to return
                    # it anyway)
                    return PROC_STATUSES.get(letter, '?')
        finally:
            f.close()
    @wrap_exceptions
    def open_files(self):
        retlist = []
        files = os.listdir("/proc/%s/fd" % self.pid)
        hit_enoent = False
        for fd in files:
            file = "/proc/%s/fd/%s" % (self.pid, fd)
            if os.path.islink(file):
                try:
                    file = os.readlink(file)
                except OSError:
                    # ENOENT == file which is gone in the meantime
                    err = sys.exc_info()[1]
                    if err.errno == errno.ENOENT:
                        hit_enoent = True
                        continue
                    raise
                else:
                    # If file is not an absolute path there's no way
                    # to tell whether it's a regular file or not,
                    # so we skip it. A regular file is always supposed
                    # to be absolutized though.
                    if file.startswith('/') and isfile_strict(file):
                        ntuple = _common.popenfile(file, int(fd))
                        retlist.append(ntuple)
        if hit_enoent:
            # raise NSP if the process disappeared on us
            os.stat('/proc/%s' % self.pid)
        return retlist
    @wrap_exceptions
    def connections(self, kind='inet'):
        ret = _connections.retrieve(kind, self.pid)
        # raise NSP if the process disappeared on us
        os.stat('/proc/%s' % self.pid)
        return ret
    @wrap_exceptions
    def num_fds(self):
        return len(os.listdir("/proc/%s/fd" % self.pid))
    @wrap_exceptions
    def ppid(self):
        f = open("/proc/%s/status" % self.pid, 'rb')
        try:
            PPID = b("PPid:")
            for line in f:
                if line.startswith(PPID):
                    # PPid: nnnn
                    return int(line.split()[1])
            raise NotImplementedError("line not found")
        finally:
            f.close()
    @wrap_exceptions
    def uids(self):
        f = open("/proc/%s/status" % self.pid, 'rb')
        try:
            UID = b('Uid:')
            for line in f:
                if line.startswith(UID):
                    # Uid: real effective saved fs
                    _, real, effective, saved, fs = line.split()
                    return _common.puids(int(real), int(effective), int(saved))
            raise NotImplementedError("line not found")
        finally:
            f.close()
    @wrap_exceptions
    def gids(self):
        f = open("/proc/%s/status" % self.pid, 'rb')
        try:
            GID = b('Gid:')
            for line in f:
                if line.startswith(GID):
                    # Gid: real effective saved fs
                    _, real, effective, saved, fs = line.split()
                    return _common.pgids(int(real), int(effective), int(saved))
            raise NotImplementedError("line not found")
        finally:
            f.close()
| 35.350734 | 79 | 0.515759 |
acf2315ff8a400feb7697d25a4d1b753a0bcadc0 | 11,629 | py | Python | datacite/schema42.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | datacite/schema42.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | datacite/schema42.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of DataCite.
#
# Copyright (C) 2016 CERN.
# Copyright (C) 2019 Caltech.
#
# DataCite is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
"""DataCite v4.2 JSON to XML transformations."""
from __future__ import absolute_import, print_function
import pkg_resources
from lxml import etree
from lxml.builder import E
from .jsonutils import validator_factory
from .xmlutils import Rules, dump_etree_helper, etree_to_string, \
set_elem_attr, set_non_empty_attr
rules = Rules()
ns = {
None: 'http://datacite.org/schema/kernel-4',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xml': 'xml',
}
root_attribs = {
'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
'http://datacite.org/schema/kernel-4 '
'http://schema.datacite.org/meta/kernel-4.2/metadata.xsd',
}
validator = validator_factory(pkg_resources.resource_filename(
'datacite',
'schemas/datacite-v4.2.json'
))
def dump_etree(data):
    """Build the DataCite v4.2 XML ElementTree for a JSON dictionary."""
    # Delegate to the shared helper with this module's rule set,
    # namespace map and root attributes.
    return dump_etree_helper(data, rules, ns, root_attribs)
def tostring(data, **kwargs):
    """Serialize a JSON dictionary to DataCite v4.2 XML text."""
    tree = dump_etree(data)
    return etree_to_string(tree, **kwargs)
def validate(data):
    """Check a dictionary against the DataCite v4.2 JSON schema."""
    # Boolean result only; use the validator directly for error details.
    return validator.is_valid(data)
@rules.rule('identifier')
def identifier(path, value):
    """Build the mandatory <identifier> element with its type attribute."""
    elem = E.identifier(value['identifier'])
    elem.set('identifierType', value['identifierType'])
    return elem
def affiliations(root, values):
    """Append an <affiliation> child to *root* for each affiliation entry."""
    for item in values.get('affiliations', []):
        root.append(E.affiliation(item['affiliation']))
def familyname(root, value):
    """Append a <familyName> child when the entry provides one."""
    name = value.get('familyName')
    if name:
        root.append(E.familyName(name))
def givenname(root, value):
    """Extract given name."""
    val = value.get('givenName')
    if val:
        root.append(E.givenName(val))
def person_or_org_name(root, value, xml_tagname, json_tagname):
    """Append a name element (creatorName/contributorName) carrying the
    optional nameType and xml:lang attributes.
    """
    name_elem = E(xml_tagname, value[json_tagname])
    set_elem_attr(name_elem, 'nameType', value)
    set_non_empty_attr(name_elem, '{xml}lang', value.get('lang'))
    root.append(name_elem)
def nameidentifiers(root, values):
    """Append a <nameIdentifier> child per entry; entries without a
    nameIdentifier value are skipped.
    """
    for item in values.get('nameIdentifiers', []):
        if not item.get('nameIdentifier'):
            continue
        elem = E.nameIdentifier(item['nameIdentifier'])
        elem.set('nameIdentifierScheme', item['nameIdentifierScheme'])
        set_elem_attr(elem, 'schemeURI', item)
        root.append(elem)
@rules.rule('creators')
def creators(path, values):
    """Transform creators.

    Children are appended in schema order: name, givenName, familyName,
    nameIdentifier(s), affiliation(s).
    """
    if not values:
        return
    root = E.creators()
    for value in values:
        creator = E.creator()
        person_or_org_name(creator, value, 'creatorName', 'name')
        givenname(creator, value)
        familyname(creator, value)
        nameidentifiers(creator, value)
        affiliations(creator, value)
        root.append(creator)
    return root
@rules.rule('titles')
def titles(path, values):
    """Transform titles (optional xml:lang and titleType attributes)."""
    if not values:
        return
    root = E.titles()
    for value in values:
        elem = etree.Element('title', nsmap=ns)
        elem.text = value['title']
        set_non_empty_attr(elem, '{xml}lang', value.get('lang'))
        # 'type' was a mistake in 4.0 serializer, which is supported
        # for backwards compatibility until kernel 5 is released.
        set_non_empty_attr(elem, 'titleType', value.get('type'))
        # 'titleType' will supersede 'type' if available
        set_non_empty_attr(elem, 'titleType', value.get('titleType'))
        root.append(elem)
    return root
@rules.rule('publisher')
def publisher(path, value):
    """Build <publisher>, or nothing when the value is empty."""
    return E.publisher(value) if value else None
@rules.rule('publicationYear')
def publication_year(path, value):
    """Build <publicationYear> (stringified), or nothing when empty."""
    if value:
        return E.publicationYear(str(value))
    return None
@rules.rule('subjects')
def subjects(path, values):
    """Transform subjects (attributes are set in schema order)."""
    if not values:
        return
    root = E.subjects()
    for value in values:
        elem = E.subject(value['subject'])
        set_non_empty_attr(elem, '{xml}lang', value.get('lang'))
        set_elem_attr(elem, 'subjectScheme', value)
        set_elem_attr(elem, 'schemeURI', value)
        set_elem_attr(elem, 'valueURI', value)
        root.append(elem)
    return root
@rules.rule('contributors')
def contributors(path, values):
    """Transform contributors.

    Same child ordering as creators(), plus the mandatory
    contributorType attribute.
    """
    if not values:
        return
    root = E.contributors()
    for value in values:
        contributor = E.contributor()
        person_or_org_name(contributor, value, 'contributorName', 'name')
        set_elem_attr(contributor, 'contributorType', value)
        givenname(contributor, value)
        familyname(contributor, value)
        nameidentifiers(contributor, value)
        affiliations(contributor, value)
        root.append(contributor)
    return root
@rules.rule('dates')
def dates(path, values):
    """Build <dates> with one <date> child per entry."""
    if not values:
        return
    root = E.dates()
    for entry in values:
        date_elem = E.date(entry['date'], dateType=entry['dateType'])
        set_elem_attr(date_elem, 'dateInformation', entry)
        root.append(date_elem)
    return root
@rules.rule('language')
def language(path, value):
    """Build <language>, or nothing when the value is empty."""
    return E.language(value) if value else None
@rules.rule('types')
def resource_type(path, value):
    """Build <resourceType> with its mandatory resourceTypeGeneral."""
    return E.resourceType(
        value['resourceType'],
        resourceTypeGeneral=value['resourceTypeGeneral'],
    )
@rules.rule('alternateIdentifiers')
def alternate_identifiers(path, values):
    """Build <alternateIdentifiers>, or nothing when the list is empty."""
    if not values:
        return
    root = E.alternateIdentifiers()
    for entry in values:
        elem = E.alternateIdentifier(entry['alternateIdentifier'])
        elem.set('alternateIdentifierType', entry['alternateIdentifierType'])
        root.append(elem)
    return root
@rules.rule('relatedIdentifiers')
def related_identifiers(path, values):
    """Transform relatedIdentifiers (attributes set in schema order)."""
    if not values:
        return
    root = E.relatedIdentifiers()
    for value in values:
        elem = E.relatedIdentifier()
        elem.text = value['relatedIdentifier']
        # mandatory attributes first, optional ones after
        elem.set('relatedIdentifierType', value['relatedIdentifierType'])
        elem.set('relationType', value['relationType'])
        set_elem_attr(elem, 'relatedMetadataScheme', value)
        set_elem_attr(elem, 'schemeURI', value)
        set_elem_attr(elem, 'schemeType', value)
        set_elem_attr(elem, 'resourceTypeGeneral', value)
        root.append(elem)
    return root
def free_text_list(plural, singular, values):
    """Build a <plural> element with one <singular> text child per value."""
    if not values:
        return
    root = etree.Element(plural)
    for value in values:
        child = etree.SubElement(root, singular)
        child.text = value
    return root
@rules.rule('sizes')
def sizes(path, values):
    """Build the <sizes>/<size> list."""
    return free_text_list('sizes', 'size', values)
@rules.rule('formats')
def formats(path, values):
    """Transform formats."""
    return free_text_list('formats', 'format', values)
@rules.rule('version')
def version(path, value):
    """Build <version>, or nothing when the value is empty."""
    return E.version(value) if value else None
@rules.rule('rightsList')
def rights(path, values):
    """Transform rights (attributes set in schema order)."""
    if not values:
        return
    root = E.rightsList()
    for value in values:
        if 'rights' in value:
            elem = E.rights(value['rights'])
        # Handle the odd case where no rights text present
        else:
            elem = E.rights()
        set_elem_attr(elem, 'rightsURI', value)
        set_elem_attr(elem, 'rightsIdentifierScheme', value)
        set_elem_attr(elem, 'rightsIdentifier', value)
        set_elem_attr(elem, 'schemeURI', value)
        set_non_empty_attr(elem, '{xml}lang', value.get('lang'))
        root.append(elem)
    return root
@rules.rule('descriptions')
def descriptions(path, values):
    """Transform descriptions (mandatory descriptionType, optional lang)."""
    if not values:
        return
    root = E.descriptions()
    for value in values:
        elem = E.description(
            value['description'], descriptionType=value['descriptionType']
        )
        set_non_empty_attr(elem, '{xml}lang', value.get('lang'))
        root.append(elem)
    return root
def geopoint(root, value):
    """Append pointLongitude/pointLatitude children in schema order."""
    for tag in ('pointLongitude', 'pointLatitude'):
        root.append(E(tag, str(value[tag])))
@rules.rule('geoLocations')
def geolocations(path, values):
    """Transform geolocations.

    Each entry may carry a place name, a point, a bounding box and/or
    polygons; children are emitted in that (schema) order.
    """
    if not values:
        return
    root = E.geoLocations()
    for value in values:
        element = E.geoLocation()
        place = value.get('geoLocationPlace')
        if place:
            element.append(E.geoLocationPlace(place))
        point = value.get('geoLocationPoint')
        if point:
            elem = E.geoLocationPoint()
            geopoint(elem, point)
            element.append(elem)
        box = value.get('geoLocationBox')
        if box:
            # all four box edges are mandatory once a box is given
            elem = E.geoLocationBox()
            elem.append(E.westBoundLongitude(str(box['westBoundLongitude'])))
            elem.append(E.eastBoundLongitude(str(box['eastBoundLongitude'])))
            elem.append(E.southBoundLatitude(str(box['southBoundLatitude'])))
            elem.append(E.northBoundLatitude(str(box['northBoundLatitude'])))
            element.append(elem)
        polygons = value.get('geoLocationPolygons', [])
        for polygon in polygons:
            elem = E.geoLocationPolygon()
            points = polygon["polygonPoints"]
            for p in points:
                e = E.polygonPoint()
                geopoint(e, p)
                elem.append(e)
            # optional point lying inside the polygon
            inPoint = polygon.get("inPolygonPoint")
            if inPoint:
                e = E.inPolygonPoint()
                geopoint(e, inPoint)
                elem.append(e)
            element.append(elem)
        root.append(element)
    return root
@rules.rule('fundingReferences')
def fundingreferences(path, values):
    """Transform funding references.

    funderName is mandatory; funderIdentifier, awardNumber (with
    optional awardURI) and awardTitle are emitted only when present.
    """
    if not values:
        return
    root = E.fundingReferences()
    for value in values:
        element = E.fundingReference()
        element.append(E.funderName(value.get('funderName')))
        identifier = value.get('funderIdentifier')
        if identifier:
            elem = E.funderIdentifier(identifier)
            typev = value.get('funderIdentifierType')
            if typev:
                elem.set('funderIdentifierType', typev)
            element.append(elem)
        number = value.get('awardNumber')
        if number:
            elem = E.awardNumber(number)
            uri = value.get('awardURI')
            if uri:
                elem.set('awardURI', uri)
            element.append(elem)
        title = value.get('awardTitle')
        if title:
            element.append(E.awardTitle(title))
        # only keep references that produced at least one child
        if len(element):
            root.append(element)
    return root
| 27.298122 | 77 | 0.633932 |
acf232f41e44dd7daf9828c4a8d1ec199f8a1c70 | 6,781 | py | Python | submitit/core/test_core.py | rpanai/submitit | ca25f9e6c0684b7d3d714ba9997f05bc07c532db | [
"MIT"
] | null | null | null | submitit/core/test_core.py | rpanai/submitit | ca25f9e6c0684b7d3d714ba9997f05bc07c532db | [
"MIT"
] | null | null | null | submitit/core/test_core.py | rpanai/submitit | ca25f9e6c0684b7d3d714ba9997f05bc07c532db | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# pylint: disable=redefined-outer-name
import contextlib
import sys
import time
from pathlib import Path
from typing import Any, Iterator, List, Optional, Union
from unittest.mock import patch
import pytest
from . import core, submission, utils
class _SecondCall:
"""Helps mocking CommandFunction which is like a subprocess check_output, but
with a second call.
"""
def __init__(self, outputs: Any) -> None:
self._outputs = outputs
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return self._outputs
class MockedSubprocess:
    """Helper for mocking subprocess calls (sacct/sbatch/scancel) and shutil.which."""

    SACCT_HEADER = "JobID|State"
    SACCT_JOB = "{j}|{state}\n{j}.ext+|{state}\n{j}.0|{state}"

    def __init__(
        self, state: str = "RUNNING", job_id: str = "12", shutil_which: Optional[str] = None, array: int = 0
    ) -> None:
        self.state = state
        self.shutil_which = shutil_which
        self.job_id = job_id
        # Pre-render canned outputs once so each mocked call is a plain lookup.
        self._sacct = self.sacct(state, job_id, array)
        self._sbatch = f"Running job {job_id}\n".encode()

    def __call__(self, command: str, **kwargs: Any) -> Any:
        name = command[0]
        if name == "sacct":
            return self._sacct
        if name == "sbatch":
            return self._sbatch
        if name == "scancel":
            return ""
        raise ValueError(f'Unknown command to mock "{command}".')

    def sacct(self, state: str, job_id: str, array: int) -> bytes:
        """Render a fake `sacct` output table for a single job or a job array."""
        if array:
            body = "\n".join(
                self.SACCT_JOB.format(j=f"{job_id}_{k}", state=state) for k in range(array)
            )
        else:
            body = self.SACCT_JOB.format(j=job_id, state=state)
        return "\n".join((self.SACCT_HEADER, body)).encode()

    def which(self, name: str) -> Optional[str]:
        # Pretend only `shutil_which` is installed on the PATH.
        return "here" if name == self.shutil_which else None

    @contextlib.contextmanager
    def context(self) -> Iterator[None]:
        """Patch every subprocess entry point used by submitit with this mock."""
        with patch(
            "submitit.core.utils.CommandFunction",
            new=lambda *args, **kwargs: _SecondCall(self(*args, **kwargs)),
        ):
            with patch("subprocess.check_output", new=self):
                with patch("shutil.which", new=self.which):
                    with patch("subprocess.check_call", new=self):
                        yield None
class FakeInfoWatcher(core.InfoWatcher):
    # pylint: disable=abstract-method
    """Minimal InfoWatcher stub: reports every job as "running" regardless of id/mode."""

    def get_state(self, job_id: str, mode: str = "standard") -> str:
        return "running"
class FakeJob(core.Job[core.R]):
    """Job flavor used by the tests; state queries go through the fake watcher."""

    # Class-level watcher shared by all instances: every FakeJob appears "running".
    watcher = FakeInfoWatcher()
class FakeExecutor(core.PicklingExecutor):
    """Minimal PicklingExecutor for exercising the submission pipeline.

    The generated "submission file" contains ``echo 12``; the submission
    command is that file's content split into tokens, so the fake job id
    returned by a submission is always ``"12"``.
    """

    job_class = FakeJob

    @property
    def _submitit_command_str(self) -> str:
        # Base command; _make_submission_file_text appends "2" -> "echo 12".
        return "echo 1"

    def _num_tasks(self) -> int:
        # Single-task jobs only.
        return 1

    def _make_submission_file_text(self, command: str, uid: str) -> str:  # pylint: disable=unused-argument
        """Creates the text of a file which will be created and run
        for the submission (for slurm, this is sbatch file).
        """
        return command + "2"  # this makes "echo 12"

    def _make_submission_command(self, submission_file_path: Path) -> List[str]:
        """Create the submission command.
        """
        with submission_file_path.open("r") as f:
            text: str = f.read()
        return text.split()  # this makes ["echo", "12"]

    @staticmethod
    def _get_job_id_from_submission_command(string: Union[bytes, str]) -> str:
        # The fake command is `echo 12`, so its stdout "12" becomes the job id.
        return string if isinstance(string, str) else string.decode()  # this returns "12"
def _three_time(x: int) -> int:
return 3 * x
def do_nothing(*args: Any, **kwargs: Any) -> int:
    """Echo the received arguments, optionally sleep or fail, and return 12.

    Recognized keyword arguments:
      - sleep: number of seconds to wait (presence triggers the wait)
      - error: truthy value raises ValueError after the optional sleep
    """
    print("my args", args, flush=True)
    print("my kwargs", kwargs, flush=True)
    if "sleep" in kwargs:
        print("Waiting", flush=True)
        time.sleep(int(kwargs["sleep"]))
    if kwargs.get("error", False):
        print("Raising", flush=True)
        raise ValueError("Too bad")
    print("Finishing", flush=True)
    return 12
def test_fake_job(tmp_path: Path) -> None:
    """End-to-end check of the Job API against result/log files written by hand."""
    job: FakeJob[int] = FakeJob(job_id="12", folder=tmp_path)
    repr(job)  # smoke-check __repr__ does not crash
    assert not job.done(force_check=True)
    # logs: absent until the files exist on disk
    assert job.stdout() is None
    assert job.stderr() is None
    with job.paths.stderr.open("w") as f:
        f.write("blublu")
    assert job.stderr() == "blublu"
    # result: a ("success", value) pickle yields the value
    utils.pickle_dump(("success", 12), job.paths.result_pickle)
    assert job.result() == 12
    # exception: None on success, an Exception after an ("error", ...) pickle
    assert job.exception() is None
    utils.pickle_dump(("error", "blublu"), job.paths.result_pickle)
    assert isinstance(job.exception(), Exception)
    with pytest.raises(core.utils.FailedJobError):
        job.result()
def test_fake_job_cancel_at_deletion(tmp_path: Path) -> None:
    """Deleting a job flagged with cancel_at_deletion() must issue exactly one cancel call."""
    job: FakeJob[Any] = FakeJob(job_id="12", folder=tmp_path).cancel_at_deletion()  # type: ignore
    with patch("subprocess.call", return_value=None) as mock:
        assert mock.call_count == 0
        del job  # finalizer should trigger the (mocked) cancel subprocess call
        assert mock.call_count == 1
def test_fake_executor(tmp_path: Path) -> None:
    """Submitting through FakeExecutor and replaying the worker side yields the result."""
    executor = FakeExecutor(folder=tmp_path)
    job = executor.submit(_three_time, 8)
    assert job.job_id == "12"
    assert job.paths.submission_file.exists()
    # Emulate the worker-side environment and process the pickled submission.
    with utils.environment_variables(_TEST_CLUSTER_="slurm", SLURM_JOB_ID=str(job.job_id)):
        submission.process_job(folder=job.paths.folder)
    assert job.result() == 24
def test_fake_executor_batch(tmp_path: Path) -> None:
    """Exercise the batch() context manager, including its error cases."""
    executor = FakeExecutor(folder=tmp_path)
    with executor.batch():
        job = executor.submit(_three_time, 8)
    with executor.batch():  # make sure we can send a new batch
        job = executor.submit(_three_time, 8)
    assert isinstance(job, FakeJob)
    # bad update: parameters cannot change while a batch is open
    with pytest.raises(RuntimeError):
        with executor.batch():
            executor.update_parameters(blublu=12)
    # bad access: job attributes are unavailable before the batch is flushed
    with pytest.raises(RuntimeError):
        with executor.batch():
            job = executor.submit(_three_time, 8)
            job.job_id  # pylint: disable=pointless-statement
    # empty context: a batch with no submission warns
    with pytest.warns(RuntimeWarning):
        with executor.batch():
            pass
    # multi context: nested batches are forbidden
    with pytest.raises(RuntimeError):
        with executor.batch():
            with executor.batch():
                job = executor.submit(_three_time, 8)
if __name__ == "__main__":
args, kwargs = [], {} # oversimplisitic parser
for argv in sys.argv[1:]:
if "=" in argv:
key, val = argv.split("=")
kwargs[key.strip("-")] = val
else:
args.append(argv)
do_nothing(*args, **kwargs)
| 31.985849 | 108 | 0.630438 |
acf23318e53c9bb6c497c8f67f2e80a02201a956 | 3,085 | py | Python | test/functional/mining_getblocktemplate_longpoll.py | Deviantcoin/Wallet-2021 | 83123d3614703660997dfae022ead7f5ffc2a2f3 | [
"MIT"
] | 1 | 2021-11-23T15:51:12.000Z | 2021-11-23T15:51:12.000Z | test/functional/mining_getblocktemplate_longpoll.py | Deviantcoin/Wallet-2021 | 83123d3614703660997dfae022ead7f5ffc2a2f3 | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | Deviantcoin/Wallet-2021 | 83123d3614703660997dfae022ead7f5ffc2a2f3 | [
"MIT"
] | 1 | 2021-12-04T20:33:07.000Z | 2021-12-04T20:33:07.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from test_framework.test_framework import devTestFramework
from test_framework.util import *
import threading
class LongpollThread(threading.Thread):
    """Thread that issues a blocking getblocktemplate long-poll request.

    The request returns only once the node decides the template is stale
    (new chain tip or new mempool transaction), which the test observes
    via is_alive().
    """

    def __init__(self, node):
        threading.Thread.__init__(self)
        # query current longpollid
        templat = node.getblocktemplate()
        self.longpollid = templat['longpollid']
        # create a new connection to the node, we can't use the same
        # connection from two threads
        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)

    def run(self):
        # longpollid is an opaque string (BIP 22); wrapping it in int() would
        # raise ValueError and kill this thread immediately, making every
        # subsequent is_alive() check in the test meaningless.
        self.node.getblocktemplate({'longpollid': self.longpollid})
class GetBlockTemplateLPTest(devTestFramework):
    """Functional test: getblocktemplate long-polling terminates on the right events."""

    def set_test_params(self):
        # Two nodes: one long-polls, the other produces blocks to unblock it.
        self.num_nodes = 2

    def run_test(self):
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        templat = self.nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = self.nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)
        # Test 1: test that the longpolling wait if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())
        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that new transaction entered mempool
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())
        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on another node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())
        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())
if __name__ == '__main__':
    # Standard functional-test entry point: build the harness and run the test.
    GetBlockTemplateLPTest().main()
| 42.847222 | 112 | 0.681361 |
acf23341d4757b77053555cac5ea3db7944bc99c | 5,027 | py | Python | website/addons/base/serializer.py | fayetality/osf.io | b5ab5d7e0f1bff55823378d67e6b4cab1b67449b | [
"Apache-2.0"
] | null | null | null | website/addons/base/serializer.py | fayetality/osf.io | b5ab5d7e0f1bff55823378d67e6b4cab1b67449b | [
"Apache-2.0"
] | 1 | 2019-08-16T13:45:12.000Z | 2019-08-16T13:45:12.000Z | website/addons/base/serializer.py | lamdnhan/osf.io | fcf68873f7a66b23e6ea9d222efedb60c7797ac4 | [
"Apache-2.0"
] | null | null | null | import abc
from framework.auth.decorators import collect_auth
from website.util import api_url_for, web_url_for
class AddonSerializer(object):
    """Abstract base for serializing an add-on's node/user settings to JSON-able dicts.

    Subclasses must provide the abstract URL/ownership properties.
    """

    __metaclass__ = abc.ABCMeta

    # TODO take addon_node_settings, addon_user_settings
    def __init__(self, node_settings=None, user_settings=None):
        self.node_settings = node_settings
        self.user_settings = user_settings

    @abc.abstractproperty
    def addon_serialized_urls(self):
        pass

    @abc.abstractproperty
    def serialized_urls(self):
        pass

    @abc.abstractproperty
    def user_is_owner(self):
        pass

    @abc.abstractproperty
    def credentials_owner(self):
        pass

    @property
    def serialized_node_settings(self):
        result = {
            'nodeHasAuth': self.node_settings.has_auth,
            'userIsOwner': self.user_is_owner,
            'urls': self.serialized_urls,
            # No user settings means the current user has not authorized anything.
            'userHasAuth': self.user_settings.has_auth if self.user_settings else False,
        }
        if self.node_settings.has_auth:
            owner = self.credentials_owner
            if owner:
                # Expose the credential owner's profile link and display name.
                result['urls']['owner'] = web_url_for('profile_view_id',
                    uid=owner._primary_key)
                result['ownerName'] = owner.fullname
        return result

    @property
    def serialized_user_settings(self):
        # Base class contributes nothing; subclasses extend this dict.
        return {}
class OAuthAddonSerializer(AddonSerializer):
    """Serializer for add-ons backed by OAuth external accounts."""

    @property
    def serialized_accounts(self):
        accounts = self.user_settings.external_accounts
        return [self.serialize_account(account) for account in accounts]

    @property
    def serialized_user_settings(self):
        retval = super(OAuthAddonSerializer, self).serialized_user_settings
        # Without user settings there are no accounts to report.
        retval['accounts'] = self.serialized_accounts if self.user_settings else []
        return retval

    def serialize_account(self, external_account):
        """Serialize one ExternalAccount, including the nodes it is attached to."""
        if external_account is None:
            return None
        attached = self.user_settings.get_attached_nodes(external_account=external_account)
        return {
            'id': external_account._id,
            'provider_id': external_account.provider_id,
            'provider_name': external_account.provider_name,
            'provider_short_name': external_account.provider,
            'display_name': external_account.display_name,
            'profile_url': external_account.profile_url,
            'nodes': [self.serialize_granted_node(node) for node in attached],
        }

    @collect_auth
    def serialize_granted_node(self, node, auth):
        # Delegate URL generation to the node-level serializer of this provider.
        node_settings = node.get_addon(
            self.user_settings.oauth_provider.short_name
        )
        serializer = node_settings.serializer(node_settings=node_settings)
        urls = serializer.addon_serialized_urls
        urls['view'] = node.url
        # Hide the title from users without view permission on the node.
        title = node.title if node.can_view(auth) else None
        return {
            'id': node._id,
            'title': title,
            'urls': urls,
        }
class CitationsAddonSerializer(OAuthAddonSerializer):
    """Serializer shared by citation-manager add-ons (reference managers)."""

    # URL keys every concrete citations add-on must provide.
    REQUIRED_URLS = ['importAuth', 'folders', 'config', 'deauthorize', 'accounts']

    @property
    def serialized_urls(self):
        external_account = self.node_settings.external_account
        ret = {
            'auth': api_url_for('oauth_connect',
                                service_name=self.node_settings.provider_name),
            'settings': web_url_for('user_addons'),
            'files': self.node_settings.owner.url,
        }
        if external_account and external_account.profile_url:
            ret['owner'] = external_account.profile_url

        addon_urls = self.addon_serialized_urls
        # Make sure developer returns set of needed urls
        for url in self.REQUIRED_URLS:
            # Fixed typo in the diagnostic ("addon_serilized_urls"), which
            # previously pointed developers at a non-existent property name.
            assert url in addon_urls, "addon_serialized_urls must include key '{0}'".format(url)
        ret.update(addon_urls)

        return ret

    @property
    def serialized_node_settings(self):
        result = super(CitationsAddonSerializer, self).serialized_node_settings
        result['folder'] = {
            'name': self.node_settings.selected_folder_name
        }
        return result

    @property
    def user_is_owner(self):
        """True when the current user owns the linked account or has any account of this provider."""
        if self.user_settings is None:
            return False

        user_accounts = self.user_settings.external_accounts
        return bool(
            (
                self.node_settings.has_auth and
                (self.node_settings.external_account in user_accounts)
            ) or len(user_accounts)
        )

    @property
    def credentials_owner(self):
        return self.node_settings.user_settings.owner

    @abc.abstractmethod
    def serialize_folder(self, folder):
        pass

    def serialize_citation(self, citation):
        """Wrap a CSL citation dict in the file-like shape the front end expects."""
        return {
            'csl': citation,
            'kind': 'file',
            'id': citation['id'],
        }
acf2335305dd7f2c48d39eaaff106b0afcf9ed0c | 1,680 | py | Python | ariadne/scalars.py | commandtab/ariadne | c154b8ed3026e500d1c8dd8d792fa728ce17f6b4 | [
"BSD-3-Clause"
] | null | null | null | ariadne/scalars.py | commandtab/ariadne | c154b8ed3026e500d1c8dd8d792fa728ce17f6b4 | [
"BSD-3-Clause"
] | null | null | null | ariadne/scalars.py | commandtab/ariadne | c154b8ed3026e500d1c8dd8d792fa728ce17f6b4 | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional
from graphql.type import GraphQLScalarType, GraphQLSchema
from .types import Bindable, ScalarOperation
class Scalar(Bindable):
    """Collects Python handlers for a schema-defined custom scalar and binds
    them onto the matching GraphQLScalarType at schema-binding time.
    """

    _serialize: Optional[ScalarOperation]
    _parse_value: Optional[ScalarOperation]
    _parse_literal: Optional[ScalarOperation]

    def __init__(self, name: str) -> None:
        self.name = name
        # All handlers start unset; decorators below register them.
        self._serialize = None
        self._parse_value = None
        self._parse_literal = None

    def serializer(self, f: ScalarOperation) -> ScalarOperation:
        """Decorator registering the Python -> wire serializer."""
        self._serialize = f
        return f

    def value_parser(self, f: ScalarOperation) -> ScalarOperation:
        """Decorator registering the variable-value parser."""
        self._parse_value = f
        return f

    def literal_parser(self, f: ScalarOperation) -> ScalarOperation:
        """Decorator registering the AST-literal parser."""
        self._parse_literal = f
        return f

    def bind_to_schema(self, schema: GraphQLSchema) -> None:
        graphql_type = schema.type_map.get(self.name)
        self.validate_graphql_type(graphql_type)

        # Attach only the handlers that were actually registered, leaving
        # the scalar's defaults in place for the rest.
        handlers = (
            ("serialize", self._serialize),
            ("parse_value", self._parse_value),
            ("parse_literal", self._parse_literal),
        )
        for attr_name, handler in handlers:
            if handler:
                setattr(graphql_type, attr_name, handler)

    def validate_graphql_type(self, graphql_type) -> None:
        if not graphql_type:
            raise ValueError("Scalar %s is not defined in the schema" % self.name)
        if not isinstance(graphql_type, GraphQLScalarType):
            raise ValueError(
                "%s is defined in the schema, but it is instance of %s (expected %s)"
                % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
            )
| 33.6 | 86 | 0.669048 |
acf233c9aea79a450fc8f017c1ed18d54bc29a5a | 5,682 | py | Python | shuttl/database.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | 2 | 2017-06-26T18:06:58.000Z | 2017-10-11T21:45:29.000Z | shuttl/database.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | null | null | null | shuttl/database.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | null | null | null | from shuttl import db
import sqlalchemy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm.dynamic import Query
## Raised internally by BaseModel serialization to signal that a value must be
#  skipped (already-serialized object in the recursion guard, or depth exhausted).
class DoNotSerializeException(Exception): pass
## Base model for all Model objects
class BaseModel(object):

    ## the ID of the object.
    id = db.Column(db.Integer, primary_key=True)

    ## tells SQLAlchemy what to call the table (lower-cased class name).
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

    ## Saves the object to the data base. In case of an error, this rolls back the session and re-raises the issue.
    def save(self):
        try:
            db.session.add(self)
            db.session.commit()
            pass
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
            # it is acceptable here only because the exception is re-raised.
            db.session.rollback()
            raise
        pass

    ## Delete the object from the database
    def delete(self):
        db.session.delete(self)
        db.session.commit()
        pass

    ## Checks to see if two objects are the equal, essentially if the IDs are equal and they are the same type
    # \param other the object to check if its equal
    # \return true if the two objects are equal, false otherwise.
    def __eq__(self, other):
        if other is None:
            return False
        return self.id == other.id and type(self) == type(other)

    ## Hashes the objects. usage: hash(object)
    # \return the hash of the ID.
    def __hash__(self):
        return hash(self.id)

    ## a property that holds all of the fields that the JSON representation will hold. By default, this will contain all
    # relationships and columns of the object (foreign-key columns have the "_id" suffix stripped).
    # \note to override this method, you can make the list yourself (eg: ["name", "id", "face"]) or you can call the base
    # method and add or remove fields from the result (eg: res = super(Class, self).__json__(); res.add("face"))
    # \return a set containing all properties to add to the serialization
    @property
    def __json__(self):
        fields = {c.name.replace("_id", "") for c in self.__table__.columns}
        for i in self.__mapper__.relationships:
            if i.key not in fields:
                fields.add(i.key)
                pass
            pass
        return fields

    ## creates an object and then saves the object.
    @classmethod
    def Create(cls, *args, **kwargs):
        inst = cls(*args, **kwargs)
        inst.save()
        return inst

    ## This tries to cast all of the possible objects into json serializable objects (eg, strings, ints, and bools)
    # this is gets called recursively to try to make every object JSON Serializable. This does most of the serialization
    # work. This is Private.
    # \param obj the object to serialize
    # \param stack a set of all BaseModel (or a subclass) objects that have been serialized
    # \param level this is to prevent the serialization from going to deep.
    # \raises DoNotSerializeException if the level = -1 or obj is in the stack
    # \return this function returns many different things. If obj is a BaseModel (or a subclass), then this will return
    # a dictionary. If obj is a list or a subclass of Query, this returns a list containing all objects in the original
    # list serialized. If obj is not a str, int, float, or bool, this returns obj casted to a string. otherwise this
    # returns obj
    def _serialize(self, obj, stack, level):
        isSubClass = issubclass(obj.__class__, BaseModel)
        # Precedence note: this reads as (isSubClass and obj in stack) or (level == -1).
        if isSubClass and obj in stack or level + 1 == 0:
            raise DoNotSerializeException
        if isSubClass and obj not in stack:
            return obj.serialize(stack=stack, max_level=level-1)
        elif type(obj) not in {str, int, float, bool}:
            try:
                # Iterables (lists, Query results, ...) are serialized element-wise;
                # NOTE(review): iterating a dict here yields only its keys.
                objIter = iter(obj)
                lst = []
                for i in objIter:
                    try:
                        lst.append(self._serialize(i, stack, level))
                        pass
                    except DoNotSerializeException:
                        continue
                    pass
                return lst
            except TypeError:
                # Non-iterable, non-primitive values fall back to their string form.
                return str(obj)
            pass
        return obj

    ## This is the public function that will get called when the object needs to be serialized.
    # \param stack this is a set of all rendered objects. This is to prevent infinite recursion (eg. An organization
    # serializes a website -> the website serializes the organization again -> the organization serializes the website
    # again . . . this continues forever). This defaults to none because of
    # http://docs.python-guide.org/en/latest/writing/gotchas/
    # \param max_level this is how many BaseModel objects you want to render. The Default is 10, for infinite use
    # float("inf")
    # \return a dictionary representing the object.
    # \note why return a dictionary over a JSON string? Its trivial to convert a dictionary to a JSON string, so I rather
    # return a dictionary because an dictionary is easier to modify than a string. You can convert the JSON string to
    # a dictionary trivially but then you have to convert it back to JSON string and you still have a dictionary. SO why
    # not just return a dictionary?
    def serialize(self, stack=None, max_level=10, *args, **kwargs):
        if stack is None:
            stack = set()
            pass
        stack.add(self)
        dictionary = {}
        for name in self.__json__:
            try:
                value = self._serialize(getattr(self, name), stack, max_level)
                pass
            except (DoNotSerializeException, AttributeError):
                # Skip fields that refuse serialization or do not exist on the instance.
                continue
            dictionary[name] = value
            pass
        return dictionary
acf2340eecf705e908fe56b7fab3db3db53b5ad9 | 6,791 | py | Python | MNIST/reconstruction_fooler_maker.py | hendrycks/fooling | 0db6fce956e84de0229461743a42f48bbdc8f3df | [
"MIT"
] | 10 | 2016-08-02T21:01:54.000Z | 2021-07-29T19:11:08.000Z | MNIST/reconstruction_fooler_maker.py | hendrycks/fooling | 0db6fce956e84de0229461743a42f48bbdc8f3df | [
"MIT"
] | 3 | 2017-11-13T01:58:48.000Z | 2018-08-04T07:42:30.000Z | MNIST/reconstruction_fooler_maker.py | hendrycks/fooling | 0db6fce956e84de0229461743a42f48bbdc8f3df | [
"MIT"
] | 4 | 2017-05-16T19:48:14.000Z | 2019-06-04T10:50:18.000Z | import numpy as np
import tensorflow as tf
import pickle
import sys
import os
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST (downloads to MNIST_data/ on first use); labels are class ints.
mnist = input_data.read_data_sets("MNIST_data/")

# Network/data dimensions: 28x28 grayscale digits, 10 classes,
# two 256-unit hidden layers, and a 10-dim reconstruction bottleneck.
n_labels = 10
n_channels = 1
image_width = 28
n_hidden = 256
n_input = image_width ** 2
bottleneck = 10

# Optional CLI argument: number of fooling images to generate (defaults to 1).
try:
    num_to_make = int(sys.argv[1])
    print('Number of foolers to generate:', num_to_make)
except:
    # NOTE(review): bare except swallows any parse failure, including a missing argv.
    print('Defaulted to making one fooling image')
    num_to_make = 1
# Classifier with an auxiliary reconstruction decoder: 784 -> 256 -> 256 -> 10
# logits, plus a decoder that reconstructs the input from a 10-dim bottleneck
# fed by the second hidden layer and the logits.
graph = tf.Graph()
with graph.as_default():
    # fixing input to be 1 because we know input size
    x = tf.placeholder(tf.float32, [None, n_input])
    y = tf.placeholder(tf.int64, [None])

    # Weights are initialized with column-normalized Gaussian noise.
    W = {
        '1': tf.Variable(tf.nn.l2_normalize(tf.random_normal([n_input, n_hidden]), 0)),
        '2': tf.Variable(tf.nn.l2_normalize(tf.random_normal([n_hidden, n_hidden]), 0)),
        'out': tf.Variable(tf.nn.l2_normalize(tf.random_normal([n_hidden, n_labels]), 0)),
        'decodeh2': tf.Variable(tf.nn.l2_normalize(tf.random_normal([n_hidden, bottleneck]), 0)),
        'out_info': tf.Variable(tf.nn.l2_normalize(tf.random_normal([n_labels, bottleneck]), 0)),
        'd1': tf.Variable(tf.nn.l2_normalize(tf.random_normal([bottleneck, n_hidden]), 0)),
        'salvage': tf.Variable(tf.nn.l2_normalize(tf.random_normal([n_hidden, n_input]), 0)),
    }
    b = {
        '1': tf.Variable(tf.zeros([n_hidden])),
        '2': tf.Variable(tf.zeros([n_hidden])),
        'out': tf.Variable(tf.zeros([n_labels])),
        'd2': tf.Variable(tf.zeros([bottleneck])),
        'd1': tf.Variable(tf.zeros([n_hidden])),
        'salvage': tf.Variable(tf.zeros([n_input])),
    }

    # tanh-based approximation of the GELU nonlinearity.
    def gelu_fast(__x):
        return 0.5 * __x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (__x + 0.044715 * tf.pow(__x, 3))))
    f = gelu_fast

    def model(_x):
        """Return (logits, reconstruction) for input batch `_x`."""
        h1 = f(tf.matmul(_x, W['1']) + b['1'])
        h2 = f(tf.matmul(h1, W['2']) + b['2'])
        out = tf.matmul(h2, W['out']) + b['out']
        # Bottleneck mixes the top hidden layer with the logits before decoding.
        decode2 = f(tf.matmul(h2, W['decodeh2']) + tf.matmul(out, W['out_info']) + b['d2'])
        decode1 = f(tf.matmul(decode2, W['d1']) + b['d1'])
        salvaged = tf.matmul(decode1, W['salvage']) + b['salvage']
        return out, salvaged

    pred, recon = model(x)

    starter_learning_rate = 0.001
    ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(pred, y))
    # Total loss = classification cross-entropy + reconstruction MSE.
    loss = ce + 1*tf.reduce_mean(tf.square(x - recon))
    # lr is a constant tensor that gets overridden via feed_dict during training.
    lr = tf.constant(0.001)
    optimizer = tf.train.AdamOptimizer(lr).minimize(loss)

    wrong_pred = tf.not_equal(tf.argmax(pred, 1), y)
    compute_error = 100. * tf.reduce_mean(tf.to_float(wrong_pred))

# InteractiveSession installs itself (and its graph) as default, so later
# top-level .eval()/.run() calls work without an explicit context.
sess = tf.InteractiveSession(graph=graph)
tf.initialize_all_variables().run()
batch_size = 128
training_epochs = 20
num_batches = int(mnist.train.num_examples / batch_size)

# Exponential moving averages for monitoring, seeded at chance-level values.
ce_ema = 2.3  # - log(0.1)
err_ema = 0.9
risk_loss_ema = 0.3  # - log(0.5)  (NOTE(review): appears unused below)

learning_rate = 0.001
for epoch in range(training_epochs):
    # NOTE(review): with training_epochs == 20 this decay branch never fires.
    if epoch >= 20:
        learning_rate = 0.0001
    for i in range(num_batches):
        bx, by = mnist.train.next_batch(batch_size)
        _, err, l = sess.run([optimizer, compute_error, ce], feed_dict={x: bx, y: by, lr: learning_rate})
        ce_ema = ce_ema * 0.95 + 0.05 * l
        err_ema = err_ema * 0.95 + 0.05 * err
    # NOTE(review): this second loop *trains* on the validation split as well.
    for i in range(mnist.validation.num_examples // batch_size):
        bx, by = mnist.validation.next_batch(batch_size)
        _, err, l = sess.run([optimizer, compute_error, ce], feed_dict={x: bx, y: by, lr: learning_rate})
        ce_ema = ce_ema * 0.95 + 0.05 * l
        err_ema = err_ema * 0.95 + 0.05 * err
    print('Epoch number:', epoch, 'Error EMA:', err_ema, 'CE EMA', ce_ema)
print('Done training')

# Mean training image (28x28), kept for optional visualization below.
mean_img = tf.reshape(tf.reduce_mean(mnist.train.next_batch(1000)[0], 0, keep_dims=True), [28,28]).eval()

# def show_image(img, rescale=False, add_mean=False):
#     img = img.reshape(28,28)
#
#     img = img.copy()
#     if add_mean:
#         img += mean_img
#     if rescale:
#         low, high = np.min(img), np.max(img)
#         img = (img - low) / (high - low)
#     plt.imshow(img, vmin=0, vmax=1)
#     plt.gca().axis('off')
def make_fooling_image(image, target, reg=1., step=1/255., max_iters=1000, confidence_thresh=0.5):
    """Gradient-descend the input toward class `target`, L2-regularized to stay
    near the original image, until the network's confidence passes the threshold.

    Returns (fooling_image, decoded) where `decoded` is the reconstruction of
    the last evaluated fooling image.
    """
    fooling_image = image.copy()
    for j in range(max_iters):
        # NOTE(review): tf.gradients/model/softmax are re-built on every
        # iteration, so the default graph grows without bound — hoisting these
        # ops out of the loop would make this far faster.
        dFool, decoded, probs = sess.run([tf.gradients(ce + reg*tf.reduce_sum(tf.square(image[0] -
                                                                                        fooling_image[0]))/2., x)[0],
                                          recon, tf.nn.softmax(model(fooling_image)[0])],
                                         feed_dict={x: fooling_image, y: [target]})
        fooling_image[0] -= step * (np.squeeze(dFool[0]))
        fooling_image[0] = np.clip(fooling_image[0], 0, 1)  # poor man's box constraints
        fool_prob = probs[0, target]
        if j % 10 == 0:
            print('Fooling Image Probability Percent (iter %s): %s' % (j, 100.*fool_prob))
        if fool_prob > confidence_thresh:
            print('Final food prob percent:', 100*fool_prob)
            break
    return fooling_image, decoded
# Collected perturbation norms for successful foolers.
l1_distances = []
l2_distances = []
linf_distances = []

# Resume an earlier run if a pickle already exists; start fresh otherwise.
try:
    history = pickle.load(open("./data/mnist_foolers_reconstruction.p", "rb"))
except:
    history = {}

if not os.path.exists('./data'):
    os.makedirs('./data')

for i in range(num_to_make):
    image, true_y = mnist.test.next_batch(1)
    # ensure the network gets our current example correct
    while true_y != tf.argmax(model(tf.constant(image))[0], 1).eval()[0]:
        image, true_y = mnist.test.next_batch(1)

    # Pick a random target class different from the true label.
    target_y = np.random.choice(10)
    while target_y == true_y:
        target_y = np.random.choice(10)

    fooling_image, decoded = make_fooling_image(image, target_y)

    confidence = sess.run(tf.nn.softmax(model(fooling_image)[0])[0, target_y])
    if confidence < 0.5:
        fooled = 'not_fooled'
        print('Network is NOT fooled!')
    else:
        fooled = 'fooled'
        print('Network is fooled!')

    if fooled == 'fooled':
        # Record perturbation norms and everything needed to reproduce the example.
        l2 = np.sum(np.square(image - fooling_image))
        l1 = np.sum(np.abs(image - fooling_image))
        linf = np.sum(np.max(np.abs(image - fooling_image)))
        l2_distances.append(l2)
        l1_distances.append(l1)
        linf_distances.append(linf)
        history[str(i)] = [true_y, target_y, fooling_image, image,
                           decoded, sess.run(model(tf.constant(image))[1]), l2, l1]
        # Running summary after each collected fooler.
        print('Number of fooling examples collected:', len(l2_distances))
        print('L1 mean:', np.mean(np.array(l1_distances)))
        print('L2 mean:', np.mean(np.array(l2_distances)))
        print('LInf mean:', np.mean(np.array(linf_distances)))

# Persist all collected foolers for later analysis.
pickle.dump(history, open("./data/mnist_foolers_reconstruction.p", "wb"))
| 36.12234 | 119 | 0.613459 |
acf23446994ff0781db490d3e6e815ac5ba73258 | 6,415 | py | Python | tests/ut/python/parallel/test_stridedslice.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | tests/ut/python/parallel/test_stridedslice.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_stridedslice.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
class Net(Cell):
    """StridedSlice-on-weight network: slice(w1) -> mul(x, .) -> mul(., w2).

    `strategy1`/`strategy2` shard the first Mul and the StridedSlice; `mask`
    sets StridedSlice's begin_mask so mask-related sharding errors can be tested.
    """

    def __init__(self, weight, w2, begin, end, strides, strategy1=None, strategy2=None, is_parameter=True, mask=0):
        super().__init__()
        self.mul = P.Mul().shard(strategy1)
        self.strided_slice = P.StridedSlice(begin_mask=mask).shard(strategy2)
        # The sliced operand can be a trainable Parameter or a plain Tensor.
        if is_parameter:
            self.weight = Parameter(weight, "w1")
        else:
            self.weight = weight
        self.mul2 = P.Mul()
        self.weight2 = Parameter(w2, "w2")
        self.begin = begin
        self.end = end
        self.strides = strides

    def construct(self, x, b):
        out = self.strided_slice(self.weight, self.begin, self.end, self.strides)
        out = self.mul(x, out)
        out = self.mul2(out, self.weight2)
        return out
class Net2(Cell):
    """Variant with StridedSlice applied to the Mul *output* instead of a weight."""

    def __init__(self, weight2, begin, end, strides, strategy1=None, strategy2=None):
        super().__init__()
        self.mul = P.Mul().shard(strategy1)
        self.strided_slice = P.StridedSlice().shard(strategy2)
        self.weight2 = Parameter(weight2, "w2")
        self.begin = begin
        self.end = end
        self.strides = strides

    def construct(self, x, b):
        out = self.mul(x, self.weight2)
        out = self.strided_slice(out, self.begin, self.end, self.strides)
        return out
# Shared fixtures: input `_x`, sliceable weight `_w1`, elementwise weight `_w2`,
# and `_b` (second compile argument, unused by construct).
_x = Tensor(np.ones([128, 64, 1]), dtype=ms.float32)
_w1 = Tensor(np.ones([256, 64, 32]), dtype=ms.float32)
_w2 = Tensor(np.ones([128, 64, 1]), dtype=ms.float32)
_b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
def compile_net(net):
    """Compile one training step of `net` under the current auto-parallel context,
    then reset the context so tests stay independent."""
    context.set_context(save_graphs=True)
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    train_net.set_train()
    _executor.compile(train_net, _x, _b)
    context.reset_auto_parallel_context()
def test_stridedslice_no_fully_fetch_split_error():
    # Splitting a dimension that the slice does not fully fetch must be rejected.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((2, 2, 2), (2, 2, 2))
    strategy2 = ((2, 2, 2),)
    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), (1, 1, 1), strategy1, strategy2, is_parameter=True)
    with pytest.raises(RuntimeError):
        compile_net(net)
def test_stridedslice_strides_no_1_split_error():
    # Sharding a StridedSlice whose stride is not 1 must be rejected.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((2, 2, 2), (2, 2, 2))
    strategy2 = ((1, 2, 2),)
    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), (1, 1, 2), strategy1, strategy2, is_parameter=True)
    with pytest.raises(RuntimeError):
        compile_net(net)
def test_stridedslice_mask_no_0_split_error():
    # A non-zero begin_mask combined with sharding must be rejected.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((2, 2, 2), (2, 2, 2))
    strategy2 = ((1, 2, 2),)
    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), (1, 1, 1), strategy1, strategy2, is_parameter=True, mask=1)
    with pytest.raises(RuntimeError):
        compile_net(net)
def test_stridedslice_begin_size_smaller():
    # begin/end shorter than the input rank (2 vs 3 dims) should still compile.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 4, 1), (1, 4, 2))
    strategy2 = ((1, 4, 2),)
    net = Net(_w1, _w2, (0, 0), (128, 64), (1, 1), strategy1, strategy2, is_parameter=True)
    compile_net(net)
def test_stridedslice_parameter():
    # Valid sharding of a StridedSlice over a Parameter input compiles cleanly.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 4, 1), (1, 4, 2))
    strategy2 = ((1, 4, 2),)
    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), (1, 1, 1), strategy1, strategy2, is_parameter=True)
    compile_net(net)
def test_stridedslice_tensor():
    # Same strategies as above, but slicing a plain Tensor instead of a Parameter.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 4, 1), (1, 4, 2))
    strategy2 = ((1, 4, 2),)
    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), (1, 1, 1), strategy1, strategy2, is_parameter=False)
    compile_net(net)
def test_stridedslice_parameter_no_full_split():
    # Slice strategy uses fewer devices (1*2*2) than available (8); should compile.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 4, 1), (1, 4, 2))
    strategy2 = ((1, 2, 2),)
    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), (1, 1, 1), strategy1, strategy2, is_parameter=True)
    compile_net(net)
def test_stridedslice_output():
    """StridedSlice whose output feeds the next op compiles with a full split."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8, 1), (1, 8, 1))
    strategy2 = ((1, 8, 1),)
    net = Net2(_w2, (0, 0, 0), (64, 64, 1), (1, 1, 1), strategy1, strategy2)
    compile_net(net)
def test_stridedslice_output_no_full_split():
    """StridedSlice output case with a partial (4-way) split also compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8, 1), (1, 8, 1))
    strategy2 = ((1, 4, 1),)
    net = Net2(_w2, (0, 0, 0), (64, 64, 1), (1, 1, 1), strategy1, strategy2)
    compile_net(net)
def test_stridedslice_no_strategy():
    """StridedSlice without an explicit strategy (None) still compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8, 1), (1, 8, 1))
    strategy2 = None
    net = Net2(_w2, (0, 0, 0), (128, 64, 1), (1, 1, 1), strategy1, strategy2)
    compile_net(net)
def test_stridedslice_auto_parallel():
    """StridedSlice compiles under fully automatic strategy search."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net2(_w2, (0, 0, 0), (32, 64, 1), (1, 1, 1))
    compile_net(net)
| 38.644578 | 115 | 0.66251 |
acf23452396c21a97545966143be260b340536b8 | 1,528 | py | Python | include/cpk/cli/commands/machine/remove.py | afdaniele/cpk | cfb4379707ca7b046d3dee1974666585afb89782 | [
"MIT"
] | 4 | 2021-07-26T16:27:46.000Z | 2021-12-16T21:54:23.000Z | include/cpk/cli/commands/machine/remove.py | afdaniele/cpk | cfb4379707ca7b046d3dee1974666585afb89782 | [
"MIT"
] | 21 | 2021-07-26T16:42:02.000Z | 2022-01-20T19:19:23.000Z | include/cpk/cli/commands/machine/remove.py | afdaniele/cpk | cfb4379707ca7b046d3dee1974666585afb89782 | [
"MIT"
] | null | null | null | import argparse
from typing import Optional
from cpk import cpkconfig
from cpk.cli import AbstractCLICommand, cpklogger
from cpk.types import Machine, Arguments
from cpk.utils.misc import ask_confirmation
class CLIMachineRemoveCommand(AbstractCLICommand):
    """Implements `cpk machine remove`: delete a configured machine after
    showing its details and asking the user for confirmation."""

    KEY = 'machine remove'

    @staticmethod
    def parser(parent: Optional[argparse.ArgumentParser] = None,
               args: Optional[Arguments] = None) -> argparse.ArgumentParser:
        """Build the argparse parser for this subcommand (single positional name)."""
        parser = argparse.ArgumentParser(parents=[parent], add_help=False)
        parser.add_argument(
            'name',
            type=str,
            help="Name of the machine to remove"
        )
        # ---
        return parser

    @staticmethod
    def execute(_: Machine, parsed: argparse.Namespace) -> bool:
        """Remove the machine named in *parsed*; returns False when it does
        not exist, raises KeyboardInterrupt when the user declines.

        NOTE(review): on success this falls off the end and returns None,
        not True as the annotation suggests — confirm callers only check
        for an explicit False.
        """
        # make sure the machine exists
        if parsed.name not in cpkconfig.machines:
            cpklogger.error(f"The machine '{parsed.name}' does not exist.")
            return False
        # get machine
        machine = cpkconfig.machines[parsed.name]
        # show some info
        cpklogger.info("The following machine will be deleted.")
        print(machine)
        # ask for confirmation
        granted = ask_confirmation(
            cpklogger,
            message=f"The machine '{parsed.name}' will be deleted. This cannot be undone.",
            default='n'
        )
        if not granted:
            # aborting via KeyboardInterrupt mirrors a Ctrl-C cancellation
            raise KeyboardInterrupt()
        # remove machine
        machine.remove(cpklogger)
        cpklogger.info("Machine deleted.")
acf235ae049359efb3ca42a1c7cb8a219c0aa2f5 | 10,326 | py | Python | featuretools/tests/primitive_tests/test_features_deserializer.py | Featuretools/featuretools | 365abd9519d2eec8eec75936644a7b865d4ef40a | [
"BSD-3-Clause"
] | 4,299 | 2017-09-09T02:41:29.000Z | 2019-10-10T05:41:11.000Z | featuretools/tests/primitive_tests/test_features_deserializer.py | Featuretools/featuretools | 365abd9519d2eec8eec75936644a7b865d4ef40a | [
"BSD-3-Clause"
] | 729 | 2017-09-22T01:54:48.000Z | 2019-10-09T15:36:17.000Z | featuretools/tests/primitive_tests/test_features_deserializer.py | Featuretools/featuretools | 365abd9519d2eec8eec75936644a7b865d4ef40a | [
"BSD-3-Clause"
] | 532 | 2017-09-13T14:18:22.000Z | 2019-10-08T06:13:46.000Z | import logging
import pandas as pd
import pytest
import featuretools as ft
from featuretools.feature_base.features_deserializer import FeaturesDeserializer
from featuretools.feature_base.features_serializer import SCHEMA_VERSION
def test_single_feature(es):
    """A single identity feature round-trips through deserialization."""
    feature = ft.IdentityFeature(es["log"].ww["value"])
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [feature.unique_name()],
        "feature_definitions": {feature.unique_name(): feature.to_dictionary()},
    }
    deserializer = FeaturesDeserializer(dictionary)
    expected = [feature]
    assert expected == deserializer.to_list()
def test_multioutput_feature(es):
    """Multi-output (NMostCommon) features and their indexed slices keep
    their unique names and ordering after deserialization."""
    value = ft.IdentityFeature(es["log"].ww["product_id"])
    threecommon = ft.primitives.NMostCommon()
    tc = ft.Feature(value, parent_dataframe_name="sessions", primitive=threecommon)
    features = [tc, value]
    for i in range(3):
        # one aggregation built on each output slice, plus the slice itself
        features.append(
            ft.Feature(
                tc[i],
                parent_dataframe_name="customers",
                primitive=ft.primitives.NumUnique,
            )
        )
        features.append(tc[i])
    flist = [feat.unique_name() for feat in features]
    fd = [feat.to_dictionary() for feat in features]
    fdict = dict(zip(flist, fd))
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": flist,
        "feature_definitions": fdict,
    }
    deserializer = FeaturesDeserializer(dictionary).to_list()
    for i in range(len(features)):
        assert features[i].unique_name() == deserializer[i].unique_name()
def test_base_features_in_list(es):
    """Base features explicitly listed are returned alongside derived ones."""
    value = ft.IdentityFeature(es["log"].ww["value"])
    max_feat = ft.AggregationFeature(value, "sessions", ft.primitives.Max)
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name(), value.unique_name()],
        "feature_definitions": {
            max_feat.unique_name(): max_feat.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    expected = [max_feat, value]
    assert expected == deserializer.to_list()
def test_base_features_not_in_list(es):
    """Dependencies present only in feature_definitions (not feature_list)
    are used for reconstruction but excluded from the result."""
    value = ft.IdentityFeature(es["log"].ww["value"])
    value_x2 = ft.TransformFeature(value, ft.primitives.MultiplyNumericScalar(value=2))
    max_feat = ft.AggregationFeature(value_x2, "sessions", ft.primitives.Max)
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name()],
        "feature_definitions": {
            max_feat.unique_name(): max_feat.to_dictionary(),
            value_x2.unique_name(): value_x2.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    expected = [max_feat]
    assert expected == deserializer.to_list()
def test_later_schema_version(es, caplog):
    """Saved features with a NEWER schema version emit an upgrade warning
    (but loading is still attempted); an older minor version does not."""
    def test_version(major, minor, patch, raises=True):
        version = ".".join([str(v) for v in [major, minor, patch]])
        if raises:
            warning_text = (
                "The schema version of the saved features"
                "(%s) is greater than the latest supported (%s). "
                "You may need to upgrade featuretools. Attempting to load features ..."
                % (version, SCHEMA_VERSION)
            )
        else:
            warning_text = None
        _check_schema_version(version, es, warning_text, caplog, "warn")
    major, minor, patch = [int(s) for s in SCHEMA_VERSION.split(".")]
    test_version(major + 1, minor, patch)
    test_version(major, minor + 1, patch)
    test_version(major, minor, patch + 1)
    # lower minor with higher patch is still "not later": no warning expected
    test_version(major, minor - 1, patch + 1, raises=False)
def test_earlier_schema_version(es, caplog):
    """Saved features with an unsupported OLDER major version log a notice
    (loading is still attempted); older minor/patch versions do not."""
    def test_version(major, minor, patch, raises=True):
        version = ".".join([str(v) for v in [major, minor, patch]])
        if raises:
            warning_text = (
                "The schema version of the saved features"
                "(%s) is no longer supported by this version "
                "of featuretools. Attempting to load features ..." % (version)
            )
        else:
            warning_text = None
        _check_schema_version(version, es, warning_text, caplog, "log")
    major, minor, patch = [int(s) for s in SCHEMA_VERSION.split(".")]
    test_version(major - 1, minor, patch)
    test_version(major, minor - 1, patch, raises=False)
    test_version(major, minor, patch - 1, raises=False)
def test_unknown_feature_type(es):
    """An unrecognized feature type name raises RuntimeError on to_list()."""
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": ["feature_1"],
        "feature_definitions": {
            "feature_1": {"type": "FakeFeature", "dependencies": [], "arguments": {}}
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    with pytest.raises(RuntimeError, match='Unrecognized feature type "FakeFeature"'):
        deserializer.to_list()
def test_unknown_primitive_type(es):
    """A primitive type name missing from its module raises RuntimeError."""
    value = ft.IdentityFeature(es["log"].ww["value"])
    max_feat = ft.AggregationFeature(value, "sessions", ft.primitives.Max)
    max_dict = max_feat.to_dictionary()
    # corrupt the serialized primitive's type name
    max_dict["arguments"]["primitive"]["type"] = "FakePrimitive"
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name(), value.unique_name()],
        "feature_definitions": {
            max_feat.unique_name(): max_dict,
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    with pytest.raises(RuntimeError) as excinfo:
        deserializer.to_list()
    error_text = (
        'Primitive "FakePrimitive" in module "%s" not found'
        % ft.primitives.Max.__module__
    )
    assert error_text == str(excinfo.value)
def test_unknown_primitive_module(es):
    """A primitive whose module cannot be resolved raises RuntimeError."""
    value = ft.IdentityFeature(es["log"].ww["value"])
    max_feat = ft.AggregationFeature(value, "sessions", ft.primitives.Max)
    max_dict = max_feat.to_dictionary()
    # corrupt the serialized primitive's module path
    max_dict["arguments"]["primitive"]["module"] = "fake.module"
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name(), value.unique_name()],
        "feature_definitions": {
            max_feat.unique_name(): max_dict,
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    with pytest.raises(RuntimeError) as excinfo:
        deserializer.to_list()
    error_text = 'Primitive "Max" in module "fake.module" not found'
    assert error_text == str(excinfo.value)
def test_feature_use_previous_pd_timedelta(es):
    """use_previous given as a pandas Timedelta round-trips intact."""
    value = ft.IdentityFeature(es["log"].ww["id"])
    td = pd.Timedelta(12, "W")
    count_feature = ft.AggregationFeature(
        value, "customers", ft.primitives.Count, use_previous=td
    )
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [count_feature.unique_name(), value.unique_name()],
        "feature_definitions": {
            count_feature.unique_name(): count_feature.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    expected = [count_feature, value]
    assert expected == deserializer.to_list()
def test_feature_use_previous_pd_dateoffset(es):
    """use_previous given as a pandas DateOffset round-trips intact, both
    for a single-unit and a multi-unit offset."""
    value = ft.IdentityFeature(es["log"].ww["id"])
    do = pd.DateOffset(months=3)
    count_feature = ft.AggregationFeature(
        value, "customers", ft.primitives.Count, use_previous=do
    )
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [count_feature.unique_name(), value.unique_name()],
        "feature_definitions": {
            count_feature.unique_name(): count_feature.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    expected = [count_feature, value]
    assert expected == deserializer.to_list()
    # second scenario: mixed-unit DateOffset
    value = ft.IdentityFeature(es["log"].ww["id"])
    do = pd.DateOffset(months=3, days=2, minutes=30)
    count_feature = ft.AggregationFeature(
        value, "customers", ft.primitives.Count, use_previous=do
    )
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [count_feature.unique_name(), value.unique_name()],
        "feature_definitions": {
            count_feature.unique_name(): count_feature.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
    }
    deserializer = FeaturesDeserializer(dictionary)
    expected = [count_feature, value]
    assert expected == deserializer.to_list()
def _check_schema_version(version, es, warning_text, caplog, warning_type=None):
    """Construct a features dict carrying *version* and assert the expected
    diagnostic: a log record ("log"), a UserWarning ("warn"), or nothing
    when *warning_text* is None."""
    dictionary = {
        "ft_version": ft.__version__,
        "schema_version": version,
        "entityset": es.to_dictionary(),
        "feature_list": [],
        "feature_definitions": {},
    }
    if warning_type == "log" and warning_text:
        # temporarily propagate so caplog can capture the library logger
        logger = logging.getLogger("featuretools")
        logger.propagate = True
        FeaturesDeserializer(dictionary)
        assert warning_text in caplog.text
        logger.propagate = False
    elif warning_type == "warn" and warning_text:
        with pytest.warns(UserWarning) as record:
            FeaturesDeserializer(dictionary)
        assert record[0].message.args[0] == warning_text
    else:
        FeaturesDeserializer(dictionary)
acf23683c1c886bc88ae40e44ddd87d5abf6a4d7 | 4,585 | py | Python | tools/test_apps/protocols/pppos/app_test.py | DCNick3/esp-idf | b0150615dff529662772a60dcb57d5b559f480e2 | [
"Apache-2.0"
] | 12 | 2020-06-03T18:21:26.000Z | 2021-12-15T02:11:40.000Z | tools/test_apps/protocols/pppos/app_test.py | DCNick3/esp-idf | b0150615dff529662772a60dcb57d5b559f480e2 | [
"Apache-2.0"
] | 1 | 2022-02-21T01:57:32.000Z | 2022-02-21T01:57:32.000Z | tools/test_apps/protocols/pppos/app_test.py | DCNick3/esp-idf | b0150615dff529662772a60dcb57d5b559f480e2 | [
"Apache-2.0"
] | 16 | 2020-06-01T05:50:11.000Z | 2022-03-20T05:12:34.000Z | from __future__ import print_function
from __future__ import unicode_literals
import re
import socket
import subprocess
import ttfw_idf
import time
import netifaces
from threading import Thread, Event
def run_server(server_stop, port, server_ip, client_ip):
    """Run a pppd server on serial *port* until *server_stop* is set.

    Streams pppd's stdout to the console for diagnostics. Raises
    ValueError with an ENV_TEST_FAILURE message when pppd cannot be
    started or terminates unexpectedly.
    """
    print("Starting PPP server on port: {}".format(port))
    # Bind p before the try block: if Popen itself fails (e.g. pppd is not
    # installed), the finally clause would otherwise raise a NameError on
    # p.terminate() and mask the real error.
    p = None
    try:
        arg_list = ['pppd', port, '115200', '{}:{}'.format(server_ip, client_ip),
                    'modem', 'local', 'noauth', 'debug', 'nocrtscts', 'nodetach', '+ipv6']
        p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, bufsize=1)
        while not server_stop.is_set():
            if p.poll() is not None:
                raise ValueError('ENV_TEST_FAILURE: PPP terminated unexpectedly with {}'.format(p.poll()))
            line = p.stdout.readline()
            if line:
                print("[PPPD:]{}".format(line.rstrip()))
            time.sleep(0.1)
    except Exception as e:
        print(e)
        raise ValueError('ENV_TEST_FAILURE: Error running PPP server')
    finally:
        if p is not None:
            p.terminate()
        print("PPP server stopped")
@ttfw_idf.idf_custom_test(env_tag="Example_PPP", group="test-apps")
def test_examples_protocol_pppos_connect(env, extra_data):
    """
    steps:
      1. starts PPP server
      2. get DUT as PPP client to connect to the server
      3. check TCP client-server connection between client-server
    """
    dut1 = env.get_dut("pppos_connect_test", "tools/test_apps/protocols/pppos", dut_class=ttfw_idf.ESP32DUT)
    # Look for test case symbolic names
    try:
        server_ip = dut1.app.get_sdkconfig()["CONFIG_TEST_APP_PPP_SERVER_IP"].replace('"','')
        client_ip = dut1.app.get_sdkconfig()["CONFIG_TEST_APP_PPP_CLIENT_IP"].replace('"','')
        port_nr = dut1.app.get_sdkconfig()["CONFIG_TEST_APP_TCP_PORT"]
    except Exception:
        print('ENV_TEST_FAILURE: Some mandatory configuration not found in sdkconfig')
        raise
    print("Starting the test on {}".format(dut1))
    dut1.start_app()
    # the PPP test env uses two ttyUSB's: one for ESP32 board, another one for ppp server
    # use the other port for PPP server than the DUT/ESP
    port = '/dev/ttyUSB0' if dut1.port == '/dev/ttyUSB1' else '/dev/ttyUSB1'
    # Start the PPP server
    server_stop = Event()
    t = Thread(target=run_server, args=(server_stop, port, server_ip, client_ip))
    t.start()
    try:
        # wait (max 30 s) for the PPP server to bring up its ppp0 netif
        ppp_server_timeout = time.time() + 30
        while "ppp0" not in netifaces.interfaces():
            print("PPP server haven't yet setup its netif, list of active netifs:{}".format(netifaces.interfaces()))
            time.sleep(0.5)
            if time.time() > ppp_server_timeout:
                raise ValueError("ENV_TEST_FAILURE: PPP server failed to setup ppp0 interface within timeout")
        ip6_addr = dut1.expect(re.compile(r"Got IPv6 address (\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4})"), timeout=30)[0]
        print("IPv6 address of ESP: {}".format(ip6_addr))
        dut1.expect(re.compile(r"Socket listening"))
        print("Starting the IPv6 test...")
        # Connect to TCP server on ESP using IPv6 address
        # (the "%ppp0" suffix scopes the link-local address to the server netif)
        for res in socket.getaddrinfo(ip6_addr + "%ppp0", int(port_nr), socket.AF_INET6,
                                      socket.SOCK_STREAM, socket.SOL_TCP):
            af, socktype, proto, canonname, addr = res
            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            sock.connect(addr)
            sock.sendall(b"Espressif")
            sock.close()
        dut1.expect(re.compile(r"IPv6 test passed"))
        print("IPv6 test passed!")
        print("Starting the IPv4 test...")
        # Start the TCP server and wait for the ESP to connect with IPv4 address
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('', int(port_nr)))
            sock.listen(1)
            conn, addr = sock.accept()
        except socket.error as msg:
            print('Socket error: ' + str(msg[0]) + ': ' + msg[1])
            raise
        # echo back the expected payload once received (max 60 s)
        timeout = time.time() + 60
        while time.time() < timeout:
            data = conn.recv(128)
            if not data:
                break
            data = data.decode()
            print('Received data: ' + data)
            if data.startswith('Espressif'):
                conn.send(data.encode())
                break
        conn.close()
        dut1.expect(re.compile(r"IPv4 test passed"))
        print("IPv4 test passed!")
    finally:
        # always stop the PPP server thread, even on failure
        server_stop.set()
        t.join()
if __name__ == '__main__':
    # allow running this test module directly, outside the test runner
    test_examples_protocol_pppos_connect()
| 39.869565 | 152 | 0.618321 |
acf237833655773bb8fd38ea7b9f2942a09bf758 | 179 | py | Python | apps/data/models/model.py | TransparentHealth/smh-organization | ca32d1cbb7600e8b22e43e06edab83c323a2404d | [
"Apache-2.0"
] | 3 | 2019-03-03T22:41:00.000Z | 2020-04-16T04:07:07.000Z | apps/data/models/model.py | TransparentHealth/smh_app | ca32d1cbb7600e8b22e43e06edab83c323a2404d | [
"Apache-2.0"
] | 112 | 2019-03-04T14:20:50.000Z | 2020-04-29T21:32:07.000Z | apps/data/models/model.py | TransparentHealth/smh-organization | ca32d1cbb7600e8b22e43e06edab83c323a2404d | [
"Apache-2.0"
] | 2 | 2019-08-01T13:08:28.000Z | 2019-12-06T15:53:25.000Z | from datamodels import Model
class DataModel(Model):
    """Common base class for the application's dataclass models."""

    @property
    def resourceType(self):
        # The resource type label is simply the concrete subclass's name.
        return type(self).__name__
| 17.9 | 41 | 0.698324 |
acf237ef043a3d8e602c759f03fde15c8c52eb4a | 809 | py | Python | setup.py | Gesrua/c2vb | 91d6413567de5b12d07538fd09114ae089d8623e | [
"MIT"
] | 2 | 2020-02-05T09:30:31.000Z | 2020-02-16T13:01:04.000Z | setup.py | Gesrua/c2vb | 91d6413567de5b12d07538fd09114ae089d8623e | [
"MIT"
] | null | null | null | setup.py | Gesrua/c2vb | 91d6413567de5b12d07538fd09114ae089d8623e | [
"MIT"
] | null | null | null | import setuptools
# Read the README so PyPI can display it as the project's long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name='c2vb',
    packages=['c2vb'],
    version='0.0.4',
    license='MIT',
    description='A program convert simple C / C++ code to Visual Basic 6 code',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Chaigidel',
    author_email='chaigidel@outlook.com',
    url='https://github.com/Chaigidel/c2vb',
    keywords=['c', 'cpp', 'visualbasic', 'ast'],
    install_requires=[],
    # Install a `c2vb` console command pointing at c2vb.console.
    entry_points={
        'console_scripts': [
            'c2vb=c2vb:console',
        ]
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 26.966667 | 79 | 0.613103 |
acf237f5df98a026f2c5b129df1978b680b669ce | 3,883 | py | Python | ChipConnector.py | giladfr/ARCTic | ae653f3c460ea9df7be359fb2f64956e5a899faa | [
"Apache-2.0"
] | 1 | 2020-09-14T15:30:04.000Z | 2020-09-14T15:30:04.000Z | ChipConnector.py | giladfr/ARCTic | ae653f3c460ea9df7be359fb2f64956e5a899faa | [
"Apache-2.0"
] | 1 | 2016-05-28T12:15:46.000Z | 2016-05-28T12:15:46.000Z | ChipConnector.py | giladfr/ARCTic | ae653f3c460ea9df7be359fb2f64956e5a899faa | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Gilad Fride
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.from SocOnlineHParser import *
from JTAGWrapper import *
import pickle
import struct
class ChipConnector :
    """Bridge between a SocOnline register model (parsed from a C header)
    and a physical board reachable over JTAG.

    The in-memory model can be synchronized with the board in either
    direction, and saved to / loaded from a simple CSV file.
    """
    def __init__(self, path_to_soc_file):
        # load chip model from the Soconline C file
        self.model = parse(path_to_soc_file)
        self.is_connected = False
    def connect_board(self):
        """Connect to the board over JTAG.

        Returns the error message string on failure (and leaves
        is_connected False); returns None on success.
        """
        try:
            self.jtag_wrapper = JTAGWrapper()
            self.jtag_wrapper.connect()
            self.is_connected = True
        except SystemError as err:
            self.is_connected = False
            return err.message
    def read_model_from_board(self):
        """Refresh every register in the model from the board."""
        self.iterate_all_registers(self.read_reg)
    def write_model_to_board(self):
        """Push every register value in the model down to the board."""
        self.iterate_all_registers(self.write_reg)
    def read_reg(self, register):
        self.jtag_wrapper.read_reg(register)
    def write_reg(self, register):
        self.jtag_wrapper.write_reg(register)
    def update_reg_by_name(self, block_name, reg_name, reg_val):
        """Set a register's raw value in the in-memory model.

        Returns 0 on success, -1 when the block or register is unknown.
        """
        block = self.model.block_by_name(block_name)
        if not block:
            return -1
        register = block.register_by_name(reg_name)
        if not register:
            return -1
        register.set_raw_val(reg_val)
        return 0
    def iterate_all_registers(self, func):
        """Apply *func* to every register of every block in the model."""
        for block in self.model.blocks:
            for register in block.registers:
                func(register)
    def get_save_line(self, register):
        # CSV cell: "<reg name>,0x<raw value in upper-case hex>"
        return "%s,0x%X" % (register.name, register.get_raw_val())
    def save_block_on_disk(self, block, filename):
        """Append one block's registers to a CSV file (block,reg,hex value)."""
        # use a context manager so the handle is always closed
        with open(filename, "a") as fh:
            for register in block.registers:
                fh.write(block.name + "," + self.get_save_line(register) + "\n")
    def save_model_on_disk(self, filename):
        """Write the whole model to a CSV file, overwriting any old content."""
        with open(filename, "w") as fh:
            for block in self.model.blocks:
                for register in block.registers:
                    fh.write(block.name + "," + self.get_save_line(register) + "\n")
    def load_model_from_disk(self, filename):
        """Load register values from a CSV produced by save_model_on_disk.

        Returns -1 on the first unknown block/register, None otherwise.
        """
        with open(filename, "r") as fh:
            for line in fh:
                block_name, reg_name, reg_value = line.split(",")
                res = self.update_reg_by_name(block_name, reg_name, int(reg_value, 16))
                if res != 0:
                    return res
    def load_bin_file_to_address(self, memory_addr, filename):
        """Write the contents of *filename* to board memory at *memory_addr*."""
        return self.jtag_wrapper.file2mem(memory_addr, filename)
    def dump_bin_file_from_address(self, memory_addr, size_bytes, filename):
        """Dump *size_bytes* of board memory at *memory_addr* into *filename*."""
        return self.jtag_wrapper.mem2file(memory_addr, size_bytes, filename)
    def detach(self):
        """Release the JTAG connection."""
        self.jtag_wrapper.detach()
    def download_firmware(self, filename):
        """Download a firmware image to the board over JTAG."""
        self.jtag_wrapper.download_firmware(filename)
if __name__ == "__main__":
    # Smoke test: parse the model, dump it to CSV, then read it back.
    model = ChipConnector("omic_ARC.h")
    # model.read_model_from_board()
    model.save_model_on_disk("reg_dump.csv")
    model.load_model_from_disk("reg_dump.csv")
    # print model.model.blocks[0].registers
| 32.630252 | 80 | 0.656194 |
acf23812c7e5e1ae2d69813178b0b525020eaa82 | 379 | py | Python | v1/serializers/boards/get_users.py | bergran/Tswift-backend | 4ca32d68bc0d5aa72dd2d849ce69ed3fa75f8f0e | [
"MIT"
] | 1 | 2020-04-22T15:27:45.000Z | 2020-04-22T15:27:45.000Z | v1/serializers/boards/get_users.py | bergran/Tswift-backend | 4ca32d68bc0d5aa72dd2d849ce69ed3fa75f8f0e | [
"MIT"
] | 3 | 2020-02-11T22:15:05.000Z | 2021-06-10T20:11:24.000Z | v1/serializers/boards/get_users.py | bergran/Tswift-backend | 4ca32d68bc0d5aa72dd2d849ce69ed3fa75f8f0e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from rest_framework import serializers
from v1.models import UserBoardPermissions
class BoardPermissions(serializers.ModelSerializer):
user = serializers.CharField(source='user.username')
permission = serializers.CharField(source='permission.name')
class Meta:
model = UserBoardPermissions
fields = ('user', 'permission')
| 25.266667 | 64 | 0.728232 |
acf239055a07d87bd1ee95932bf9dd1dde810ccd | 118 | py | Python | Exercicios/ex025.py | MateusBarboza99/Python-03- | 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | [
"MIT"
] | null | null | null | Exercicios/ex025.py | MateusBarboza99/Python-03- | 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | [
"MIT"
] | null | null | null | Exercicios/ex025.py | MateusBarboza99/Python-03- | 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | [
"MIT"
] | null | null | null | nome = str(input('Qual é seu nome Completo?')).strip()
print('Seu Nome tem Silva?{}'.format('silva' in nome.lower()))
| 39.333333 | 62 | 0.669492 |
acf23975a60bd13e283883ed5da4acee70785784 | 6,391 | py | Python | netcup_api/dns.py | cedrikkaufmann/python-netcup-api | 89dc3e6d44b663a789801f68d4e99606855c05c7 | [
"MIT"
] | null | null | null | netcup_api/dns.py | cedrikkaufmann/python-netcup-api | 89dc3e6d44b663a789801f68d4e99606855c05c7 | [
"MIT"
] | null | null | null | netcup_api/dns.py | cedrikkaufmann/python-netcup-api | 89dc3e6d44b663a789801f68d4e99606855c05c7 | [
"MIT"
] | null | null | null | import requests
class DNSRecord:
    """One DNS record in the shape the netcup CCP webservice expects."""

    def __init__(self, hostname, record_type, destination, priority=None,
                 domain_id=None, delete_record=False):
        self._host = hostname
        self._rtype = record_type
        self._record_id = domain_id
        self._dest = destination
        self._prio = priority
        self._marked_for_deletion = delete_record

    # --- hostname -----------------------------------------------------
    def get_host(self):
        return self._host

    def set_host(self, hostname):
        self._host = hostname

    hostname = property(get_host, set_host)

    # --- record type (A, AAAA, MX, ...) -------------------------------
    def get_record_type(self):
        return self._rtype

    def set_record_type(self, record_type):
        self._rtype = record_type

    record_type = property(get_record_type, set_record_type)

    # --- destination (record value) -----------------------------------
    def get_destination(self):
        return self._dest

    def set_destination(self, destination):
        self._dest = destination

    destination = property(get_destination, set_destination)

    # --- deletion marker ----------------------------------------------
    def get_delete_record(self):
        return self._marked_for_deletion

    def set_delete_record(self, delete):
        self._marked_for_deletion = delete

    delete = property(get_delete_record, set_delete_record)

    # --- API-side record id -------------------------------------------
    def get_domain_id(self):
        return self._record_id

    def set_domain_id(self, domain_id):
        self._record_id = domain_id

    domain_id = property(get_domain_id, set_domain_id)

    # --- priority (used e.g. for MX) ----------------------------------
    def get_priority(self):
        return self._prio

    def set_priority(self, priority):
        self._prio = priority

    priority = property(get_priority, set_priority)

    def json(self):
        """Serialize to the dict layout used in API payloads."""
        return dict(
            id=self._record_id,
            hostname=self._host,
            type=self._rtype,
            priority=self._prio,
            destination=self._dest,
            deleterecord=self._marked_for_deletion,
        )
class Client:
    """Minimal client for the netcup CCP DNS JSON webservice.

    Logs in on construction; makes a best-effort logout when the object
    is garbage-collected. Every failed API call raises
    Exception(shortmessage, longmessage, payload).
    """
    REST_URI = 'https://ccp.netcup.net/run/webservice/servers/endpoint.php?JSON'

    def __init__(self, apiKey, apiPassword, customerNumber):
        self._apiKey = apiKey
        self._apiPassword = apiPassword
        self._customerNumber = customerNumber
        self._sessionID = ''
        self._login()

    def __del__(self):
        # Best effort only: during interpreter shutdown, or after a failed
        # login, the logout request can itself raise. Exceptions escaping
        # __del__ are ignored by Python but still print noisy tracebacks,
        # so swallow them here.
        try:
            self._logout()
        except Exception:
            pass

    def _call(self, action, params):
        """POST one API *action* with *params* and return the decoded body.

        Raises Exception(shortmessage, longmessage, payload) when the API
        status is anything other than 'success'.
        """
        payload = {
            'action': action,
            'param': params
        }
        r = requests.post(url=Client.REST_URI, json=payload)
        body = r.json()
        if body['status'] != 'success':
            raise Exception(body['shortmessage'], body['longmessage'], payload)
        return body

    def _session_params(self, **extra):
        """Return the credential parameters common to all session calls."""
        params = {
            'apikey': self._apiKey,
            'apisessionid': self._sessionID,
            'customernumber': self._customerNumber
        }
        params.update(extra)
        return params

    def _login(self):
        """Open an API session and remember its session id."""
        body = self._call('login', {
            'apikey': self._apiKey,
            'apipassword': self._apiPassword,
            'customernumber': self._customerNumber
        })
        self._sessionID = body['responsedata']['apisessionid']

    def _logout(self):
        """Close the API session."""
        self._call('logout', self._session_params())

    def get_dns_records(self, domainname):
        """Return the DNS records of *domainname* as a list of DNSRecord."""
        body = self._call('infoDnsRecords', self._session_params(domainname=domainname))
        dns_records = list()
        for record in body['responsedata']['dnsrecords']:
            dns_records.append(DNSRecord(domain_id=record['id'], hostname=record['hostname'],
                                         record_type=record['type'], destination=record['destination']))
        return dns_records

    def _send_records(self, domainname, records):
        """Submit an updateDnsRecords call carrying *records* for *domainname*."""
        self._call('updateDnsRecords', self._session_params(
            domainname=domainname,
            dnsrecordset={
                'dnsrecords': [record.json() for record in records]
            }
        ))

    def update_dns_records(self, domainname, *records):
        """Update the given records on *domainname*."""
        self._send_records(domainname, records)

    def delete_dns_records(self, domainname, *records):
        """Delete the given records on *domainname* (marks each for deletion)."""
        for record in records:
            record.delete = True
        self._send_records(domainname, records)

    def add_dns_records(self, domainname, *records):
        """Add the given records to *domainname*."""
        self._send_records(domainname, records)
acf23a6d42833e679b14e180e36d10995ff20735 | 1,148 | py | Python | compiler/views.py | imsurajsharma/compiler | 451070417a614da2db13afccadeafb76f4e9da78 | [
"MIT"
] | null | null | null | compiler/views.py | imsurajsharma/compiler | 451070417a614da2db13afccadeafb76f4e9da78 | [
"MIT"
] | null | null | null | compiler/views.py | imsurajsharma/compiler | 451070417a614da2db13afccadeafb76f4e9da78 | [
"MIT"
] | null | null | null | from django.shortcuts import render,redirect
from django.http import JsonResponse
import subprocess
from shell import shell
from os import system
def test(request):
    """Render the test page."""
    return render(request,'test.html')
def embed(request):
    """Render the embeddable editor page."""
    return render(request,'embed.html')
def index(request):
    """Compile-and-run endpoint.

    On POST: save the submitted Python source and stdin to disk, execute
    the script, and return its stdout lines as JSON under the "data" key.
    Otherwise render the editor page.

    NOTE(security): this endpoint executes arbitrary user-supplied Python
    (the purpose of the app); it must never be exposed without sandboxing.
    """
    if request.method == 'POST':
        source = request.POST.get('compile')
        user_input = request.POST['input']
        # Persist the user's stdin line by line, normalising terminators.
        with open('input.txt', 'w+') as infile:
            for line in user_input.split('\n'):
                infile.write(line + '\n')
        with open('script.py', 'w') as script_file:
            script_file.write(source)
        # Run the script with redirected stdin/stdout. Unlike the previous
        # os.system shell command, this avoids a shell entirely.
        with open('input.txt', 'r') as stdin_f, open('out.txt', 'w') as stdout_f:
            subprocess.run(['python', 'script.py'], stdin=stdin_f, stdout=stdout_f)
        with open('out.txt', 'r') as outfile:
            pyout = outfile.readlines()
        return JsonResponse({"data": pyout})
    else:
        return render(request, 'index.html')
| 23.916667 | 57 | 0.534843 |
acf23b82e5761240755bd5f672d6bb4db3e1e5c8 | 3,452 | py | Python | tests/test_basic.py | sanders41/glom-dict | 46af6aac1444d1fe90a3a9ff46dec6bd926e098a | [
"MIT"
] | 1 | 2021-09-06T23:34:15.000Z | 2021-09-06T23:34:15.000Z | tests/test_basic.py | sanders41/glom-dict | 46af6aac1444d1fe90a3a9ff46dec6bd926e098a | [
"MIT"
] | null | null | null | tests/test_basic.py | sanders41/glom-dict | 46af6aac1444d1fe90a3a9ff46dec6bd926e098a | [
"MIT"
] | 1 | 2021-09-14T12:16:44.000Z | 2021-09-14T12:16:44.000Z | """Tests for the `cli` module."""
import collections
import glom as g
import pytest
from glom_dict import GlomDict
@pytest.fixture
def sample() -> dict:
    """Nested dict/list/tuple fixture exercising several path shapes."""
    return {
        "pretty": "standard",
        "a": {"b": {"c": "d"}},
        "a_list": ["hay0", "hay1", "needle", "hay3", "hay4"],
        "my_tuple": (
            0,
            1,
            2,
            {
                "deeply": {
                    "nested": {"key": ["not", "not", "not", "not", "it", "not", "not"]}
                }
            },
        ),
    }
@pytest.mark.parametrize(
    "path, expected",
    [
        (g.Path("pretty"), "standard"),
        (g.Path("a", "b", "c"), "d"),
        (g.Path("a_list", 2), "needle"),
        (g.Path("my_tuple", 3, "deeply", "nested", "key", 4), "it"),
    ],
)
class TestGetItem:
    """Item access via glom Paths, dotted strings, and .get()."""
    def test_glom_path(self, sample, path: g.Path, expected):
        gd = GlomDict(**sample)
        assert gd[path] == expected
    def test_str(self, sample, path: g.Path, expected):
        # the same lookup expressed as a dotted-string path
        gd = GlomDict(**sample)
        str_path = ".".join(str(p) for p in path.values())
        assert gd[str_path] == expected
    def test_get(self, sample, path, expected):
        gd = GlomDict(**sample)
        assert gd.get(g.Path(*path)) == expected
    def test_get_not_there(self, sample, path, expected):
        # get() on a missing path returns None instead of raising
        gd = GlomDict(**sample)
        glom_path = g.Path(*path[:-1], "not_there")
        print(glom_path)
        assert gd.get(glom_path) is None
@pytest.mark.parametrize(
"path, value",
[
(g.Path("a", "b", "c"), "D"),
(g.Path("a_list", 2), "ANOTHER_NEEDLE"),
],
)
class TestSetItem:
def test_glom_path(self, sample, path: g.Path, value):
gd = GlomDict(**sample)
glom_path = g.Path(*path)
gd[glom_path] = value
assert gd[glom_path] == value
def test_str_path(self, sample, path: g.Path, value):
gd = GlomDict(**sample)
str_path = ".".join(str(p) for p in path.values())
gd[str_path] = value
assert gd[str_path] == value
@pytest.mark.parametrize(
"path, value",
[
(g.Path("a", "b", "c"), "d"),
(g.Path("a_list", 2), "needle"),
],
)
class TestDelItem:
def test_del(self, sample, path: g.Path, value):
gd = GlomDict(**sample)
print(gd[path.values()[0]])
del gd[path]
assert gd.get(path) != value
def test_pop(self, sample, path: g.Path, value):
gd = GlomDict(**sample)
print(gd[path.values()[0]])
popped = gd.pop(path)
assert popped == value
assert gd.get(path) != value
def test_pop_not_there(self, sample, path: g.Path, value):
gd = GlomDict(**sample)
with pytest.raises(g.PathAccessError):
gd.pop(g.Path(path.values()[0], "not_there"))
assert gd[path] == value
def test_pop_not_there_w_default(self, sample, path: g.Path, value):
gd = GlomDict(**sample)
popped = gd.pop(g.Path(path.values()[0], "not_there"), default="DEFAULT")
assert popped == "DEFAULT"
@pytest.mark.parametrize("type_", [dict, collections.UserDict, GlomDict])
def test_isinstance(type_):
    """GlomDict must satisfy isinstance checks for dict and UserDict too."""
    gd = GlomDict(foo="foo", bar="bar")
    assert isinstance(gd, type_)
def test_init(sample: dict):
    """GlomDict accepts both a single mapping and keyword arguments."""
    a = GlomDict(sample)
    b = GlomDict(**sample)

    for d in [a, b]:
        assert d["pretty"] == "standard"
if __name__ == "__main__":
pytest.main(["-vv"])
| 24.48227 | 87 | 0.538818 |
acf23bb9d4b2be9db1f6be07e8a0628a21e6fff6 | 4,059 | py | Python | cpudist.py | ggaurav10/bcc-tools-REST | 4890620f0d1040795bab286c028c536e072744fd | [
"Apache-2.0"
] | 5 | 2018-10-28T21:43:31.000Z | 2021-06-05T09:42:36.000Z | cpudist.py | ggaurav10/bcc-tools-REST | 4890620f0d1040795bab286c028c536e072744fd | [
"Apache-2.0"
] | null | null | null | cpudist.py | ggaurav10/bcc-tools-REST | 4890620f0d1040795bab286c028c536e072744fd | [
"Apache-2.0"
] | 1 | 2019-02-23T10:37:18.000Z | 2019-02-23T10:37:18.000Z | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# cpudist Summarize on- and off-CPU time per task as a histogram.
#
# USAGE: cpudist [-h] [-O] [-T] [-m] [-P] [-L] [-p PID] [interval] [count]
#
# This measures the time a task spends on or off the CPU, and shows this time
# as a histogram, optionally per-process.
#
# Copyright 2016 Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
from cStringIO import StringIO
import sys
pid = ""
section = ""
label = ""
debug = 0
def init_bpf(in_offcpu, in_pid, in_ms, in_pids, in_tids):
global label
global section
global pid
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
ONCPU_HEADER
typedef struct pid_key {
u64 id;
u64 slot;
} pid_key_t;
BPF_HASH(start, u32, u64);
STORAGE
static inline void store_start(u32 tgid, u32 pid, u64 ts)
{
if (FILTER)
return;
start.update(&pid, &ts);
}
static inline void update_hist(u32 tgid, u32 pid, u64 ts)
{
if (FILTER)
return;
u64 *tsp = start.lookup(&pid);
if (tsp == 0)
return;
if (ts < *tsp) {
// Probably a clock issue where the recorded on-CPU event had a
// timestamp later than the recorded off-CPU event, or vice versa.
return;
}
u64 delta = ts - *tsp;
FACTOR
STORE
}
int sched_switch(struct pt_regs *ctx, struct task_struct *prev)
{
u64 ts = bpf_ktime_get_ns();
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 tgid = pid_tgid >> 32, pid = pid_tgid;
#ifdef ONCPU
if (prev->state == TASK_RUNNING) {
#else
if (1) {
#endif
u32 prev_pid = prev->pid;
u32 prev_tgid = prev->tgid;
#ifdef ONCPU
update_hist(prev_tgid, prev_pid, ts);
#else
store_start(prev_tgid, prev_pid, ts);
#endif
}
BAIL:
#ifdef ONCPU
store_start(tgid, pid, ts);
#else
update_hist(tgid, pid, ts);
#endif
return 0;
}
"""
if not in_offcpu:
bpf_text = bpf_text.replace('ONCPU_HEADER', "#define ONCPU\n")
else:
bpf_text = bpf_text.replace('ONCPU_HEADER', "")
if in_pid:
bpf_text = bpf_text.replace('FILTER', 'tgid != %s' % in_pid)
else:
bpf_text = bpf_text.replace('FILTER', '0')
if in_ms:
bpf_text = bpf_text.replace('FACTOR', 'delta /= 1000000;')
label = "msecs"
else:
bpf_text = bpf_text.replace('FACTOR', 'delta /= 1000;')
label = "usecs"
if in_pids or in_tids:
section = "pid"
pid = "tgid"
if in_tids:
pid = "pid"
section = "tid"
bpf_text = bpf_text.replace('STORAGE',
'BPF_HISTOGRAM(dist, pid_key_t);')
bpf_text = bpf_text.replace('STORE',
'pid_key_t key = {.id = ' + pid + ', .slot = bpf_log2l(delta)}; ' +
'dist.increment(key);')
else:
section = ""
bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);')
bpf_text = bpf_text.replace('STORE',
'dist.increment(bpf_log2l(delta));')
if debug:
print(bpf_text)
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="sched_switch")
return b
def cpudistUtil(in_offcpu=False, in_pid=None, in_ms=False, in_pids=False, in_tids=False, in_interval=10, in_count=1):
    """Trace on-/off-CPU time and return the histogram output as a string.

    Args:
        in_offcpu: True to measure off-CPU time instead of on-CPU time.
        in_pid: optional PID (tgid) to filter on.
        in_ms: True to report in milliseconds instead of microseconds.
        in_pids: True for per-process histograms.
        in_tids: True for per-thread histograms.
        in_interval: seconds between histogram snapshots.
        in_count: number of snapshots to take before returning.

    Returns:
        str, the accumulated tracing output.
    """
    countdown = abs(int(in_count))
    interval = abs(int(in_interval))
    if in_pid:
        in_pid = int(in_pid)

    # Redirect stdout into a buffer so print_log2_hist output is captured.
    # NOTE(review): sys.stdout is only restored on the normal return path;
    # an exception inside the loop leaves stdout redirected.
    old_stdout = sys.stdout
    sys.stdout = output = StringIO()

    b = init_bpf(in_offcpu, in_pid, in_ms, in_pids, in_tids)

    print("Tracing %s-CPU time... Hit Ctrl-C to end." %
        ("off" if in_offcpu else "on"), file=output)

    dist = b.get_table("dist")
    while (1):
        sleep(interval)
        print(file=output)
        print("%-8s\n" % strftime("%H:%M:%S"), file=output, end="")

        def pid_to_comm(pid):
            # Resolve pid to "pid comm" via /proc; fall back to the bare pid.
            try:
                comm = open("/proc/%d/comm" % pid, "r").read()
                return "%d %s" % (pid, comm)
            except IOError:
                return str(pid)

        # label/section are module globals populated by init_bpf.
        dist.print_log2_hist(label, section, section_print_fn=pid_to_comm)
        dist.clear()

        countdown -= 1
        if countdown == 0:
            b.detach_kprobe(event="finish_task_switch")
            sys.stdout = old_stdout
            return output.getvalue()
| 22.932203 | 117 | 0.654595 |
acf23c36cd978301a936ab282c3ec6ecefca186d | 2,013 | py | Python | pb_model/tests/models.py | LKI/django-pb-model | 5a2facfd692487b8fde6c90a4650ac843cb43689 | [
"MIT"
] | null | null | null | pb_model/tests/models.py | LKI/django-pb-model | 5a2facfd692487b8fde6c90a4650ac843cb43689 | [
"MIT"
] | null | null | null | pb_model/tests/models.py | LKI/django-pb-model | 5a2facfd692487b8fde6c90a4650ac843cb43689 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
from pb_model.models import ProtoBufMixin
from . import models_pb2
class Relation(ProtoBufMixin, models.Model):
    """ForeignKey target for :class:`Main`; mirrors the ``Relation`` protobuf."""

    pb_model = models_pb2.Relation  # protobuf message this model maps to
    num = models.IntegerField(default=0)
class M2MRelation(ProtoBufMixin, models.Model):
    """ManyToMany target for :class:`Main`; mirrors the ``M2MRelation`` protobuf."""

    pb_model = models_pb2.M2MRelation  # protobuf message this model maps to
    num = models.IntegerField(default=0)
class Main(ProtoBufMixin, models.Model):
    """Model mapping assorted Django field types (scalars, datetime, choices,
    FK, M2M) to the ``Main`` protobuf message.
    """

    pb_model = models_pb2.Main

    string_field = models.CharField(max_length=32)
    integer_field = models.IntegerField()
    float_field = models.FloatField()
    bool_field = models.BooleanField(default=False)
    datetime_field = models.DateTimeField(default=timezone.now)

    # Integer-backed choices for choices_field.
    OPT0, OPT1, OPT2, OPT3 = 0, 1, 2, 3
    OPTS = [
        (OPT0, "option-0"),
        (OPT1, "option-1"),
        (OPT2, "option-2"),
        (OPT3, "option-3"),
    ]
    choices_field = models.IntegerField(default=OPT0, choices=OPTS)

    fk_field = models.ForeignKey(Relation, on_delete=models.DO_NOTHING)
    m2m_field = models.ManyToManyField(M2MRelation)
class Embedded(ProtoBufMixin, models.Model):
    """Maps the nested ``Root.Embedded`` protobuf message; all protobuf
    fields are mapped (``pb_2_dj_fields = '__all__'``).
    """

    pb_model = models_pb2.Root.Embedded
    pb_2_dj_fields = '__all__'
class ListWrapper(ProtoBufMixin, models.Model):
    """Maps the nested ``Root.ListWrapper`` protobuf message; all protobuf
    fields are mapped (``pb_2_dj_fields = '__all__'``).
    """

    pb_model = models_pb2.Root.ListWrapper
    pb_2_dj_fields = '__all__'
class MapWrapper(ProtoBufMixin, models.Model):
    """Maps the nested ``Root.MapWrapper`` protobuf message; all protobuf
    fields are mapped (``pb_2_dj_fields = '__all__'``).
    """

    pb_model = models_pb2.Root.MapWrapper
    pb_2_dj_fields = '__all__'
class Root(ProtoBufMixin, models.Model):
    """Maps the ``Root`` protobuf message, exercising field renaming and
    nested-field flattening via ``pb_2_dj_field_map``.
    """

    pb_model = models_pb2.Root
    pb_2_dj_fields = '__all__'
    # Protobuf field name (possibly nested) -> Django field name.
    pb_2_dj_field_map = {
        'uint32_field': 'uint32_field_renamed',
        'inlineField': {
            'data': 'inline_field',
            'doublyNestedField': {
                'data': 'second_inline_field',
            }
        }
    }
    uuid_field = models.UUIDField(null=True)
    inline_field = models.CharField(max_length=10, null=True)
    second_inline_field = models.CharField(max_length=10, null=True)
| 26.142857 | 71 | 0.689021 |
acf23c8fd4fa88b719a714c6d0299643eca85e69 | 3,095 | py | Python | src/pysme/atmosphere/krzfile.py | AWehrhahn/SME | 542e880ed779381f7cbbaaacb59475fa6a6d3537 | [
"BSD-3-Clause"
] | 14 | 2019-06-26T18:43:09.000Z | 2022-03-12T00:53:42.000Z | src/pysme/atmosphere/krzfile.py | AWehrhahn/SME | 542e880ed779381f7cbbaaacb59475fa6a6d3537 | [
"BSD-3-Clause"
] | 10 | 2020-03-01T15:21:23.000Z | 2021-09-01T15:28:37.000Z | src/pysme/atmosphere/krzfile.py | AWehrhahn/SME | 542e880ed779381f7cbbaaacb59475fa6a6d3537 | [
"BSD-3-Clause"
] | 6 | 2019-03-01T15:25:24.000Z | 2022-03-30T10:26:33.000Z | import re
from os.path import basename
import numpy as np
from ..abund import Abund
from .atmosphere import Atmosphere
class KrzFile(Atmosphere):
"""Read .krz atmosphere files"""
def __init__(self, filename, source=None):
super().__init__()
if source is None:
self.source = basename(filename)
else:
self.source = source
self.method = "embedded"
self.citation_info = r"""
@MISC{2017ascl.soft10017K,
author = {{Kurucz}, Robert L.},
title = "{ATLAS9: Model atmosphere program with opacity distribution functions}",
keywords = {Software},
year = "2017",
month = "Oct",
eid = {ascl:1710.017},
pages = {ascl:1710.017},
archivePrefix = {ascl},
eprint = {1710.017},
adsurl = {https://ui.adsabs.harvard.edu/abs/2017ascl.soft10017K},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}}
"""
self.load(filename)
def load(self, filename):
"""
Load data from disk
Parameters
----------
filename : str
name of the file to load
"""
# TODO: this only works for some krz files
# 1..2 lines header
# 3 line opacity
# 4..13 elemntal abundances
# 14.. Table data for each layer
# Rhox Temp XNE XNA RHO
with open(filename, "r") as file:
header1 = file.readline()
header2 = file.readline()
opacity = file.readline()
abund = [file.readline() for _ in range(10)]
table = file.readlines()
# Combine the first two lines
header = header1 + header2
# Parse header
# vturb
self.vturb = float(re.findall(r"VTURB=?\s*(\d)", header)[0])
self.lonh = float(re.findall(r"L/H=?\s*(\d+.?\d*)", header)[0])
self.teff = float(re.findall(r"T ?EFF=?\s*(\d+.?\d*)", header)[0])
self.logg = float(re.findall(r"GRAV(ITY)?=?\s*(\d+.?\d*)", header)[0][1])
model_type = re.findall(r"MODEL TYPE=?\s*(\d)", header)[0]
self.model_type = int(model_type)
model_type_key = {0: "rhox", 1: "tau", 3: "sph"}
self.depth = model_type_key[self.model_type]
self.geom = "pp"
self.wlstd = float(re.findall(r"WLSTD=?\s*(\d+.?\d*)", header)[0])
# parse opacity
i = opacity.find("-")
opacity = opacity[:i].split()
self.opflag = np.array([int(k) for k in opacity])
# parse abundance
pattern = np.genfromtxt(abund).flatten()[:-1]
pattern[1] = 10 ** pattern[1]
self.abund = Abund(monh=0, pattern=pattern, type="sme")
# parse table
self.table = np.genfromtxt(table, delimiter=",", usecols=(0, 1, 2, 3, 4))
self.rhox = self.table[:, 0]
self.temp = self.table[:, 1]
self.xne = self.table[:, 2]
self.xna = self.table[:, 3]
self.rho = self.table[:, 4]
| 32.925532 | 97 | 0.526979 |
acf23d5e639e0cd72be61c1985829f2afae9ea20 | 637 | py | Python | postgrest_py/__init__.py | leynier/postgrest-py | 02c90a5ac02aa92ae9b69ee86a52412f0e06e75e | [
"MIT"
] | 3 | 2021-11-16T12:07:54.000Z | 2021-11-17T07:35:18.000Z | postgrest_py/__init__.py | leynier/postgrest-py | 02c90a5ac02aa92ae9b69ee86a52412f0e06e75e | [
"MIT"
] | 16 | 2022-01-05T18:31:50.000Z | 2022-03-29T18:35:36.000Z | postgrest_py/__init__.py | leynier/postgrest-py | 02c90a5ac02aa92ae9b69ee86a52412f0e06e75e | [
"MIT"
] | 1 | 2022-01-04T15:22:39.000Z | 2022-01-04T15:22:39.000Z | from __future__ import annotations
__version__ = "0.7.1"
from ._async.client import AsyncPostgrestClient
from ._async.request_builder import (
AsyncFilterRequestBuilder,
AsyncQueryRequestBuilder,
AsyncRequestBuilder,
AsyncSelectRequestBuilder,
)
from ._sync.client import SyncPostgrestClient
from ._sync.request_builder import (
SyncFilterRequestBuilder,
SyncQueryRequestBuilder,
SyncRequestBuilder,
SyncSelectRequestBuilder,
)
from .base_client import DEFAULT_POSTGREST_CLIENT_HEADERS
from .deprecated_client import Client, PostgrestClient
from .deprecated_get_request_builder import GetRequestBuilder
| 28.954545 | 61 | 0.828885 |
acf23e39ff4faf4ee5b7a014329b7229a75e5b77 | 2,802 | py | Python | benchmark/startCirq2337.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2337.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2337.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=29
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=26
c.append(cirq.X.on(input_qubit[3])) # number=27
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=28
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.X.on(input_qubit[1])) # number=25
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.Z.on(input_qubit[1])) # number=21
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=14
c.append(cirq.X.on(input_qubit[3])) # number=15
c.append(cirq.rx(1.8001325905069514).on(input_qubit[3])) # number=18
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=16
c.append(cirq.H.on(input_qubit[1])) # number=22
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.X.on(input_qubit[1])) # number=17
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=11
c.append(cirq.Y.on(input_qubit[0])) # number=12
c.append(cirq.Y.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=23
c.append(cirq.X.on(input_qubit[0])) # number=19
c.append(cirq.X.on(input_qubit[0])) # number=20
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' digits."""
    digits = [str(int(bit)) for bit in bits]
    return "".join(digits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2337.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 35.025 | 77 | 0.684154 |
acf23e63b57d92ebf80980e7608e215f964bef29 | 1,462 | py | Python | poll/forms.py | mirokrastev/poll-website | 4f26cce3f838ab05de91f0e1dba34d9bc59927b4 | [
"MIT"
] | 3 | 2021-07-03T19:05:56.000Z | 2022-02-02T17:22:17.000Z | poll/forms.py | mirokrastev/poll-website | 4f26cce3f838ab05de91f0e1dba34d9bc59927b4 | [
"MIT"
] | null | null | null | poll/forms.py | mirokrastev/poll-website | 4f26cce3f838ab05de91f0e1dba34d9bc59927b4 | [
"MIT"
] | null | null | null | from django import forms
from django.forms import modelformset_factory
from poll.models.poll_models import Poll, Answer, Comment
class PollForm(forms.ModelForm):
    """Create/edit form for a Poll, exposing its name and telemetry flag."""

    class Meta:
        model = Poll
        fields = ('name', 'telemetry')
        help_texts = {
            'telemetry': 'Enables telemetry for this Poll'
        }
class AnswerForm(forms.ModelForm):
    """
    Base Form to be used in formset.
    It can be used alone, but for more dynamic content, use it with formset.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Formsets mark extra forms as empty_permitted by default; force
        # every answer form to be filled in so blank answers fail validation.
        self.empty_permitted = False

    class Meta:
        model = Answer
        fields = ('answer',)
# model formset for creating multiple Answer objects (used when creating a new Poll)
answer_modelformset = modelformset_factory(model=Answer, form=AnswerForm,
validate_min=True, extra=1, max_num=8)
class VoteForm(forms.Form):
    """Single-choice vote form rendered as radio buttons.

    The queryset is intentionally empty here; the view must assign the
    poll's answers to ``answers.queryset`` before use.
    """

    answers = forms.ModelChoiceField(queryset=Answer.objects.none(),
                                     widget=forms.RadioSelect())
class CommentForm(forms.ModelForm):
    """Comment entry form with Bootstrap styling applied to the textarea."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap class and a 3-row textarea for the rendered widget.
        self.fields['comment'].widget.attrs.update({
            'class': 'form-control',
            'rows': '3'
        })

    class Meta:
        model = Comment
        fields = ('comment',)
| 27.584906 | 84 | 0.611491 |
acf23e926895b0b73758fc4ab9720f856d9a5e92 | 1,261 | py | Python | Leetcode/Depth-First_Search,Breadth-First_Search/2_-_Medium/130._Surrounded_Regions.py | Khalid-Sultan/Algorithms-Prep | 7773c5bc0448d8677bf324bd1d9bdc43b813fcd5 | [
"MIT"
] | 1 | 2020-09-21T10:01:26.000Z | 2020-09-21T10:01:26.000Z | Leetcode/Depth-First_Search,Breadth-First_Search/2_-_Medium/130._Surrounded_Regions.py | Khalid-Sultan/Algorithms-Prep | 7773c5bc0448d8677bf324bd1d9bdc43b813fcd5 | [
"MIT"
] | null | null | null | Leetcode/Depth-First_Search,Breadth-First_Search/2_-_Medium/130._Surrounded_Regions.py | Khalid-Sultan/Algorithms-Prep | 7773c5bc0448d8677bf324bd1d9bdc43b813fcd5 | [
"MIT"
] | 2 | 2020-09-01T12:33:55.000Z | 2020-11-30T13:23:50.000Z | class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
for row in range(len(board)):
for col in range(len(board[0])):
if board[row][col]=='O':
tobechanged = set()
check = self.dfs(board, row, col, tobechanged)
if check:
board[row][col] = 'X'
for i in tobechanged:
board[i[0]][i[1]] = 'X'
def dfs(self, board, row, col, tobechanged):
if (row,col) in tobechanged:
return True
if row<0 or col<0 or row>=len(board) or col>=len(board[0]):
return False
if (row==0 or row==len(board)-1) and board[row][col]=='O':
return False
if (col==0 or col==len(board[0])-1) and board[row][col]=='O':
return False
if board[row][col]=='X':
return True
tobechanged.add((row,col))
directions = [[0,1],[0,-1],[1,0],[-1,0]]
conclusion = True
for i in directions:
conclusion = conclusion and self.dfs(board, row+i[0], col+i[1], tobechanged)
return conclusion | 40.677419 | 88 | 0.483743 |
acf23ea670ef7e19c77f8fb6a5b615aec328bf95 | 11,384 | py | Python | src/_repobee/git.py | tohanss/repobee | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | [
"MIT"
] | null | null | null | src/_repobee/git.py | tohanss/repobee | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | [
"MIT"
] | null | null | null | src/_repobee/git.py | tohanss/repobee | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | [
"MIT"
] | null | null | null | """Wrapper functions for git commands.
.. module:: git
:synopsis: Wrapper functions for git CLI commands, such as push and clone.
.. moduleauthor:: Simon Larsén
"""
import asyncio
import enum
import os
import pathlib
import shutil
import subprocess
import sys
import dataclasses
from typing import Iterable, List, Any, Callable, Tuple, Awaitable, Sequence
import more_itertools
import git # type: ignore
import repobee_plug as plug
from _repobee import exception
from _repobee import util
CONCURRENT_TASKS = 20
@dataclasses.dataclass(frozen=True)
class Push:
    """Specification of a single ``git push``: which local repo to push,
    where to, and to which branch.
    """

    local_path: pathlib.Path  # local repository to push from
    repo_url: str  # remote URL to push to
    branch: str  # remote branch to push to
    # Arbitrary caller-attached data; not part of the push itself.
    metadata: dict = dataclasses.field(default_factory=dict)

    def __iter__(self):
        """Iter implementation just to make this dataclass unpackable.

        Note: ``metadata`` is deliberately excluded from unpacking.
        """
        return iter((self.local_path, self.repo_url, self.branch))
@dataclasses.dataclass(frozen=True)
class CloneSpec:
    """Specification of a single clone: destination, source URL and branch."""

    dest: pathlib.Path  # directory to clone into
    repo_url: str  # URL to clone from
    branch: str = ""  # branch to pull; empty means the remote default
    # Arbitrary caller-attached data (e.g. the originating StudentRepo).
    metadata: dict = dataclasses.field(default_factory=dict)
_EMPTY_REPO_ERROR = b"""fatal: Couldn't find remote ref HEAD"""
def _ensure_repo_dir_exists(clone_spec: CloneSpec) -> None:
    """Checks if a dir for the repo url exists, and if it does not, creates
    it. Also initializes (or reinitializes, if it already exists) as a git
    repo.

    Args:
        clone_spec: Clone specification; only ``dest`` is used here.
    """
    if not clone_spec.dest.exists():
        clone_spec.dest.mkdir(parents=True)
    if not util.is_git_repo(str(clone_spec.dest)):
        # An initialized-but-empty repo lets us "clone" via git pull without
        # ever writing a remote (and thus an auth token) to disk.
        _git_init(clone_spec.dest)
def _git_init(dirpath):
    """Initialize ``dirpath`` as a (possibly re-initialized) git repository."""
    init_command = ["git", "init"]
    captured_run(init_command, cwd=str(dirpath))
async def _pull_clone_async(clone_spec: CloneSpec):
    """Simulate a clone with a pull to avoid writing remotes (that could
    include secure tokens) to disk.

    Args:
        clone_spec: Specification of the repo to "clone".

    Returns:
        A ``(returncode, stderr)`` tuple from the ``git pull`` subprocess.
    """
    _ensure_repo_dir_exists(clone_spec)
    # Pull straight from the URL so no remote (and no embedded token) is
    # ever stored in the repository's config.
    pull_command = (
        f"git pull {clone_spec.repo_url} "
        f"{clone_spec.branch or ''}".strip().split()
    )
    proc = await asyncio.create_subprocess_exec(
        *pull_command,
        cwd=str(clone_spec.dest),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    _, stderr = await proc.communicate()
    return proc.returncode, stderr
def captured_run(*args, **kwargs):
    """Run a subprocess, capturing its output.

    Returns:
        A ``(returncode, stdout, stderr)`` tuple with raw bytes output.
    """
    completed = subprocess.run(
        *args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        **kwargs,
    )
    return completed.returncode, completed.stdout, completed.stderr
def clone_single(repo_url: str, branch: str = "", cwd: str = "."):
    """Clone a git repository with ``git clone``.

    This should only be used for temporary cloning, as any secure tokens in
    the repo URL are stored in the repository.

    Args:
        repo_url: HTTPS url to repository on the form
            https://<host>/<owner>/<repo>.
        branch: The branch to clone.
        cwd: Working directory. Defaults to the current directory.

    Raises:
        exception.CloneFailedError: If the git command exits non-zero.
    """
    # The branch must be selected with --branch: a bare positional argument
    # after the URL is interpreted by git as the *target directory*, which
    # silently cloned the default branch into a dir named after the branch.
    command = ["git", "clone", "--single-branch"]
    if branch:
        command += ["--branch", branch]
    command.append(repo_url)
    rc, _, stderr = captured_run(command, cwd=cwd)
    if rc != 0:
        raise exception.CloneFailedError(
            "Failed to clone",
            rc,
            stderr,
            CloneSpec(
                repo_url=repo_url,
                dest=pathlib.Path(cwd) / util.repo_name(repo_url),
                branch=branch,
            ),
        )
async def _clone_async(clone_spec: CloneSpec):
    """Clone git repositories asynchronously.

    Args:
        clone_spec: A clone specification.

    Raises:
        exception.CloneFailedError: If the pull fails for any reason other
            than the remote repository being empty.
    """
    rc, stderr = await _pull_clone_async(clone_spec)

    # An empty remote is not a failure: the local (empty) repo directory
    # initialized by the pull helper serves as the "clone".
    if rc != 0 and _EMPTY_REPO_ERROR not in stderr:
        raise exception.CloneFailedError(
            "Failed to clone {}".format(clone_spec.repo_url),
            returncode=rc,
            stderr=stderr,
            clone_spec=clone_spec,
        )
    else:
        plug.log.info("Cloned into {}".format(clone_spec.repo_url))
class CloneStatus(enum.Enum):
    """Outcome of attempting to clone a single student repository."""

    CLONED = enum.auto()   # freshly cloned in this run
    EXISTED = enum.auto()  # already present on disk before this run
    FAILED = enum.auto()   # clone was attempted but failed
def clone_student_repos(
    repos: List[plug.StudentRepo],
    clone_dir: pathlib.Path,
    update_local: bool,
    api: plug.PlatformAPI,
) -> Iterable[Tuple[CloneStatus, plug.StudentRepo]]:
    """Clone student repos into ``clone_dir`` and copy each successful clone
    to its target ``repo.path``.

    Args:
        repos: Student repos to clone; each must have a local ``path`` set.
        clone_dir: Intermediate directory to clone into (hashed subdirs).
        update_local: If True, pull new changes into repos that already
            exist locally; if False, existing repos are only warned about.
        api: Platform API used to insert auth into repo URLs.

    Returns:
        ``(status, repo)`` pairs, one per input repo.
    """
    assert all(map(lambda r: r.path is not None, repos))
    local = [repo for repo in repos if repo.path.exists()]
    if local and update_local:
        _update_local_repos(local, api)
    elif local and not update_local:
        _warn_local_repos(local)

    non_local = [repo for repo in repos if not repo.path.exists()]
    plug.log.info(f"Cloning into {non_local}")
    non_local_specs = [
        CloneSpec(
            # Hash the target path to get a unique, filesystem-safe
            # directory name inside the intermediate clone dir.
            dest=clone_dir / plug.fileutils.hash_path(repo.path),
            repo_url=api.insert_auth(repo.url),
            metadata=dict(repo=repo),
        )
        for repo in non_local
    ]

    failed_specs = clone(non_local_specs)

    failed_repos = {spec.metadata["repo"] for spec in failed_specs}
    success_repos = {repo for repo in non_local if repo not in failed_repos}

    for repo in success_repos:
        # Copy from the intermediate clone location to the repo's real path.
        shutil.copytree(
            src=clone_dir / plug.fileutils.hash_path(repo.path), dst=repo.path
        )

    return (
        [(CloneStatus.EXISTED, repo) for repo in local]
        + [(CloneStatus.CLONED, repo) for repo in success_repos]
        + [(CloneStatus.FAILED, repo) for repo in failed_repos]
    )
def _warn_local_repos(local: List[plug.StudentRepo]):
    """Log a warning listing repos that already exist on disk."""
    repo_ids = ", ".join(
        "{}/{}".format(repo.team.name, repo.name) for repo in local
    )
    plug.log.warning(f"Found local repos, skipping: {repo_ids}")
def _update_local_repos(
    local: List[plug.StudentRepo], api: plug.PlatformAPI
) -> None:
    """Pull the latest changes into already-existing local repos.

    All repos are expected to share a common grandparent directory
    (``<base>/<team>/<repo>``); this invariant is asserted before pulling.
    """
    expected_basedir = local[0].path.parent.parent
    assert all(
        map(lambda repo: repo.path.parent.parent == expected_basedir, local)
    )
    # Pulling into the existing path performs the update without storing
    # the (token-bearing) remote URL on disk.
    specs = [
        CloneSpec(repo_url=api.insert_auth(repo.url), dest=repo.path)
        for repo in local
    ]
    # TODO figure out what to do when a local update fails
    clone(specs)
def clone(clone_specs: Iterable[CloneSpec]) -> List[CloneSpec]:
    """Clone all repos asynchronously.

    Args:
        clone_specs: Clone specifications for repos to clone.

    Returns:
        Specs for which the cloning failed.
    """
    failed_specs = []
    for exc in _batch_execution(_clone_async, clone_specs):
        if isinstance(exc, exception.CloneFailedError):
            failed_specs.append(exc.clone_spec)
    return failed_specs
async def _push_async(pt: Push):
    """Asynchronous call to git push, pushing directly to the repo_url and
    branch.

    Args:
        pt: A Push namedtuple.

    Raises:
        exception.PushFailedError: If the push exits with a non-zero code.
    """
    command = ["git", "push", pt.repo_url, pt.branch]
    proc = await asyncio.create_subprocess_exec(
        *command,
        cwd=os.path.abspath(pt.local_path),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    _, stderr = await proc.communicate()

    if proc.returncode != 0:
        raise exception.PushFailedError(
            "Failed to push to {}".format(pt.repo_url),
            # returncode may in principle be falsy/None here; fall back to a
            # sentinel so the error always carries an int.
            proc.returncode or -sys.maxsize,
            stderr,
            pt.repo_url,
        )
    elif b"Everything up-to-date" in stderr:
        # git reports this on stderr when there was nothing to push.
        plug.log.info("{} is up-to-date".format(pt.repo_url))
    else:
        plug.log.info("Pushed files to {} {}".format(pt.repo_url, pt.branch))
def _push_no_retry(push_tuples: Iterable[Push]) -> List[str]:
    """Push to all repos defined in push_tuples asynchronously. Amount of
    concurrent tasks is limited by CONCURRENT_TASKS.

    Pushes once and only once to each repo.

    Args:
        push_tuples: Push namedtuples defining local and remote repos.

    Returns:
        urls to which pushes failed with exception.PushFailedError. Other
        errors are only logged.
    """
    # _batch_execution yields the exceptions raised by the individual
    # pushes; only PushFailedError carries a URL worth reporting back.
    return [
        exc.url
        for exc in _batch_execution(_push_async, push_tuples)
        if isinstance(exc, exception.PushFailedError)
    ]
def push(
    push_tuples: Iterable[Push], tries: int = 3
) -> Tuple[List[Push], List[Push]]:
    """Push to all repos defined in push_tuples asynchronously. Amount of
    concurrent tasks is limited by CONCURRENT_TASKS. Pushing to repos is
    tried a maximum of ``tries`` times (i.e. pushing is _retried_
    ``tries - 1`` times.)

    Args:
        push_tuples: Push namedtuples defining local and remote repos.
        tries: Amount of times to try to push (including initial push).

    Returns:
        A tuple of lists of push tuples on the form (successful, failures).

    Raises:
        ValueError: If ``tries`` is less than 1.
    """
    if tries < 1:
        raise ValueError("tries must be larger than 0")
    push_tuples = list(push_tuples)
    # confusing, but failed_pts needs an initial value
    failed_pts = list(push_tuples)
    for i in range(tries):
        plug.log.info("Pushing, attempt {}/{}".format(i + 1, tries))
        failed_urls = set(_push_no_retry(failed_pts))
        # Only pushes that failed this round are retried next round.
        failed_pts = [pt for pt in failed_pts if pt.repo_url in failed_urls]
        if not failed_pts:
            break
        plug.log.warning("{} pushes failed ...".format(len(failed_pts)))

    successful_pts = [pt for pt in push_tuples if pt not in failed_pts]
    return successful_pts, failed_pts
def _batch_execution(
    batch_func: Callable[..., Awaitable],
    arg_list: Iterable[Any],
    *batch_func_args,
    **batch_func_kwargs,
) -> Sequence[Exception]:
    """Take a batch function (any function whose first argument is an
    iterable) and send in CONCURRENT_TASKS amount of arguments from the
    arg_list until it is exhausted. The batch_func_kwargs are provided on
    each call.

    Args:
        batch_func: A function that takes an iterable as a first argument
            and returns a list of asyncio.Task objects.
        arg_list: A list of objects that are of the same type as the
            batch_func's first argument.
        batch_func_kwargs: Additional keyword arguments to the batch_func.

    Returns:
        a list of exceptions raised in the tasks returned by the batch
        function.
    """
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated on newer Pythons; asyncio.run would be the modern choice.
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(
        _batch_execution_async(
            batch_func, arg_list, *batch_func_args, **batch_func_kwargs
        )
    )
async def _batch_execution_async(
    batch_func: Callable[..., Awaitable],
    arg_list: Iterable[Any],
    *batch_func_args,
    **batch_func_kwargs,
) -> Sequence[Exception]:
    """Async counterpart of :func:`_batch_execution`.

    Runs ``batch_func`` over ``arg_list`` in chunks of CONCURRENT_TASKS,
    showing a tqdm progress bar per chunk, and collects (rather than
    raises) any GitError from the individual tasks.

    Returns:
        The GitError exceptions raised by the individual tasks.
    """
    # Imported here so tqdm is only required when batching actually runs.
    import tqdm.asyncio  # type: ignore

    exceptions = []
    loop = asyncio.get_event_loop()
    for batch, args_chunk in enumerate(
        more_itertools.ichunked(arg_list, CONCURRENT_TASKS), start=1
    ):
        tasks = [
            loop.create_task(
                batch_func(arg, *batch_func_args, **batch_func_kwargs)
            )
            for arg in args_chunk
        ]
        for coro in tqdm.asyncio.tqdm_asyncio.as_completed(
            tasks, desc=f"Progress batch {batch}", file=sys.stdout
        ):
            try:
                await coro
            except exception.GitError as exc:
                # Collect so one failure doesn't abort the whole batch.
                exceptions.append(exc)

    for e in exceptions:
        plug.log.error(str(e))

    return exceptions
def active_branch(repo_path: pathlib.Path) -> str:
    """Get the active branch from the given repo.

    Args:
        repo_path: Path to a repo.

    Returns:
        The active branch of the repo.
    """
    repo = git.Repo(repo_path)
    return repo.active_branch.name
| 29.801047 | 82 | 0.652758 |
acf240bb8cd3e1df4c6db22f32191170ca13b395 | 8,898 | py | Python | gord/mockwin32.py | angvp/gord | f6b11023dcd8c3cf67bb7bd9ad026570d1debd0c | [
"Apache-2.0"
] | 2 | 2018-04-22T17:50:31.000Z | 2018-04-22T17:50:49.000Z | gord/mockwin32.py | jongillies/gord | 45680b72df1de91a0a9e04d909009342bebd6a29 | [
"Apache-2.0"
] | null | null | null | gord/mockwin32.py | jongillies/gord | 45680b72df1de91a0a9e04d909009342bebd6a29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide mock pywin32 functionality on a non-windows platform, i.e. Linux.
Provide entire modules and methods.
Points event viewer like logs to stderr.
Provides a basic service framework where code can pretend to start, stop,
listen for sock or other events, etc.
Object naming scheme:
MockAllCapsFormModule = equivalent to pywin32 module all_caps_form
MockAllCapsForm = equivalent to pywin32 object AllCapsForm
Method parameters retain their Win32 naming scheme. This is
intentional and produces gpylint errors.
"""
import re
import select
import sys
import threading
import time
DEBUG = False
class Error(Exception):
  """Base error for this module; all mockwin32 errors derive from it."""


class ServiceUnknown(Error):
  """Service is unknown.

  Subclasses the module's Error base (previously it derived directly from
  Exception, escaping any ``except Error`` handler).
  """
def SafeLoadMockModules(force=False):
  """Load the Win32 mock modules.

  Note: This method is careful to only replace
  the module in sys.modules if it doesn't already
  exist. This avoids a problem where values changed
  in the module may be wiped out as multiple
  modules load and call this method.

  Args:
    force: bool, default False, True to load mocks even if
        we're on Win32
  """
  if sys.platform == 'win32' and not force:
    return

  # pywin32 module name -> mock class providing its stand-in interface.
  load_list = [
      ('servicemanager', MockServiceManagerModule),
      ('win32serviceutil', MockWin32ServiceUtilModule),
      ('win32service', MockWin32ServiceModule),
      ('win32event', MockWin32EventModule),
      ('win32evtlogutil', MockWin32EvtLogUtilModule),
      ('win32file', MockWin32FileModule),
      ('pythoncom', MockPythonComModule),
      ('win32security', MockWin32SecurityModule),
  ]

  for (module_name, module_class) in load_list:
    # Install a mock *instance* as the module; never clobber a module
    # that is already present (real or previously mocked).
    if not module_name in sys.modules:
      sys.modules[module_name] = module_class()
def LogDebugMsg(*args):
  """Log a debug message.

  Args:
    args: any number of args which will be logged with space separation
  """
  if DEBUG:
    # str.join takes a single iterable; the previous ``' '.join(*args)``
    # unpacked the tuple, raising TypeError for multiple args and
    # space-separating the *characters* of a lone string argument
    # (which is how every call site in this file invokes it).
    servicemanager.LogMsg(' '.join(str(arg) for arg in args))
class MockServiceManagerModule(object):
  """Mock Win32 ServiceManager module.

  Log* methods mirror the servicemanager API but write to stderr; any
  ALL_CAPS attribute lookup returns the attribute's own name, standing in
  for the real module's many integer constants.
  """

  def __init__(self):
    # Names matching this pattern are treated as mock "constants".
    self.constant_re = re.compile(r'^[A-Z_]+$')

  def Log(self, *x):
    # Python 2 print-to-stream syntax; this file targets python2.4.
    print >>sys.stderr, time.time(), x

  def LogMsg(self, *x):
    self.Log('LogMsg', x)

  def LogErrorMsg(self, *x):
    self.Log('LogErrorMsg', x)

  def LogWarningMsg(self, *x):
    self.Log('LogWarningMsg', x)

  def LogInfoMsg(self, *x):
    self.Log('LogInfoMsg', x)

  def __getattr__(self, x):
    # Fake the module's constants: FOO_BAR -> 'FOO_BAR'.
    if self.constant_re.search(x):
      return x
    else:
      raise AttributeError(x)
# Module-level singleton used by the mocks themselves; mirrors importing
# the real `servicemanager` module.
msmm = MockServiceManagerModule()
servicemanager = msmm
class MockPythonComModule(object):
  """Mock Win32 PythonCom module.

  Tracks COM initialization per thread name in the class-level did_init
  mapping: 1 after CoInitialize, 0 after CoUninitialize.
  """

  did_init = {}

  #TODO(user): Expose did_init values in a way that testing would confirm
  #Co{,Un}Initialize is run once per thread instance.
  def CoInitialize(self):
    # threading.currentThread()/getName() were deprecated aliases and were
    # removed in Python 3.12; use the canonical spellings instead.
    self.did_init[threading.current_thread().name] = 1

  def CoUninitialize(self):
    self.did_init[threading.current_thread().name] = 0
class MockWin32EventModule(object):
  """Mock Win32 Win32Event module.

  WaitForMultipleObjects polls the supplied handles instead of blocking
  in the kernel; it returns WAIT_OBJECT_0 + index of the first handle
  reporting IsSet(), or falls through (returning None) once the timeout
  elapses.
  """

  # pylint: disable-msg=C6409
  WAIT_OBJECT_0 = 0
  INFINITE = -1

  def SetEvent(self, eventobj):
    """Signal the given event object."""
    eventobj.Set()

  def CreateEvent(self, sa, bManualReset, bInitialState, objectName):
    """Create and return a MockWin32Event with the given attributes."""
    return MockWin32Event(sa, bManualReset, bInitialState, objectName)

  # pylint: disable-msg=W0613
  def WaitForMultipleObjects(self, handleList, bWaitAll, milliseconds):
    """Poll handleList until one is set or `milliseconds` have elapsed."""
    LogDebugMsg(
        'WFMObjects handleList=%s timeout=%s' % (handleList, milliseconds))
    start_time = time.time()
    while True:
      LogDebugMsg('loop, timeout=')
      for index, handle in enumerate(handleList):
        LogDebugMsg('looking at %s' % str(handle))
        if handle.IsSet():
          LogDebugMsg('IsSet %d' % index)
          return self.WAIT_OBJECT_0 + index
        LogDebugMsg('not set')
      if milliseconds != self.INFINITE:
        elapsed_ms = (time.time() - start_time) * 1000
        if elapsed_ms > milliseconds:
          break
      time.sleep(1.0)
class MockWin32EvtLogUtilModule(object):
  """Mock Win32 Win32EvtLogUtil module."""

  def AddSourceToRegistry(self, *unused_args):
    """No-op: there is no Windows registry to update off Windows."""
    return None
class MockWin32ServiceUtilModule(object):
"""Mock Win32 Win32ServiceUtil module."""
class ServiceFramework(object):
def __init__(self, args):
self.args = args
def ReportServiceStatus(self, x):
servicemanager.Log('ReportServiceStatus', x)
services = {}
service_name = None
def SetServiceName(self, name):
"""Set the service name. Used during unittests, not a Win32 function."""
self.service_name = name
def GetService(self, service_name):
"""Get service. Used during unittests, not a Win32 function."""
if service_name in self.services:
return self.services[service_name]
else:
raise ServiceUnknown(service_name)
def ServiceStart(self, service_type, argv):
if self.service_name is None:
if 'ServiceNameUndef' in self.services:
raise Exception('Define a unique service name')
else:
self.service_name = 'ServiceNameUndef'
service = service_type(argv)
thread = threading.Thread(target=service.SvcDoRun)
self.services[self.service_name] = {
'service': service,
'thread': thread,
}
thread.start()
return service
# pylint: disable-msg=W0613
def ServiceStop(self, service_type=None, argv=None, service_name=None):
if service_name is None:
service_name = self.service_name
service = self.GetService(self.service_name)
service['service'].SvcStop()
service['thread'].join()
return service['service']
def ServiceInstall(self, service_type, argv):
pass
def Usage(self):
print 'MockWin32 Service Framework'
print
print '(command) [start|stop|install|debug]'
# pylint: disable-msg=W0613
def HandleCommandLine(self, service_type, instance_name=None, argv=()):
"""Parse command line and handle requested actions.
Args:
service_type: class to instantiate
instance_name: string name of instance e.g. "mod.mod.mod.Class"
argv: list of arguments to supply, e.g. ['start']
"""
if len(argv) < 2:
self.Usage()
elif argv[1] in ['start', 'debug']:
if argv[1] == 'debug':
self.SetServiceName('debug')
self.ServiceStart(service_type, argv)
elif argv[1] == 'stop':
self.ServiceStop(service_type, argv)
elif argv[1] == 'install':
self.ServiceInstall(service_type, argv)
else:
self.Usage()
class MockWin32Event(object):
  """Mock Win32 Win32Event class backed by threading.Event.

  When WSAEventSelect has associated a socket (see MockWin32FileModule),
  IsSet also polls that socket with select() so network activity counts
  as "signaled".
  """

  def __init__(self, sa, bManualReset, bInitialState, objectName):
    # pylint: disable-msg=C6409
    self.sa = sa
    self.bManualReset = bManualReset
    self.bInitialState = bInitialState
    self.objectName = objectName
    self.event = threading.Event()
    self.socket = None
    self.networkEvents = None

  def Set(self):
    """Signal the underlying event."""
    self.event.set()

  def IsSet(self):
    """Return True if the event is signaled or the socket is readable."""
    # Event.isSet() was a deprecated alias removed in Python 3.12; use
    # the canonical is_set() spelling.
    LogDebugMsg('IsSet? event.isSet=%s' % self.event.is_set())
    LogDebugMsg(
        'socket=%s ne=%s' % (str(self.socket), str(self.networkEvents)))
    if self.event.is_set():
      return True
    # NOTE: networkEvents mask is basically ignored, any
    # event taken to be interesting to our select loop.
    if self.socket is not None and self.networkEvents > 0:
      x = select.select((self.socket,), (), (), 0.25)
      LogDebugMsg('select returns %s' % str(x))
      if len(x[0]) > 0 and x[0][0] == self.socket:
        return True
    LogDebugMsg('returning False')
    return False
class MockWin32FileModule(object):
  """Mock Win32 Win32File module.

  The FD_* constants mirror the winsock network-event bit flags.
  """

  FD_READ = 1
  FD_WRITE = 2
  FD_OOB = 4
  FD_ACCEPT = 8
  FD_CONNECT = 16
  FD_CLOSE = 32
  FD_QOS = 64
  FD_GROUP_QOS = 128
  FD_ROUTING_INTERFACE_CHANGE = 256
  FD_ADDRESS_LIST_CHANGE = 512

  # pylint: disable-msg=C6409
  def WSAEventSelect(self, socket, hEvent, networkEvents):
    # Associate the socket and event mask with hEvent so that
    # MockWin32Event.IsSet can poll the socket via select().
    LogDebugMsg('WSAEventSelect')
    hEvent.socket = socket
    hEvent.networkEvents = networkEvents
class MockWin32ServiceModule(object):
  """Mock Win32 win32service module (constants only)."""
  SERVICE_STOP_PENDING = 3
class MockWin32SecurityModule(object):
  """Mock Win32security module.

  Only the constants are usable; credential checks are unsupported off
  Windows, so LogonUser always raises NotImplementedError.
  """

  LOGON32_LOGON_NETWORK = 3
  LOGON32_PROVIDER_DEFAULT = 0

  # pylint: disable-msg=C6409
  class error(Exception):
    """Error."""

  def LogonUser(self, username, domain, password, logon_type, logon_provider):
    raise NotImplementedError
| 27.045593 | 78 | 0.688694 |
acf241009853f5873e0000e586e4ffb06d72ec7a | 232 | py | Python | tbot/migrations/20190126_02_Fnsd3-drop-twitch-chatlog-insert-trigger.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | null | null | null | tbot/migrations/20190126_02_Fnsd3-drop-twitch-chatlog-insert-trigger.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | 10 | 2022-02-14T11:40:20.000Z | 2022-03-09T22:44:03.000Z | tbot/migrations/20190126_02_Fnsd3-drop-twitch-chatlog-insert-trigger.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | 1 | 2020-09-19T16:38:24.000Z | 2020-09-19T16:38:24.000Z | """
drop twitch_chatlog insert trigger
"""
from yoyo import step
__depends__ = {'20190126_01_zvBuo-twitch-user-chat-deleted-counter'}
steps = [
step('''
DROP TRIGGER IF EXISTS `twitch_chatlog_AFTER_INSERT`
''')
]
| 16.571429 | 68 | 0.689655 |
acf24355aad635761160f0a31ce1f612e34e71ed | 6,396 | py | Python | inceptionv3/inception_transfer.py | carbo-T/TF | 56ebfc253615b22fc3a55ba5e952837c47bf85cf | [
"MIT"
] | 1 | 2018-11-01T04:16:58.000Z | 2018-11-01T04:16:58.000Z | inceptionv3/inception_transfer.py | carbo-T/TF | 56ebfc253615b22fc3a55ba5e952837c47bf85cf | [
"MIT"
] | null | null | null | inceptionv3/inception_transfer.py | carbo-T/TF | 56ebfc253615b22fc3a55ba5e952837c47bf85cf | [
"MIT"
] | null | null | null | # -*- utf-8 -*-
import glob
import os.path
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3
# Path to the preprocessed flower dataset (numpy archive).
INPUT_DATA = 'preprocess/flower_processed_data.npy'
# Directory where fine-tuned checkpoints are written.
TRAIN_FILE = 'model/'
# Pretrained Inception-v3 checkpoint to start from.
CKPT_FILE = '../../dataset/inception_v3.ckpt'

# Training hyper-parameters.
LEARNING_RATE = 0.0001
STEPS = 1000
BATCH = 32
N_CLASSES = 5

# Layers NOT loaded from the checkpoint, i.e. the last fc layers; these
# same scopes are the only ones re-trained during fine-tuning.
CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
TRAINABLE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
# False: run interactive evaluation on random test images instead of training.
TRAINING = False
# Class names (English + Chinese): daisy, roses, tulips, sunflowers, dandelion.
flower_label = ["daisy雏菊", "roses玫瑰", "tulips郁金香", "sunflowers向日葵", "dandelion蒲公英"]
def get_tuned_variables():
    """Return the Inception-v3 variables to restore from the checkpoint,
    skipping every scope listed in CHECKPOINT_EXCLUDE_SCOPES."""
    excluded_prefixes = tuple(
        scope.strip() for scope in CHECKPOINT_EXCLUDE_SCOPES.split(','))
    # str.startswith accepts a tuple of prefixes, replacing the explicit
    # inner loop over exclusions.
    return [var for var in slim.get_model_variables()
            if not var.op.name.startswith(excluded_prefixes)]
def get_trainable_variables():
    """Collect the trainable variables from every scope in TRAINABLE_SCOPES."""
    variables_to_train = []
    for scope_name in (s.strip() for s in TRAINABLE_SCOPES.split(',')):
        variables_to_train.extend(
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope_name))
    return variables_to_train
def main():
    """Fine-tune / evaluate Inception-v3 on the flower dataset.

    When TRAINING is True, the final layers are trained for STEPS batches
    with periodic checkpointing and validation; otherwise random test
    images are displayed with predicted vs. correct labels.
    """
    # processed_data = np.load("preprocess/test_flower.npy", allow_pickle=True)
    # test_images = processed_data[0]
    # test_labels = processed_data[1]

    # load preprocessed data (6-element array: train/val/test images+labels)
    processed_data = np.load(INPUT_DATA, allow_pickle=True)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    # np.save("preprocess/training_flower.npy", np.asarray([training_images, training_labels]))
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    # np.save("preprocess/validation_flower.npy", np.asarray([validation_images, validation_labels]))
    test_images = processed_data[4]
    test_labels = processed_data[5]
    # np.save("preprocess/test_flower.npy", np.asarray([test_images, test_labels]))
    print("%d training examples, %d validation examples and %d testing examples." % (
        n_training_example, len(validation_labels), len(test_labels)))

    # define inputs (299x299x3 is the fixed Inception-v3 input size)
    images = tf.placeholder(
        tf.float32, [None, 299, 299, 3], name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # define model
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES, is_training=False)

    # get trainable variables (only the final classification heads)
    trainable_variables = get_trainable_variables()
    # define cross entropy
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES), logits, weights=1.0)
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(tf.losses.get_total_loss())

    # calc accuracy
    with tf.name_scope('evaluation'):
        prediction = tf.argmax(logits, 1)
        correct_answer = labels
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # define func to load the pretrained checkpoint (excluded scopes skipped)
    load_fn = slim.assign_from_checkpoint_fn(
        CKPT_FILE,
        get_tuned_variables(),
        ignore_missing_vars=True
    )

    # define saver
    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True)
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # init
        init = tf.global_variables_initializer()
        sess.run(init)
        # resume from a fine-tuned checkpoint if one exists, otherwise
        # start from the original ImageNet weights
        ckpt = tf.train.get_checkpoint_state(
            TRAIN_FILE
        )
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # load origin model
            print('loading tuned variables from %s' % CKPT_FILE)
            load_fn(sess)

        start = 0
        end = BATCH
        if TRAINING:
            for i in range(STEPS):
                sess.run(train_step, feed_dict={
                    images: training_images[start:end],
                    labels: training_labels[start:end]
                })
                # checkpoint + validation every 20 steps and on the last step
                if i % 20 == 0 or i + 1 == STEPS:
                    saver.save(sess, TRAIN_FILE, global_step=i)
                    validation_accuracy = sess.run(evaluation_step, feed_dict={
                        images: validation_images,
                        labels: validation_labels
                    })
                    print('step %d: validation accuracy = %.1f%%' % (i, validation_accuracy * 100.0))
                # advance the sliding batch window, wrapping at the end
                start = end
                if start == n_training_example:
                    start = 0
                end = start + BATCH
                if end > n_training_example:
                    end = n_training_example

            # test accuracy
            test_acccuracy = sess.run(evaluation_step, feed_dict={
                images: test_images,
                labels: test_labels
            })
            print('final test accuracy = %.1f%%' % (test_acccuracy * 100.0))
        else:
            # interactive evaluation loop: show one random test image at a time
            while True:
                index = np.random.randint(0, len(test_labels) - 2)
                # run prediction on a single test image
                prediction_score, correct_answer_score = sess.run([prediction, correct_answer], feed_dict={
                    images: test_images[index:index+1],
                    labels: test_labels[index:index+1]
                })
                result = [(flower_label[x]+str(x)) for x in prediction_score]
                answer = [(flower_label[x]+str(x)) for x in correct_answer_score]
                # print(result)
                # print(answer)
                plt.imshow(test_images[index])
                print('test result: %s, correct answer: %s' % (
                    result, answer))
                plt.show()
                time.sleep(3)


if __name__ == '__main__':
    main()
| 34.95082 | 107 | 0.631801 |
acf24365a699797849bd9869c05fd9160dcdfaf2 | 3,469 | py | Python | spec/dataset/dataset.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | 2 | 2020-11-26T07:46:48.000Z | 2021-07-28T08:06:58.000Z | spec/dataset/dataset.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | null | null | null | spec/dataset/dataset.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | null | null | null | import warnings
from torchtext.data import interleave_keys
from spec.dataset.corpora import available_corpora
from spec.dataset.corpora.text import TextCorpus
from spec.dataset.corpora.text_pair import TextPairCorpus
from spec.dataset.corpora.text_pair_with_marks import TextPairWithMarksCorpus
from spec.dataset.modules.dataset import LazyDataset
def build(path, fields_tuples, options):
def filter_len(x):
return options.min_length <= len(x.words) <= options.max_length
corpus_cls = available_corpora[options.corpus]
corpus = corpus_cls(fields_tuples, lazy=options.lazy_loading)
examples = corpus.read(path)
return TextDataset(examples, fields_tuples, filter_pred=filter_len)
def build_texts(texts, fields_tuples, options):
def filter_len(x):
return options.min_length <= len(x.words) <= options.max_length
corpus = TextCorpus(fields_tuples, lazy=options.lazy_loading)
examples = corpus.read(texts)
return TextDataset(examples, fields_tuples, filter_pred=filter_len)
def build_pair_texts(texts_ab, fields_tuples, options):
def filter_len(x):
return options.min_length <= len(x.words) <= options.max_length
corpus = TextPairCorpus(fields_tuples, lazy=options.lazy_loading)
examples = corpus.read(texts_ab)
return TextDataset(examples, fields_tuples, filter_pred=filter_len)
def build_pair_texts_with_marks(texts_abc, fields_tuples, options):
def filter_len(x):
return options.min_length <= len(x.words) <= options.max_length
corpus = TextPairWithMarksCorpus(fields_tuples, lazy=options.lazy_loading)
examples = corpus.read(texts_abc)
return TextDataset(examples, fields_tuples, filter_pred=filter_len)
class TextDataset(LazyDataset):
"""Defines a dataset for TextClassification"""
@staticmethod
def sort_key(ex):
"""Use the number of words as the criterion for sorting a batch."""
if hasattr(ex, 'words_hyp'):
return interleave_keys(len(ex.words), len(ex.words_hyp))
return len(ex.words)
def __init__(self, examples, fields_tuples, filter_pred=None):
"""Create a dataset from a list of Examples and Fields.
Arguments:
examples: A list or a generator of examples. Usually, the output
of corpus.read()
filter_pred (callable or None): Use only examples for which
filter_pred(example) is True, or use all examples if None.
Default: None.
"""
is_lazy = hasattr(examples, 'lazy') and examples.lazy is True
super().__init__(examples, fields_tuples, filter_pred, not is_lazy)
def get_loss_weights(self):
from sklearn.utils.class_weight import compute_class_weight
target_vocab = self.fields['target'].vocab.stoi
y = [target_vocab[t] for ex in self.examples for t in ex.target]
classes = list(set(y))
return compute_class_weight('balanced', classes, y)
def __len__(self):
try:
return len(self.examples)
except ValueError:
warnings.warn("Corpus loaded in lazy mode and its length was not "
"determined yet. Returning 0 for now since in order "
"to calculate this number we'd have to go through "
"the entire dataset at least once, which can be very "
"expensive for large datasets.")
return 0
| 40.811765 | 80 | 0.692419 |
acf24384c4c02fb0a477bf80df1e745081735d8d | 1,387 | py | Python | vpype_cli/script.py | carewdavid/vpype | 2de339bb52b229cb7495c1f829c554ebf6d0ab32 | [
"MIT"
] | 453 | 2019-11-13T10:11:18.000Z | 2022-03-31T18:43:55.000Z | vpype_cli/script.py | carewdavid/vpype | 2de339bb52b229cb7495c1f829c554ebf6d0ab32 | [
"MIT"
] | 434 | 2019-11-21T07:20:01.000Z | 2022-03-31T17:21:56.000Z | vpype_cli/script.py | carewdavid/vpype | 2de339bb52b229cb7495c1f829c554ebf6d0ab32 | [
"MIT"
] | 47 | 2019-11-13T20:47:53.000Z | 2022-03-15T12:34:55.000Z | import importlib.util
import click
from vpype import LineCollection, generator
from .cli import cli
__all__ = ("script",)
@cli.command(group="Input")
@click.argument("file", type=click.Path(exists=True, dir_okay=False))
@generator
def script(file) -> LineCollection:
    """
    Call an external python script to generate geometries.

    The script must contain a `generate()` function which will be called without arguments. It
    must return the generated geometries in one of the following format:

    - Shapely's MultiLineString
    - Iterable of Nx2 numpy float array
    - Iterable of Nx1 numpy complex array (where the real and imag part corresponds to
      the x, resp. y coordinates)

    All coordinates are expected to be in SVG pixel units (1/96th of an inch).
    """
    try:
        spec = importlib.util.spec_from_file_location("<external>", file)
        if spec is None:
            raise FileNotFoundError(f"file {file} not found")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore
        return LineCollection(module.generate())  # type: ignore
    except Exception as exc:
        # Re-raise as ClickException so the CLI prints a clean error; chain
        # the original exception so it remains inspectable for debugging.
        raise click.ClickException(
            (
                # Bug fix: a space was missing between "`generate()`" and
                # "function" across the implicit string concatenation.
                f"the file path must point to a Python script containing a `generate()` "
                f"function ({str(exc)})"
            )
        ) from exc
acf244540558c1dd8fb719bb9f7f3fea15317604 | 1,270 | py | Python | backend/instrument/migrations/0004_auto_20200513_2303.py | felipemaion/financial-management-gate-django | 5f93a7d28a55852fed0a16d1830f92b0ee065948 | [
"MIT"
] | 2 | 2019-04-15T20:36:48.000Z | 2020-02-09T23:20:27.000Z | backend/instrument/migrations/0004_auto_20200513_2303.py | felipemaion/financial-management-gate-django | 5f93a7d28a55852fed0a16d1830f92b0ee065948 | [
"MIT"
] | 5 | 2020-02-12T00:06:06.000Z | 2020-06-05T05:09:45.000Z | backend/instrument/migrations/0004_auto_20200513_2303.py | felipemaion/financial-management-gate-django | 5f93a7d28a55852fed0a16d1830f92b0ee065948 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-05-13 23:03
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('instrument', '0003_auto_20200513_2143'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ['-event_date'], 'verbose_name': 'Event', 'verbose_name_plural': 'Events'},
),
migrations.AddField(
model_name='event',
name='event_date',
field=models.DateField(default=datetime.datetime(2020, 5, 13, 23, 3, 39, 676306, tzinfo=utc), verbose_name='event date'),
preserve_default=False,
),
migrations.AlterField(
model_name='event',
name='instrument',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instrument_event', to='instrument.Instrument'),
),
migrations.AlterUniqueTogether(
name='event',
unique_together={('instrument', 'event_date')},
),
migrations.RemoveField(
model_name='event',
name='eventDate',
),
]
| 31.75 | 142 | 0.607874 |
acf2447145ba38ad374cae2ac6db30beb2470067 | 617 | py | Python | back/main/views.py | aninstein/sex_code_blog | 483fcb566f2873204e545db2e4688dba3bee9db9 | [
"BSD-2-Clause"
] | 116 | 2019-09-19T01:00:48.000Z | 2022-03-16T08:52:15.000Z | back/main/views.py | aninstein/sex_code_blog | 483fcb566f2873204e545db2e4688dba3bee9db9 | [
"BSD-2-Clause"
] | 13 | 2019-08-13T14:39:21.000Z | 2022-03-22T02:36:46.000Z | back/main/views.py | aninstein/sex_code_blog | 483fcb566f2873204e545db2e4688dba3bee9db9 | [
"BSD-2-Clause"
] | 30 | 2019-08-13T10:13:07.000Z | 2022-01-19T09:50:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import abort, current_app, render_template, jsonify
from jinja2 import TemplateNotFound
from . import main_bp
@main_bp.route('/hello')
def hello_world():
    """Simple smoke-test endpoint returning a fixed greeting."""
    greeting = 'Hello World!'
    return greeting
@main_bp.route('/favicon.ico')
def favicon():
    """Serve the favicon from the application's static folder."""
    return current_app.send_static_file('favicon.ico')
@main_bp.route('/')
@main_bp.route('/index/')
def index():
    """Serve the index page.

    NOTE(review): the template render is commented out, so the plain
    string is always returned and TemplateNotFound can never be raised
    here -- confirm before removing the handler.
    """
    try:
        return 'This is Index Page.'
        # return render_template('index.html')
    except TemplateNotFound:
        abort(404)
@main_bp.route('/ping')
def ping_pong():
    """Liveness probe: respond with the JSON string 'pong!'."""
    response = jsonify('pong!')
    return response
| 18.69697 | 62 | 0.672609 |
acf245006f26c4a879a7048d6db652f4e40e4658 | 4,519 | py | Python | Trainer/train.py | Clayrisee/BachelorsProject-Covid19Detection | 8f1d88b04418b2d53f5a53260981dbc2c95e6c82 | [
"MIT"
] | null | null | null | Trainer/train.py | Clayrisee/BachelorsProject-Covid19Detection | 8f1d88b04418b2d53f5a53260981dbc2c95e6c82 | [
"MIT"
] | null | null | null | Trainer/train.py | Clayrisee/BachelorsProject-Covid19Detection | 8f1d88b04418b2d53f5a53260981dbc2c95e6c82 | [
"MIT"
] | null | null | null | import os
from comet_ml import Artifact, Experiment
import torch
from data.covid_dataloader import CovidDataModule
from utils.utils import generate_model_config, read_cfg, get_optimizer, get_device, generate_hyperparameters
from models.models import create_model
from trainer.Trainer import Trainer
from utils.schedulers import CosineAnealingWithWarmUp
from utils.callbacks import CustomCallback
from utils.logger import get_logger
import argparse
import torch.nn as nn
import pandas as pd
def count_weighted(csv):
    """Compute balanced class weights from a dataset CSV.

    The CSV must contain 'filename' and 'folder' columns, where 'folder'
    holds the class label.  Each weight is total / (n_classes * count),
    ordered by the sorted class labels (pandas groupby order).

    Returns a 1-D float torch.Tensor with one weight per class.
    """
    df = pd.read_csv(csv)
    class_counts = df.groupby('folder').count().filename
    num_classes = len(class_counts)
    num_files = len(df)
    weights = [num_files / (num_classes * count) for count in class_counts]
    return torch.Tensor(weights)
if __name__ == "__main__":
    # Parse the single required argument: path to the YAML training config.
    parser = argparse.ArgumentParser(description="Argument for train the model")
    parser.add_argument('-cfg', '--config', type=str, help="Path to config yaml file")
    args = parser.parse_args()
    cfg = read_cfg(cfg_file=args.config)
    hyperparameters = generate_hyperparameters(cfg)
    LOG = get_logger(cfg['model']['base'])  # console logger named after the model base
    LOG.info("Training Process Start")
    logger = Experiment(api_key=cfg['logger']['api_key'],
                        project_name=cfg['logger']['project_name'],
                        workspace=cfg['logger']['workspace'])  # Comet ML experiment tracker
    artifact = Artifact("Covid-19 Artifact", "Model")
    LOG.info("Comet Logger has successfully loaded.")
    device = get_device(cfg)
    LOG.info(f"{str(device)} has choosen.")
    # Build the network from the config (architecture + output classes).
    kwargs = dict(pretrained=cfg['model']['pretrained'], output_class=cfg['model']['num_classes'])
    network = create_model(cfg['model']['base'], **kwargs)
    print(network)
    LOG.info(f"Network {cfg['model']['base']} succesfully loaded.")
    optimizer = get_optimizer(cfg, network)
    LOG.info(f"Optimizer has been defined.")
    # Warm-up + cosine-annealing LR schedule.
    lr_scheduler = CosineAnealingWithWarmUp(optimizer,
                                            first_cycle_steps=250,
                                            cycle_mult=0.5,
                                            max_lr=1e-2,
                                            min_lr=cfg['train']['lr'],
                                            warmup_steps=100,
                                            gamma=0.5)
    LOG.info(f"Scheduler has been defined.")
    # Class-balanced loss weights computed from the training CSV.
    weight = count_weighted(os.path.join(cfg.dataset.root_dir,cfg.dataset.train_csv))
    weight = weight.to(device)
    criterion = nn.CrossEntropyLoss(weight=weight)
    LOG.info(f"Criterion has been defined")
    dataset = CovidDataModule(cfg)
    LOG.info(f"Dataset successfully loaded.")
    cb_config = dict(
        checkpoint_path=cfg['output_dir'],
        patience=cfg['custom_cb']['patience'],
        metric=cfg['custom_cb']['metric'],
        mode=cfg['custom_cb']['mode']
    )
    custom_cb = CustomCallback(**cb_config)
    LOG.info(f"Custom CB Initialized")
    logger.log_parameters(hyperparameters)
    LOG.info("Parameters has been Logged")
    generate_model_config(cfg)
    LOG.info("Model config has been generated")
    # Optionally resume from pretrained weights ('None' string disables).
    # NOTE(review): load_state_dict returns a result object, not the model,
    # so rebinding `network`/`optimizer` here looks wrong; also the bare
    # except silently swallows every load error -- confirm intent.
    try:
        if cfg['model']['pretrained_path'] != 'None':
            net_state_dict = torch.load(cfg['model']['pretrained_path'], map_location=device)
            network = network.load_state_dict(state_dict=net_state_dict)
        if cfg['optimizer']['pretrained_path'] != 'None':
            opt_state_dict = torch.load(cfg['optimizer']['pretrained_path'], map_location=device)
            optimizer = optimizer.load_state_dict(opt_state_dict)
        print("Pretrained has been loaded...")
    except:
        print("Pretrained Failed to Load.. Continues training process using weight from imageNet")
    trainer = Trainer(cfg, network, optimizer, criterion, dataset, device, callbacks=custom_cb, lr_scheduler=lr_scheduler, logger=logger)
    trainer.train()
    # Upload the resulting checkpoints and config as a Comet artifact.
    best_model_path = os.path.join(cfg['output_dir'], 'best_model.pth')
    best_optimizer_path = os.path.join(cfg['output_dir'], 'best_optimizer.pth')
    final_model_path = os.path.join(cfg['output_dir'], 'final_model.pth')
    final_optimizer_path = os.path.join(cfg['output_dir'], 'final_optimizer.pth')
    model_cfg_path = os.path.join(cfg['output_dir'], 'model-config.yaml')
    artifact.add(best_model_path)
    artifact.add(best_optimizer_path)
    artifact.add(final_model_path)
    artifact.add(final_optimizer_path)
    artifact.add(model_cfg_path)
    logger.log_artifact(artifact=artifact)
acf2459faaa4632d94572fa975df36b41118ed15 | 3,143 | py | Python | basicts/options/STNorm/STNorm_PEMS08.py | zezhishao/BasicTS | 584ca6f8215a6fc9976789b600996934ba2d499e | [
"Apache-2.0"
] | 3 | 2022-02-22T12:50:08.000Z | 2022-03-13T03:38:46.000Z | basicts/options/STNorm/STNorm_PEMS08.py | zezhishao/BasicTS | 584ca6f8215a6fc9976789b600996934ba2d499e | [
"Apache-2.0"
] | null | null | null | basicts/options/STNorm/STNorm_PEMS08.py | zezhishao/BasicTS | 584ca6f8215a6fc9976789b600996934ba2d499e | [
"Apache-2.0"
] | null | null | null | import os
from easydict import EasyDict
# architecture
from basicts.archs.STNorm_arch import STNorm
# runner
from basicts.runners.STNorm_runner import STNormRunner
from basicts.data.base_dataset import BaseDataset
from basicts.metrics.mae import masked_mae
from basicts.metrics.mape import masked_mape
from basicts.metrics.rmse import masked_rmse
from basicts.losses.losses import masked_l1_loss
CFG = EasyDict()

# ================= general ================= #
CFG.DESCRIPTION = 'STNorm model configuration'
CFG.RUNNER = STNormRunner
CFG.DATASET_CLS = BaseDataset
CFG.DATASET_NAME = "PEMS08"
CFG.DATASET_TYPE = 'Traffic flow'
CFG.GPU_NUM = 1
CFG.METRICS = {
    "MAE": masked_mae,
    "RMSE": masked_rmse,
    "MAPE": masked_mape
}

# ================= environment ================= #
CFG.ENV = EasyDict()
CFG.ENV.SEED = 1
CFG.ENV.CUDNN = EasyDict()
CFG.ENV.CUDNN.ENABLED = True

# ================= model ================= #
CFG.MODEL = EasyDict()
CFG.MODEL.NAME = 'STNorm'
CFG.MODEL.ARCH = STNorm
CFG.MODEL.PARAM = {
    "num_nodes" : 170,
    "tnorm_bool": True,
    "snorm_bool": True,
    "in_dim" : 2,
    "out_dim" : 12,
    "channels" : 32,
    "kernel_size": 2,
    "blocks" : 4,
    "layers" : 2,
}
# NOTE(review): "FROWARD" is a typo for "FORWARD", but the runner reads
# this exact key -- do not rename without updating the runner.
CFG.MODEL.FROWARD_FEATURES = [0, 1] # traffic speed, time in day
CFG.MODEL.TARGET_FEATURES = [0] # traffic speed

# ================= optim ================= #
CFG.TRAIN = EasyDict()
CFG.TRAIN.LOSS = masked_l1_loss
CFG.TRAIN.OPTIM = EasyDict()
CFG.TRAIN.OPTIM.TYPE = "Adam"
CFG.TRAIN.OPTIM.PARAM= {
    "lr":0.002,
    "weight_decay":0.0001,
}
CFG.TRAIN.LR_SCHEDULER = EasyDict()
CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
CFG.TRAIN.LR_SCHEDULER.PARAM= {
    "milestones":[1, 50],
    "gamma":0.5
}

# ================= train ================= #
CFG.TRAIN.CLIP = 5
CFG.TRAIN.NUM_EPOCHS = 100
CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
    'checkpoints',
    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
)
# train data
CFG.TRAIN.DATA = EasyDict()
CFG.TRAIN.NULL_VAL = 0.0
## read data
CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TRAIN.DATA.BATCH_SIZE = 64
CFG.TRAIN.DATA.PREFETCH = False
CFG.TRAIN.DATA.SHUFFLE = True
CFG.TRAIN.DATA.NUM_WORKERS = 2
CFG.TRAIN.DATA.PIN_MEMORY = False

# ================= validate ================= #
CFG.VAL = EasyDict()
CFG.VAL.INTERVAL = 1
# validating data
CFG.VAL.DATA = EasyDict()
## read data
CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.VAL.DATA.BATCH_SIZE = 64
CFG.VAL.DATA.PREFETCH = False
CFG.VAL.DATA.SHUFFLE = False
CFG.VAL.DATA.NUM_WORKERS = 2
CFG.VAL.DATA.PIN_MEMORY = False

# ================= test ================= #
CFG.TEST = EasyDict()
CFG.TEST.INTERVAL = 1
# testing data
CFG.TEST.DATA = EasyDict()
## read data
CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TEST.DATA.BATCH_SIZE = 64
CFG.TEST.DATA.PREFETCH = False
CFG.TEST.DATA.SHUFFLE = False
CFG.TEST.DATA.NUM_WORKERS = 2
CFG.TEST.DATA.PIN_MEMORY = False
acf2467d8080432009663c5648592fed5c9b6960 | 8,261 | py | Python | syd/syd_image.py | OpenSyd/syd | 0f7478c7dedb623ab955e906c103cb64a7abb4b3 | [
"Apache-2.0"
] | 4 | 2015-07-29T19:10:35.000Z | 2020-11-17T07:48:41.000Z | syd/syd_image.py | OpenSyd/syd | 0f7478c7dedb623ab955e906c103cb64a7abb4b3 | [
"Apache-2.0"
] | 9 | 2015-05-14T09:07:37.000Z | 2022-03-15T10:13:59.000Z | syd/syd_image.py | OpenSyd/syd | 0f7478c7dedb623ab955e906c103cb64a7abb4b3 | [
"Apache-2.0"
] | 3 | 2016-09-07T06:26:52.000Z | 2016-10-04T12:29:03.000Z | #!/usr/bin/env python3
import dataset
import syd
import pydicom
import os
import itk
import numpy as np
from .syd_db import *
import gatetools as gt
# -----------------------------------------------------------------------------
def create_image_table(db):
    '''
    Create the Image table and its delete trigger.

    The trigger deletes the associated mhd/raw File rows whenever an
    Image row is deleted.
    '''
    # other fields ?
    # image size dimension spacing etc ?
    # create the Image table (the schema uses backslash line continuations,
    # so the whole CREATE TABLE statement is a single string literal)
    q = 'CREATE TABLE Image (\
    id INTEGER PRIMARY KEY NOT NULL,\
    patient_id INTEGER NOT NULL,\
    injection_id INTEGER,\
    dicom_series_id INTEGER,\
    roi_id INTEGER,\
    acquisition_id INTEGER,\
    file_mhd_id INTEGER,\
    file_raw_id INTEGER,\
    pixel_type TEXT,\
    pixel_unit TEXT,\
    frame_of_reference_uid TEXT,\
    modality TEXT,\
    FOREIGN KEY(patient_id) REFERENCES Patient(id) on delete cascade,\
    FOREIGN KEY(injection_id) REFERENCES Injection(id) on delete cascade,\
    FOREIGN KEY(dicom_series_id) REFERENCES DicomSeries(id) on delete cascade,\
    FOREIGN KEY(roi_id) REFERENCES Roi(id) on delete cascade,\
    FOREIGN KEY(file_mhd_id) REFERENCES File(id) on delete cascade,\
    FOREIGN KEY(acquisition_id) REFERENCES Acquisition(id) on delete cascade,\
    FOREIGN KEY(file_raw_id) REFERENCES File(id) on delete cascade\
    )'
    result = db.query(q)
    image_table = db['image']
    # acquisition_date needs a typed column, added via the dataset API
    image_table.create_column('acquisition_date', db.types.datetime)
    # define trigger (raw cursor: the dataset API does not expose triggers)
    con = db.engine.connect()
    cur = con.connection.cursor()
    cur.execute('CREATE TRIGGER on_image_delete AFTER DELETE ON Image\
    BEGIN\
    DELETE FROM File WHERE id = OLD.file_mhd_id;\
    DELETE FROM File WHERE id = OLD.file_raw_id;\
    END;')
    con.close()
# -----------------------------------------------------------------------------
def build_image_folder(db, image):
    '''
    Return the file folder of an Image (currently just the patient name).
    '''
    patient = syd.find_one(db['Patient'], id=image['patient_id'])
    return patient['name']
# -----------------------------------------------------------------------------
def insert_image_from_dicom(db, dicom_series,dicom_path):
    '''
    Convert one Dicom series to an MHD image and insert it in the db.

    Returns the inserted Image record (or None when casting fails).
    '''
    # modality
    modality = dicom_series['modality']
    # Try guessing pixel unit from the dicom Units tag (0054,1001)
    try:
        ds = pydicom.read_file(dicom_path)
        unit = ds[0x0054,0x1001].value
        if unit == "BQML":
            pixel_unit = 'Bq/mL'
        else:
            pixel_unit=unit
    except:
        # fall back on modality-based defaults when the tag is absent
        pixel_unit = 'undefined'
        if modality == 'CT':
            pixel_unit = 'HU'
        if modality == 'NM':
            pixel_unit = 'counts'
        if modality == 'RTDOSE':
            pixel_unit = 'Gy'
    # get dicom files
    files = syd.get_dicom_series_files(db, dicom_series)
    if len(files) == 0:
        s = 'Error, no file associated with this dicom serie'
        syd.raise_except(s)
    # get folder
    folder = dicom_series['folder']
    folder = os.path.join(db.absolute_data_folder, folder)
    # NOTE(review): assumes the parent folder is named '<something>_<id>';
    # confirm that layout is guaranteed by the acquisition writer.
    acquisition_id = int(os.path.basename(os.path.dirname(folder)).split('_')[1])
    suid = dicom_series['series_uid']
    PixelType = itk.ctype('float')
    Dimension = 3
    # enumerate the dicom files of the series inside the folder
    namesGenerator = itk.GDCMSeriesFileNames.New()
    namesGenerator.SetUseSeriesDetails(False)
    namesGenerator.AddSeriesRestriction("0008|0021")
    namesGenerator.SetGlobalWarningDisplay(False)
    namesGenerator.SetDirectory(folder)
    seriesUID = namesGenerator.GetSeriesUIDs()
    fileNames = namesGenerator.GetFileNames(seriesUID[0])
    # read dicom image (multi-slice vs single-file 3D dicom)
    if len(fileNames) > 1:
        itk_image = gt.read_dicom(fileNames)
    else:
        itk_image = gt.read_3d_dicom(fileNames)
    # pixel_type (ignored)
    # pixel_type = image.GetPixelIDTypeAsString()
    # GetNumberOfComponentsPerPixel
    # convert: assume only 2 types, short for CT and float for everything else
    pixel_type = 'float'
    if modality == 'CT':
        pixel_type = 'signed_short'
        InputImageType = itk.Image[itk.F, Dimension]
        OutputImageType = itk.Image[itk.SS, Dimension]
        castImageFilter = itk.CastImageFilter[InputImageType, OutputImageType].New()
        castImageFilter.SetInput(itk_image)
        castImageFilter.Update()
        itk_image = castImageFilter.GetOutput()
    # else:
    #    pixel_type = 'float'
    #    try:
    #        itk_image = sitk.Cast(itk_image, sitk.sitkFloat32)
    #    except:
    #        s = 'Cannot cast image. Ignoring '+str(dicom_series)
    #        warning(s)
    #        return None
    # injection ?
    injid = None
    if 'injection_id' in dicom_series:
        injid = dicom_series.injection_id
    # create Image record mirroring the dicom series metadata
    #syd.update_nested_one(db, dicom_series)
    labels = ''
    if 'labels' in dicom_series:
        labels = dicom_series.labels
    dicom_study = syd.find_one(db['DicomStudy'], id=dicom_series.dicom_study_id)
    patient = syd.find_one(db['Patient'], id = dicom_study.patient_id)
    img = {
        'patient_id': patient.id,
        'injection_id': injid,
        'dicom_series_id': dicom_series.id,
        'acquisition_id': acquisition_id,
        'pixel_type': pixel_type,
        'pixel_unit': pixel_unit,
        'frame_of_reference_uid': dicom_series.frame_of_reference_uid,
        'modality': modality,
        'acquisition_date': dicom_series.acquisition_date,
        'labels': labels
    }
    # insert the image in the db
    img = syd.insert_new_image(db, img, itk_image)
    # write the mhd file
    p = syd.get_image_filename(db, img)
    itk.imwrite(itk_image, p)
    return img
# -----------------------------------------------------------------------------
def get_image_patient(db, image):
    '''
    Look up and return the Patient record that owns the given image.

    db    : syd database handle
    image : image record (dict-like) with a 'patient_id' field
    '''
    return syd.find_one(db['Patient'], id=image['patient_id'])
# -----------------------------------------------------------------------------
def get_image_filename(db, image):
    '''
    Return the absolute path of the .mhd file attached to the image.

    Resolves the image's 'file_mhd_id' to a File record and converts it to
    an absolute filename.
    '''
    mhd_record = syd.find_one(db['File'], id=image['file_mhd_id'])
    return get_file_absolute_filename(db, mhd_record)
# -----------------------------------------------------------------------------
def read_itk_image(db, image):
    '''
    Resolve the filename of the given image record and load it with ITK.
    Returns the loaded itk image object.
    '''
    filename = get_image_filename(db, image)
    return itk.imread(filename)
# -----------------------------------------------------------------------------
def insert_new_image(db, img, itk_image):
    '''
    Create a new Image record in the database; the pixel data itself is NOT
    written here (itk_image is only used by the caller).  The caller is
    expected to write the file afterwards:
        p = syd.get_image_filename(db, img)
        itk.imwrite(itk_image, p)

    img       : dict describing the image (modality, patient_id, ...)
    itk_image : itk image object (unused here; kept for API symmetry)
    returns the img dict updated with 'id', 'file_mhd_id' and 'file_raw_id'
    '''
    # set the id to None to force a new image
    img['id'] = None
    # insert Image to get the id
    img = syd.insert_one(db['Image'], img)
    # create file mhd/raw
    folder = build_image_folder(db, img)
    if not os.path.exists(os.path.join(db.absolute_data_folder, folder)):
        os.makedirs(os.path.join(db.absolute_data_folder, folder))
    modality = img['modality']
    # NOTE(review): `id` shadows the `id` builtin; harmless locally but
    # worth renaming if this function is ever reworked.
    id = img['id']
    # Register the two sidecar files (MHD header + RAW pixel data) in the db.
    file_mhd = syd.new_file(db, folder, str(id) + '_' + modality + '.mhd')
    file_raw = syd.new_file(db, folder, str(id) + '_' + modality + '.raw')
    # FIXME check and set image_type
    # update files in img
    img['file_mhd_id'] = file_mhd['id']
    img['file_raw_id'] = file_raw['id']
    syd.update_one(db['Image'], img)
    return img
# -----------------------------------------------------------------------------
def insert_write_new_image(db, image, itk_image, tags=None):
    '''
    Create a new Image record in the database and WRITE the itk_image to
    its .mhd/.raw files (image['id'] will be changed).

    db        : syd database handle
    image     : dict describing the image (see insert_new_image)
    itk_image : the ITK image to store on disk
    tags      : optional list of tags to attach to the image record
    returns the inserted image record
    '''
    # BUGFIX: 'tags=None' replaces the mutable default argument ([]), which
    # is shared between calls in Python and a classic source of bugs.
    # Behavior is unchanged: an empty/absent tag list adds no tags.
    if tags:
        syd.add_tags(image, tags)
    image = syd.insert_new_image(db, image, itk_image)
    p = syd.get_image_filename(db, image)
    itk.imwrite(itk_image, p)
    return image
| 30.371324 | 84 | 0.599685 |
acf246b29add81b3b6b42e79472c2191837f9072 | 2,868 | py | Python | tests/test_layers/test_pooling.py | abiricz/spektral | 9bacc1f014c599c67523fdc55974175904066453 | [
"MIT"
] | null | null | null | tests/test_layers/test_pooling.py | abiricz/spektral | 9bacc1f014c599c67523fdc55974175904066453 | [
"MIT"
] | null | null | null | tests/test_layers/test_pooling.py | abiricz/spektral | 9bacc1f014c599c67523fdc55974175904066453 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from keras import Input, Model
from keras import backend as K
from spektral.layers import TopKPool
import scipy.sparse as sp
# Shared Keras/TensorFlow session used to evaluate model outputs below.
sess = K.get_session()
batch_size = 3
# Test graph sizes: three disjoint graphs of N1/N2/N3 nodes, N nodes total,
# F features per node.
N1, N2, N3 = 4, 5, 2
N = N1 + N2 + N3
F = 7
def _check_output_and_model_output_shapes(true_shape, model_shape):
assert len(true_shape) == len(model_shape)
for i in range(len(true_shape)):
assert len(true_shape[i]) == len(model_shape[i])
for j in range(len(true_shape[i])):
assert model_shape[i][j] in {true_shape[i][j], None}
def _check_number_of_nodes(N_pool_expected, N_pool_true):
if N_pool_expected is not None:
assert N_pool_expected == N_pool_true or N_pool_true is None
def _test_single_mode(layer, **kwargs):
    """Build and run `layer` on one dense N-node graph (single mode) and
    check the pooled output shapes against ceil(ratio * N)."""
    A = np.ones((N, N))
    X = np.random.normal(size=(N, F))
    A_in = Input(shape=(None, ))
    X_in = Input(shape=(F,))
    layer_instance = layer(**kwargs)
    output = layer_instance([X_in, A_in])
    model = Model([X_in, A_in], output)
    sess.run(tf.global_variables_initializer())
    # Evaluate the symbolic outputs with concrete X/A fed in.
    output = sess.run(model.output, feed_dict={X_in: X, A_in: A})
    X_pool, A_pool, mask = output
    # TopK-style pooling keeps ceil(ratio * N) nodes.
    N_pool_expected = np.ceil(kwargs['ratio'] * N)
    N_pool_true = A_pool.shape[0]
    _check_number_of_nodes(N_pool_expected, N_pool_true)
    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)
    # The model's static shapes may use None for dynamic dimensions.
    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
def _test_graph_mode(layer, **kwargs):
    """Build and run `layer` on three disjoint graphs stacked in one
    block-diagonal adjacency matrix (graph/disjoint mode) and check shapes."""
    A = sp.block_diag([np.ones((N1, N1)), np.ones((N2, N2)), np.ones((N3, N3))]).todense()
    X = np.random.normal(size=(N, F))
    # I maps every node to its graph index (0, 1 or 2).
    I = np.array([0] * N1 + [1] * N2 + [2] * N3).astype(int)
    A_in = Input(shape=(None, ))
    X_in = Input(shape=(F,))
    I_in = Input(shape=(), dtype=tf.int32)
    layer_instance = layer(**kwargs)
    output = layer_instance([X_in, A_in, I_in])
    model = Model([X_in, A_in, I_in], output)
    sess.run(tf.global_variables_initializer())
    output = sess.run(model.output, feed_dict={X_in: X, A_in: A, I_in: I})
    X_pool, A_pool, I_pool, mask = output
    # Pooling is applied per graph, so the expected total is the sum of the
    # per-graph ceil(ratio * Ni) counts.
    N_pool_expected = np.ceil(kwargs['ratio'] * N1) + \
                      np.ceil(kwargs['ratio'] * N2) + \
                      np.ceil(kwargs['ratio'] * N3)
    N_pool_true = A_pool.shape[0]
    _check_number_of_nodes(N_pool_expected, N_pool_true)
    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)
    assert I_pool.shape == (N_pool_expected, )
    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
def test_top_k_pool():
    # Exercise TopKPool in both single mode (one dense graph) and disjoint
    # graph mode (three graphs in a block-diagonal adjacency matrix).
    _test_single_mode(TopKPool, ratio=0.5, return_mask=True)
    _test_graph_mode(TopKPool, ratio=0.5, return_mask=True)
| 31.516484 | 90 | 0.671199 |
acf24780db1f5f3dccd4cd133c92a19bc3961406 | 3,321 | py | Python | coda/coda_mdstore/tests/test_urls.py | unt-libraries/coda | 959abc220f49bb9f5b19e13b61714b8be0615412 | [
"BSD-3-Clause"
] | 2 | 2015-06-30T04:20:47.000Z | 2021-03-31T16:35:35.000Z | coda/coda_mdstore/tests/test_urls.py | unt-libraries/coda | 959abc220f49bb9f5b19e13b61714b8be0615412 | [
"BSD-3-Clause"
] | 141 | 2015-06-16T20:33:49.000Z | 2021-07-12T16:35:06.000Z | coda/coda_mdstore/tests/test_urls.py | unt-libraries/coda | 959abc220f49bb9f5b19e13b61714b8be0615412 | [
"BSD-3-Clause"
] | 1 | 2017-01-23T19:17:29.000Z | 2017-01-23T19:17:29.000Z | from django.contrib import sitemaps
from django.urls import resolve
from django.conf import settings
import pytest
from coda_mdstore import resourcesync
from coda_mdstore import views
# Each test below resolves a URL path through the Django URLconf and asserts
# that it routes to the expected coda_mdstore view function.  Paths that
# embed an ARK identifier build it from the configured NAAN.
def test_index():
    assert resolve('/').func == views.index
def test_all_bags():
    assert resolve('/bag/').func == views.all_bags
def test_app_bag_no_parameters():
    assert resolve('/APP/bag/').func == views.app_bag
def test_app_with_parameters():
    assert resolve('/APP/bag/ark:/%d/coda2/' % settings.ARK_NAAN).func == views.app_bag
def test_bagHTML():
    assert resolve('/bag/ark:/%d/coda2/' % settings.ARK_NAAN).func == views.bagHTML
def test_bagURLList():
    assert resolve('/bag/ark:/%d/coda2.urls' % settings.ARK_NAAN).func == views.bagURLList
def test_bag_zip_download():
    assert resolve('/bag/ark:/%d/coda2.zip' % settings.ARK_NAAN).func == views.bagDownload
def test_bag_links():
    assert resolve('/bag/ark:/%d/coda2/links/' % settings.ARK_NAAN).func == views.bagURLLinks
def test_bagProxy():
    assert resolve('/bag/ark:/%d/foo/bar' % settings.ARK_NAAN).func == views.bagProxy
def test_stats():
    assert resolve('/stats/').func == views.stats
def test_json_stats():
    assert resolve('/stats.json').func == views.json_stats
def test_app_node():
    assert resolve('/APP/node/').func == views.app_node
def test_app_node_with_identifier():
    assert resolve('/APP/node/coda-123/').func == views.app_node
def test_showNodeStatus():
    assert resolve('/node/').func == views.showNodeStatus
def test_showNodeStatus_with_identifier():
    assert resolve('/node/coda-123/').func == views.showNodeStatus
def test_externalIdentifierSearch_with_identifier():
    url = resolve('/extidentifier/test_value/')
    assert url.func == views.externalIdentifierSearch
def test_externalIdentifierSearch():
    url = resolve('/extidentifier/')
    assert url.func == views.externalIdentifierSearch
def test_externalIdentifierSearchJSON():
    url = resolve('/extidentifier.json')
    assert url.func == views.externalIdentifierSearchJSON
# URL-routing checks for the search/about/robots/feed pages plus the
# ResourceSync sitemap endpoints.  Tests marked @pytest.mark.django_db also
# perform a real GET through the Django test client, which requires db access.
def test_bagFullTextSearchHTML():
    url = resolve('/search/')
    assert url.func == views.bagFullTextSearchHTML
def test_about():
    url = resolve('/about/')
    assert url.func == views.about
def test_robots():
    url = resolve('/robots.txt')
    assert url.func == views.shooRobot
def test_feed():
    # The feed is a class-based view instance, so compare the class.
    assert resolve('/feed/').func.__class__ == views.AtomSiteNewsFeed
@pytest.mark.django_db
def test_resourceindex(client):
    assert resolve('/resourceindex.xml').func == sitemaps.views.index
    # Verify correct arguments are being passed in urls.py.
    assert client.get('/resourceindex.xml').status_code == 200
@pytest.mark.django_db
def test_resourcelist_section(client):
    assert resolve('/resourcelist-001.xml').func == sitemaps.views.sitemap
    # Verify correct arguments are being passed in urls.py.
    assert client.get('/resourcelist-001.xml').status_code == 200
@pytest.mark.django_db
def test_changelist(client):
    assert resolve('/changelist.xml').func == resourcesync.changelist
    # Verify correct arguments are being passed in urls.py.
    assert client.get('/changelist.xml').status_code == 200
def test_capabilitylist():
    assert resolve('/capabilitylist.xml').func == resourcesync.capabilitylist
| 25.945313 | 93 | 0.726889 |
acf247a159b16e7c6d57ad5cba97ab468dacea56 | 13,944 | py | Python | examples/train_battle.py | yc005/MAgent-lc | 47ab22fbc919650b6a6516ba7b96813d1802e07b | [
"MIT"
] | 1 | 2020-04-21T16:35:47.000Z | 2020-04-21T16:35:47.000Z | examples/train_battle.py | yc005/MAgent | 47ab22fbc919650b6a6516ba7b96813d1802e07b | [
"MIT"
] | null | null | null | examples/train_battle.py | yc005/MAgent | 47ab22fbc919650b6a6516ba7b96813d1802e07b | [
"MIT"
] | null | null | null | # v1.5.2
# A2C vs COMA
# right is COMA, left is A2C
"""
Train battle, two models in two processes
"""
import argparse
import time
import logging as log
import math
import os
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
# # '2' shows only warnings and errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
# '3' shows only errors
import numpy as np
import magent
from magent.builtin.tf_model import AdvantageActorCritic
from magent.builtin.tf_model import Random
from magent.builtin.tf_model import DeepQNetwork
def load_config(map_size):
    """Build the magent GridWorld config for a map_size x map_size battle:
    two armies (left/right), each made of a 'troops' group and a 'tanks'
    group, with reward shaping that encourages attacking the enemy and
    penalizes friendly fire."""
    gw = magent.gridworld
    cfg = gw.Config()
    cfg.set({"map_width": map_size, "map_height": map_size})
    cfg.set({"minimap_mode": True})
    cfg.set({"embedding_size": 10})
    # Troops: small, fast, weak units with a short attack range.
    troops = cfg.register_agent_type(
        "troops",
        {'width': 1, 'length': 1, 'hp': 10, 'speed': 2,
         'view_range': gw.CircleRange(6), 'attack_range': gw.CircleRange(1.5),
         'damage': 2, 'step_recover': 0.1,
         'step_reward': -0.005, 'kill_reward': 0, 'dead_penalty': -0.4, 'attack_penalty': -0.05,
         })
    # Tanks: large, slow, tough units that hit harder and farther.
    tanks = cfg.register_agent_type(
        "tanks",
        {'width': 2, 'length': 2, 'hp': 40, 'speed': 1.5,
         'view_range': gw.CircleRange(6), 'attack_range': gw.CircleRange(3),
         'damage': 7, 'step_recover': 0.1,
         'step_reward': -0.005, 'kill_reward': 0, 'dead_penalty': -1.6, 'attack_penalty': -0.1,
         })
    # Group registration order fixes the handle indices used elsewhere
    # (r_troopsID=0, r_tanksID=1, l_troopsID=2, l_tanksID=3).
    rtroops = cfg.add_group(troops)
    rtanks = cfg.add_group(tanks)
    ltroops = cfg.add_group(troops)
    ltanks = cfg.add_group(tanks)
    r_troops = gw.AgentSymbol(rtroops, index='any')
    r_tanks = gw.AgentSymbol(rtanks, index='any')
    l_troops = gw.AgentSymbol(ltroops, index='any')
    l_tanks = gw.AgentSymbol(ltanks, index='any')
    # reward shaping to encourage attack
    attack_reward = 1
    # Positive reward for attacking any enemy unit (all cross-army pairings).
    cfg.add_reward_rule(gw.Event(l_troops, 'attack', r_troops), receiver=l_troops, value=attack_reward)
    cfg.add_reward_rule(gw.Event(r_troops, 'attack', l_troops), receiver=r_troops, value=attack_reward)
    cfg.add_reward_rule(gw.Event(l_tanks, 'attack', r_tanks), receiver=l_tanks, value=attack_reward)
    cfg.add_reward_rule(gw.Event(r_tanks, 'attack', l_tanks), receiver=r_tanks, value=attack_reward)
    cfg.add_reward_rule(gw.Event(l_troops, 'attack', r_tanks), receiver=l_troops, value=attack_reward)
    cfg.add_reward_rule(gw.Event(r_tanks, 'attack', l_troops), receiver=r_tanks, value=attack_reward)
    cfg.add_reward_rule(gw.Event(l_tanks, 'attack', r_troops), receiver=l_tanks, value=attack_reward)
    cfg.add_reward_rule(gw.Event(r_troops, 'attack', l_tanks), receiver=r_troops, value=attack_reward)
    # Heavy penalty for friendly fire within each army.
    cfg.add_reward_rule(gw.Event(l_troops, 'attack', l_tanks), receiver=l_troops, value=-10)
    cfg.add_reward_rule(gw.Event(l_tanks, 'attack', l_troops), receiver=l_tanks, value=-10)
    cfg.add_reward_rule(gw.Event(r_tanks, 'attack', r_troops), receiver=r_tanks, value=-10)
    cfg.add_reward_rule(gw.Event(r_troops, 'attack', r_tanks), receiver=r_troops, value=-10)
    return cfg
# Indices of the four agent groups in the `handles` list returned by
# env.get_handles(); the order matches the add_group calls in load_config.
r_troopsID = 0
r_tanksID = 1
l_troopsID = 2
l_tanksID = 3
def generate_map(env, map_size, handles):
    """ generate a map, which consists of two squares of agents

    Two mirrored squares are placed `gap` cells left and right of the map
    center.  Within each square, positions lie on a 2-cell grid and every
    6th position holds a tank (the rest hold troops).

    env      : magent GridWorld environment
    map_size : side length of the square map
    handles  : group handles ordered (r_troops, r_tanks, l_troops, l_tanks)
    """
    width = height = map_size
    init_num = map_size * map_size * 0.02
    gap = 3
    # Side length (in cells) of each army's square.
    side = int(math.sqrt(init_num)) * 2

    def _fill_square(x_start, troops_handle, tanks_handle):
        # Build troop/tank positions for one square and add them to the env.
        # This replaces the original copy-pasted left/right blocks.
        pos_troops = []
        pos_tanks = []
        pos_flag = 0
        for x in range(x_start, x_start + side, 2):
            for y in range((height - side) // 2, (height - side) // 2 + side, 2):
                # pos_flag % 6 == 0 marks the tank slots (1 tank per 6 cells).
                if pos_flag % 6 != 0:
                    pos_troops.append([x, y, 0])
                else:
                    pos_tanks.append([x, y, 0])
                pos_flag += 1
        env.add_agents(troops_handle, method="custom", pos=pos_troops)
        env.add_agents(tanks_handle, method="custom", pos=pos_tanks)

    # right army
    _fill_square(width // 2 + gap, handles[r_troopsID], handles[r_tanksID])
    # left army
    _fill_square(width // 2 - gap - side, handles[l_troopsID], handles[l_tanksID])
def play_a_round(env, map_size, handles, models, rlmodels, print_every, train=True, render=False, eps=None):
    """Play one episode and (optionally) train every model.

    env         : magent GridWorld environment
    map_size    : side length of the square map
    handles     : group handles (r_troops, r_tanks, l_troops, l_tanks)
    models      : one ProcessingModel per group
    rlmodels    : the RL algorithm class behind each model; used to pick the
                  asynchronous (DeepQNetwork) vs synchronous training path
    print_every : print sampling statistics every this many steps
    train       : when True, collect samples and run a training pass
    render      : when True, dump render frames for this episode
    eps         : exploration rate for the e-greedy policy
    returns (total_loss, nums, total_reward, value), lists rounded to 2 dp
    """
    env.reset()
    generate_map(env, map_size, handles)

    step_ct = 0
    done = False

    n = len(handles)
    obs = [[] for _ in range(n)]
    ids = [[] for _ in range(n)]
    acts = [[] for _ in range(n)]
    nums = [env.get_num(handle) for handle in handles]
    total_reward = [0 for _ in range(n)]

    print("===== sample =====")
    print("eps %.2f number %s" % (eps, nums))
    start_time = time.time()
    while not done:
        # take actions for every model
        for i in range(n):
            obs[i] = env.get_observation(handles[i])
            ids[i] = env.get_agent_id(handles[i])
            # let models infer action in parallel (non-blocking)
            models[i].infer_action(obs[i], ids[i], 'e_greedy', eps, block=False)
        for i in range(n):
            acts[i] = models[i].fetch_action()  # fetch actions (blocking)
            env.set_action(handles[i], acts[i])

        # simulate one step
        done = env.step()

        # sample rewards (and alive flags when training)
        step_reward = []
        for i in range(n):
            rewards = env.get_reward(handles[i])
            if train:
                alives = env.get_alive(handles[i])
                # store samples in replay buffer (non-blocking)
                models[i].sample_step(rewards, alives, block=False)
            s = sum(rewards)
            step_reward.append(s)
            total_reward[i] += s

        # render
        if render:
            env.render()

        # stat info
        nums = [env.get_num(handle) for handle in handles]

        # clear dead agents
        env.clear_dead()

        # check return message of previous called non-blocking sample_step()
        # BUGFIX: this used to read the module-level `args.train`; use the
        # `train` parameter so the function no longer depends on global state.
        if train:
            for model in models:
                model.check_done()

        if step_ct % print_every == 0:
            print("step %3d, nums: %s reward: %s, total_reward: %s " %
                  (step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))
        step_ct += 1
        # Hard cap on the episode length to avoid endless stalemates.
        if step_ct > 550:
            break

    sample_time = time.time() - start_time
    print("steps: %d, total time: %.2f, step average %.2f" % (step_ct, sample_time, sample_time / step_ct))

    # train
    total_loss, value = [0 for _ in range(n)], [0 for _ in range(n)]
    if train:
        print("===== train =====")
        start_time = time.time()
        # Dispatch on the algorithm class:
        #   DeepQNetwork     -> non-blocking train now, fetch results below
        #   Random           -> nothing to train
        #   others (A2C/COMA)-> synchronous train call
        # BUGFIX: the original used two independent `if` statements, so a
        # DeepQNetwork model (which is != Random) also fell into the `else`
        # branch and was trained a second time, synchronously.  `elif`
        # restores the intended one-path-per-model dispatch.
        for i in range(n):
            if rlmodels[i] == DeepQNetwork:
                models[i].train(print_every=1000, block=False)
            elif rlmodels[i] == Random:
                pass
            else:
                total_loss_list, value[i] = models[i].train(1000)
                total_loss[i] = total_loss_list[1]
        for i in range(n):
            if rlmodels[i] == DeepQNetwork:
                total_loss[i], value[i] = models[i].fetch_train()
        train_time = time.time() - start_time
        print("train_time %.2f" % train_time)

    def round_list(l): return [round(x, 2) for x in l]
    return round_list(total_loss), nums, round_list(total_reward), round_list(value)
if __name__ == "__main__":
    # ---- command-line arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_every", type=int, default=5)
    parser.add_argument("--render_every", type=int, default=10)
    parser.add_argument("--n_round", type=int, default=2000)
    parser.add_argument("--render", action="store_true")
    parser.add_argument("--load_from", type=int)
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--map_size", type=int, default=125)
    parser.add_argument("--greedy", action="store_true")
    parser.add_argument("--name", type=str, default="battle")
    parser.add_argument("--eval", action="store_true")
    parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])
    args = parser.parse_args()
    # set logger
    magent.utility.init_logger(args.name)
    # init the game
    env = magent.GridWorld(load_config(args.map_size))
    env.set_render_dir("build/render")
    # two groups of agents
    handles = env.get_handles()
    # sample eval observation set
    eval_obs = [None for _ in range(len(handles))]
    if args.eval:
        print("sample eval set...")
        env.reset()
        generate_map(env, args.map_size, handles)
        for i in range(len(handles)):
            eval_obs[i] = magent.utility.sample_observation(env, handles, 2048, 500)
    # load models
    # Hyper-parameters; note only base_args below is actually used after the
    # --alg dispatch was commented out (see the dead block that follows).
    batch_size = 256
    unroll_step = 8
    target_update = 1200
    train_freq = 5
    # if args.alg == 'dqn':
    #     from magent.builtin.tf_model import DeepQNetwork
    #     RLModel = DeepQNetwork
    #     # RLModels.append(RLModel)
    #     base_args = {'batch_size': batch_size,
    #                  'memory_size': 2 ** 20, 'learning_rate': 1e-4,
    #                  'target_update': target_update, 'train_freq': train_freq}
    # elif args.alg == 'drqn':
    #     from magent.builtin.tf_model import DeepRecurrentQNetwork
    #     RLModel = DeepRecurrentQNetwork
    #     # RLModels.append(RLModel)
    #     base_args = {'batch_size': batch_size / unroll_step, 'unroll_step': unroll_step,
    #                  'memory_size': 8 * 625, 'learning_rate': 1e-4,
    #                  'target_update': target_update, 'train_freq': train_freq}
    # elif args.alg == 'a2c':
    #     # see train_against.py to know how to use a2c
    #     from magent.builtin.tf_model import AdvantageActorCritic
    #     RLModel = AdvantageActorCritic
    #     # RLModels.append(RLModel)
    #     step_batch_size = 10 * args.map_size * args.map_size * 0.04
    #     base_args = {'learning_rate': 1e-4}
    #
    # RLModels = [AdvantageActorCritic, AdvantageActorCritic, AdvantageActorCritic, AdvantageActorCritic]
    # init models
    # One model process per group: right army (first half) uses COMA, left
    # army (second half) uses A2C — this matches the header comment.
    names = [args.name + "-r0", args.name + "-r1", args.name + "-l0", args.name + "-l1"]
    models = []
    RLModels = []
    for i in range(len(names)):
        model_args = {'eval_obs': eval_obs[i]}
        if i < len(names) / 2:
            # from magent.builtin.tf_model import AdvantageActorCritic
            #
            # RLModel = AdvantageActorCritic
            from magent.builtin.tf_model import COMA
            RLModel = COMA
            step_batch_size = 10 * args.map_size * args.map_size * 0.04
            base_args = {'learning_rate': 1e-4}
            RLModels.append(RLModel)
            model_args.update(base_args)
            models.append(magent.ProcessingModel(env, handles[i], names[i], 20000+i, 1000, RLModel, **model_args))
        else:
            from magent.builtin.tf_model import AdvantageActorCritic
            RLModel = AdvantageActorCritic
            step_batch_size = 10 * args.map_size * args.map_size * 0.04
            base_args = {'learning_rate': 1e-4}
            RLModels.append(RLModel)
            model_args.update(base_args)
            models.append(magent.ProcessingModel(env, handles[i], names[i], 20000+i, 1000, RLModel, **model_args))
    # load if
    # Resume from a checkpoint round when --load_from is given.
    savedir = 'save_model'
    if args.load_from is not None:
        start_from = args.load_from
        print("load ... %d" % start_from)
        for idx, model in enumerate(models):
            if RLModels[idx] != Random:
                model.load(savedir, start_from)
    else:
        start_from = 0
    # print state info
    print(args)
    print("view_space", env.get_view_space(handles[0]))
    print("feature_space", env.get_feature_space(handles[0]))
    # play
    start = time.time()
    for k in range(start_from, start_from + args.n_round):
        tic = time.time()
        # Exploration rate decays piecewise over rounds unless --greedy.
        eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) if not args.greedy else 0
        loss, num, reward, value = play_a_round(env, args.map_size, handles, models, RLModels,
                                                train=args.train, print_every=50,
                                                render=args.render or (k+1) % args.render_every == 0,
                                                eps=eps)  # for e-greedy
        log.info("round %d\t loss: %s\t num: %s\t reward: %s\t value: %s" % (k, loss, num, reward, value))
        print("round time %.2f total time %.2f\n" % (time.time() - tic, time.time() - start))
        # save models
        if (k + 1) % args.save_every == 0 and args.train:
            print("save model... ")
            for idx, model in enumerate(models):
                if RLModels[idx] != Random:
                    model.save(savedir, k)
    # send quit command
    for model in models:
        model.quit()
| 36.791557 | 114 | 0.597103 |
acf247f3953dbfb2c12c9c14f17b7051817a2113 | 3,217 | py | Python | Demonetization.py | soumendrak/demonetization | e30a148ff2827485f77001594059b619940edefc | [
"Apache-2.0"
] | null | null | null | Demonetization.py | soumendrak/demonetization | e30a148ff2827485f77001594059b619940edefc | [
"Apache-2.0"
] | null | null | null | Demonetization.py | soumendrak/demonetization | e30a148ff2827485f77001594059b619940edefc | [
"Apache-2.0"
] | null | null | null | """
Created by Soumendra Kumar Sahoo
Date: 26th November 2016
Function: This program will calculate the overall sentiment of public
on the demonetization issue by fetching data from twitter
Future plans:
1. Data extraction from twitter functionality will be added
2. Visualization of the sentiments using seaborn/matplotlib module
3. Performance improvement
4. Converting it to Unsupervised learning
"""
import csv
import re
from nltk.tokenize import word_tokenize
import math
# AFINN-111 is as of June 2011 the most recent version of AFINN
# filenameAFINN = 'AFINN/AFINN-111.txt'
# Load the AFINN sentiment lexicon: one "word<TAB>score" pair per line,
# mapped into a dict of word -> integer valence score.
afinn = {}
with open('AFINN/sentiments.txt') as SentimentFile:
    for row in SentimentFile:
        afinn[row.split('\t')[0]] = int(row.split('\t')[1].strip())
# Regex fragments for a tweet-aware tokenizer; emoticons are matched first
# so they survive intact (used by preprocess() to skip lowercasing them).
emoticons_str = r'(?:[:=;][oO\-]? [D\)\]\(\]/\\OpP])'
regex_str = [
    emoticons_str,
    r'<[^>]+>', # HTML tags
    r'(?:@[\w_]+)', # @-mentions
    r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
    # URLs
    r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+',
    r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
    r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
    r'(?:[\w_]+)', # other words
    r'(?:\S)' # anything else
]
# tokens_re matches any token; emoticon_re tests whole-string emoticons.
tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')',
                       re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^' + emoticons_str + '$',
                         re.VERBOSE | re.IGNORECASE)
def sentiment(words):
    """
    Return a float sentiment strength for the given token list.

    Positive values indicate positive valence, negative values negative
    valence.  Each word is scored via the AFINN lexicon (0 if unknown) and
    the sum is normalised by sqrt(N) to damp the effect of text length.
    """
    # BUGFIX: materialise the scores as a list.  Under Python 3, map()
    # returns a one-shot iterator, so the original code would always take
    # the truthy branch, exhaust the iterator in sum(), and then crash on
    # len(); a list comprehension behaves identically on Python 2 and 3.
    sentiments = [afinn.get(word, 0) for word in words]
    if sentiments:
        # How should you weight the individual word sentiments?
        # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)
        sntmnt = float(sum(sentiments)) / math.sqrt(len(sentiments))
    else:
        sntmnt = 0
    return sntmnt
def tokenize(s):
    # Tokenize with NLTK's word_tokenize; the hand-rolled regex tokenizer
    # (tokens_re, defined above) is kept for reference but no longer used.
    # return tokens_re.findall(s)
    return word_tokenize(s)
def preprocess(s, lowercase=False):
    """Tokenize *s*; when `lowercase` is set, lowercase every token except
    emoticons (which are case-sensitive, e.g. ':D')."""
    tokens = tokenize(s)
    if not lowercase:
        return tokens
    lowered = []
    for tok in tokens:
        lowered.append(tok if emoticon_re.search(tok) else tok.lower())
    return lowered
def filereader(total=0):
    """
    Read the tweets CSV and accumulate the sentiment of every tweet.

    :param total: starting value of the accumulator (defaults to 0)
    :return: the summed sentiment score over all readable tweets
    """
    with open('demonetization-tweets-Test.csv') as csvfile:
        for row in csv.DictReader(csvfile):
            try:
                total += sentiment(preprocess(row['text']))
            except UnicodeDecodeError:
                # Some tweets contain characters Python cannot handle;
                # skip those rows rather than aborting the whole run.
                continue
    return total
def main():
    """
    Entry point: sum the sentiment of all tweets in the CSV via
    filereader() and report whether the overall sentiment is positive
    or negative.

    NOTE(review): this module uses Python 2 print statements and will not
    run unmodified under Python 3.
    :return: None
    """
    Total = filereader()
    if Total > 0:
        print "Positive sentiments"
    else:
        print "Negative sentiments"
if __name__ == "__main__":
    main()
| 30.065421 | 82 | 0.575692 |
acf24801ffea645483bf4c0d9a76c8df1d88112b | 500 | py | Python | docs/source/gettingstarted_lambda_main.py | OrquestraCD/yolo | a49adf8e652240d8acc80f062ff50bf72bc8a339 | [
"Apache-2.0"
] | 11 | 2017-09-21T15:35:33.000Z | 2019-11-12T03:30:48.000Z | docs/source/gettingstarted_lambda_main.py | OrquestraCD/yolo | a49adf8e652240d8acc80f062ff50bf72bc8a339 | [
"Apache-2.0"
] | 34 | 2017-09-25T13:39:56.000Z | 2018-08-17T14:55:44.000Z | docs/source/gettingstarted_lambda_main.py | OrquestraCD/yolo | a49adf8e652240d8acc80f062ff50bf72bc8a339 | [
"Apache-2.0"
] | 12 | 2017-09-20T19:15:08.000Z | 2020-04-23T10:28:30.000Z | from myapplication import api
def lambda_handler(event, context):
    """Main entry point of Lambda function.

    Thin wrapper that delegates all processing to
    ``myapplication.api.process_event``.

    :param dict event:
        Dictionary containing the entire request template. This can vary wildly
        depending on the template structure and contents.
    :param context:
        Instance of an AWS Lambda Python Context object, as described on
        http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html.
    :returns:
        Whatever ``api.process_event`` returns, passed through unchanged
        as the Lambda invocation result.
    """
    return api.process_event(event, context)
| 33.333333 | 79 | 0.716 |
acf248a20c1f076727e31e699cd71aa06a0ff36a | 1,506 | py | Python | eappacket.py | 719733328/drcom | d94811e755bd4ecabb2371ee2c2be0c9f53ba08b | [
"MIT"
] | 1 | 2017-12-20T03:17:51.000Z | 2017-12-20T03:17:51.000Z | eappacket.py | 719733328/drcom | d94811e755bd4ecabb2371ee2c2be0c9f53ba08b | [
"MIT"
] | null | null | null | eappacket.py | 719733328/drcom | d94811e755bd4ecabb2371ee2c2be0c9f53ba08b | [
"MIT"
] | null | null | null | #coding=utf-8
from struct import *
from zlib import crc32
## Constants
# Reference: http://tools.ietf.org/html/rfc3748
# NOTE: the MAC address constants below are Python 2 byte strings; under
# Python 3 they would need to be bytes literals (b"...").
ETHERTYPE_PAE = 0x888e
PAE_GROUP_ADDR = "\x01\x80\xc2\x00\x00\x03" # same for all
BROADCAST_ADDR = "\xff\xff\xff\xff\xff\xff"
EAPOL_VERSION = 1
EAPOL_EAPPACKET = 0
# packet info for EAPOL_EAPPACKET
EAPOL_START = 1
EAPOL_LOGOFF = 2
EAPOL_KEY = 3
EAPOL_ASF = 4
# EAP codes (RFC 3748 section 4).
EAP_REQUEST = 1
EAP_RESPONSE = 2
EAP_SUCCESS = 3
EAP_FAILURE = 4
# packet info followed by EAP_RESPONSE
# 1       Identity
# 2       Notification
# 3       Nak (Response only)
# 4       MD5-Challenge
# 5       One Time Password (OTP)
# 6       Generic Token Card (GTC)
# 254     Expanded Types
# 255     Experimental use
EAP_TYPE_ID = 1 # identity
EAP_TYPE_MD5 = 4 # md5 Challenge
### Packet builders
def get_crc32(data):
    """Return the CRC-32 of *data* packed as a 4-byte big-endian integer."""
    checksum = crc32(data)
    return pack("!i", checksum)
def get_EAPOL(type, payload=""):
    """Build an EAPOL frame: version, packet type, payload length, payload."""
    header = pack("!BBH", EAPOL_VERSION, type, len(payload))
    return header + payload
def get_EAP(code, id, type=0, data=""):
    """Build an EAP packet: code, identifier, length, then type + data.

    Success/Failure packets are a fixed 4-byte header with no type/data;
    all other codes carry a 5-byte header followed by the data bytes.
    """
    if code in (EAP_SUCCESS, EAP_FAILURE):
        return pack("!BBH", code, id, 4)
    length = 5 + len(data)
    return pack("!BBHB", code, id, length, type) + data
def get_ethernet_header(src, dst, type):
    """Build an Ethernet header: destination MAC, source MAC, EtherType."""
    ethertype = pack("!H", type)
    return dst + src + ethertype
def get_identity_data(login_info, _ = []):
    # The mutable default argument `_` is deliberately (ab)used as
    # call-level state: it is empty only on the very first call.  The first
    # EAP identity response therefore sends the plain username, and every
    # subsequent one sends the username with its last character shifted by
    # +3.  Callers must not pass a second argument.
    if not _:
        _.append(True)
        return login_info['username']
    return login_info['username'][:-1] + chr(ord(login_info['username'][-1]) + 3)
def fill_bytes(data):
    """Right-pad *data* with NUL characters to a fixed length of 96.

    Data already 96 characters or longer is returned unchanged.
    """
    padding = '\x00' * max(0, 96 - len(data))
    return data + padding
| 23.904762 | 81 | 0.653386 |
acf248e622a9248b60205cadaf16f8c97a4bde07 | 2,506 | py | Python | Module 1/client3.py | Jitender46559/Software-Engineering-Virtual-Experience | 1f69f5648e56acc03cc31da8cbdea7a501b7bde2 | [
"MIT"
] | null | null | null | Module 1/client3.py | Jitender46559/Software-Engineering-Virtual-Experience | 1f69f5648e56acc03cc31da8cbdea7a501b7bde2 | [
"MIT"
] | null | null | null | Module 1/client3.py | Jitender46559/Software-Engineering-Virtual-Experience | 1f69f5648e56acc03cc31da8cbdea7a501b7bde2 | [
"MIT"
] | null | null | null | ################################################################################
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import urllib.request
import time
import json
import random
# Server API URLs
QUERY = "http://localhost:8080/query?id={}"  # {} is filled with a random id to defeat caching
# 500 server request
N = 500  # number of polling iterations performed by the main loop
def getDataPoint(quote):
    """Extract (stock, bid, ask, price) from one quote record.

    The price is the mid-point between the top bid and the top ask.
    """
    bid = float(quote['top_bid']['price'])
    ask = float(quote['top_ask']['price'])
    return quote['stock'], bid, ask, (bid + ask) / 2
def getRatio(price_a, price_b):
    """Return price_a / price_b, or None when the ratio is undefined.

    :param price_a: numerator price
    :param price_b: denominator price
    :return: the ratio, or None if price_b is zero (callers must handle
        the None case)
    """
    if price_b == 0:
        # Make the implicit bare `return` explicit: a zero denominator
        # yields None rather than raising ZeroDivisionError.
        return None
    return price_a / price_b
# Main
if __name__ == "__main__":
    # Query the price once every N seconds.
    # NOTE(review): despite the comment above, there is no sleep here — the
    # loop issues N back-to-back requests; confirm whether a delay was
    # intended.
    for _ in iter(range(N)):
        # A random id is appended to the query URL to bypass server caching.
        quotes = json.loads(urllib.request.urlopen(QUERY.format(random.random())).read())
        """ ----------- Update to get the ratio --------------- """
        # Collect the mid-price of every stock in this poll, then report the
        # ABC/DEF price ratio.
        prices = {}
        for quote in quotes:
            stock, bid_price, ask_price, price = getDataPoint(quote)
            prices[stock] = price
            print ("Quoted %s at (bid:%s, ask:%s, price:%s)" % (stock, bid_price, ask_price, price))
        print ("Ratio %s" % getRatio(prices['ABC'], prices['DEF']))
| 39.15625 | 92 | 0.655626 |
acf249ad6579143422452bf0580516d436eea788 | 2,670 | py | Python | metecho/oauth2/salesforce/tests/views.py | almostolmos/Metecho | 7f58eca163faafea1ce07ffb6f4de2449fa0b8df | [
"BSD-3-Clause"
] | 1 | 2021-12-09T20:39:01.000Z | 2021-12-09T20:39:01.000Z | metecho/oauth2/salesforce/tests/views.py | almostolmos/Metecho | 7f58eca163faafea1ce07ffb6f4de2449fa0b8df | [
"BSD-3-Clause"
] | 1,613 | 2020-03-26T16:39:57.000Z | 2022-03-07T14:54:16.000Z | metecho/oauth2/salesforce/tests/views.py | almostolmos/Metecho | 7f58eca163faafea1ce07ffb6f4de2449fa0b8df | [
"BSD-3-Clause"
] | 2 | 2021-04-09T18:51:10.000Z | 2022-01-04T20:32:48.000Z | from unittest import mock
import pytest
from django.core.exceptions import SuspiciousOperation
from sfdo_template_helpers.crypto import fernet_decrypt, fernet_encrypt
from ..views import SalesforceOAuth2Adapter
class TestSalesforceOAuth2Adapter:
    """Tests for the Salesforce allauth adapter: base-URL derivation from
    the custom_domain query parameter, the complete_login flow, and token
    encryption/validation helpers."""
    def test_base_url(self, rf):
        # No custom_domain -> the standard production login host.
        request = rf.get("/")
        request.session = {}
        adapter = SalesforceOAuth2Adapter(request)
        assert adapter.base_url == "https://login.salesforce.com"
    def test_base_url__custom_domain(self, rf):
        # A custom_domain query param maps to a My Domain URL.
        request = rf.get("/?custom_domain=foo-bar.baz")
        request.session = {}
        adapter = SalesforceOAuth2Adapter(request)
        assert adapter.base_url == "https://foo-bar.baz.my.salesforce.com"
    def test_base_url__invalid_domain(self, rf):
        # Characters outside the allowed domain charset are rejected.
        request = rf.get("/?custom_domain=google.com?-")
        request.session = {}
        with pytest.raises(SuspiciousOperation):
            SalesforceOAuth2Adapter(request).base_url
    def test_complete_login(self, mocker, rf):
        # This is a mess of terrible mocking and I do not like it.
        # This is really just to exercise the mixin, and confirm that it
        # assigns instance_url
        # requests.get is patched to serve the userinfo payload first and
        # opaque mocks for the two follow-up fetches.
        get = mocker.patch("requests.get")
        userinfo_mock = mock.MagicMock()
        userinfo_mock.json.return_value = {
            "organization_id": "00D000000000001EAA",
            "urls": mock.MagicMock(),
        }
        get.side_effect = [userinfo_mock, mock.MagicMock(), mock.MagicMock()]
        request = rf.get("/")
        request.session = {"socialaccount_state": (None, "some-verifier")}
        adapter = SalesforceOAuth2Adapter(request)
        adapter.get_provider = mock.MagicMock()
        slfr = mock.MagicMock()
        slfr.account.extra_data = {}
        prov_ret = mock.MagicMock()
        prov_ret.sociallogin_from_response.return_value = slfr
        adapter.get_provider.return_value = prov_ret
        token = mock.MagicMock()
        token.token = fernet_encrypt("token")
        ret = adapter.complete_login(
            request, None, token, response={"instance_url": "https://example.com"}
        )
        # The mixin must copy instance_url from the response into extra_data.
        assert ret.account.extra_data["instance_url"] == "https://example.com"
    def test_parse_token(self):
        # parse_token stores tokens encrypted; round-trip via fernet_decrypt.
        adapter = SalesforceOAuth2Adapter(None)
        data = {"access_token": "token", "refresh_token": "token"}
        token = adapter.parse_token(data)
        assert "token" == fernet_decrypt(token.token)
    def test_validate_org_id__invalid(self, rf):
        # Malformed org ids are rejected as suspicious.
        request = rf.get("/")
        adapter = SalesforceOAuth2Adapter(request)
        with pytest.raises(SuspiciousOperation):
            adapter._validate_org_id("bogus")
| 38.695652 | 82 | 0.661798 |
acf24ad8dfd5b6fcecda854f13f8347e992aa9b6 | 3,687 | py | Python | idaes/apps/ripe/bounds.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | [
"RSA-MD"
] | null | null | null | idaes/apps/ripe/bounds.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | [
"RSA-MD"
] | null | null | null | idaes/apps/ripe/bounds.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | [
"RSA-MD"
] | 1 | 2022-03-17T11:08:43.000Z | 2022-03-17T11:08:43.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
# This file contians subroutines related to finding the bounds for big M constraints
# additionally, depedent reaction stoichiometries are detected here
import itertools

import numpy as np

from idaes.apps import ripe
def stoich_cons(stoichs):
    """Identify dependent reaction stoichiometries.

    Examines every ordered selection of 3 or 4 rows of the stoichiometry
    matrix; a rank-deficient selection indicates linearly dependent
    stoichiometries.  Selections whose rows all share the same total
    (absolute) order are recorded as "family" constraints; otherwise the
    rows are split into small/large groups by total order.

    Args:
        stoichs: 2-D array-like of stoichiometric coefficients, one row
            per reaction.

    Returns:
        Tuple (scons_small, scons_large, ncons, fam_cons, nfcons) of
        1-based row-index lists and the associated constraint sizes.
    """
    smat = np.array(stoichs)
    nt, ns = np.shape(stoichs)
    scons_small = []
    scons_large = []
    fam_cons = []
    nfcons = []
    ncons = []
    for con in [3, 4]:
        # BUG FIX: this originally read ``nt.permutations(...)``, but ``nt``
        # is an int from np.shape; the intended call (per the leftover
        # "# was it" note) is itertools.permutations.
        for ins in itertools.permutations(range(nt), con):
            inds = list(ins)
            tmat = smat[inds, :]
            rank = np.linalg.matrix_rank(tmat)
            if rank < con:
                msize = count_neg(tmat)
                if all(msize[0] == item for item in msize):
                    # NOTE(review): indexing ``inds`` by its own values looks
                    # suspicious (probably meant ``range(con)``); preserved
                    # as-is pending confirmation.
                    fam_cons.append([inds[i] + 1 for i in inds])
                    nfcons.append(con - 1)
                else:
                    bigones = np.argwhere(msize == np.max(msize)).flatten().tolist()
                    smallones = np.argwhere(msize != np.max(msize)).flatten().tolist()
                    scons_small.append([inds[i] + 1 for i in smallones])
                    scons_large.append([inds[i] + 1 for i in bigones])
                    ncons.append(con - 1)
    return scons_small, scons_large, ncons, fam_cons, nfcons
def count_neg(mat):
    """Return the total absolute stoichiometric order of each row of *mat*.

    Despite the name (a holdover from counting only negative, i.e.
    reactant, coefficients), every entry's absolute value is summed so
    that total reaction order is enforced.
    """
    # Total order over all species, not just reactants (see note above).
    return [sum(abs(entry) for entry in row) for row in mat]
def get_bounds(inargs):
    """Tighten the big-M rate-constant bound using a preliminary MIP solve.

    Runs a short (60 s) RIPE MIP to estimate the largest rate constant,
    then inflates either that estimate or the configured bound (when the
    estimate hit the bound or came back as 0) by a factor of 10.  Also
    precomputes the Arrhenius penalty term from the temperature range of
    the process conditions.

    Args:
        inargs: argument list for ``ripe.genpyomo.ripeomo``; the last
            entry is the shared-data dict (mutated in place) and the
            second-to-last holds the process conditions.

    Returns:
        The shared-data dict with updated ``bounds['k']['max']`` and
        ``arrpen`` entries.
    """
    sharedata = inargs[-1]
    # Cap the preliminary solve so bound estimation stays cheap.
    inargs[-1]["maxmiptime"] = 60.0
    pc = inargs[-2]
    res = ripe.genpyomo.ripeomo(*inargs)
    # Pieces of the closed-form activation-energy bound:
    #   c_1,2 = -1 / (R * T_min,max),  E_max = ln(c2/c1) / (c1 - c2)
    T = [pc["T"][i][0] for i in range(pc["npc"])]
    Tmax = np.max(T)
    Tmin = np.min(T)
    c1 = -1 / (sharedata["gasconst"] * Tmin)
    c2 = -1 / (sharedata["gasconst"] * Tmax)
    arrpen = c2 - c1
    kmax = res["maxk"]
    if kmax >= sharedata["bounds"]["k"]["max"] or kmax == 0.0:
        # Estimate hit the configured bound (or the solve returned 0):
        # fall back to inflating the configured bound instead.
        # (An unreachable duplicate ``elif kmax == 0.0`` branch that did
        # exactly the same thing was removed.)
        kmax = 10 * sharedata["bounds"]["k"]["max"]
    else:
        kmax = 10 * kmax
    sharedata["bounds"]["k"]["max"] = kmax
    sharedata["arrpen"] = arrpen
    return sharedata
| 38.40625 | 93 | 0.551397 |
acf24c46a8cd887108b4775a9c031f9db69e4ae3 | 2,737 | py | Python | conductor/conductor/tests/unit/data/plugins/inventory_provider/test_candidates.py | onap/optf-has | dd06e2675aedd7ae6344f2f51e70bbd468f36ce5 | [
"Apache-2.0"
] | 4 | 2019-02-14T19:18:09.000Z | 2019-10-21T17:17:59.000Z | conductor/conductor/tests/unit/data/plugins/inventory_provider/test_candidates.py | onap/optf-has | dd06e2675aedd7ae6344f2f51e70bbd468f36ce5 | [
"Apache-2.0"
] | null | null | null | conductor/conductor/tests/unit/data/plugins/inventory_provider/test_candidates.py | onap/optf-has | dd06e2675aedd7ae6344f2f51e70bbd468f36ce5 | [
"Apache-2.0"
] | 4 | 2019-05-09T07:05:54.000Z | 2020-11-20T05:56:47.000Z | #
# -------------------------------------------------------------------------
# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import unittest
import uuid
from conductor.data.plugins.inventory_provider.candidates.candidate import Candidate
from conductor.data.plugins.inventory_provider.candidates.slice_profiles_candidate import SliceProfilesCandidate
class TestCandidates(unittest.TestCase):
    """Tests for inventory-provider candidate construction."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_slice_profiles_candidate(self):
        # Build a SliceProfilesCandidate from per-subnet (core/ran)
        # requirements plus default fields, and check that flattening it
        # yields the expected flat dict with prefixed subnet keys.
        id = str(uuid.uuid4())
        expected_candidate = {
            "candidate_id": id,
            "core_latency": 15,
            "core_reliability": 99.99,
            "cost": 1.0,
            "coverage_area": "City: Chennai",
            "coverage_area_ta_list": "City: Chennai",
            "inventory_provider": "generator",
            "inventory_type": "slice_profiles",
            "latency": 25,
            "ran_coverage_area_ta_list": "City: Chennai",
            "ran_latency": 10,
            "ran_reliability": 99.99,
            "reliability": 99.99,
            "uniqueness": "true",
            "creation_cost": 0.9
        }
        info = Candidate.build_candidate_info("generator", "slice_profiles", 1.0, "true", id)
        subnet_requirements = {"core": {"latency": 15, "reliability": 99.99},
                               "ran": {"latency": 10, "reliability": 99.99, "coverage_area_ta_list": "City: Chennai"}
                               }
        candidate = SliceProfilesCandidate(info=info, subnet_requirements=subnet_requirements,
                                           default_fields={"creation_cost": 0.9},coverage_area="City: Chennai")
        self.assertEqual(expected_candidate, candidate.convert_nested_dict_to_dict())
| 44.145161 | 117 | 0.525758 |
acf24cbbe1ee1edb839e2a7090fd5affa58ea184 | 2,021 | py | Python | examples/to-be-fixed/pihm2netcdf/mint_netcdf_write_func.py | raunaqtri1/MINT-Transformation | cdb95ff3ee05b109e5f2a1016d52702819691701 | [
"MIT"
] | 1 | 2020-09-29T15:27:52.000Z | 2020-09-29T15:27:52.000Z | examples/to-be-fixed/pihm2netcdf/mint_netcdf_write_func.py | raunaqtri1/MINT-Transformation | cdb95ff3ee05b109e5f2a1016d52702819691701 | [
"MIT"
] | 19 | 2020-05-01T21:16:39.000Z | 2021-06-09T19:42:59.000Z | examples/to-be-fixed/pihm2netcdf/mint_netcdf_write_func.py | raunaqtri1/MINT-Transformation | cdb95ff3ee05b109e5f2a1016d52702819691701 | [
"MIT"
] | 3 | 2020-05-04T22:44:54.000Z | 2020-05-06T23:16:35.000Z | import datetime
from pathlib import Path
from typing import Union
import xarray as xr
from dtran import IFunc, ArgType
class MintNetCDFWriteFunc(IFunc):
    """Pipeline adapter that writes an xarray Dataset to a NetCDF4 file,
    stamping MINT-style global metadata attributes before writing."""

    # Identifier and typed I/O declarations consumed by the dtran pipeline.
    id = "netcdf_write_func"
    inputs = {
        "data": ArgType.NDimArray,
        "output_file": ArgType.FilePath,
        "title": ArgType.String,
        "comment": ArgType.String,
        "naming_authority": ArgType.String,
        "id": ArgType.String,
        "creator_name": ArgType.String,
        "creator_email": ArgType.String,
    }
    outputs = {
        "result": ArgType.Boolean
    }

    def __init__(
        self,
        data: xr.Dataset,
        output_file: Union[str, Path], title: str, comment: str, naming_authority: str, id: str, creator_name: str,
        creator_email: str
    ):
        """Capture the dataset, output path, and metadata attributes."""
        self.ndarray = data
        self.output_file = Path(output_file)
        self.title = title
        self.comment = comment
        self.naming_authority = naming_authority
        self.id = id
        self.creator_name = creator_name
        self.creator_email = creator_email

    def exec(self) -> dict:
        """Attach global metadata attributes and write the NetCDF4 file.

        Returns:
            dict with key "result" set to True on completion.
        """
        # Spatial extent from the dataset's X/Y coordinate ranges; the
        # bounds CRS attribute below declares them as EPSG:4326.
        x_min = min(self.ndarray.coords["X"])
        x_max = max(self.ndarray.coords["X"])
        y_min = min(self.ndarray.coords["Y"])
        y_max = max(self.ndarray.coords["Y"])
        self.ndarray.attrs.update({
            "title": self.title,
            "comment": self.comment,
            "naming_authority": self.naming_authority,
            "id": self.id,
            # created/modified are both stamped "now" at write time.
            "date_created": datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
            "date_modified": datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
            "creator_name": self.creator_name,
            "geospatial_bounds_crs": "+init=epsg:4326",
            "geospatial_bounds": [x_min, y_min, x_max, y_max],
            "creator_email": self.creator_email}
        )
        self.ndarray.to_netcdf(self.output_file, format="NETCDF4")
        return {"result": True}

    def validate(self) -> bool:
        # No preconditions to check for this writer.
        return True
acf24cd9663a555c1925267e3b42985759133a0a | 5,452 | py | Python | train_num_beads.py | marrink-lab/cartographer | f1e718617535867e2d94a63701163fd029e5ec86 | [
"Apache-2.0"
] | 5 | 2018-07-18T19:35:38.000Z | 2022-02-16T10:57:41.000Z | train_num_beads.py | marrink-lab/cartographer | f1e718617535867e2d94a63701163fd029e5ec86 | [
"Apache-2.0"
] | 1 | 2020-07-25T00:03:37.000Z | 2020-07-27T07:46:02.000Z | train_num_beads.py | marrink-lab/cartographer | f1e718617535867e2d94a63701163fd029e5ec86 | [
"Apache-2.0"
] | 1 | 2021-07-23T10:27:10.000Z | 2021-07-23T10:27:10.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 University of Groningen
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import networkx as nx
import numpy as np
from sklearn import model_selection, svm, preprocessing, pipeline, base
from sklearn.externals import joblib
from ecfp import XCFPFingerprinter
from parse_db import parse_db
def invariant(graph, node_key):
    """Return the initial ECFP invariant tuple for atom *node_key*.

    The tuple captures: neighbour count, heteroatom-neighbour count,
    halogen-neighbour count, hydrogen count, whether the atom itself is
    a heteroatom, its formal charge, and the size of the smallest
    cycle-basis ring containing it (0 when acyclic).
    """
    halogens = frozenset('F Cl Br I'.split())
    neighbors = list(graph[node_key])

    def element_of(node):
        return graph.nodes[node]['element']

    n_hetero = sum(1 for nb in neighbors if element_of(nb) not in 'CH')
    n_halogen = sum(1 for nb in neighbors if element_of(nb) in halogens)
    # Smallest ring (from the cycle basis) that contains this atom.
    ring_sizes = [len(cycle) for cycle in nx.cycle_basis(graph)
                  if node_key in cycle]
    smallest_ring = min(ring_sizes) if ring_sizes else 0
    atom = graph.nodes[node_key]
    return (len(neighbors),
            n_hetero,
            n_halogen,
            atom['hcount'],
            atom['element'] not in 'CH',
            atom['charge'],
            smallest_ring)
class RounderMixIn:
    """Mixin that snaps a regressor's continuous predictions to integers."""

    def predict(self, *args, **kwargs):
        # Delegate to the sibling estimator in the MRO, then round.
        raw = super().predict(*args, **kwargs)
        return self.do_round(raw)

    @staticmethod
    def do_round(y):
        """Round the array *y* to the nearest value and cast to int."""
        rounded = y.round()
        return rounded.astype(int)
def draw_mol(mol):
    """Draw the molecule graph *mol*, labelling nodes by their 'name'."""
    name_labels = nx.get_node_attributes(mol, 'name')
    nx.draw_networkx(mol, labels=name_labels)
class SVRR(RounderMixIn, svm.SVR):
    """Support-vector regressor whose predictions are rounded to integers
    (rounding supplied by RounderMixIn)."""
    pass
def featurize(mol, feature_size=7, fingerprint_size=2):
    """Fold a molecule's circular fingerprint into a fixed-length count vector.

    Args:
        mol: molecule graph understood by XCFPFingerprinter.
        feature_size: length of the folded feature vector.
        fingerprint_size: fingerprint radius passed to the fingerprinter.

    Returns:
        (1, feature_size) int array of folded fingerprint counts.
    """
    fingerprinter = XCFPFingerprinter(fingerprint_size, invariant=invariant)
    fp = fingerprinter.fingerprint(mol)
    feat_arr = np.zeros(feature_size, dtype=int)
    for val, count in fp.items():
        # BUG FIX: fold by the requested feature_size, not the module-level
        # NUM_FEATURES constant -- the parameter was silently ignored, which
        # broke (or crashed) any caller using a different feature size.
        feat_arr[val % feature_size] += count
    return feat_arr.reshape(1, -1)
class FingerPrinter(base.TransformerMixin):
    """Sklearn-style transformer turning molecule graphs into feature rows."""

    def __init__(self, feature_size, fp_radius):
        # Folded feature-vector length and fingerprint radius, respectively.
        self.feature_size = feature_size
        self.fp_radius = fp_radius

    def get_params(self, deep=False):
        """Expose hyperparameters for sklearn model selection."""
        return dict(feature_size=self.feature_size, fp_radius=self.fp_radius)

    def set_params(self, **vals):
        """Assign hyperparameters by keyword, sklearn-style."""
        for key in vals:
            setattr(self, key, vals[key])

    def transform(self, X):
        """Featurize one graph, or a sequence of graphs, into a 2-D array."""
        if isinstance(X, nx.Graph):
            return featurize(X, self.feature_size, self.fp_radius)
        rows = [featurize(mol, self.feature_size, self.fp_radius) for mol in X]
        return np.reshape(rows, (-1, self.feature_size))

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
# Folded feature-vector length and fingerprint radius used throughout.
NUM_FEATURES = 7
FINGERPRINT_SIZE = 3
# Where the trained model is persisted.
FILENAME = 'numbead_predictor.gz'
# Training database locations (spreadsheet plus structure directories).
BASE_PATH = '/home/.../Documents/database'
XLS_FILE = os.path.join(BASE_PATH, 'DRUGS-06.xlsx')
AA_DIR = os.path.join(BASE_PATH, 'atomistic')
CG_DIR = os.path.join(BASE_PATH, 'Martini')
MAP_DIR = os.path.join(BASE_PATH, 'mapping')

scaler = preprocessing.RobustScaler()
finger_printer = FingerPrinter(NUM_FEATURES, FINGERPRINT_SIZE)
# Hyperparameters are determined by hyperopt_num_beads.py
svrr = SVRR()
params = dict((('estimator__C', 825.404185268019),
               ('estimator__epsilon', 0.05623413251903491),
               ('estimator__kernel', 'rbf'),
               ('estimator__gamma', 0.0031622776601683794),
               ('estimator__shrinking', True)))
# Full model: fingerprint features -> robust scaling -> rounded SVR.
MODEL = pipeline.Pipeline(steps=[('fingerprint', finger_printer),
                                 ('scale', scaler),
                                 ('estimator', svrr)])
MODEL.set_params(**params)
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    molecules = parse_db(XLS_FILE, AA_DIR, CG_DIR, MAP_DIR)
    # Target = number of coarse-grained beads; input = atomistic molecule.
    results = []
    aa_mols = []
    for _, aa_mol, cg_mol, _, _ in molecules:
        results.append(len(cg_mol))
        aa_mols.append(aa_mol)
    # Hold out 20% of the data for validation.
    splits = model_selection.train_test_split(aa_mols, results, test_size=0.2)
    trainX, testX, trainY, testY = splits
    train = trainX, trainY
    test = testX, testY
    # 4-fold cross-validation on the training split only.
    cv_out = model_selection.cross_validate(MODEL, *train, cv=4)
    print('Cross validation scores on the training set:')
    print(cv_out['test_score'])
    MODEL.fit(*train)
    # Persist the fitted pipeline for later prediction.
    joblib.dump(MODEL, FILENAME, compress=True)
    print('Score for the validation set:')
    print(MODEL.score(*test))
    # Predicted-vs-true scatter; the dashed diagonal marks perfect fit.
    plt.scatter(train[1], MODEL.predict(train[0]), c='b', label='train')
    plt.scatter(test[1], MODEL.predict(test[0]), c='r', label='test')
    xmin, xmax = plt.xlim()
    plt.plot([xmin, xmax], [xmin, xmax], '--')
    plt.xlim(xmin, xmax)
    plt.legend()
    plt.show()
    # Absolute error over the full dataset (train + test combined).
    predicted = MODEL.predict(aa_mols)
    diff = np.abs(results - predicted)
    print('Differences between expected and predicted:')
    print(diff)
| 32.260355 | 78 | 0.658107 |
acf24d68daa2ee8f871a9ca4b24538950ee2f346 | 1,023 | py | Python | test/language/expressions/python/FieldTypeWithClashTest.py | dkBrazz/zserio | 29dd8145b7d851fac682d3afe991185ea2eac318 | [
"BSD-3-Clause"
] | 86 | 2018-09-06T09:30:53.000Z | 2022-03-27T01:12:36.000Z | test/language/expressions/python/FieldTypeWithClashTest.py | dkBrazz/zserio | 29dd8145b7d851fac682d3afe991185ea2eac318 | [
"BSD-3-Clause"
] | 362 | 2018-09-04T20:21:24.000Z | 2022-03-30T15:14:38.000Z | test/language/expressions/python/FieldTypeWithClashTest.py | dkBrazz/zserio | 29dd8145b7d851fac682d3afe991185ea2eac318 | [
"BSD-3-Clause"
] | 20 | 2018-09-10T15:59:02.000Z | 2021-12-01T15:38:22.000Z | import unittest
from testutils import getZserioApi
class FieldTypeWithClashTest(unittest.TestCase):
    """Checks bitsizeof() of FieldTypeExpression with and without its
    optional field present."""

    @classmethod
    def setUpClass(cls):
        # Compile the zserio schema once and keep the generated API handle.
        cls.api = getZserioApi(__file__, "expressions.zs").field_type_with_clash

    def testBitSizeOfWithOptional(self):
        # ContainedType(True): the optional extra value is present, so the
        # bit size matches the WITH_OPTIONAL constant below.
        containedType = self.api.ContainedType(True)
        fieldTypeExpression = self.api.FieldTypeExpression(containedType, self.EXTRA_VALUE)
        self.assertEqual(self.COMPOUND_TYPE_EXPRESSION_BIT_SIZE_WITH_OPTIONAL, fieldTypeExpression.bitsizeof())

    def testBitSizeOfWithoutOptional(self):
        # ContainedType(False): no extra value is set, so only the flag
        # contributes to the bit size.
        containedType = self.api.ContainedType(False)
        fieldTypeExpression = self.api.FieldTypeExpression()
        fieldTypeExpression.contained_type = containedType
        self.assertEqual(self.COMPOUND_TYPE_EXPRESSION_BIT_SIZE_WITHOUT_OPTIONAL,
                         fieldTypeExpression.bitsizeof())

    # Expected bit sizes and the optional payload value used above.
    COMPOUND_TYPE_EXPRESSION_BIT_SIZE_WITH_OPTIONAL = 4
    COMPOUND_TYPE_EXPRESSION_BIT_SIZE_WITHOUT_OPTIONAL = 1
    EXTRA_VALUE = 0x02
| 39.346154 | 111 | 0.768328 |
acf24daf3e7b157ae436fee4e8c86a833ba85558 | 2,098 | py | Python | Pagespeed-API/pagespeed.py | avinashkranjan/PraticalPythonProjects | 12c1f7cedae57a843ceb6aba68cca48df505f341 | [
"MIT"
] | 930 | 2020-09-05T22:07:28.000Z | 2022-03-30T07:56:18.000Z | Pagespeed-API/pagespeed.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 893 | 2020-09-04T07:57:24.000Z | 2022-02-08T02:12:26.000Z | Pagespeed-API/pagespeed.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 497 | 2020-09-05T08:16:24.000Z | 2022-03-31T00:55:57.000Z | import requests
import json
from responses import PageSpeedResponse
class PageSpeed(object):
"""
Google PageSpeed analysis client
Attributes:
api_key (str): Optional API key for client account.
endpoint (str): Endpoint for HTTP request
"""
def __init__(self, api_key=None):
self.api_key = api_key
self.endpoint = 'https://www.googleapis.com/pagespeedonline/v5/runPagespeed'
def analyse(self, url, strategy='desktop', category='performance'):
"""
Run PageSpeed test
Args:
url (str): The URL to fetch and analyse.
strategy (str, optional): The analysis strategy to use. Acceptable values: 'desktop', 'mobile'
category (str, optional): A Lighthouse category to run; if none are given, only Performance category will be run
Returns:
response: PageSpeed API results
"""
strategy = strategy.lower()
params = {
'strategy': strategy,
'url': url,
'category': category,
}
if self.api_key:
params['key'] = self.api_key
# Sanity Check
if strategy not in ('mobile', 'desktop'):
raise ValueError('invalid strategy: {0}'.format(strategy))
# Returns raw data
raw = requests.get(self.endpoint, params=params)
response = PageSpeedResponse(raw)
return response
def save(self, response, path='./'):
json_data = response._json
with open(path + "json_data.json", 'w+') as f:
json.dump(json_data, f, indent=2)
if __name__ == "__main__":
ps = PageSpeed()
response = ps.analyse('https://www.example.com', strategy='mobile')
ls = [
response.url, response.loadingExperience,
response.originLoadingExperience,
response.originLoadingExperienceDetailed,
response.loadingExperienceDetailed, response.finalUrl,
response.requestedUrl, response.version, response.userAgent
] # , response.lighthouseResults]
ps.save(response)
print(ls)
| 29.549296 | 124 | 0.620114 |
acf24e782385d7ec98769e4bd8761011d1600a44 | 8,127 | py | Python | io_scene_niftools/modules/nif_export/animation/__init__.py | SubhadeepG/blender_niftools_addon | 87da19643c9c361219d9d9f4b294b17ad04933ac | [
"BSD-3-Clause"
] | 4 | 2021-09-27T09:58:44.000Z | 2022-02-05T16:12:28.000Z | io_scene_niftools/modules/nif_export/animation/__init__.py | SubhadeepG/blender_niftools_addon | 87da19643c9c361219d9d9f4b294b17ad04933ac | [
"BSD-3-Clause"
] | null | null | null | io_scene_niftools/modules/nif_export/animation/__init__.py | SubhadeepG/blender_niftools_addon | 87da19643c9c361219d9d9f4b294b17ad04933ac | [
"BSD-3-Clause"
] | null | null | null | """This script contains classes to help import animations."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2013, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
from abc import ABC
import bpy
from pyffi.formats.nif import NifFormat
import io_scene_niftools.utils.logging
from io_scene_niftools.modules.nif_export import animation
from io_scene_niftools.modules.nif_export.block_registry import block_store
from io_scene_niftools.utils.singleton import NifOp, NifData
from io_scene_niftools.utils.logging import NifLog
# FPS = 30
class Animation(ABC):
def __init__(self):
self.fps = bpy.context.scene.render.fps
def set_flags_and_timing(self, kfc, exp_fcurves, start_frame=None, stop_frame=None):
# fill in the non-trivial values
kfc.flags = 8 # active
kfc.flags |= self.get_flags_from_fcurves(exp_fcurves)
kfc.frequency = 1.0
kfc.phase = 0.0
if not start_frame and not stop_frame:
start_frame, stop_frame = exp_fcurves[0].range()
# todo [anim] this is a hack, move to scene
kfc.start_time = start_frame / self.fps
kfc.stop_time = stop_frame / self.fps
@staticmethod
def get_flags_from_fcurves(fcurves):
# see if there are cyclic extrapolation modifiers on exp_fcurves
cyclic = False
for fcu in fcurves:
# sometimes fcurves can include empty fcurves - see uv controller export
if fcu:
for mod in fcu.modifiers:
if mod.type == "CYCLES":
cyclic = True
break
if cyclic:
return 0
else:
return 4 # 0b100
@staticmethod
def get_active_action(b_obj):
# check if the blender object has a non-empty action assigned to it
if b_obj:
if b_obj.animation_data and b_obj.animation_data.action:
b_action = b_obj.animation_data.action
if b_action.fcurves:
return b_action
@staticmethod
def get_controllers(nodes):
"""find all nodes and relevant controllers"""
node_kfctrls = {}
for node in nodes:
if not isinstance(node, NifFormat.NiAVObject):
continue
# get list of all controllers for this node
ctrls = node.get_controllers()
for ctrl in ctrls:
if bpy.context.scene.niftools_scene.game == 'MORROWIND':
# morrowind: only keyframe controllers
if not isinstance(ctrl, NifFormat.NiKeyframeController):
continue
if node not in node_kfctrls:
node_kfctrls[node] = []
node_kfctrls[node].append(ctrl)
return node_kfctrls
@staticmethod
def create_controller(parent_block, target_name, priority=0):
n_kfi = None
n_kfc = None
if NifOp.props.animation == 'GEOM_NIF' and NifData.data.version < 0x0A020000:
# keyframe controllers are not present in geometry only files
# for more recent versions, the controller and interpolators are
# present, only the data is not present (see further on)
return n_kfc, n_kfi
# add a KeyframeController block, and refer to this block in the
# parent's time controller
if NifData.data.version < 0x0A020000:
n_kfc = block_store.create_block("NiKeyframeController", None)
else:
n_kfc = block_store.create_block("NiTransformController", None)
n_kfi = block_store.create_block("NiTransformInterpolator", None)
# link interpolator from the controller
n_kfc.interpolator = n_kfi
# if parent is a node, attach controller to that node
if isinstance(parent_block, NifFormat.NiNode):
parent_block.add_controller(n_kfc)
if n_kfi:
# set interpolator default data
n_kfi.scale, n_kfi.rotation, n_kfi.translation = parent_block.get_transform().get_scale_quat_translation()
# else ControllerSequence, so create a link
elif isinstance(parent_block, NifFormat.NiControllerSequence):
controlled_block = parent_block.add_controlled_block()
controlled_block.priority = priority
if NifData.data.version < 0x0A020000:
# older versions need the actual controller blocks
controlled_block.target_name = target_name
controlled_block.controller = n_kfc
# erase reference to target node
n_kfc.target = None
else:
# newer versions need the interpolator blocks
controlled_block.interpolator = n_kfi
controlled_block.node_name = target_name
controlled_block.controller_type = "NiTransformController"
else:
raise io_scene_niftools.utils.logging.NifError("Unsupported KeyframeController parent!")
return n_kfc, n_kfi
# todo [anim] currently not used, maybe reimplement this
@staticmethod
def get_n_interp_from_b_interp(b_ipol):
if b_ipol == "LINEAR":
return NifFormat.KeyType.LINEAR_KEY
elif b_ipol == "BEZIER":
return NifFormat.KeyType.QUADRATIC_KEY
elif b_ipol == "CONSTANT":
return NifFormat.KeyType.CONST_KEY
NifLog.warn(f"Unsupported interpolation mode ({b_ipol}) in blend, using quadratic/bezier.")
return NifFormat.KeyType.QUADRATIC_KEY
def add_dummy_markers(self, b_action):
# if we exported animations, but no animation groups are defined,
# define a default animation group
NifLog.info("Checking action pose markers.")
if not b_action.pose_markers:
# has_controllers = False
# for block in block_store.block_to_obj:
# # has it a controller field?
# if isinstance(block, NifFormat.NiObjectNET):
# if block.controller:
# has_controllers = True
# break
# if has_controllers:
NifLog.info("Defining default action pose markers.")
for frame, text in zip(b_action.frame_range, ("Idle: Start/Idle: Loop Start", "Idle: Loop Stop/Idle: Stop")):
marker = b_action.pose_markers.new(text)
marker.frame = frame
| 42.549738 | 122 | 0.650671 |
acf24e8c1fd3532231d3748443886739b5e8df84 | 4,418 | py | Python | qiskit/providers/ibmq/jupyter/config_widget.py | Zoufalc/qiskit-ibmq-provider | f30301816441f5a517f82480731e1916c2c991fe | [
"Apache-2.0"
] | 1 | 2020-07-14T20:08:55.000Z | 2020-07-14T20:08:55.000Z | qiskit/providers/ibmq/jupyter/config_widget.py | abbycross/qiskit-ibmq-provider | 228bdf0be6b767880b8614a1848a0d543a7b3bf8 | [
"Apache-2.0"
] | null | null | null | qiskit/providers/ibmq/jupyter/config_widget.py | abbycross/qiskit-ibmq-provider | 228bdf0be6b767880b8614a1848a0d543a7b3bf8 | [
"Apache-2.0"
] | 1 | 2021-04-01T17:28:23.000Z | 2021-04-01T17:28:23.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=protected-access
"""Widgert for backend configuration tab.
"""
import ipywidgets as wid
from qiskit.providers.ibmq.visualization.interactive import iplot_gate_map
def config_tab(backend):
    """The backend configuration widget.

    Args:
        backend (IBMQBackend | FakeBackend): The backend.

    Returns:
        GridBox: A GridBox widget.
    """
    status = backend.status().to_dict()
    config = backend.configuration().to_dict()

    # Merge the two dicts for rendering; configuration values win on clashes.
    config_dict = {**status, **config}

    # Keys promoted to the top summary table (quantum_volume only when set).
    upper_list = ['n_qubits']

    if 'quantum_volume' in config.keys():
        if config['quantum_volume']:
            upper_list.append('quantum_volume')

    upper_list.extend(['operational',
                       'status_msg', 'pending_jobs',
                       'backend_version', 'basis_gates',
                       'max_shots', 'max_experiments'])

    # Everything else goes into the detail table below.
    lower_list = list(set(config_dict.keys()).difference(upper_list))
    # Remove gates because they are in a different tab
    lower_list.remove('gates')
    # Look for hamiltonian
    if 'hamiltonian' in lower_list:
        # Wrap the LaTeX form so HTMLMath renders it as display math.
        htex = config_dict['hamiltonian']['h_latex']
        config_dict['hamiltonian'] = "$$%s$$" % htex

    upper_str = "<table>"
    upper_str += """<style>
table {
    border-collapse: collapse;
    width: auto;
    font-family:IBM Plex Sans, Arial, sans-serif !important;
}

th, td {
    text-align: left;
    padding: 8px;
}

tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""

    footer = "</table>"

    # Upper HBox widget data

    upper_str += "<tr><th>Property</th><th>Value</th></tr>"
    for key in upper_list:
        upper_str += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td></tr>" % (
            key, config_dict[key])
    upper_str += footer

    upper_table = wid.HTMLMath(
        value=upper_str, layout=wid.Layout(width='100%', grid_area='left'))

    # Gate-map image only for real devices, not simulators.
    img_child = []
    if not config['simulator']:
        img_child = [iplot_gate_map(backend, as_widget=True)]

    image_widget = wid.HBox(children=img_child,
                            layout=wid.Layout(grid_area='right',
                                              max_height='350px',
                                              margin='0px 0px 0px 0px',
                                              display='flex-inline',
                                              align_items='center',
                                              justify_content='center',
                                              width='auto'))

    lower_str = "<table>"
    lower_str += """<style>
table {
    border-collapse: collapse;
    width: auto;
}

th, td {
    text-align: left;
    padding: 8px !important;
}

tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
    lower_str += "<tr><th></th><th></th></tr>"
    for key in lower_list:
        # 'name' is already shown elsewhere, so skip it here.
        if key != 'name':
            lower_str += "<tr><td>%s</td><td>%s</td></tr>" % (
                key, config_dict[key])
    lower_str += footer

    lower_table = wid.HTMLMath(value=lower_str,
                               layout=wid.Layout(width='auto',
                                                 grid_area='bottom'))

    # Grid layout: summary table left, gate map right, details full-width below.
    grid = wid.GridBox(children=[upper_table, image_widget, lower_table],
                       layout=wid.Layout(max_height='500px',
                                         margin='10px',
                                         overflow='hidden scroll',
                                         grid_template_rows='auto auto',
                                         grid_template_columns='33% 21% 21% 21%',
                                         grid_template_areas='''
                                         "left right right right"
                                         "bottom bottom bottom bottom"
                                         ''',
                                         grid_gap='0px 0px'))
    return grid
| 32.725926 | 96 | 0.534178 |
acf24ee1b794a58b93a405b48abfe9c21fe5685e | 49,363 | py | Python | tests/python/unittest/test_optimizer.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 1 | 2019-06-19T09:23:37.000Z | 2019-06-19T09:23:37.000Z | tests/python/unittest/test_optimizer.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 2 | 2021-12-10T01:40:20.000Z | 2021-12-14T21:42:22.000Z | tests/python/unittest/test_optimizer.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 1 | 2018-12-09T16:53:56.000Z | 2018-12-09T16:53:56.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import numpy as np
import itertools
import mxnet as mx
import mxnet.lr_scheduler as lr_scheduler
from mxnet import gluon
import unittest
from nose.tools import raises
import math
from mxnet.test_utils import *
from common import setup_module, with_seed, teardown
@with_seed()
def test_learning_rate():
    """Learning rate is directly settable and tracks an attached scheduler."""
    # Without a scheduler, set_learning_rate takes effect immediately.
    plain_opt = mx.optimizer.Optimizer(learning_rate=0.01)
    plain_opt.set_learning_rate(0.2)
    assert plain_opt.learning_rate == 0.2
    # With a scheduler attached, the reported rate follows the scheduler's base_lr.
    scheduler = lr_scheduler.FactorScheduler(step=1)
    sched_opt = mx.optimizer.Optimizer(lr_scheduler=scheduler, learning_rate=0.3)
    assert sched_opt.learning_rate == 0.3
    sched_opt.lr_scheduler.base_lr = 0.4
    assert sched_opt.learning_rate == 0.4
@raises(UserWarning)
@with_seed()
def test_learning_rate_expect_user_warning():
    """set_learning_rate must warn when a scheduler already owns the rate."""
    scheduler = lr_scheduler.FactorScheduler(step=1)
    optim = mx.optimizer.Optimizer(lr_scheduler=scheduler, learning_rate=0.3)
    optim.set_learning_rate(0.5)
@with_seed()
def test_lr_wd_mult():
    # Verifies that per-parameter lr_mult/wd_mult attached to symbols are honored:
    # fc1's parameters are frozen (lr_mult=0) while fc2 gets half the global
    # weight decay (wd_mult=0.5); fc1_bias keeps lr_mult=1.0 via its Variable.
    data = mx.sym.Variable('data')
    bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)
    fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)
    fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)
    mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())
    mod.bind(data_shapes=[('data', (5,10))])
    mod.init_params(initializer=mx.init.Uniform(1.0))
    mod.init_optimizer(optimizer_params={'learning_rate': 1.0})
    # Snapshot parameters before one training step.
    args1, _ = mod.get_params()
    args1 = {k: v.asnumpy() for k, v in args1.items()}
    # One forward/backward/update pass on random input.
    mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)
    mod.backward(mod.get_outputs())
    mod.update()
    args2, _ = mod.get_params()
    args2 = {k: v.asnumpy() for k, v in args2.items()}
    # The optimizer must have picked up the multipliers from the symbols.
    assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}
    assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}
    # fc1_weight must be (nearly) unchanged; the unfrozen params must have moved.
    assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)
    assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)
    assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)
# SGD
class PySGD(mx.optimizer.Optimizer):
    """Python reference implementation of SGD (optional momentum and float16
    multi-precision), used to validate mx.optimizer.SGD."""
    def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):
        super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)
        self.momentum = momentum
        self.multi_precision = multi_precision

    def create_state(self, index, weight):
        """Create additional optimizer state: momentum (and, for fp16
        multi-precision, a float32 master copy of the weight).

        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        momentum = None
        weight_master_copy = None
        # Multi-precision only kicks in for float16 weights.
        do_multi_precision = self.multi_precision and weight.dtype == np.float16
        if do_multi_precision:
            if self.momentum != 0.0:
                momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)
            # `array` comes from the `mxnet.test_utils` star import.
            weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)
            return (momentum, weight_master_copy)
        else:
            if self.momentum != 0.0:
                momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
            return momentum

    def create_state_multi_precision(self, index, weight):
        return self.create_state(index, weight)

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)
        # A tuple/list state signals the multi-precision (momentum, master copy) pair.
        use_multi_precision = isinstance(state, list) or isinstance(state, tuple)
        if not use_multi_precision:
            if self.momentum == 0.0:
                # Plain SGD: w <- (1 - lr*wd)*w - lr*rescaled_grad
                if self.clip_gradient is not None:
                    weight[:] = ((1 - lr*wd)*weight -
                        lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
                else:
                    weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad
            else:
                # Momentum SGD: mom <- m*mom - lr*(wd*w + rescaled_grad); w += mom
                mom = state
                if self.clip_gradient is not None:
                    mom[:] = (self.momentum*mom - lr*wd*weight -
                        lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
                    weight += mom
                else:
                    mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad
                    weight += mom
        else:
            # Multi-precision path: update the float32 master copy, then cast
            # it back into the (float16) weight.
            grad32 = array(grad, ctx=grad.context, dtype=np.float32)
            mom = state[0]
            weight32 = state[1]
            if self.momentum == 0.0:
                if self.clip_gradient is not None:
                    weight32[:] = ((1 - lr*wd)*weight32 -
                        lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
                else:
                    weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32
            else:
                if self.clip_gradient is not None:
                    mom[:] = (self.momentum*mom - lr*wd*weight32 -
                        lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
                    weight32 += mom
                else:
                    mom[:] = self.momentum*mom - lr*wd*weight32 - lr*self.rescale_grad*grad32
                    weight32 += mom
            tmp = weight32.astype(weight.dtype)
            tmp.copyto(weight)

    def update_multi_precision(self, index, weight, grad, state):
        self.update(index, weight, grad, state)
@with_seed()
def test_sgd():
    """The built-in SGD must agree with the python reference across options."""
    opt1 = PySGD
    opt2 = mx.optimizer.SGD
    shape = (3, 4, 5)
    mom_options = [{}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
    for dtype in [np.float16, np.float32, np.float64]:
        for options in itertools.product(mom_options, cg_options, rg_options,
                                         wd_options, mp_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            # float16 is only exercised together with a float32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            if dtype == np.float16:
                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, rtol=1e-3)
            else:
                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
            # test operator fallback on cpu
            if dtype != np.float16:
                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape[:2],
                                  dtype, w_stype='csr', g_stype='csr')
class PySparseSGD(mx.optimizer.Optimizer):
    """Python reference implementation of SGD with row-wise lazy updates:
    rows whose gradient is entirely zero are skipped, matching the sparse
    (row_sparse) update semantics of mx.optimizer.SGD."""
    def __init__(self, learning_rate=0.01, momentum=0.0, **kwargs):
        super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)
        self.momentum = momentum

    def create_state(self, index, weight):
        """Create additional optimizer state: momentum

        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        if self.momentum == 0.0:
            return None
        else:
            return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)
        num_rows = weight.shape[0]
        if self.momentum == 0.0:
            # Update on a per row basis, skip all-zero rows
            for row in range(num_rows):
                grad_row = grad[row].asnumpy()
                all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
                if all_zeros:
                    continue
                if self.clip_gradient is not None:
                    weight[row] = ((1 - lr*wd)*weight[row] -
                        lr*mx.nd.clip(grad[row]*self.rescale_grad,
                                      -self.clip_gradient, self.clip_gradient))
                else:
                    weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]
        else:
            # Momentum variant: same row-skipping, momentum kept per row.
            mom = state
            for row in range(num_rows):
                grad_row = grad[row].asnumpy()
                all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
                if all_zeros:
                    continue
                if self.clip_gradient is not None:
                    mom[row] = (self.momentum*mom[row] - lr*wd*weight[row] -
                        lr*mx.nd.clip(grad[row]*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
                    weight[row] += mom[row]
                else:
                    mom[row] = self.momentum*mom[row] - lr*wd*weight[row] - lr*self.rescale_grad*grad[row]
                    weight[row] += mom[row]
@with_seed()
def test_sparse_sgd():
    """Lazy (row_sparse) SGD must agree with the per-row python reference."""
    opt1 = PySparseSGD
    opt2 = mx.optimizer.SGD
    shape = (3, 4, 5)
    mom_options = [{}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
    for dtype in [np.float32]:
        for options in itertools.product(mom_options, cg_options, rg_options,
                                         wd_options, mp_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            # Sparse weight + sparse grad, then dense weight + sparse grad.
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
                              w_stype='row_sparse', g_stype='row_sparse')
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
                              w_stype='default', g_stype='row_sparse')
@with_seed()
def test_std_sparse_sgd():
    """Non-lazy SGD with sparse gradients must match the dense reference."""
    opt1 = PySGD
    opt2 = mx.optimizer.SGD
    shape = (3, 4, 5)
    mom_options = [{'momentum': 0.0}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    for dtype in [np.float32]:
        for options in itertools.product(mom_options, cg_options, rg_options, wd_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            # lazy_update=False forces the standard (dense-equivalent) update.
            compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape, dtype,
                              w_stype='row_sparse', g_stype='row_sparse')
            compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape, dtype,
                              w_stype='default', g_stype='row_sparse')
class PyNAG(PySGD):
    """Python reference implementation of Nesterov accelerated SGD; inherits
    hyper-parameters (momentum, multi_precision) from PySGD.

    NOTE(review): unlike PySGD, the multi-precision state tuple here is
    ordered (weight_master_copy, momentum).
    """
    def __init__(self, **kwargs):
        super(PyNAG, self).__init__(**kwargs)

    def create_state(self, index, weight):
        """Create additional optimizer state: momentum

        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        momentum = None
        weight_master_copy = None
        do_multi_precision = self.multi_precision and weight.dtype == np.float16
        if do_multi_precision:
            if self.momentum != 0.0:
                momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)
            # `array` comes from the `mxnet.test_utils` star import.
            weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)
            return (weight_master_copy, momentum)
        else:
            if self.momentum != 0.0:
                momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
            return momentum

    def create_state_multi_precision(self, index, weight):
        return self.create_state(index, weight)

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)
        use_multi_precision = isinstance(state, list) or isinstance(state, tuple)
        if not use_multi_precision:
            # NOTE: `grad * self.rescale_grad` makes a copy, so the in-place
            # updates below never touch the caller's gradient array.
            grad = grad * self.rescale_grad
            if self.clip_gradient is not None:
                grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
            if self.momentum == 0.0:
                weight[:] += -lr * (grad + wd * weight)
            else:
                # Nesterov step: advance the momentum buffer first, then step
                # with the momentum look-ahead folded into the gradient.
                mom = state
                mom[:] *= self.momentum
                grad += wd * weight
                mom[:] += grad
                grad[:] += self.momentum * mom
                weight[:] += -lr * grad
        else:
            # Multi-precision path: same recursion on the float32 master copy.
            grad32 = array(grad, ctx=grad.context, dtype=np.float32)
            grad32 = grad32 * self.rescale_grad
            if self.clip_gradient is not None:
                grad32 = mx.nd.clip(grad32, -self.clip_gradient, self.clip_gradient)
            mom = state[1]
            weight32 = state[0]
            if self.momentum == 0.0:
                weight32[:] += -lr * (grad32 + wd * weight32)
            else:
                mom[:] *= self.momentum
                grad32 += wd * weight32
                mom[:] += grad32
                grad32[:] += self.momentum * mom
                weight32[:] += -lr * grad32
            tmp = weight32.astype(weight.dtype)
            tmp.copyto(weight)
@with_seed()
def test_nag():
    """NAG must agree with the python reference implementation."""
    opt1 = PyNAG
    opt2 = mx.optimizer.NAG
    shape = (3, 4, 5)
    mom_options = [{}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
    for dtype in [np.float16, np.float32, np.float64]:
        for options in itertools.product(mom_options, cg_options, rg_options,
                                         wd_options, mp_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            # float16 is only exercised together with a float32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
# FTML
class PyFTML(mx.optimizer.Optimizer):
    """Python reference implementation of FTML (Follow The Moving Leader),
    used to validate mx.optimizer.FTML.

    Parameters
    ----------
    beta1 : float
        Decay rate for the z / sigma recursions.
    beta2 : float
        Decay rate for the second-moment estimate v.
    epsilon : float
        Small constant for numerical stability.
    """
    def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):
        super(PyFTML, self).__init__(**kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def create_state(self, index, weight):
        # State triple (d_0, v_0, z_0), all zero-initialized.
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0

    def update(self, index, weight, grad, state):
        assert(isinstance(weight, mx.nd. NDArray))
        assert(isinstance(grad, mx.nd.NDArray))
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        t = self._index_update_count[index]
        # Weight decay is folded into the gradient before the FTML recursion.
        grad = grad * self.rescale_grad + wd * weight
        if self.clip_gradient is not None:
            grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
        # get previous states
        prev_d, prev_v, prev_z = state
        # compute states (bias-corrected second moment inside d_t)
        v_t = self.beta2 * prev_v + (1 - self.beta2) * mx.nd.square(grad)
        d_t = (1 - pow(self.beta1, t)) / lr * (mx.nd.sqrt(v_t / (1 - pow(self.beta2, t))) + self.epsilon)
        sigma_t = d_t - self.beta1 * prev_d
        z_t = self.beta1 * prev_z + (1 - self.beta1) * grad - sigma_t * weight
        # update weight
        weight[:] = - z_t / d_t
        # update states
        prev_d[:] = d_t
        prev_v[:] = v_t
        prev_z[:] = z_t
@with_seed()
def test_ftml():
    """FTML must agree with the python reference implementation."""
    opt1 = PyFTML
    opt2 = mx.optimizer.FTML
    shape = (3, 4, 5)
    beta1_options = [{}, {'beta1': 0.5}, {'beta1': 0.7}]
    beta2_options = [{}, {'beta2': 0.8}, {'beta2': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    for dtype in [np.float32]:
        for options in itertools.product(beta1_options, beta2_options, cg_options,
                                         rg_options, wd_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, rtol=1e-3, atol=1e-4)
# ADAM
class PyAdam(mx.optimizer.Optimizer):
    """Python reference implementation of Adam with optional row-wise lazy
    (sparse) updates, used to validate mx.optimizer.Adam."""
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 decay_factor=(1 - 1e-8), lazy_update=True, **kwargs):
        super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.decay_factor = decay_factor
        self.lazy_update = lazy_update

    def create_state(self, index, weight):
        """Create additional optimizer state: mean, variance

        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype),  # mean
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype))  # variance

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        lr = self._get_lr(index)
        self._update_count(index)
        t = self._index_update_count[index]
        mean, variance = state
        wd = self._get_wd(index)
        num_rows = weight.shape[0]
        # Fold the bias corrections for mean/variance into the learning rate.
        coef1 = 1. - self.beta1**t
        coef2 = 1. - self.beta2**t
        lr *= math.sqrt(coef2)/coef1
        for row in range(num_rows):
            # check row slices of all zeros
            all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
            # skip zeros during lazy update
            if all_zeros and self.lazy_update:
                continue
            grad[row] = grad[row] * self.rescale_grad + wd * weight[row]
            # clip gradients
            if self.clip_gradient is not None:
                mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
            # update mean
            mean[row] *= self.beta1
            mean[row] += grad[row] * (1. - self.beta1)
            # update variance (note: grad[row] is overwritten with its square via out=)
            variance[row] *= self.beta2
            variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])
            # update weight
            weight[row] -= lr*mean[row]/(mx.nd.sqrt(variance[row]) + self.epsilon)
@with_seed()
def test_adam():
    """Adam (dense and row_sparse, lazy and standard) must match the reference."""
    opt1 = PyAdam
    opt2 = mx.optimizer.Adam
    shape = (3, 4, 5)
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
    for dtype in [np.float16, np.float32, np.float64]:
        for options in itertools.product(cg_options, rg_options, wd_options, mp_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            # float16 is only exercised together with a float32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            # atol 2e-5 needed to pass with seed 1248389097
            compare_optimizer(opt1(lazy_update=False, **kwarg), opt2(**kwarg), shape, dtype,
                              rtol=1e-4, atol=2e-5)
            # atol 2e-5 needed to pass with seed 781809840
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape,
                              dtype, w_stype='row_sparse', g_stype='row_sparse',
                              rtol=1e-4, atol=2e-5)
            compare_optimizer(opt1(lazy_update=False, **kwarg), opt2(lazy_update=False, **kwarg), shape,
                              dtype, w_stype='row_sparse', g_stype='row_sparse',
                              rtol=1e-4, atol=2e-5)
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape,
                              dtype, w_stype='default', g_stype='row_sparse',
                              rtol=1e-4, atol=2e-5)
            compare_optimizer(opt1(lazy_update=False, **kwarg), opt2(lazy_update=False, **kwarg), shape,
                              dtype, w_stype='default', g_stype='row_sparse',
                              rtol=1e-4, atol=2e-5)
# AdaMax
class PyAdamax(mx.optimizer.Optimizer):
    """The python reference of AdaMax optimizer.

    This class implements the AdaMax optimizer, one variant of Adam based on the infinity norm,
    available at http://arxiv.org/abs/1412.6980 Section 7.

    The optimizer updates the weight by::
        grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
        m = beta1 * m_t + (1 - beta1) * grad
        u = maximum(beta2 * u, abs(grad))
        weight -= lr / (1 - beta1**t) * m / u

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    beta1 : float, optional
        Exponential decay rate for the first moment estimates.
    beta2 : float, optional
        Exponential decay rate for the second moment estimates.
    """
    def __init__(self, learning_rate=0.002, beta1=0.9, beta2=0.999, **kwargs):
        super(PyAdamax, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1
        self.beta2 = beta2

    def create_state(self, index, weight):
        # State pair: first moment m_t and infinity-norm accumulator u_t.
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype),  # mean
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype))  # variance

    def update(self, index, weight, grad, state):
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        t = self._index_update_count[index]
        # Fold the first-moment bias correction into the learning rate.
        lr /= (1. - self.beta1**t)
        # preprocess grad
        grad = grad * self.rescale_grad + wd * weight
        if self.clip_gradient is not None:
            grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
        # update m_t and u_t
        m_t, u_t = state
        m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
        u_t[:] = mx.nd.maximum(self.beta2 * u_t, mx.nd.abs(grad))
        # update weight
        weight[:] -= lr * m_t / u_t
@with_seed()
def test_adamax():
    """AdaMax must agree with the python reference implementation."""
    opt1 = PyAdamax
    opt2 = mx.optimizer.Adamax
    shape = (3, 4, 5)
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
    for dtype in [np.float16, np.float32, np.float64]:
        for cg_option, rg_option, wd_option, mp_option in itertools.product(
                cg_options, rg_options, wd_options, mp_options):
            kwarg = {}
            kwarg.update(cg_option)
            kwarg.update(rg_option)
            kwarg.update(wd_option)
            kwarg.update(mp_option)
            # float16 is only exercised together with a float32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
# Signum
class PySignum(mx.optimizer.Optimizer):
    """The python reference of Signum optimizer.

    The optimizer updates the weight by:
        rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight
        state = momentum * state + (1-momentum)*rescaled_grad
        weight = (1 - lr * wd_lh) * weight - lr * sign(state)

    See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf

    For details of the update algorithm see
    :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    momentum : float, optional
       The momentum value.
    wd_lh : float, optitional
       The amount of decoupled weight decay regularization.
    """
    def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh = 0.0, **kwargs):
        super(PySignum, self).__init__(learning_rate = learning_rate, **kwargs)
        self.momentum = momentum
        self.wd_lh = wd_lh

    def create_state(self, index, weight):
        # Momentum buffer is only allocated when momentum is enabled
        # (otherwise the SignSGD branch of update() is taken).
        momentum = None
        if self.momentum != 0.0:
            momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
        return momentum

    def update(self, index, weight, grad, state):
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        if state is not None:
            # Signum: step by the sign of the momentum buffer; wd_lh applies
            # decoupled weight decay directly to the weight.
            mom = state
            if self.clip_gradient is not None:
                mom[:] = (self.momentum*mom - (1-self.momentum)*(wd*weight +
                    mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient)))
            else:
                mom[:] = self.momentum*mom - (1-self.momentum)*wd*weight - (1-self.momentum)*self.rescale_grad*grad
            weight[:] = (1 - lr*self.wd_lh)*weight + lr*mx.nd.sign(mom)
        else:
            # SignSGD (no momentum): step by the sign of the raw gradient.
            weight[:] = (1 - lr*(wd+self.wd_lh))*weight - lr*mx.nd.sign(grad)
@with_seed()
def test_signum():
    """Signum/SignSGD must agree with the python reference implementation."""
    opt1 = PySignum
    opt2 = mx.optimizer.Signum
    shape = (3, 4, 5)
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    wd_lh_options = [{}, {'wd_lh': 0.015}, {'wd_lh': 0.0}]
    mom_options = [{}, {'momentum': 0.9}]
    lr_options = [{'learning_rate': 0.05}, {'learning_rate': 0.01}]
    for dtype in [np.float32, np.float64]:
        for options in itertools.product(cg_options, rg_options, wd_options,
                                         wd_lh_options, lr_options, mom_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
# RMSProp
class PyRMSProp(mx.optimizer.Optimizer):
    """RMSProp optimizer of Tieleman & Hinton, 2012,

    For centered=False, the code follows the version in
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
    Tieleman & Hinton, 2012

    For centered=True, the code follows the version in
    http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.

    Parameters
    ----------
    learning_rate : float, optional
        Step size.
        Default value is set to 0.001.
    gamma1: float, optional
        decay factor of moving average for gradient, gradient^2.
        Default value is set to 0.9.
    gamma2: float, optional
        "momentum" factor.
        Default value if set to 0.9.
        Only used if centered=True
    epsilon : float, optional
        Default value is set to 1e-8.
    centered : boolean, optional
        Use Graves or Tielemans & Hintons version of RMSProp
    wd : float, optional
        L2 regularization coefficient add to all the weights
    rescale_grad : float, optional
        rescaling factor of gradient.
    clip_gradient : float, optional
        clip gradient in range [-clip_gradient, clip_gradient]
    clip_weights : float, optional
        clip weights in range [-clip_weights, clip_weights]
    """
    def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
                 epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
        super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
        self.centered = centered
        self.gamma1 = gamma1
        self.gamma2 = gamma2
        self.epsilon = epsilon
        self.clip_weights = clip_weights

    def create_state(self, index, weight):
        """Create additional optimizer state.

        For centered=False: n
        For centered=True: n, g, delta

        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        if self.centered:
            return (mx.nd.zeros(weight.shape, weight.context),  # n
                    mx.nd.zeros(weight.shape, weight.context),  # g
                    mx.nd.zeros(weight.shape, weight.context))  # delta
        else:
            return (mx.nd.zeros(weight.shape, weight.context), )  # n

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)
        grad = grad * self.rescale_grad + wd * weight
        if not self.centered:
            # Plain RMSProp: n is the running mean of squared gradients.
            (n, ) = state
            if self.clip_gradient is not None:
                grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
            n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
            weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))
        else:
            # Centered variant (Graves 2013): g tracks the mean gradient so
            # n - g*g estimates the variance; delta adds momentum.
            n, g, delta = state
            if self.clip_gradient is not None:
                grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
            n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
            g[:] = (1 - self.gamma1) * grad + self.gamma1 * g
            delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))
            weight[:] += delta
        if self.clip_weights:
            mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)
@with_seed()
def test_rms():
    """RMSProp (plain and centered) must agree with the python reference."""
    opt1 = PyRMSProp
    opt2 = mx.optimizer.RMSProp
    shape = (3, 4, 5)
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    cw_options = [{}, {'clip_weights': 0.01}]
    center_options = [{}, {'centered': False}, {'centered': True}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
    for dtype in [np.float16, np.float32]:
        # Reduce foating point compare tolerance to avoid flaky test failure.
        rtol, atol = (1e-1, 1e-1) if dtype is np.float16 else (1e-2, 1e-2)
        for options in itertools.product(cw_options, cg_options, center_options,
                                         rg_options, wd_options, mp_options):
            kwarg = {k: v for opt in options for k, v in opt.items()}
            # float16 is only exercised together with a float32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, rtol=rtol, atol=atol)
            if (default_context() == mx.cpu()):
                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse', rtol=rtol, atol=atol)
class PyFtrl(mx.optimizer.Optimizer):
    """The Ftrl optimizer.

    Referenced from *Ad Click Prediction: a View from the Trenches*, available at
    http://dl.acm.org/citation.cfm?id=2488200.

    Parameters
    ----------
    lamda1 : float, optional
        L1 regularization coefficient.
    learning_rate : float, optional
        The initial learning rate.
    beta : float, optional
        Per-coordinate learning rate correlation parameter.
    eta :
        .. math::
           \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^t}}
    """
    def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, lazy_update=False, **kwargs):
        super(PyFtrl, self).__init__(**kwargs)
        self.lamda1 = lamda1
        self.beta = beta
        self.lr = learning_rate
        self.lazy_update = lazy_update

    def create_state(self, index, weight):
        # State pair: the z-accumulator dn and squared-gradient sum n.
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype),  # dn
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype))  # n

    def update(self, index, weight, grad, state):
        self._update_count(index)
        wd = self._get_wd(index)
        lr = self._get_lr(index)
        num_rows = weight.shape[0]

        dn, n = state
        for row in range(num_rows):
            # Lazy update: rows with an all-zero gradient are left untouched.
            all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
            if all_zeros and self.lazy_update:
                continue
            grad[row] = grad[row] * self.rescale_grad
            if self.clip_gradient is not None:
                mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])

            #update dn, n
            dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr
            n[row] += grad[row] * grad[row]

            # update weight: closed-form FTRL-proximal solution; coordinates
            # with |dn| <= lamda1 are snapped to zero (L1 sparsity).
            weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
                          ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)
@with_seed()
def test_ftrl():
    """FTRL (dense and lazy row_sparse) must agree with the python reference."""
    opt1 = PyFtrl
    opt2 = mx.optimizer.Ftrl
    shape = (3, 4, 5)
    test_cases = [{},
                  {'clip_gradient': 0.5},
                  {'clip_gradient': 0.4, 'rescale_grad': 0.14},
                  {'rescale_grad': 0.8},
                  {'clip_gradient': 0.5, 'wd': 0.07},
                  {'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},
                  {'rescale_grad': 0.8, 'wd': 0.05},
                  {'rescale_grad': 0.8, 'wd': 0.05, 'lamda1': 0.01},
                  {'clip_gradient': 0.5, 'wd': 0.07, 'lamda1': 1.0}]
    for kwarg in test_cases:
        compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)
        # The sparse path is checked against the lazy variant of the reference.
        compare_optimizer(opt1(lazy_update=True, **kwarg), opt2(**kwarg), shape,
                          np.float32, w_stype='row_sparse', g_stype='row_sparse')
@with_seed()
def test_nadam():
    """Smoke-train a small MLP with the 'nadam' optimizer and bound the L1 loss."""
    def build_mlp(num_hidden, flatten=True):
        # Three fully connected layers with ReLU activations in between.
        sym = mx.symbol.Variable('data')
        sym = mx.symbol.FullyConnected(sym, name='fc1', num_hidden=128, flatten=flatten)
        sym = mx.symbol.Activation(sym, name='relu1', act_type="relu")
        sym = mx.symbol.FullyConnected(sym, name='fc2', num_hidden=64, flatten=flatten)
        sym = mx.symbol.Activation(sym, name='relu2', act_type="relu")
        return mx.symbol.FullyConnected(sym, name='fc3', num_hidden=num_hidden, flatten=flatten)

    num_samples = 20
    data = mx.random.uniform(-1, 1, shape=(num_samples, 10))
    label = mx.random.uniform(-1, 1, shape=(num_samples, 1))
    data_iter = mx.io.NDArrayIter(data, label, batch_size=5, label_name='label', shuffle=True)

    output = build_mlp(1)
    label_sym = mx.symbol.Variable('label')
    loss = mx.sym.make_loss(gluon.loss.L1Loss()(output, label_sym))
    mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
    mod.fit(data_iter, num_epoch=60, optimizer_params={'learning_rate': 0.001, 'wd': 0.0005},
            initializer=mx.init.Xavier(magnitude=2), eval_metric=mx.metric.Loss(),
            optimizer='nadam')
    assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.11
# AdaGrad
class PyAdaGrad(mx.optimizer.Optimizer):
    """The python reference of AdaGrad optimizer.

    This class implements the AdaGrad optimizer described in *Adaptive Subgradient
    Methods for Online Learning and Stochastic Optimization*, and available at
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.

    Updates are applied by::
        rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
        history = history + square(rescaled_grad)
        w = w - learning_rate * rescaled_grad / sqrt(history + epsilon)

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    eps: float, optional
        Small value to avoid division by 0.
    """
    def __init__(self, eps=1e-7, **kwargs):
        super(PyAdaGrad, self).__init__(**kwargs)
        self.float_stable_eps = eps

    def create_state(self, index, weight):
        # Per-coordinate accumulator of squared gradients ("history").
        return mx.nd.zeros(weight.shape, weight.context, stype=weight.stype)

    def update(self, index, weight, grad, state):
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)

        history = state
        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
        # Accumulate squared gradients to scale each coordinate's step.
        history[:] += mx.nd.square(grad)
        div = grad / mx.nd.sqrt(history + self.float_stable_eps)
        # NOTE(review): wd is applied outside the adaptive scaling here,
        # unlike the docstring's formula — the tests only use wd=0.0.
        weight[:] += (div + weight * wd) * -lr
@with_seed()
def test_adagrad():
    """Compare the Python AdaGrad reference against ``mx.optimizer.AdaGrad``.

    Rewritten with ``itertools.product`` for consistency with
    ``test_adadelta`` below; the option combinations and their iteration
    order are identical to the original four-deep nested loops.
    """
    opt1 = PyAdaGrad
    opt2 = mx.optimizer.AdaGrad
    shape = (3, 4, 5)
    eps_options = [{}, {'eps': 1e-8}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.0}]
    for dtype in [np.float32]:
        for params in itertools.product(eps_options, cg_options, rg_options,
                                        wd_options):
            kwarg = {k: v for param in params for k, v in param.items()}
            compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
            # Sparse storage is only exercised when weight decay is absent
            # (here 'wd' is either unset or explicitly 0.0, so this matches
            # the original ``wd_option.get('wd', 0.0) == 0.0`` check).
            if kwarg.get('wd', 0.0) == 0.0:
                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
                                  w_stype='row_sparse', g_stype='row_sparse')
                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
                                  g_stype='row_sparse')
# AdaDelta
class PyAdaDelta(mx.optimizer.Optimizer):
    """The python reference of AdaDelta optimizer.
    This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive
    learning rate method*, available at https://arxiv.org/abs/1212.5701.
    This optimizer updates each weight by::
        grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
        acc_grad = rho * acc_grad + (1. - rho) * grad ** 2
        cur_delta = sqrt(acc_delta + epsilon) / sqrt(acc_grad + epsilon) * grad
        acc_delta = rho * acc_delta + (1. - rho) * cur_delta ** 2
        weight -= (cur_delta + wd * weight)
    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.
    Parameters
    ----------
    rho: float
        Decay rate for both squared gradients and delta.
    epsilon : float
        Small value to avoid division by 0.
    """
    def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
        super(PyAdaDelta, self).__init__(**kwargs)
        self.rho = rho
        self.epsilon = epsilon
    def create_state(self, index, weight):
        # Two accumulators per weight: (acc_grad, acc_delta), both dense.
        return (mx.nd.zeros(weight.shape, weight.context),
                mx.nd.zeros(weight.shape, weight.context))
    def update(self, index, weight, grad, state):
        self._update_count(index)
        wd = self._get_wd(index)
        # In-place rescale of the incoming gradient NDArray.
        grad *= self.rescale_grad
        if self.clip_gradient is not None:
            grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
        # Unpack the accumulators; both are mutated in place ([:] =) so the
        # caller's state tuple stays current across calls.
        acc_grad, acc_delta = state
        acc_grad[:] = self.rho * acc_grad + (1. - self.rho) * grad ** 2
        current_delta = (mx.nd.sqrt(acc_delta + self.epsilon) /
                         mx.nd.sqrt(acc_grad + self.epsilon)) * grad
        acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta ** 2
        # update weight
        weight[:] -= current_delta + wd * weight
@with_seed()
def test_adadelta():
    """Compare the Python AdaDelta reference against ``mx.optimizer.AdaDelta``."""
    py_opt = PyAdaDelta
    mx_opt = mx.optimizer.AdaDelta
    shape = (3, 4, 5)
    option_groups = (
        [{'rho': 0.9}],
        [{}, {'epsilon': 1e-8}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.0}],
    )
    for dtype in (np.float16, np.float32):
        for combo in itertools.product(*option_groups):
            kwarg = {}
            for opts in combo:
                kwarg.update(opts)
            if dtype is np.float16:
                kwarg['multi_precision'] = True
            compare_optimizer(py_opt(**kwarg), mx_opt(**kwarg), shape, dtype)
def test_factor_scheduler():
    """FactorScheduler: constant warmup, then lr *= factor every ``step`` updates."""
    base_lr, step, factor = 1, 100, 0.1
    sched = mx.lr_scheduler.FactorScheduler(step, factor, stop_factor_lr=1e-4, base_lr=base_lr,
                                            warmup_steps=20, warmup_begin_lr=0.1, warmup_mode='constant')
    # Constant warmup holds the lr at warmup_begin_lr for the first 20 updates.
    assert sched(0) == 0.1
    np.testing.assert_almost_equal(sched(10), 0.1)
    assert sched(21) == base_lr, sched(21)
    # After warmup the lr decays by ``factor`` each 100 updates, floored at 1e-4.
    for num_update, expected in ((101, base_lr * factor),
                                 (201, base_lr * factor * factor),
                                 (1000, 1e-4)):
        np.testing.assert_almost_equal(sched(num_update), expected)
def test_multifactor_scheduler():
    """MultiFactorScheduler: linear warmup, then a drop at each step boundary."""
    warmup_begin, base_lr, factor = 0.05, 0.1, 0.1
    sched = mx.lr_scheduler.MultiFactorScheduler([15, 25], factor, base_lr=base_lr,
                                                 warmup_steps=10,
                                                 warmup_begin_lr=warmup_begin,
                                                 warmup_mode='linear')
    # Warmup ramps linearly from warmup_begin to base_lr over 10 updates.
    assert sched(0) == warmup_begin
    np.testing.assert_almost_equal(sched(5), warmup_begin + (base_lr - warmup_begin) / 2)
    # lr is multiplied by ``factor`` after each boundary in [15, 25].
    for num_update, expected in ((15, base_lr),
                                 (16, base_lr * factor),
                                 (20, base_lr * factor),
                                 (26, base_lr * factor * factor),
                                 (100, base_lr * factor * factor)):
        np.testing.assert_almost_equal(sched(num_update), expected)
def test_poly_scheduler():
    """PolyScheduler: linear warmup, then polynomial (pwr=2) decay to final_lr."""
    base_lr, final_lr, total_steps = 3, 0, 1000
    poly_sched = mx.lr_scheduler.PolyScheduler(total_steps, base_lr=base_lr, pwr=2,
                                               final_lr=final_lr, warmup_steps=100,
                                               warmup_begin_lr=0, warmup_mode='linear')
    # Linear warmup from 0 up to base_lr over the first 100 updates.
    np.testing.assert_almost_equal(poly_sched(0), 0)
    np.testing.assert_almost_equal(poly_sched(50), float(base_lr) / 2)
    np.testing.assert_almost_equal(poly_sched(100), base_lr)
    # Decay is strictly decreasing after warmup and reaches final_lr at the end.
    assert poly_sched(101) < poly_sched(100)
    assert poly_sched(500) < 1.6
    np.testing.assert_almost_equal(poly_sched(total_steps), final_lr)
def test_cosine_scheduler():
    """CosineScheduler without warmup: decays from base_lr down to final_lr."""
    max_steps = 1000
    start_lr, end_lr = 3, 0.1
    cosine_sched = mx.lr_scheduler.CosineScheduler(max_steps, base_lr=start_lr,
                                                   final_lr=end_lr)
    np.testing.assert_almost_equal(cosine_sched(0), start_lr)
    np.testing.assert_almost_equal(cosine_sched(max_steps), end_lr)
    # Half-way through the schedule the lr sits near the midpoint (~1.55).
    assert cosine_sched(500) > 1.5
if __name__ == '__main__':
    # Allow running this test module directly; nose discovers and runs the
    # test_* functions defined above.
    import nose
    nose.runmodule()
| 40.897266 | 141 | 0.566376 |
acf24f2af10177e5404cfea56f5b778ea13e7cd1 | 7,245 | py | Python | mayan/apps/documents/tests/test_document_page_views.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | null | null | null | mayan/apps/documents/tests/test_document_page_views.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | null | null | null | mayan/apps/documents/tests/test_document_page_views.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | 1 | 2021-04-30T09:44:14.000Z | 2021-04-30T09:44:14.000Z | from django.utils.encoding import force_text
from ..permissions import (
permission_document_edit, permission_document_view
)
from .base import GenericDocumentViewTestCase
from .mixins import (
DocumentPageDisableViewTestMixin, DocumentPageViewTestMixin
)
class DocumentPageDisableViewTestCase(
    DocumentPageDisableViewTestMixin, GenericDocumentViewTestCase
):
    """Access-control tests for the page disable/enable views.

    Each test captures ``pages_valid.count()`` before the request, then
    asserts: without access the view returns 404 and the count is unchanged;
    with ``permission_document_edit`` granted it redirects (302) and the
    count changes.  The ``_request_*``/``_disable_*`` helpers come from
    DocumentPageDisableViewTestMixin.
    """
    def test_document_page_disable_view_no_permission(self):
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_disable_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_disable_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_edit
        )
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_disable_view()
        self.assertEqual(response.status_code, 302)
        self.assertNotEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_multiple_disable_view_no_permission(self):
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_multiple_disable_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_multiple_disable_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_edit
        )
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_multiple_disable_view()
        self.assertEqual(response.status_code, 302)
        self.assertNotEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_enable_view_no_permission(self):
        # Enable tests first disable a page so there is something to enable.
        self._disable_test_document_page()
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_enable_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_enable_view_with_access(self):
        self._disable_test_document_page()
        self.grant_access(
            obj=self.test_document, permission=permission_document_edit
        )
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_enable_view()
        self.assertEqual(response.status_code, 302)
        self.assertNotEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_multiple_enable_view_no_permission(self):
        self._disable_test_document_page()
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_multiple_enable_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
    def test_document_page_multiple_enable_view_with_access(self):
        self._disable_test_document_page()
        self.grant_access(
            obj=self.test_document, permission=permission_document_edit
        )
        test_document_page_count = self.test_document.pages_valid.count()
        response = self._request_test_document_page_multiple_enable_view()
        self.assertEqual(response.status_code, 302)
        self.assertNotEqual(
            test_document_page_count, self.test_document.pages_valid.count()
        )
class DocumentPageViewTestCase(
    DocumentPageViewTestMixin, GenericDocumentViewTestCase
):
    """Access-control tests for read-only page views (list, view, rotate, zoom).

    Pattern: without access the view returns 404; with
    ``permission_document_view`` granted it returns 200 (content views) or
    302 (transformation views such as rotate/zoom).  The ``_request_*``
    helpers come from DocumentPageViewTestMixin.
    """
    def test_document_page_list_view_no_permission(self):
        response = self._request_test_document_page_list_view()
        self.assertEqual(response.status_code, 404)
    def test_document_page_list_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_page_list_view()
        self.assertContains(
            response=response, status_code=200, text=self.test_document.label
        )
    def test_document_page_rotate_left_view_no_permission(self):
        response = self._request_test_document_page_rotate_left_view()
        self.assertEqual(response.status_code, 404)
    def test_document_page_rotate_left_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_page_rotate_left_view()
        self.assertEqual(response.status_code, 302)
    def test_document_page_rotate_right_view_no_permission(self):
        response = self._request_test_document_page_rotate_right_view()
        self.assertEqual(response.status_code, 404)
    def test_document_page_rotate_right_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_page_rotate_right_view()
        self.assertEqual(response.status_code, 302)
    def test_document_page_view_no_permission(self):
        response = self._request_test_document_page_view(
            document_page=self.test_document.pages.first()
        )
        self.assertEqual(response.status_code, 404)
    def test_document_page_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_page_view(
            document_page=self.test_document.pages.first()
        )
        # The rendered page must contain the page's string representation.
        self.assertContains(
            response=response, status_code=200, text=force_text(
                s=self.test_document.pages.first()
            )
        )
    def test_document_page_zoom_in_view_no_permission(self):
        response = self._request_test_document_page_zoom_in_view()
        self.assertEqual(response.status_code, 404)
    def test_document_page_zoom_in_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_page_zoom_in_view()
        self.assertEqual(response.status_code, 302)
    def test_document_page_zoom_out_view_no_permission(self):
        response = self._request_test_document_page_zoom_out_view()
        self.assertEqual(response.status_code, 404)
    def test_document_page_zoom_out_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_page_zoom_out_view()
        self.assertEqual(response.status_code, 302)
| 35.866337 | 77 | 0.730987 |
acf24f515346785bfd465bc2109433cbcb03b3b1 | 22,349 | py | Python | bc/recruitment/migrations/0042_heading_subheading_helptext.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-02-27T07:27:17.000Z | 2021-02-27T07:27:17.000Z | bc/recruitment/migrations/0042_heading_subheading_helptext.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | null | null | null | bc/recruitment/migrations/0042_heading_subheading_helptext.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-06-09T15:56:54.000Z | 2021-06-09T15:56:54.000Z | # Generated by Django 2.2.13 on 2020-12-02 09:02
import bc.utils.blocks
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
("recruitment", "0041_auto_20201130_1714"),
]
operations = [
migrations.AlterField(
model_name="recruitmentindexpage",
name="body",
field=wagtail.core.fields.StreamField(
[
(
"heading",
wagtail.core.blocks.CharBlock(
classname="full title",
group="Heading",
help_text='The link to this heading uses the heading text in lowercase, with no symbols, and with the spaces replaced with hyphens. e.g. "Lorem @ 2 ipsum" becomes "lorem-2-ipsum"',
icon="title",
label="Main heading",
template="patterns/molecules/streamfield/blocks/heading_block.html",
),
),
(
"subheading",
wagtail.core.blocks.CharBlock(
classname="full title",
group="Heading",
help_text='The link to this subheading uses the subheading text in lowercase, with no symbols, and with the spaces replaced with hyphens. e.g. "Lorem @ 2 ipsum" becomes "lorem-2-ipsum"',
icon="title",
template="patterns/molecules/streamfield/blocks/subheading_block.html",
),
),
(
"paragraph",
wagtail.core.blocks.RichTextBlock(
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
("image", wagtail.images.blocks.ImageChooserBlock()),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Describe the information, not the picture. Leave blank if the image is purely decorative. Do not repeat captions or content already on the page.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
]
),
),
("embed", wagtail.embeds.blocks.EmbedBlock()),
(
"local_area_links",
wagtail.core.blocks.StructBlock(
[
(
"introduction",
wagtail.core.blocks.RichTextBlock(
default="<p>Select your local area for information:</p>",
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
),
),
(
"aylesbury_vale_url",
wagtail.core.blocks.URLBlock(
label="Aylesbury Vale URL", required=False
),
),
(
"chiltern_url",
wagtail.core.blocks.URLBlock(
label="Chiltern URL", required=False
),
),
(
"south_bucks_url",
wagtail.core.blocks.URLBlock(
label="South Bucks URL", required=False
),
),
(
"wycombe_url",
wagtail.core.blocks.URLBlock(
label="Wycombe URL", required=False
),
),
]
),
),
("table", wagtail.contrib.table_block.blocks.TableBlock()),
(
"button",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(classname="title"),
),
(
"link_url",
wagtail.core.blocks.URLBlock(required=False),
),
(
"link_page",
wagtail.core.blocks.PageChooserBlock(
required=False
),
),
]
),
),
("highlight", bc.utils.blocks.HighlightBlock()),
(
"accordion",
wagtail.core.blocks.StructBlock(
[
(
"items",
wagtail.core.blocks.ListBlock(
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
classname="full title",
icon="title",
label="Accordion title",
),
),
(
"content",
wagtail.core.blocks.StreamBlock(
[
(
"heading",
wagtail.core.blocks.CharBlock(
classname="full title",
group="Heading",
help_text='The link to this heading uses the heading text in lowercase, with no symbols, and with the spaces replaced with hyphens. e.g. "Lorem @ 2 ipsum" becomes "lorem-2-ipsum"',
icon="title",
label="Main heading",
template="patterns/molecules/streamfield/blocks/heading_block.html",
),
),
(
"subheading",
wagtail.core.blocks.CharBlock(
classname="full title",
group="Heading",
help_text='The link to this subheading uses the subheading text in lowercase, with no symbols, and with the spaces replaced with hyphens. e.g. "Lorem @ 2 ipsum" becomes "lorem-2-ipsum"',
icon="title",
template="patterns/molecules/streamfield/blocks/subheading_block.html",
),
),
(
"paragraph",
wagtail.core.blocks.RichTextBlock(
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"image",
wagtail.images.blocks.ImageChooserBlock(),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Describe the information, not the picture. Leave blank if the image is purely decorative. Do not repeat captions or content already on the page.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"embed",
wagtail.embeds.blocks.EmbedBlock(),
),
(
"local_area_links",
wagtail.core.blocks.StructBlock(
[
(
"introduction",
wagtail.core.blocks.RichTextBlock(
default="<p>Select your local area for information:</p>",
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
),
),
(
"aylesbury_vale_url",
wagtail.core.blocks.URLBlock(
label="Aylesbury Vale URL",
required=False,
),
),
(
"chiltern_url",
wagtail.core.blocks.URLBlock(
label="Chiltern URL",
required=False,
),
),
(
"south_bucks_url",
wagtail.core.blocks.URLBlock(
label="South Bucks URL",
required=False,
),
),
(
"wycombe_url",
wagtail.core.blocks.URLBlock(
label="Wycombe URL",
required=False,
),
),
]
),
),
(
"table",
wagtail.contrib.table_block.blocks.TableBlock(),
),
(
"button",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
classname="title"
),
),
(
"link_url",
wagtail.core.blocks.URLBlock(
required=False
),
),
(
"link_page",
wagtail.core.blocks.PageChooserBlock(
required=False
),
),
]
),
),
(
"highlight",
bc.utils.blocks.HighlightBlock(),
),
],
label="Accordion content",
),
),
]
),
label="Accordion items",
),
)
]
),
),
(
"detail",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
classname="full title",
icon="title",
label="Detail title",
),
),
(
"content",
wagtail.core.blocks.RichTextBlock(
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
label="Detail content",
),
),
]
),
),
],
blank=True,
),
),
]
| 62.602241 | 254 | 0.187883 |
acf24f6d3de53ddb564fa5ae7521fc80c70323dc | 570 | py | Python | python/flask/agent/__init__.py | nolmeunion/learn | 482f4b8a5de81375e5663ea0b065d25f2f7da4b6 | [
"MIT"
] | null | null | null | python/flask/agent/__init__.py | nolmeunion/learn | 482f4b8a5de81375e5663ea0b065d25f2f7da4b6 | [
"MIT"
] | null | null | null | python/flask/agent/__init__.py | nolmeunion/learn | 482f4b8a5de81375e5663ea0b065d25f2f7da4b6 | [
"MIT"
] | null | null | null | import os
from flask import Flask
def create_app(test_config=None):
    """Application factory: create and configure the Flask app.

    Parameters
    ----------
    test_config : dict, optional
        Configuration mapping applied instead of the instance ``config.py``
        file, e.g. during testing.

    Returns
    -------
    flask.Flask
        The configured application instance.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        # Bug fix: Flask reads the session signing key from 'SECRET_KEY';
        # the previous 'SECRETE_KEY' typo meant no secret key was ever set.
        SECRET_KEY='dev',
        # NOTE(review): 'flaskr.sqllite' looks like a typo for 'flaskr.sqlite',
        # but renaming it would change the on-disk database path, so it is
        # kept as-is — confirm before changing.
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqllite')
    )
    if test_config is None:
        # Load the instance config, if it exists, when not testing.
        app.config.from_pyfile('config.py', silent=True)
    else:
        # Load the test config that was passed in.
        app.config.from_mapping(test_config)
    # Ensure the instance folder exists; Flask does not create it itself.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    @app.route('/hello')
    def hello():
        return 'Hello, World!'
    return app
| 21.923077 | 66 | 0.64386 |
acf2504a4a9cfc8f4b4215da615e4f92b5d9b14c | 16,342 | py | Python | generated/intermediate/ansible-module-rest/azure_rm_azurefirewall_info.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | [
"MIT"
] | null | null | null | generated/intermediate/ansible-module-rest/azure_rm_azurefirewall_info.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | [
"MIT"
] | null | null | null | generated/intermediate/ansible-module-rest/azure_rm_azurefirewall_info.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_azurefirewall_info
version_added: '2.9'
short_description: Get AzureFirewall info.
description:
- Get info of AzureFirewall.
options:
resource_group:
description:
- The name of the resource group.
type: str
name:
description:
- Resource name.
type: str
id:
description:
- Resource ID.
type: str
type:
description:
- Resource type.
type: str
location:
description:
- Resource location.
type: str
application_rule_collections:
description:
- Collection of application rule collections used by Azure Firewall.
type: list
suboptions:
priority:
description:
- Priority of the application rule collection resource.
type: number
action:
description:
- The action type of a rule collection
type: dict
rules:
description:
- Collection of rules used by a application rule collection.
type: list
suboptions:
name:
description:
- Name of the application rule.
type: str
description:
description:
- Description of the rule.
type: str
source_addresses:
description:
- List of source IP addresses for this rule.
type: list
protocols:
description:
- Array of ApplicationRuleProtocols.
type: list
target_fqdns:
description:
- List of FQDNs for this rule.
type: list
fqdn_tags:
description:
- List of FQDN Tags for this rule.
type: list
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Gets name of the resource that is unique within a resource group.
This name can be used to access the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
nat_rule_collections:
description:
- Collection of NAT rule collections used by Azure Firewall.
type: list
suboptions:
priority:
description:
- Priority of the NAT rule collection resource.
type: number
action:
description:
- The action type of a NAT rule collection
type: dict
suboptions:
type:
description:
- The type of action.
type: str
rules:
description:
- Collection of rules used by a NAT rule collection.
type: list
suboptions:
name:
description:
- Name of the NAT rule.
type: str
description:
description:
- Description of the rule.
type: str
source_addresses:
description:
- List of source IP addresses for this rule.
type: list
destination_addresses:
description:
- List of destination IP addresses for this rule.
type: list
destination_ports:
description:
- List of destination ports.
type: list
protocols:
description:
- >-
Array of AzureFirewallNetworkRuleProtocols applicable to this
NAT rule.
type: list
translated_address:
description:
- The translated address for this NAT rule.
type: str
translated_port:
description:
- The translated port for this NAT rule.
type: str
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Gets name of the resource that is unique within a resource group.
This name can be used to access the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
network_rule_collections:
description:
- Collection of network rule collections used by Azure Firewall.
type: list
suboptions:
priority:
description:
- Priority of the network rule collection resource.
type: number
action:
description:
- The action type of a rule collection
type: dict
suboptions:
type:
description:
- The type of action.
type: str
rules:
description:
- Collection of rules used by a network rule collection.
type: list
suboptions:
name:
description:
- Name of the network rule.
type: str
description:
description:
- Description of the rule.
type: str
protocols:
description:
- Array of AzureFirewallNetworkRuleProtocols.
type: list
source_addresses:
description:
- List of source IP addresses for this rule.
type: list
destination_addresses:
description:
- List of destination IP addresses.
type: list
destination_ports:
description:
- List of destination ports.
type: list
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Gets name of the resource that is unique within a resource group.
This name can be used to access the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
ip_configurations:
description:
- IP configuration of the Azure Firewall resource.
type: list
suboptions:
private_ip_address:
description:
- >-
The Firewall Internal Load Balancer IP to be used as the next hop in
User Defined Routes.
type: str
id:
description:
- Resource ID.
type: str
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Name of the resource that is unique within a resource group. This
name can be used to access the resource.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
provisioning_state:
description:
- The provisioning state of the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: List all Azure Firewalls for a given subscription
azure_rm_azurefirewall_info: {}
- name: List all Azure Firewalls for a given resource group
azure_rm_azurefirewall_info:
resource_group: myResourceGroup
- name: Get Azure Firewall
azure_rm_azurefirewall_info:
resource_group: myResourceGroup
name: myAzureFirewall
'''
RETURN = '''
azure_firewalls:
description: >-
A list of dict results where the key is the name of the AzureFirewall and
the values are the facts for that AzureFirewall.
returned: always
type: complex
contains:
azurefirewall_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
location:
description:
- Resource location.
returned: always
type: str
sample: null
tags:
description:
- Resource tags.
returned: always
type: >-
unknown[DictionaryType
{"$id":"440","$type":"DictionaryType","valueType":{"$id":"441","$type":"PrimaryType","knownPrimaryType":"string","name":{"$id":"442","fixed":false,"raw":"String"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"443","fixed":false},"deprecated":false}]
sample: null
properties:
description:
- ''
returned: always
type: dict
sample: null
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
from msrestazure.azure_exceptions import CloudError
class AzureRMAzureFirewallsInfo(AzureRMModuleBase):
    """Info module: query Azure Firewall resources through the ARM REST API.

    exec_module() dispatches on the supplied arguments: a single firewall
    (resource_group + name), all firewalls in a resource group
    (resource_group only), or all firewalls in the subscription (neither).
    """
    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            )
        )
        # Module arguments, populated from kwargs in exec_module().
        self.resource_group = None
        self.name = None
        # Response fields mirrored as attributes by the generated code.
        self.id = None
        self.type = None
        self.location = None
        self.tags = None
        self.properties = None
        self.etag = None
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200]
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2018-11-01'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        super(AzureRMAzureFirewallsInfo, self).__init__(self.module_arg_spec, supports_tags=True)
    def exec_module(self, **kwargs):
        """Entry point: copy arguments onto self and dispatch to get/list/listall."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        if (self.resource_group is not None and
                self.name is not None):
            self.results['azure_firewalls'] = self.format_item(self.get())
        elif (self.resource_group is not None):
            self.results['azure_firewalls'] = self.format_item(self.list())
        else:
            self.results['azure_firewalls'] = [self.format_item(self.listall())]
        return self.results
    def get(self):
        """Fetch a single firewall by resource group and name."""
        url = ('/subscriptions/{0}'
               '/resourceGroups/{1}'
               '/providers/Microsoft.Network'
               '/azureFirewalls/{2}').format(self.subscription_id,
                                             self.resource_group,
                                             self.name)
        return self._send_get_request(url)
    def list(self):
        """List every firewall in the given resource group.

        Bug fix: the original also ran ``.replace('{{ azure_firewall_name }}',
        self.name)`` on this path where ``self.name`` is None, which raised
        TypeError (str.replace() requires a string replacement).
        """
        url = ('/subscriptions/{0}'
               '/resourceGroups/{1}'
               '/providers/Microsoft.Network'
               '/azureFirewalls').format(self.subscription_id,
                                         self.resource_group)
        return self._send_get_request(url)
    def listall(self):
        """List every firewall in the subscription.

        Bug fix: the original substituted ``self.resource_group`` and
        ``self.name`` into the URL even though both are None on this code
        path, raising TypeError before any request was sent.
        """
        url = ('/subscriptions/{0}'
               '/providers/Microsoft.Network'
               '/azureFirewalls').format(self.subscription_id)
        return self._send_get_request(url)
    def _send_get_request(self, url):
        """Issue a GET; return {'temp_item': parsed_json} or {} on CloudError."""
        self.url = url  # kept for parity with the generated code's attribute
        results = {}
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results['temp_item'] = json.loads(response.text)
            # self.log('Response : {0}'.format(response))
        except CloudError:
            # Bug fix: the message was an unexpanded code-generator template
            # ('@(Model.ModuleOperationNameUpper)').
            self.log('Could not get info for AzureFirewalls.')
        return results
    def format_item(self, item):
        # Bug fix: this was declared as ``def format_item(item)`` (missing
        # ``self``) while being invoked as ``self.format_item(...)``, which
        # raised TypeError at runtime.
        return item
def main():
    # Instantiating the module runs exec_module() via the AzureRMModuleBase
    # constructor, which also handles argument parsing and exit.
    AzureRMAzureFirewallsInfo()
if __name__ == '__main__':
    main()
| 31.306513 | 288 | 0.519337 |
acf251404648a9f91d2c73df50670291b3d40698 | 1,600 | py | Python | pyvisa-i3py-sim/version.py | Ecpy/pyvisa-i3py-sim | 7eb8b498b045f11eb0dae4475cbcb5c664fe1c4a | [
"BSD-3-Clause"
] | null | null | null | pyvisa-i3py-sim/version.py | Ecpy/pyvisa-i3py-sim | 7eb8b498b045f11eb0dae4475cbcb5c664fe1c4a | [
"BSD-3-Clause"
] | null | null | null | pyvisa-i3py-sim/version.py | Ecpy/pyvisa-i3py-sim | 7eb8b498b045f11eb0dae4475cbcb5c664fe1c4a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Version of the pyvisa-i3py-sim package.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from collections import namedtuple
# The major release number. Differences in the major number indicate
# possibly large differences in API.
MAJOR = 0
# The minor release number. Differences in the minor number indicate
# possibly small differences in the API, but these changes will come
# backwards compatibility support when possible. Minor releases are
# typically used for large feature additions.
MINOR = 1
# The micro release number. The micro release number is incremented
# for bug fix releases and small feature additions.
MICRO = 0
# The status indicate if this is a development or pre-release version
STATUS = 'a1'
#: A namedtuple of the version info for the current release.
version_info = namedtuple('version_info', 'major minor micro status')
version_info = version_info(MAJOR, MINOR, MICRO, STATUS)
# Remove everything but the 'version_info' from this module.
del namedtuple, MAJOR, MINOR, MICRO, STATUS
__version__ = ('{0}.{1}.{2}'.format(*version_info) if not version_info.status
else '{0}.{1}.{2}.{3}'.format(*version_info))
| 38.095238 | 79 | 0.67 |
acf2527572b897eb82716e55790ac28de1c0b1fd | 11,966 | py | Python | rhalphalib/parameter.py | jmduarte/rhalphalib | cbb5832d9feb84aac56747d187b0f170d00a5700 | [
"BSD-3-Clause"
] | 2 | 2020-01-09T05:47:36.000Z | 2021-03-25T14:16:37.000Z | rhalphalib/parameter.py | jmduarte/rhalphalib | cbb5832d9feb84aac56747d187b0f170d00a5700 | [
"BSD-3-Clause"
] | 11 | 2020-05-08T15:23:51.000Z | 2021-12-07T19:31:04.000Z | rhalphalib/parameter.py | jmduarte/rhalphalib | cbb5832d9feb84aac56747d187b0f170d00a5700 | [
"BSD-3-Clause"
] | 10 | 2019-04-11T13:43:53.000Z | 2022-02-07T01:18:27.000Z | import numbers
import warnings
import numpy as np
from .util import install_roofit_helpers
class Parameter(object):
def __init__(self, name, value):
self._name = name
self._value = value
self._hasPrior = False
self._intermediate = False
def __repr__(self):
return "<%s (%s) instance at 0x%x>" % (
self.__class__.__name__,
self._name,
id(self),
)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def value(self):
return self._value
@property
def intermediate(self):
'''
An intermediate parameter is one that should not be explicitly rendered.
The formula will be expanded recursively until it depends only on non-intermediate value.
Only DependentParameters can be intermediate, hence one can modify this flag for them.
'''
return self._intermediate
def hasPrior(self):
'''
True if the prior is not flat
'''
return self._hasPrior
@property
def combinePrior(self):
'''
By default assume param has no prior and we are just informing combine about it
'''
return 'flatParam'
def getDependents(self, rendering=False, deep=False):
return {self}
def formula(self):
return '{' + self._name + '}'
def renderRoofit(self, workspace):
raise NotImplementedError
def _binary_op(self, opinfo, other):
opname, op, right = opinfo
if isinstance(other, Parameter):
if right:
name = other.name + opname + self.name
out = DependentParameter(name, "{0}%s{1}" % op, other, self)
else:
name = self.name + opname + other.name
out = DependentParameter(name, "{0}%s{1}" % op, self, other)
out.intermediate = True
return out
elif isinstance(other, numbers.Number):
if right:
name = type(other).__name__ + opname + self.name
out = DependentParameter(name, "%r%s{0}" % (other, op), self)
else:
name = self.name + opname + type(other).__name__
out = DependentParameter(name, "{0}%s%r" % (op, other), self)
out.intermediate = True
return out
return NotImplemented
def __radd__(self, other):
return self._binary_op(('_add_', '+', True), other)
def __rsub__(self, other):
return self._binary_op(('_sub_', '-', True), other)
def __rmul__(self, other):
return self._binary_op(('_mul_', '*', True), other)
def __rtruediv__(self, other):
return self._binary_op(('_div_', '/', True), other)
def __rpow__(self, other):
return self._binary_op(('_pow_', '**', True), other)
def __add__(self, other):
return self._binary_op(('_add_', '+', False), other)
def __sub__(self, other):
return self._binary_op(('_sub_', '-', False), other)
def __mul__(self, other):
return self._binary_op(('_mul_', '*', False), other)
def __truediv__(self, other):
return self._binary_op(('_div_', '/', False), other)
def __pow__(self, other):
return self._binary_op(('_pow_', '**', False), other)
class IndependentParameter(Parameter):
DefaultRange = (-10, 10)
def __init__(self, name, value, lo=None, hi=None, constant=False):
super(IndependentParameter, self).__init__(name, value)
self._lo = lo if lo is not None else self.DefaultRange[0]
self._hi = hi if hi is not None else self.DefaultRange[1]
self._constant = constant
@Parameter.value.setter
def value(self, val):
self._value = val
@property
def lo(self):
return self._lo
@lo.setter
def lo(self, lo):
self._lo = lo
@property
def hi(self):
return self._hi
@hi.setter
def hi(self, hi):
self._hi = hi
@property
def constant(self):
return self._constant
@constant.setter
def constant(self, const):
self._constant = const
def renderRoofit(self, workspace):
import ROOT
install_roofit_helpers()
if workspace.var(self._name) == None: # noqa: E711
var = ROOT.RooRealVar(self._name, self._name, self._value, self._lo, self._hi)
var.setAttribute("Constant", self._constant)
workspace.add(var)
return workspace.var(self._name)
class NuisanceParameter(IndependentParameter):
def __init__(self, name, combinePrior, value=0, lo=None, hi=None):
'''
A nuisance parameter.
name: name of parameter
combinePrior: one of 'shape', 'shapeN', 'lnN', etc.
Render the prior somewhere else? Probably in Model because the prior needs
to be added at the RooSimultaneus level (I think)
Filtering the set of model parameters for these classes can collect needed priors.
'''
super(NuisanceParameter, self).__init__(name, value, lo, hi)
self._hasPrior = True
if combinePrior not in {'shape', 'shapeN', 'shapeU', 'lnN', 'lnU', 'gmM', 'trG', 'param'}:
raise ValueError("Unrecognized combine prior %s" % combinePrior)
self._prior = combinePrior
@property
def combinePrior(self):
return self._prior
class DependentParameter(Parameter):
def __init__(self, name, formula, *dependents):
'''
Create a dependent parameter
name: name of parameter
formula: a python format-string using only indices, e.g.
'{0} + sin({1})*{2}'
'''
super(DependentParameter, self).__init__(name, np.nan)
if not all(isinstance(d, Parameter) for d in dependents):
raise ValueError
# TODO: validate formula for allowed functions
self._formula = formula
self._dependents = dependents
@property
def value(self):
# TODO: value from rendering formula and eval() or numexpr or TFormula or ...
raise NotImplementedError
@Parameter.intermediate.setter
def intermediate(self, val):
self._intermediate = val
def getDependents(self, rendering=False, deep=False):
'''
Return a set of parameters that this parameter depends on, which will be rendered.
By default, this means all non-intermediate dependent parameters, recursively descending and stopping at
the first renderable parameter (i.e. either non-intermediate or an IndependentParameter)
If this parameter itself is renderable, we return a set of just this parameter.
If rendering=True, we pass through this parameter if it is renderable.
If deep=True, descend all the way to the IndependentParameters
'''
dependents = set()
if deep:
for p in self._dependents:
if isinstance(p, DependentParameter):
dependents.update(p.getDependents(deep=True))
else:
dependents.add(p)
return dependents
if not (self.intermediate or rendering):
return {self}
for p in self._dependents:
if p.intermediate:
dependents.update(p.getDependents())
else:
dependents.add(p)
return dependents
def formula(self, rendering=False):
if not (self.intermediate or rendering):
return "{" + self.name + "}"
return "(" + self._formula.format(*(p.formula() for p in self._dependents)) + ")"
def renderRoofit(self, workspace):
import ROOT
install_roofit_helpers()
if workspace.function(self._name) == None: # noqa: E711
if self.intermediate:
# This is a warning because we should make sure the name does not conflict as
# intermediate parameter names are often autogenerated and might not be unique/appropriate
warnings.warn("Rendering intermediate parameter: %r" % self, RuntimeWarning)
self.intermediate = False
rooVars = [v.renderRoofit(workspace) for v in self.getDependents(rendering=True)]
# Originally just passed the named variables to RooFormulaVar but it seems the TFormula class
# is more sensitive to variable names than is reasonable, so we reindex here
formula = self.formula(rendering=True).format(**{var.GetName(): '@%d' % i for i, var in enumerate(rooVars)})
var = ROOT.RooFormulaVar(self._name, self._name, formula, ROOT.RooArgList.fromiter(rooVars))
workspace.add(var)
return workspace.function(self._name)
class SmoothStep(DependentParameter):
def __init__(self, param):
if not isinstance(param, Parameter):
raise ValueError("Expected a Parameter instance, got %r" % param)
if param.intermediate:
raise ValueError("SmoothStep can only depend on a non-intermediate parameter")
super(SmoothStep, self).__init__(param.name + '_smoothstep', '{0}', param)
self.intermediate = False
@property
def value(self):
raise NotImplementedError
def formula(self, rendering=False):
return "{" + self.name + "}"
def renderRoofit(self, workspace):
import ROOT
install_roofit_helpers()
if workspace.function(self._name) == None: # noqa: E711
# Formula satisfies f(x<=-1) = 0, f(x>=1) = 1, f'(-1) = f'(1) = f''(-1) = f''(1) = 0
formula = "(((0.1875*x*x - 0.625)*x*x + 0.9375)*x + 0.5)*(x > -1)*(x < 1) + 1*(x >= 1)".replace("x", "@0")
rooVars = [v.renderRoofit(workspace) for v in self.getDependents(rendering=True)]
if len(rooVars) != 1:
raise RuntimeError("Unexpected number of parameters encountered while rendering SmoothStep")
var = ROOT.RooFormulaVar(self._name, self._name, formula, ROOT.RooArgList.fromiter(rooVars))
workspace.add(var)
return workspace.function(self._name)
class Observable(Parameter):
'''
A simple struct that holds the name of an observable (e.g. x axis of discriminator histogram) and its binning
The first sample attached to a channel will dictate how the rendering of the observable is done.
Subequent samples attached will be checked against the first, and if they match, their observable will be set
to the first samples' instance of this class.
'''
def __init__(self, name, binning):
super(Observable, self).__init__(name, np.nan)
self._binning = np.array(binning)
def __eq__(self, other):
if isinstance(other, Observable) and self._name == other._name and np.array_equal(self._binning, other._binning):
return True
return False
@property
def name(self):
return self._name
@property
def binning(self):
return self._binning
@property
def nbins(self):
return len(self._binning) - 1
def binningTArrayD(self):
import ROOT
return ROOT.TArrayD(len(self._binning), self._binning)
def renderRoofit(self, workspace):
'''
Return a RooObservable following the definition
'''
import ROOT
install_roofit_helpers()
if workspace.var(self._name) == None: # noqa: E711
var = ROOT.RooRealVar(self.name, self.name, self.binning[0], self.binning[-1])
var.setBinning(ROOT.RooBinning(self.nbins, self.binning))
workspace.add(var)
return workspace.var(self._name)
def formula(self):
raise RuntimeError("Observables cannot be used in formulas, as this would necessitate support for numeric integration, which is outside the scope of rhalphalib.")
| 35.719403 | 170 | 0.61541 |
acf2537ccd3a1b5f6ee42c8f616bcb2716240b12 | 4,497 | py | Python | vnpy/app/algo_trading/template.py | xiumingxu/vnpy-xx | 8b2d9ecdabcb7931d46fd92fad2d3701b7e66975 | [
"MIT"
] | null | null | null | vnpy/app/algo_trading/template.py | xiumingxu/vnpy-xx | 8b2d9ecdabcb7931d46fd92fad2d3701b7e66975 | [
"MIT"
] | null | null | null | vnpy/app/algo_trading/template.py | xiumingxu/vnpy-xx | 8b2d9ecdabcb7931d46fd92fad2d3701b7e66975 | [
"MIT"
] | null | null | null | from vnpy.trader.constant import OrderType, Offset, Direction
from vnpy.trader.engine import BaseEngine
from vnpy.trader.object import TickData, OrderData, TradeData
from vnpy.trader.utility import virtual
class AlgoTemplate:
""""""
_count = 0
display_name = ""
default_setting = {}
variables = []
def __init__(
self,
algo_engine: BaseEngine,
algo_name: str,
setting: dict
):
"""Constructor"""
self.algo_engine = algo_engine
self.algo_name = algo_name
self.active = False
self.active_orders = {} # vt_orderid:order
self.variables.insert(0, "active")
@classmethod
def new(cls, algo_engine: BaseEngine, setting: dict):
"""Create new algo instance"""
cls._count += 1
algo_name = f"{cls.__name__}_{cls._count}"
algo = cls(algo_engine, algo_name, setting)
return algo
def update_tick(self, tick: TickData):
""""""
if self.active:
self.on_tick(tick)
def update_order(self, order: OrderData):
""""""
if order.is_active():
self.active_orders[order.vt_orderid] = order
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
self.on_order(order)
def update_trade(self, trade: TradeData):
""""""
self.on_trade(trade)
def update_timer(self):
""""""
if self.active:
self.on_timer()
def on_start(self):
""""""
pass
@virtual
def on_stop(self):
""""""
pass
@virtual
def on_tick(self, tick: TickData):
""""""
pass
@virtual
def on_order(self, order: OrderData):
""""""
pass
@virtual
def on_trade(self, trade: TradeData):
""""""
pass
@virtual
def on_timer(self):
""""""
pass
def start(self):
""""""
self.active = True
self.on_start()
self.put_variables_event()
def stop(self):
""""""
self.active = False
self.cancel_all()
self.on_stop()
self.put_variables_event()
self.write_log("停止算法")
def subscribe(self, vt_symbol):
""""""
self.algo_engine.subscribe(self, vt_symbol)
def buy(
self,
vt_symbol,
price,
volume,
order_type: OrderType = OrderType.LIMIT,
offset: Offset = Offset.NONE
):
""""""
if not self.active:
return
msg = f"委托买入{vt_symbol}:{volume}@{price}"
self.write_log(msg)
return self.algo_engine.send_order(
self,
vt_symbol,
Direction.LONG,
price,
volume,
order_type,
offset
)
def sell(
self,
vt_symbol,
price,
volume,
order_type: OrderType = OrderType.LIMIT,
offset: Offset = Offset.NONE
):
""""""
if not self.active:
return
msg = f"委托卖出{vt_symbol}:{volume}@{price}"
self.write_log(msg)
return self.algo_engine.send_order(
self,
vt_symbol,
Direction.SHORT,
price,
volume,
order_type,
offset
)
def cancel_order(self, vt_orderid: str):
""""""
self.algo_engine.cancel_order(self, vt_orderid)
def cancel_all(self):
""""""
if not self.active_orders:
return
for vt_orderid in self.active_orders.keys():
self.cancel_order(vt_orderid)
def get_tick(self, vt_symbol: str):
""""""
return self.algo_engine.get_tick(self, vt_symbol)
def get_contract(self, vt_symbol: str):
""""""
return self.algo_engine.get_contract(self, vt_symbol)
def write_log(self, msg: str):
""""""
self.algo_engine.write_log(msg, self)
def put_parameters_event(self):
""""""
parameters = {}
for name in self.default_setting.keys():
parameters[name] = getattr(self, name)
self.algo_engine.put_parameters_event(self, parameters)
def put_variables_event(self):
""""""
variables = {}
for name in self.variables:
variables[name] = getattr(self, name)
self.algo_engine.put_variables_event(self, variables)
| 22.712121 | 63 | 0.542139 |
acf2542224ac75e4868e3b3606c2ec8046c07b9e | 12,218 | py | Python | ironic_tempest_plugin/config.py | ameya-r/ironic-tempest-plugin | d3360cf3b6ad8b89b9c80fc806dc5d4ba373dd01 | [
"Apache-2.0"
] | null | null | null | ironic_tempest_plugin/config.py | ameya-r/ironic-tempest-plugin | d3360cf3b6ad8b89b9c80fc806dc5d4ba373dd01 | [
"Apache-2.0"
] | null | null | null | ironic_tempest_plugin/config.py | ameya-r/ironic-tempest-plugin | d3360cf3b6ad8b89b9c80fc806dc5d4ba373dd01 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from tempest import config # noqa
# NOTE(TheJulia): The following options are loaded into a tempest
# plugin configuration option via plugin.py.
ironic_service_option = cfg.BoolOpt('ironic',
default=False,
help='Whether or not ironic is expected '
'to be available')
inspector_service_option = cfg.BoolOpt("ironic_inspector",
default=False,
help="Whether or not ironic-inspector "
"is expected to be available")
ironic_scope_enforcement = cfg.BoolOpt('ironic',
default=False,
help='Wheter or not ironic is '
'exepcted to enforce auth '
'scope.')
inspector_scope_enforcement = cfg.BoolOpt('ironic_inspector',
default=False,
help='Whether or not '
'ironic-inspector is expected '
'to enforce auth scope.')
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal provisioning service options',
help='When enabling baremetal tests, Nova '
'must be configured to use the Ironic '
'driver. The following parameters for the '
'[compute] section must be disabled: '
'console_output, interface_attach, '
'live_migration, pause, rescue, resize, '
'shelve, snapshot, and suspend')
# The bulk of the embedded configuration is below.
baremetal_introspection_group = cfg.OptGroup(
name="baremetal_introspection",
title="Baremetal introspection service options",
help="When enabling baremetal introspection tests,"
"Ironic must be configured.")
baremetal_features_group = cfg.OptGroup(
name='baremetal_feature_enabled',
title="Enabled Baremetal Service Features")
BaremetalGroup = [
cfg.StrOpt('catalog_type',
default='baremetal',
help="Catalog type of the baremetal provisioning service"),
cfg.StrOpt('driver',
default='fake-hardware',
help="Driver name to use for API tests"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal provisioning"
" service"),
cfg.IntOpt('deploywait_timeout',
default=15,
help="Timeout for Ironic node to reach the "
"wait-callback state after powering on."),
cfg.IntOpt('active_timeout',
default=300,
help="Timeout for Ironic node to completely provision"),
cfg.IntOpt('association_timeout',
default=30,
help="Timeout for association of Nova instance and Ironic "
"node"),
cfg.IntOpt('power_timeout',
default=60,
help="Timeout for Ironic power transitions."),
cfg.IntOpt('unprovision_timeout',
default=300,
help="Timeout for unprovisioning an Ironic node. "
"Takes longer since Kilo as Ironic performs an extra "
"step in Node cleaning."),
cfg.IntOpt('rescue_timeout',
default=300,
help="Timeout for rescuing an Ironic node."),
cfg.IntOpt('unrescue_timeout',
default=300,
help="Timeout for unrescuing an Ironic node."),
cfg.StrOpt('min_microversion',
help="Lower version of the test target microversion range. "
"The format is 'X.Y', where 'X' and 'Y' are int values. "
"Tempest selects tests based on the range between "
"min_microversion and max_microversion. "
"If both values are None, Tempest avoids tests which "
"require a microversion."),
cfg.StrOpt('max_microversion',
default='latest',
help="Upper version of the test target microversion range. "
"The format is 'X.Y', where 'X' and 'Y' are int values. "
"Tempest selects tests based on the range between "
"min_microversion and max_microversion. "
"If both values are None, Tempest avoids tests which "
"require a microversion."),
cfg.BoolOpt('use_provision_network',
default=False,
help="Whether the Ironic/Neutron tenant isolation is enabled"),
cfg.StrOpt('whole_disk_image_ref',
help="UUID of the wholedisk image to use in the tests."),
cfg.StrOpt('whole_disk_image_url',
help="An http link to the wholedisk image to use in the "
"tests."),
cfg.StrOpt('whole_disk_image_checksum',
help="An MD5 checksum of the image."),
cfg.StrOpt('partition_image_ref',
help="UUID of the partitioned image to use in the tests."),
cfg.StrOpt('ramdisk_iso_image_ref',
help=("UUID (or url) of an ISO image for the ramdisk boot "
"tests.")),
cfg.ListOpt('enabled_drivers',
default=['fake', 'pxe_ipmitool', 'agent_ipmitool'],
help="List of Ironic enabled drivers."),
cfg.ListOpt('enabled_hardware_types',
default=['ipmi'],
help="List of Ironic enabled hardware types."),
cfg.ListOpt('enabled_bios_interfaces',
default=['fake'],
help="List of Ironic enabled bios interfaces."),
cfg.ListOpt('enabled_deploy_interfaces',
default=['iscsi', 'direct'],
help="List of Ironic enabled deploy interfaces."),
cfg.ListOpt('enabled_rescue_interfaces',
default=['no-rescue'],
help="List of Ironic enabled rescue interfaces."),
cfg.ListOpt('enabled_boot_interfaces',
default=['fake', 'pxe'],
help="List of Ironic enabled boot interfaces."),
cfg.ListOpt('enabled_raid_interfaces',
default=['no-raid', 'agent'],
help="List of Ironic enabled RAID interfaces."),
cfg.StrOpt('default_rescue_interface',
help="Ironic default rescue interface."),
cfg.IntOpt('adjusted_root_disk_size_gb',
min=0,
help="Ironic adjusted disk size to use in the standalone tests "
"as instance_info/root_gb value."),
cfg.IntOpt('available_nodes', min=0, default=None,
help="The number of baremetal hosts available to use for "
"the tests."),
cfg.BoolOpt('partition_netboot',
default=True,
help="Treat partition images as netbooted as opposed to "
"attempting to populate a boot loader. IF cirros is "
"being used, this option should be set to True as "
"it lacks the needed components to make it locally "
"from a partition image."),
cfg.StrOpt('boot_mode',
default='bios',
choices=['bios', 'uefi'],
help="The desired boot_mode to be used on testing nodes."),
cfg.StrOpt('default_boot_option',
# No good default here, we need to actually set it.
help="The default boot option the testing nodes are using."),
]
BaremetalFeaturesGroup = [
cfg.BoolOpt('ipxe_enabled',
default=True,
help="Defines if IPXE is enabled"),
cfg.BoolOpt('adoption',
# Defaults to False since it's a destructive operation AND it
# requires the plugin to be able to read ipmi_password.
default=False,
help="Defines if adoption is enabled"),
cfg.BoolOpt('software_raid',
default=False,
help="Defines if software RAID is enabled (available "
"starting with Train). Requires at least two disks "
"on testing nodes."),
cfg.BoolOpt('deploy_time_raid',
default=False,
help="Defines if in-band RAID can be built in deploy time "
"(possible starting with Victoria)."),
]
BaremetalIntrospectionGroup = [
cfg.StrOpt('catalog_type',
default='baremetal-introspection',
help="Catalog type of the baremetal provisioning service"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal introspection"
" service"),
cfg.IntOpt('introspection_sleep',
default=30,
help="Introspection sleep before check status"),
cfg.IntOpt('introspection_timeout',
default=600,
help="Introspection time out"),
cfg.IntOpt('introspection_start_timeout',
default=90,
help="Timeout to start introspection"),
cfg.IntOpt('hypervisor_update_sleep',
default=60,
help="Time to wait until nova becomes aware of "
"bare metal instances"),
cfg.IntOpt('hypervisor_update_timeout',
default=300,
help="Time out for wait until nova becomes aware of "
"bare metal instances"),
# NOTE(aarefiev): status_check_period default is 60s, but checking
# node state takes some time(API call), so races appear here,
# 80s would be enough to make one more check.
cfg.IntOpt('ironic_sync_timeout',
default=80,
help="Time it might take for Ironic--Inspector "
"sync to happen"),
cfg.IntOpt('discovery_timeout',
default=300,
help="Time to wait until new node would enrolled in "
"ironic"),
cfg.BoolOpt('auto_discovery_feature',
default=False,
help="Is the auto-discovery feature enabled. Enroll hook "
"should be specified in node_not_found_hook - processing "
"section of inspector.conf"),
cfg.StrOpt('auto_discovery_default_driver',
# TODO(dtantsur): change to fake-hardware when Queens is no
# longer supported.
default='fake',
help="The driver expected to be set on newly discovered nodes. "
"Only has effect with auto_discovery_feature is True."),
cfg.StrOpt('auto_discovery_target_driver',
help="The driver to set on the newly discovered nodes. "
"Only has effect with auto_discovery_feature is True."),
cfg.StrOpt('data_store',
help="The storage backend for storing introspection data."),
]
| 47.726563 | 79 | 0.560894 |
acf255b0735d2d8646aedd1cefdebc565eec89d3 | 1,541 | py | Python | test_amplifiers.py | gps035/aoc-2019 | 19bc843720680cc3988a5d07647688787cae81d0 | [
"Unlicense"
] | null | null | null | test_amplifiers.py | gps035/aoc-2019 | 19bc843720680cc3988a5d07647688787cae81d0 | [
"Unlicense"
] | null | null | null | test_amplifiers.py | gps035/aoc-2019 | 19bc843720680cc3988a5d07647688787cae81d0 | [
"Unlicense"
] | 1 | 2019-12-04T09:09:03.000Z | 2019-12-04T09:09:03.000Z | import unittest
from amplifiers import run_amps
class TestFuelCalculation(unittest.TestCase):
def test_thruster_simple_1(self):
program = [3, 15, 3, 16, 1002, 16, 10, 16, 1, 16, 15, 15, 4, 15, 99, 0, 0]
self.assertEqual(run_amps(program, (4, 3, 2, 1, 0)), 43210)
def test_thruster_simple_2(self):
program = [3, 23, 3, 24, 1002, 24, 10, 24, 1002, 23, -1, 23, 101, 5, 23, 23, 1, 24, 23, 23, 4, 23, 99, 0, 0]
self.assertEqual(run_amps(program, (0, 1, 2, 3, 4)), 54321)
def test_thruster_simple_3(self):
program = [
3, 31, 3, 32, 1002, 32, 10, 32, 1001, 31, -2, 31, 1007, 31, 0, 33, 1002, 33, 7, 33, 1, 33, 31, 31, 1, 32,
31, 31, 4, 31, 99, 0, 0, 0
]
self.assertEqual(run_amps(program, (1, 0, 4, 3, 2)), 65210)
def test_thruster_recursive_1(self):
program = [
3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1, 28, 1005, 28, 6, 99, 0,
0, 5
]
self.assertEqual(run_amps(program, (9, 8, 7, 6, 5)), 139629729)
def test_thruster_recursive_2(self):
program = [
3, 52, 1001, 52, -5, 52, 3, 53, 1, 52, 56, 54, 1007, 54, 5, 55, 1005, 55, 26, 1001, 54, -5, 54, 1105, 1, 12,
1, 53, 54, 53, 1008, 54, 0, 55, 1001, 55, 1, 55, 2, 53, 55, 53, 4, 53, 1001, 56, -1, 56, 1005, 56, 6, 99, 0,
0, 0, 0, 10
]
self.assertEqual(run_amps(program, (9, 7, 8, 5, 6)), 18216)
if __name__ == '__main__':
unittest.main()
| 38.525 | 120 | 0.527579 |
acf255bac8baa944d95c2dd694a2314105898421 | 1,655 | py | Python | setup.py | mgotz/EBT_evaluation | ec39845b61dc599850fb19915c0f1365ac521e9b | [
"MIT"
] | null | null | null | setup.py | mgotz/EBT_evaluation | ec39845b61dc599850fb19915c0f1365ac521e9b | [
"MIT"
] | null | null | null | setup.py | mgotz/EBT_evaluation | ec39845b61dc599850fb19915c0f1365ac521e9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
setup script for ebttools
"""
import os
from setuptools import setup, find_packages, __version__
#check to have setuptools > v36.2
version = __version__.split(".")
if len(version) < 2:
raise ImportError("unkown version of setuptools, please update")
if int(version[0]) < 36 or (int(version[0]) == 36 and int(version[1]) < 2):
raise ImportError("setuptools version "+ __version__ +" too low, needs at least 36.2")
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='ebttools',
version='2.0.1a',
packages=find_packages(exclude=("tests")), #automagically include all subfolders as packages, but don't install the tests
package_data = {"":["*.png"],
"ebttools":["calibrations/*"]},
license='MIT',
long_description=read('README.md'),
author='Malte Gotz',
author_email='malte.gotz@oncoray.de',
url='https://github.com/mgotz/EBT_evalution',
install_requires=['matplotlib>=1.4.3',
'scipy>=0.15.1',
'numpy>=1.9.2',
'pillow>=2.8.2',
'configparser;python_version<"3.2"',
'mg_dataprocessing>=1.0.0',
'mg_pyguitools>=1.1.1'],
dependency_links=["https://github.com/mgotz/PyGUITools/tarball/master#egg=mg_pyguitools-1.1.1",
"https://github.com/mgotz/PyDataProcessing/tarball/master#egg=mg_dataprocessing-1.0.0"],
entry_points = {"gui_scripts":["EBT-evaluation = ebttools.gui.main:run"]}
) | 33.1 | 125 | 0.598792 |
acf255bb99b807d0d36d52392432b6afdba7c4a1 | 1,098 | py | Python | dolike/dolike.py | gadfly3173/sports | ac7e32735487c3ccbd293e7d3e10d668de8d1c79 | [
"MIT"
] | null | null | null | dolike/dolike.py | gadfly3173/sports | ac7e32735487c3ccbd293e7d3e10d668de8d1c79 | [
"MIT"
] | null | null | null | dolike/dolike.py | gadfly3173/sports | ac7e32735487c3ccbd293e7d3e10d668de8d1c79 | [
"MIT"
] | null | null | null | import gevent
from gevent.monkey import patch_all
patch_all()
from gevent.pool import Pool
import traceback
from mysports.sports import get_md5_code
from mysports.original_json import *
from mysports.login import login
import requests
import json
import time
def dolike(ses: requests.session, to_userid: str, type: int = 1):
data = json.dumps({"from": "1", "toUserId": to_userid, "type": str(type)})
sign = get_md5_code(data)
res = ses.post(host + '/api/center/doLike', data={'sign': sign, 'data': data})
if res.json()['code'] != 200:
print(res.text)
raise Exception
if __name__ == '__main__':
account = ''
password = ''
to_userid = '175691'
try:
print('try login...')
_, s,_ = login(account, password)
except Exception as e:
traceback.print_exc()
print('login failed')
exit(0)
print('loging successfully')
try:
pool = Pool(size=100)
for i in range(500):
pool.spawn(dolike, s, to_userid, 1)
time.sleep(.05)
except:
traceback.print_exc()
| 23.361702 | 82 | 0.622951 |
acf255f7ba23feac7213348114f99d4964a75416 | 644 | py | Python | matrixadd.py | bjoffficial/Python | 73e6fdc19a1bec18488405c4a60c30ba68581ce5 | [
"Apache-2.0"
] | null | null | null | matrixadd.py | bjoffficial/Python | 73e6fdc19a1bec18488405c4a60c30ba68581ce5 | [
"Apache-2.0"
] | null | null | null | matrixadd.py | bjoffficial/Python | 73e6fdc19a1bec18488405c4a60c30ba68581ce5 | [
"Apache-2.0"
] | null | null | null | n=int(input("Enter the no of rows"))
n1=int(input("Enter the no of col"))
li=[]
for i in range(n):
mat=[]
for j in range(n1):
mat.append(int(input()))
li.append(mat)
n2=int(input("Enter the 2nd matrix row"))
n3=int(input("Enter the 2rd matrix col"))
ma=[]
for i in range(n2):
l=[]
for j in range(n3):
l.append(int(input()))
ma.append(l)
an=[]
for i in range(n):
ans=[]
for j in range(n1):
ans.append(ma[i][j]+li[i][j])
an.append(ans)
for i in range(n):
for j in range(n1):
print(an[i][j],end="")
print()
| 21.466667 | 42 | 0.498447 |
acf25651f184e364223e6fe76070eb51eaa1fda5 | 147 | py | Python | sqladmin/__init__.py | tr11/sqladmin | 78d1d5ff87dbc55255055fa3b3c6d440d177f355 | [
"BSD-3-Clause"
] | 1 | 2022-03-24T09:11:36.000Z | 2022-03-24T09:11:36.000Z | sqladmin/__init__.py | dwreeves/sqladmin | fbc733ec1a7eb31ddd1e39e2b9dd2b21c82f0ce7 | [
"BSD-3-Clause"
] | null | null | null | sqladmin/__init__.py | dwreeves/sqladmin | fbc733ec1a7eb31ddd1e39e2b9dd2b21c82f0ce7 | [
"BSD-3-Clause"
] | null | null | null | from sqladmin.application import Admin
from sqladmin.models import ModelAdmin
__version__ = "0.1.7"
__all__ = [
"Admin",
"ModelAdmin",
]
| 14.7 | 38 | 0.707483 |
acf256afdd8e17ab49396698c2831b55a394e3dc | 1,219 | py | Python | Python All in one/main.py | AwaleSajil/PLF | 873540d4862e2a40f2dca0ed7dd4e17037a8511c | [
"MIT"
] | null | null | null | Python All in one/main.py | AwaleSajil/PLF | 873540d4862e2a40f2dca0ed7dd4e17037a8511c | [
"MIT"
] | null | null | null | Python All in one/main.py | AwaleSajil/PLF | 873540d4862e2a40f2dca0ed7dd4e17037a8511c | [
"MIT"
] | null | null | null | import time
import multiprocessing
from peakCounterV1 import *
from comV1 import *
from flaskServerV1 import *
from rskfinal import *
# from test7 import *
if __name__ == "__main__":
    # Shared-memory declarations: these arrays are shared between the worker
    # processes created below.
    # 'd' (double) array of size 2: peak count and weight of feed taken in.
    soundAnalysis = multiprocessing.Array('d', 2)
    # 'i' (int) array of size 5: data to be sent to the Arduino.
    # (Original comment said size 4; the code allocates 5 slots.)
    sendData = multiprocessing.Array('i', 5)
    # 'i' (int) array of size 4: data received from the Arduino.
    readData = multiprocessing.Array('i', 4)
    # 'd' (double) array of size 2: distribution index and mobility index.
    imageAnalysis = multiprocessing.Array('d', 2)
    # One process per subsystem; each receives only the shared arrays it uses.
    p1 = multiprocessing.Process(target=peakCounter, args=(soundAnalysis, ))
    p2 = multiprocessing.Process(target=com, args=(sendData,readData,imageAnalysis, soundAnalysis,))
    p3 = multiprocessing.Process(target=flaskServer, args=(soundAnalysis, sendData, readData, imageAnalysis, ))
    p4 = multiprocessing.Process(target=rskIndex, args=(imageAnalysis,))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    # Block until every worker process exits.
    p1.join()
    p2.join()
    p3.join()
    p4.join()
| 24.877551 | 111 | 0.703035 |
acf25767a4f41cbde49d4a841e8687a88873dbef | 1,768 | py | Python | tests/api_tests/list_comment.py | voilknetwork/voilk | 839716ae446d39d7260505b5d6d16f4f122cf1fe | [
"MIT"
] | null | null | null | tests/api_tests/list_comment.py | voilknetwork/voilk | 839716ae446d39d7260505b5d6d16f4f122cf1fe | [
"MIT"
] | null | null | null | tests/api_tests/list_comment.py | voilknetwork/voilk | 839716ae446d39d7260505b5d6d16f4f122cf1fe | [
"MIT"
] | 3 | 2020-09-03T11:23:13.000Z | 2021-06-16T07:07:54.000Z | #!/usr/bin/env python3
"""
Create list of all voilk comments in file.
Usage: list_comment.py <server_address> [<output_filename>]
"""
import sys
import json
from jsonsocket import JSONSocket
from jsonsocket import voilkd_call
def list_comments(url):
  """Collect all comments from a voilkd node, paging by cashout time.

  Args:
    url: node address in the form <ip_address>:<port>

  Returns:
    A list of "permlink;author;last_update" strings, or an empty list
    as soon as any RPC call fails.
  """
  last_cashout_time = "2016-01-01T00-00-00"
  comments = []
  while True:
    # Request the next page, starting at the last cashout time seen.
    request = bytes( json.dumps( {
      "jsonrpc": "2.0",
      "id": 0,
      "method": "database_api.list_comments",
      "params": { "start":[ last_cashout_time, "", "" ], "limit": 5, "order": "by_cashout_time" }
      } ), "utf-8" ) + b"\r\n"
    status, response = voilkd_call(url, data=request)
    if not status:
      print( "rpc failed for last_cashout_time: " + last_cashout_time )
      return []
    comment_list = response["result"]["comments"]
    if len(comment_list) == 0:
      break
    actual_cashout_time = comment_list[-1]["cashout_time"]
    if actual_cashout_time == last_cashout_time:
      # No forward progress in the cursor; stop to avoid an endless loop.
      break
    last_cashout_time = actual_cashout_time
    for comment in comment_list:
      comments.append( comment["permlink"] + ";" + comment["author"] + ";" + comment["last_update"] )
  return comments
def main():
  """Parse CLI arguments, fetch the comment list, optionally write it to a file.

  Exits with a usage message on bad arguments, with status -1 when no
  comments were retrieved, and with an error message when the output file
  cannot be opened or written.
  """
  if len( sys.argv ) < 2 or len( sys.argv ) > 3:
    exit( "Usage: list_comment.py <server_address> [<output_filename>]" )
  url = sys.argv[1]
  print( url )
  comments = list_comments( url )
  if len(comments) == 0:
    exit(-1)
  if len( sys.argv ) == 3:
    filename = sys.argv[2]
    try:
      # "with" guarantees the handle is closed even if a write fails;
      # the previous code leaked the handle on write errors.
      with open( filename, "w" ) as out_file:
        for comment in comments:
          out_file.write(comment + "\n")
    except OSError:
      # Narrowed from a bare "except:" so ^C and real bugs still propagate.
      exit( "Cannot open file " + filename )
if __name__ == "__main__":
main() | 22.961039 | 97 | 0.618778 |
acf257a1e8a4221ddcce3926dda8b429dfae7dc9 | 2,403 | py | Python | library/setup.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | null | null | null | library/setup.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | null | null | null | library/setup.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Copyright (c) 2017 Pimoroni.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup
# Trove classifiers describing supported platforms and Python versions;
# see https://pypi.org/classifiers/ for the canonical list.
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Operating System :: POSIX :: Linux',
    'License :: OSI Approved :: MIT License',
    'Intended Audience :: Developers',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: Software Development',
    'Topic :: System :: Hardware'
]
# Assemble the long description first, using context managers so the file
# handles are closed promptly (the previous inline open().read() calls
# leaked both handles until interpreter shutdown).
with open('README.md') as _readme_file:
    _long_description = _readme_file.read()
with open('CHANGELOG.txt') as _changelog_file:
    _long_description += '\n' + _changelog_file.read()

setup(
    # Self-descriptive entries which should always be present
    name='inky',
    version='1.2.2',
    author='Philip Howard',
    author_email='phil@pimoroni.com',
    description='Inky pHAT Driver',
    long_description=_long_description,
    long_description_content_type="text/markdown",
    license='MIT',
    keywords='Raspberry Pi e-paper display driver',
    url='http://www.pimoroni.com',
    project_urls={'GitHub': 'https://www.github.com/pimoroni/inky'},
    classifiers=classifiers,
    py_modules=[],
    packages=['inky'],
    include_package_data=True,
    # Runtime dependencies; extras add GPIO support, fonts, and example deps.
    install_requires=['numpy', 'smbus2', 'spidev'],
    extras_require={
        'rpi-gpio-output': ['RPi.GPIO'],
        'rpi': ['RPi.GPIO'],
        'fonts': ['font-fredoka-one', 'font-source-serif-pro', 'font-hanken-grotesk', 'font-intuitive'],
        'example-depends': ['requests', 'geocoder', 'beautifulsoup4']
    }
)
| 38.142857 | 104 | 0.705368 |
acf257c3675b51555ae0a7a265fcd28173fcf877 | 1,608 | py | Python | tabletop/mutations/add_game_to_collection.py | dcramer/tabletop-server | 062f56d149a29d5ab8605e220c156c1b4fb52d2f | [
"Apache-2.0"
] | 7 | 2018-09-03T20:52:00.000Z | 2021-09-12T20:52:43.000Z | tabletop/mutations/add_game_to_collection.py | dcramer/tabletop-server | 062f56d149a29d5ab8605e220c156c1b4fb52d2f | [
"Apache-2.0"
] | 9 | 2020-02-11T23:11:31.000Z | 2022-01-13T00:53:07.000Z | tabletop/mutations/add_game_to_collection.py | dcramer/tabletop-server | 062f56d149a29d5ab8605e220c156c1b4fb52d2f | [
"Apache-2.0"
] | null | null | null | import graphene
from django.db import IntegrityError, transaction
from tabletop.models import Collection, CollectionGame, Game
from tabletop.schema import CollectionNode, GameNode
class AddGameToCollection(graphene.Mutation):
    """GraphQL mutation adding an existing game to a user-owned collection.

    Only the user who created the collection may modify it. Failures are
    reported through the ``ok``/``errors`` payload rather than raised.
    """
    class Arguments:
        # UUIDs identifying the target collection and the game to add.
        collection = graphene.UUID(required=True)
        game = graphene.UUID(required=True)
    # Result payload: success flag, error messages, and the affected objects.
    ok = graphene.Boolean()
    errors = graphene.List(graphene.String)
    collection = graphene.Field(CollectionNode)
    game = graphene.Field(GameNode)
    def mutate(self, info, collection: str, game: str):
        # Resolve the calling user from the GraphQL request context.
        current_user = info.context.user
        if not current_user.is_authenticated:
            return AddGameToCollection(ok=False, errors=["Authentication required"])
        try:
            # Rebind the UUID argument to the model instance it identifies.
            collection = Collection.objects.get(id=collection)
        except Collection.DoesNotExist:
            return AddGameToCollection(ok=False, errors=["Invalid Collection"])
        # Only the collection's creator may modify it.
        if collection.created_by_id != current_user.id:
            return AddGameToCollection(ok=False, errors=["Cannot edit this Collection"])
        try:
            game = Game.objects.get(id=game)
        except Game.DoesNotExist:
            return AddGameToCollection(ok=False, errors=["Invalid Game"])
        try:
            # atomic() confines the unique-constraint violation so it does not
            # poison any surrounding transaction.
            with transaction.atomic():
                CollectionGame.objects.create(collection=collection, game=game)
        except IntegrityError:
            return AddGameToCollection(
                ok=False, errors=["Game already exists in Collection"]
            )
        return AddGameToCollection(ok=True, collection=collection, game=game)
| 35.733333 | 88 | 0.678483 |
acf2586c2ea82595b09a08d090d936c409ddbb74 | 2,358 | py | Python | setup.py | VHchavez/pdft | 1efff972dc3ff69ea7b86b1121349c6a4bdd9677 | [
"BSD-3-Clause"
] | null | null | null | setup.py | VHchavez/pdft | 1efff972dc3ff69ea7b86b1121349c6a4bdd9677 | [
"BSD-3-Clause"
] | null | null | null | setup.py | VHchavez/pdft | 1efff972dc3ff69ea7b86b1121349c6a4bdd9677 | [
"BSD-3-Clause"
] | null | null | null | """
Partition Density Functional Theory
A fragment based calculation using density functional theory
"""
import sys
from setuptools import setup, find_packages
import versioneer
# First line of the module docstring doubles as the package summary.
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
    with open("README.md", "r") as handle:
        long_description = handle.read()
except OSError:
    # Narrowed from a bare "except:"; fall back to the module docstring
    # when README.md is missing or unreadable.
    long_description = "\n".join(short_description[2:])
# Package metadata; version and build hooks are delegated to versioneer.
setup(
    # Self-descriptive entries which should always be present
    name='pdft',
    author='The Wasserman Group',
    author_email='awasser@purdue.edu',
    description=short_description[0],
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    license='BSD-3-Clause',
    # Which Python importable modules should be included when your package is installed
    # Handled automatically by setuptools. Use 'exclude' to prevent some specific
    # subpackage(s) from being added, if needed
    packages=find_packages(),
    # Optional include package data to ship with your package
    # Customize MANIFEST.in if the general case does not suit your needs
    # Comment out this line to prevent the files from being packaged with your software
    include_package_data=True,
    # Allows `setup.py test` to work correctly with pytest
    setup_requires=[] + pytest_runner,
    # Runtime dependencies pulled from PyPI at install time.
    install_requires=["numpy",
                      "qcelemental",
                      "matplotlib",
                      "opt-einsum"]
    # Additional entries you may want simply uncomment the lines you want and fill in the data
    # url='http://www.my_package.com',  # Website
    # install_requires=[],              # Required packages, pulls from pip if needed; do not use for Conda deployment
    # platforms=['Linux',
    #            'Mac OS-X',
    #            'Unix',
    #            'Windows'],            # Valid platforms your code works on, adjust to your flavor
    # python_requires=">=3.5",          # Python version restrictions
    # Manual control if final package is compressible or not, set False to prevent the .egg from being made
    # zip_safe=False,
)
| 37.428571 | 118 | 0.683206 |
acf25947fc01883dbe24323686479a9f37b267fc | 2,560 | py | Python | merci/tests/test_configuration_manager.py | isabella232/merci-py | 515e8b2ec0545642cd902d60146f42965b24b772 | [
"Apache-2.0"
] | 1 | 2019-04-02T23:54:20.000Z | 2019-04-02T23:54:20.000Z | merci/tests/test_configuration_manager.py | medallia/merci-py | 515e8b2ec0545642cd902d60146f42965b24b772 | [
"Apache-2.0"
] | 1 | 2022-02-19T12:30:52.000Z | 2022-02-19T12:30:52.000Z | merci/tests/test_configuration_manager.py | isabella232/merci-py | 515e8b2ec0545642cd902d60146f42965b24b772 | [
"Apache-2.0"
] | 2 | 2019-04-04T04:01:42.000Z | 2022-02-19T10:30:08.000Z | #
# Copyright 2019 Medallia, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for configuration manager.
"""
import unittest
from merci.managers import ConfigurationManager, FeatureFlagManager
from merci.structure import Context, Modifiers, Configuration
class TestConfigurationManager(unittest.TestCase):
    """ Unit tests for configuration manager. """
    # Sample lookup contexts: the same user in two different environments.
    joe_in_qa = {"environment": "qa", "user": "joe"}
    joe_in_prod = {"environment": "prod", "user": "joe"}
    def test_set_contexts(self):
        """Flag resolves through both the manager and the feature-flag facade."""
        # Flag tree: disabled by default; in "qa" enabled only for user
        # "joe"; in "prod" disabled for everyone.
        configuration = Configuration(
            "enable-welcome", Context(False,
                                      Modifiers(
                                          'environment', {
                                              'qa': Context(False,
                                                            Modifiers(
                                                                'user', {
                                                                    'joe': Context(True, None)
                                                                })),
                                              'prod': Context(False, None)
                                          })))
        configuration_manager = ConfigurationManager()
        configuration_store = {"enable-welcome": configuration}
        configuration_manager.set_configuration_store(configuration_store)
        # Direct lookup through the configuration manager.
        is_welcome_enabled = configuration_manager.get_object("enable-welcome", self.joe_in_qa, False)
        self.assertTrue(is_welcome_enabled)
        # Same lookups through the FeatureFlagManager facade.
        configuration_manager = ConfigurationManager()
        feature_flag_manager = FeatureFlagManager(configuration_manager)
        configuration_manager.set_configuration_store(configuration_store)
        is_welcome_enabled = feature_flag_manager.is_active("enable-welcome", self.joe_in_qa, False)
        self.assertTrue(is_welcome_enabled)
        is_welcome_enabled = feature_flag_manager.is_active("enable-welcome", self.joe_in_prod, False)
        self.assertFalse(is_welcome_enabled)
| 39.384615 | 102 | 0.603906 |
acf25a311e7f8b5fde11f8e84d0c1c2238697cc0 | 6,873 | py | Python | src/molp/dichotomic_search/solver.py | gokhanceyhan/momilp | d6b35509713be12b435ff71fd700a876ca4aa4a9 | [
"MIT"
] | 1 | 2020-04-14T13:37:10.000Z | 2020-04-14T13:37:10.000Z | src/molp/dichotomic_search/solver.py | gokhanceyhan/momilp | d6b35509713be12b435ff71fd700a876ca4aa4a9 | [
"MIT"
] | 31 | 2019-10-27T19:08:14.000Z | 2021-12-23T23:06:57.000Z | src/molp/dichotomic_search/solver.py | gokhanceyhan/momilp | d6b35509713be12b435ff71fd700a876ca4aa4a9 | [
"MIT"
] | null | null | null | """Implements dichotomic search to find extreme supported nondominated points of BOLP problems
Based on:
Aneja, Yash P., and Kunhiraman PK Nair. "Bicriteria transportation problem." Management Science 25.1 (1979): 73-78."""
from collections import namedtuple
import math
from src.common.elements import PointInTwoDimension
from src.molp.solver import MolpSolver
from src.molp.utilities import ModelQueryUtilities
# An adjacent pair of nondominated points; the segment between them is
# scanned for a new supported point during the dichotomic search.
PointPair = namedtuple("PointPair", ["point_with_higher_z1_value", "point_with_higher_z2_value"])
class BolpDichotomicSearchWithGurobiSolver(MolpSolver):
"""Implements bi-objective linear programming problem solver by applying dichotomic search with Gurobi solver"""
    def __init__(self, model, obj_rel_tol=1e-6):
        """Initialize the solver over the given Gurobi model.

        Args:
            model: A bi-objective Gurobi model (checked in ``_validate``).
            obj_rel_tol: Relative tolerance used to decide whether a candidate
                point improves the scalarized objective enough to keep.
        """
        super(BolpDichotomicSearchWithGurobiSolver, self).__init__(model)
        # Extreme supported nondominated points found so far.
        self._extreme_supported_nondominated_points = []
        self._obj_rel_tol = obj_rel_tol
        # Adjacent point pairs whose connecting segment still must be searched.
        self._point_pairs_to_check = []
        self._set_model_params()
        self._validate()
        self._initialize()
def _calculate_objective_weights(self, point_with_higher_z1_value, point_with_higher_z2_value):
"""Calculates the objective weights to search a nondominated point between the given two points"""
w_0 = point_with_higher_z2_value.z2() - point_with_higher_z1_value.z2()
w_1 = point_with_higher_z1_value.z1() - point_with_higher_z2_value.z1()
w = w_0 + w_1
return {0: w_0 / w, 1: w_1 / w}
    def _initialize(self):
        """Initializes the solver.

        Solves the model twice lexicographically (first prioritizing z1,
        then z2) to find the two endpoint nondominated points, and seeds
        the pair queue with them when they are distinct.
        """
        model = self._model
        # first (second) indexed point in the extreme supported points is the one with the highest z1 (z2) value
        points = []
        for i in range(2):
            # i == 0: objective 0 gets the higher priority; i == 1: objective 1.
            model.setParam("ObjNumber", 0)
            model.setAttr("ObjNPriority", 2 - i)
            model.setParam("ObjNumber", 1)
            model.setAttr("ObjNPriority", 1 + i)
            model.optimize()
            values, _ = ModelQueryUtilities.query_optimal_objective_values(model)
            new_point = PointInTwoDimension(values)
            # Skip the second endpoint when it coincides with the first.
            if points and self._isclose(points[-1], new_point):
                continue
            points.append(new_point)
        self._extreme_supported_nondominated_points = points
        if len(points) < 2:
            return
        self._point_pairs_to_check.append(
            PointPair(
                point_with_higher_z1_value=self._extreme_supported_nondominated_points[0],
                point_with_higher_z2_value=self._extreme_supported_nondominated_points[1]))
def _isclose(self, point_a, point_b, rel_tol=1e-6):
"""Returns True if two points are close in their values, False otherwise"""
return math.isclose(point_a.z1(), point_b.z1(), rel_tol=rel_tol) and \
math.isclose(point_a.z2(), point_b.z2(), rel_tol=rel_tol)
    def _modify_model_objectives(self, equal_priority=False, obj_index_2_weight=None):
        """Modify the model objectives.

        Args:
            equal_priority: When True, set both objectives to the same
                priority so Gurobi blends them instead of solving
                lexicographically.
            obj_index_2_weight: Optional mapping of objective index to the
                weight used for the blended (scalarized) solve.
        """
        model = self._model
        if equal_priority:
            for i in range(2):
                model.setParam("ObjNumber", i)
                model.setAttr("ObjNPriority", 1)
        obj_index_2_weight = obj_index_2_weight or {}
        for obj_index, weight in obj_index_2_weight.items():
            model.setParam("ObjNumber", obj_index)
            model.setAttr("ObjNWeight", weight)
    def _set_model_params(self, obj_n_abs_tol=0):
        """Sets the model parameters
        NOTE: The value of the 'ObjNAbsTol' parameter indicates the amount by which a fixed variable's reduced cost is
        allowed to violate dual feasibility, whereas the 'ObjNRelTol' parameter is simply ignored
        (https://www.gurobi.com/documentation/9.0/refman/working_with_multiple_obje.html)"""
        model = self._model
        # Apply the absolute tolerance to both objectives.
        for index in range(2):
            model.setParam("ObjNumber", index)
            model.setAttr("ObjNAbsTol", obj_n_abs_tol)
        # Flush pending attribute changes to the model.
        model.update()
@staticmethod
def _sort_points(points, key, reverse=False):
"""Sorts the points"""
return sorted(points, key=key, reverse=reverse)
def _validate(self):
"""Validates that the model is a BOLP"""
model = self._model
assert model.getAttr("NumObj") == 2, "the'%s' model is not bi-objective" % self._model.getAttr("ModelName")
assert not model.isQP, "the %s model is a QP" % self._model.getAttr("ModelName")
assert not model.isQCP, "the %s model is a QCP" % self._model.getAttr("ModelName")
    def extreme_supported_nondominated_points(self):
        """Return the extreme supported nondominated points found so far.

        Populated by ``_initialize`` with the two endpoints and extended by
        ``solve``; unsorted until ``solve`` finishes.
        """
        return self._extreme_supported_nondominated_points
def solve(self):
model = self._model
point_pairs_two_check = self._point_pairs_to_check
while len(point_pairs_two_check) > 0:
point_pair = point_pairs_two_check.pop(0)
point_with_higher_z1_value = point_pair.point_with_higher_z1_value
point_with_higher_z2_value = point_pair.point_with_higher_z2_value
obj_index_2_weight = self._calculate_objective_weights(
point_with_higher_z1_value, point_with_higher_z2_value)
self._modify_model_objectives(equal_priority=True, obj_index_2_weight=obj_index_2_weight)
model.optimize()
values, _ = ModelQueryUtilities.query_optimal_objective_values(model)
point = PointInTwoDimension(values)
if self._isclose(point, point_with_higher_z1_value) or self._isclose(point, point_with_higher_z2_value):
continue
left_extreme_point_obj_value = point_with_higher_z2_value.values()[0] * obj_index_2_weight[0] + \
point_with_higher_z2_value.values()[1] * obj_index_2_weight[1]
point_obj_value = point.values()[0] * obj_index_2_weight[0] + point.values()[1] * obj_index_2_weight[1]
# do not include the new extreme supported point if it close to the edge between the adjacent points, that
# is, the obj function value does not improve much.
if math.isclose(point_obj_value, left_extreme_point_obj_value, rel_tol=self._obj_rel_tol):
continue
self._extreme_supported_nondominated_points.append(point)
point_pairs_two_check.append(
PointPair(point_with_higher_z1_value=point_with_higher_z1_value, point_with_higher_z2_value=point))
point_pairs_two_check.append(
PointPair(point_with_higher_z1_value=point, point_with_higher_z2_value=point_with_higher_z2_value))
# sort the nondominated points in non-decreasing order of z1 values
self._extreme_supported_nondominated_points = BolpDichotomicSearchWithGurobiSolver._sort_points(
self._extreme_supported_nondominated_points, lambda x: x.z1()) | 50.911111 | 119 | 0.688928 |
acf25ad4a3eb302170e2ac01e510861d2abe70f1 | 5,107 | py | Python | openstack_dashboard/dashboards/admin/instances/tabs.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/instances/tabs.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/instances/tabs.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from horizon.utils import functions as utils
from openstack_dashboard.dashboards.admin.instances \
import audit_tables as a_tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances import console
LOG = logging.getLogger(__name__)
class OverviewTab(tabs.Tab):
    """Tab rendering the instance's overview/details panel."""
    name = _("Overview")
    slug = "overview"
    template_name = ("admin/instances/"
                     "_detail_overview.html")
    def get_context_data(self, request):
        # The instance object is supplied through the tab group's kwargs;
        # expose it and the admin flag to the template.
        return {"instance": self.tab_group.kwargs['instance'],
                "is_superuser": request.user.is_superuser}
class LogTab(tabs.Tab):
    """Tab showing the tail of the instance's console log."""
    name = _("Log")
    slug = "log"
    template_name = "admin/instances/_detail_log.html"
    # Not rendered with the initial page; loaded when the tab is opened.
    preload = False
    def get_context_data(self, request):
        instance = self.tab_group.kwargs['instance']
        # Number of log lines to fetch, derived from the request.
        log_length = utils.get_log_length(request)
        try:
            data = api.nova.server_console_output(request,
                                                  instance.id,
                                                  tail_length=log_length)
        except Exception:
            # Fall back to an error string so the template still renders.
            data = _('Unable to get log for instance "%s".') % instance.id
            exceptions.handle(request, ignore=True)
        return {"instance": instance,
                "console_log": data,
                "log_length": log_length}
class ConsoleTab(tabs.Tab):
    """Tab embedding the instance's remote console (VNC/SPICE/serial/...)."""
    name = _("Console")
    slug = "console"
    template_name = "admin/instances/_detail_console.html"
    preload = False
    def get_context_data(self, request):
        instance = self.tab_group.kwargs['instance']
        # 'AUTO' lets the console helper pick the first available console type.
        console_type = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
        console_url = None
        try:
            console_type, console_url = console.get_console(
                request, console_type, instance)
            # For serial console, the url is different from VNC, etc.
            # because it does not include params for title and token
            if console_type == "SERIAL":
                console_url = reverse('horizon:admin:instances:serial',
                                      args=[instance.id])
        except exceptions.NotAvailable:
            # No console available; the template renders without a URL.
            exceptions.handle(request, ignore=True, force_log=True)
        return {'console_url': console_url, 'instance_id': instance.id,
                'console_type': console_type}
    def allowed(self, request):
        # The ConsoleTab is available if settings.CONSOLE_TYPE is not set at
        # all, or if it's set to any value other than None or False.
        return bool(getattr(settings, 'CONSOLE_TYPE', True))
class AuditTab(tabs.TableTab):
    """Tab listing the instance's action (audit) history."""
    name = _("Action Log")
    slug = "audit"
    table_classes = (a_tables.AuditTable,)
    template_name = "admin/instances/_detail_audit.html"
    preload = False
    def get_audit_data(self):
        # Empty default so the table still renders when the API call fails.
        actions = []
        try:
            actions = api.nova.instance_action_list(
                self.request, self.tab_group.kwargs['instance_id'])
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve instance action list.'))
        # Most recent actions first.
        return sorted(actions, reverse=True, key=lambda y: y.start_time)
class DevsnapshotTab(tabs.TableTab):
    """Tab listing the instance's device snapshots.

    NOTE(review): "sanpshot" in the data method and template name is a typo,
    but the method name appears to have to match the table's data-source
    name, so it is left unchanged here — confirm before renaming.
    """
    name = _("Dev_snapshot Tab")
    slug = "dev_snapshot"
    table_classes = (a_tables.DevsnapshotTable,)
    template_name = "admin/instances/_detail_dev_sanpshot.html"
    preload = False
    def get_dev_sanpshot_data(self):
        try:
            instance_id = self.tab_group.kwargs['instance_id']
            actions = api.nova.dev_snapshot_list(self.request, instance_id)
            # Row id combines the snapshot name and instance id so each row
            # is uniquely addressable in the table.
            return [{'id':'__'.join([action.dev_snapshot_name, instance_id]),
                     'name':action.dev_snapshot_name,
                     'instance_id':instance_id}
                    for action in actions]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve dev_snapshot list.'))
        return []
class InstanceDetailTabs(tabs.TabGroup):
    """Tab group combining all detail tabs for a single instance."""
    slug = "instance_details"
    tabs = (OverviewTab, LogTab, ConsoleTab, AuditTab, DevsnapshotTab,)
    #tabs = (OverviewTab, LogTab, ConsoleTab, AuditTab,)
    # NOTE(review): per horizon convention, sticky keeps the selected tab
    # active across requests — confirm against the horizon version in use.
    sticky = True
| 37.277372 | 78 | 0.634423 |
acf25c431719c8a30aed35d4c65158911ebd8339 | 2,008 | py | Python | ihome/apps/verifications/views.py | Noah-Smith-wgp/rentinghouse | 22ba71aa8b3b0c290b8c01cd2f4dd14bca81d3d3 | [
"MIT"
] | null | null | null | ihome/apps/verifications/views.py | Noah-Smith-wgp/rentinghouse | 22ba71aa8b3b0c290b8c01cd2f4dd14bca81d3d3 | [
"MIT"
] | null | null | null | ihome/apps/verifications/views.py | Noah-Smith-wgp/rentinghouse | 22ba71aa8b3b0c290b8c01cd2f4dd14bca81d3d3 | [
"MIT"
] | null | null | null | import json
import random
import re
from django import http
from django.views import View
from django_redis import get_redis_connection
from apps.verifications.libs.captcha.captcha import captcha
from apps.verifications.txyun.sms_txy import Sms_txy
# Create your views here.
# Captcha (image verification code) endpoint.
class UserImagesCode(View):
    """Generate a captcha image and cache its answer in redis."""
    def get(self, request):
        # Identifier supplied by the client for this captcha instance.
        cur = request.GET.get('cur')
        # Generate the captcha text and its rendered image.
        text, image = captcha.generate_captcha()
        print('图形验证码是:', text)
        # Store the answer for one hour so SmsCode can validate it later.
        redis_conn = get_redis_connection('verify_code')
        redis_conn.setex('img_%s' % cur, 3600, text)
        # Respond with the raw image bytes.
        return http.HttpResponse(image, content_type='image/jpg')
# SMS verification endpoint.
class SmsCode(View):
    """Validate the captcha answer, then generate and send an SMS code."""
    def post(self, request):
        data_dict = json.loads(request.body.decode())
        # NOTE(review): "id" shadows the builtin; it is the captcha identifier
        # matching the "cur" key stored by UserImagesCode.
        id = data_dict.get('id')
        mobile = data_dict.get('mobile')
        text = data_dict.get('text')
        if not all([id, mobile, text]):
            return http.HttpResponseForbidden('缺少必传参数')
        # Mainland-China mobile format: 1 + [3-9] + nine digits.
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return http.HttpResponseForbidden('请输入正确的手机号码')
        # Connect to the redis database holding verification codes.
        redis_conn = get_redis_connection('verify_code')
        # Fetch the stored captcha answer.
        image_code_server = redis_conn.get('img_%s' % id)
        if image_code_server is None:
            # The captcha expired or never existed.
            return http.JsonResponse({'code': 4004, 'errmsg': '图形验证码失效'})
        # Delete the captcha so repeated guesses against it are impossible.
        redis_conn.delete('img_%s' % id)
        # Compare the captcha answers.
        image_code_server = image_code_server.decode()  # bytes -> str
        if text.lower() != image_code_server.lower():  # case-insensitive compare
            return http.JsonResponse({'code': 4004, 'errmsg': '输入图形验证码有误'})
        # Generate a six-digit SMS verification code.
        sms_code = '%06d' % random.randint(0, 999999)
        print("短信验证码是:", sms_code)
        # Store the SMS code (1 hour TTL), then send it via the Sms_txy
        # helper (presumably Tencent Cloud SMS — confirm).
        redis_conn.setex('sms_%s' % mobile, 3600, sms_code)
        Sms_txy(mobile, sms_code)
        return http.JsonResponse({'errno': 'OK', "errmsg": "发送成功"})
| 34.62069 | 75 | 0.631474 |
acf25dc208a13f7ea6f3b23e2bbcaf23a3ba9305 | 8,949 | py | Python | ee/clickhouse/queries/funnels/test/test_funnel_persons.py | EDsCODE/posthog | 75b86cae6a545c2ce319b2b830776d0945a87d05 | [
"MIT"
] | null | null | null | ee/clickhouse/queries/funnels/test/test_funnel_persons.py | EDsCODE/posthog | 75b86cae6a545c2ce319b2b830776d0945a87d05 | [
"MIT"
] | null | null | null | ee/clickhouse/queries/funnels/test/test_funnel_persons.py | EDsCODE/posthog | 75b86cae6a545c2ce319b2b830776d0945a87d05 | [
"MIT"
] | null | null | null | from uuid import uuid4
from ee.clickhouse.models.event import create_event
from ee.clickhouse.queries.funnels.funnel import ClickhouseFunnel
from ee.clickhouse.queries.funnels.funnel_persons import ClickhouseFunnelPersons
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.constants import INSIGHT_FUNNELS, TRENDS_FUNNEL
from posthog.models import Filter
from posthog.models.filters.mixins.funnel import FunnelWindowDaysMixin
from posthog.models.person import Person
from posthog.test.base import APIBaseTest
# Shared constants for the funnel-person tests in this module; the column
# constants name positions in the raw result rows.
FORMAT_TIME = "%Y-%m-%d 00:00:00"
MAX_STEP_COLUMN = 0
COUNT_COLUMN = 1
PERSON_ID_COLUMN = 2
def _create_person(**kwargs):
    """Create a Person and return a copy whose id is its uuid, so tests can
    compare directly against the uuids reported by the funnel queries."""
    person = Person.objects.create(**kwargs)
    return Person(id=person.uuid, uuid=person.uuid)
def _create_event(**kwargs):
    """Create a ClickHouse event, generating a fresh event_uuid for it."""
    kwargs.update({"event_uuid": uuid4()})
    create_event(**kwargs)
class TestFunnelPersons(ClickhouseTestMixin, APIBaseTest):
    def _create_sample_data_multiple_dropoffs(self):
        """Create 35 users: 5 finish all three steps, 10 drop off after
        step two, and 20 drop off after step one."""
        # Users 0-4: complete the whole funnel.
        for i in range(5):
            _create_person(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(event="step two", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-03 00:00:00")
            _create_event(event="step three", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-05 00:00:00")
        # Users 5-14: reach step two only.
        for i in range(5, 15):
            _create_person(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(event="step two", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-03 00:00:00")
        # Users 15-34: reach step one only.
        for i in range(15, 35):
            _create_person(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
    def test_first_step(self):
        """funnel_step=1 returns all 35 users who entered the funnel."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": 1,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        # NOTE(review): local name "filter" shadows the builtin.
        filter = Filter(data=data)
        results = ClickhouseFunnelPersons(filter, self.team)._exec_query()
        self.assertEqual(35, len(results))
    def test_last_step(self):
        """funnel_step=3 returns only the 5 users who completed all steps."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": 3,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        results = ClickhouseFunnelPersons(filter, self.team)._exec_query()
        self.assertEqual(5, len(results))
    def test_second_step_dropoff(self):
        """funnel_step=-2 returns the 20 users who did step one but not two."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            # Negative step selects people who dropped off AT that step.
            "funnel_step": -2,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        results = ClickhouseFunnelPersons(filter, self.team)._exec_query()
        self.assertEqual(20, len(results))
    def test_last_step_dropoff(self):
        """funnel_step=-3 returns the 10 users who did step two but not three."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            # Negative step selects people who dropped off AT that step.
            "funnel_step": -3,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        results = ClickhouseFunnelPersons(filter, self.team)._exec_query()
        self.assertEqual(10, len(results))
    def _create_sample_data(self):
        """Create 250 users who all complete the three funnel steps."""
        for i in range(250):
            _create_person(distinct_ids=[f"user_{i}"], team=self.team)
            _create_event(event="step one", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-01 00:00:00")
            _create_event(event="step two", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-03 00:00:00")
            _create_event(event="step three", distinct_id=f"user_{i}", team=self.team, timestamp="2021-05-05 00:00:00")
    def test_basic_offset(self):
        """Pagination over 250 matching persons: pages of 100, 100, then 50."""
        self._create_sample_data()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": 1,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        # NOTE(review): first page uses _exec_query while the offset pages use
        # run() — presumably equivalent here; confirm.
        results = ClickhouseFunnelPersons(filter, self.team)._exec_query()
        self.assertEqual(100, len(results))
        filter_offset = Filter(data={**data, "offset": 100,})
        results = ClickhouseFunnelPersons(filter_offset, self.team).run()
        self.assertEqual(100, len(results))
        filter_offset = Filter(data={**data, "offset": 200,})
        results = ClickhouseFunnelPersons(filter_offset, self.team).run()
        self.assertEqual(50, len(results))
def test_first_step_breakdowns(self):
    """Filtering funnel persons by the $browser breakdown of the first step.

    person1 (Chrome) completes all three steps; person2 (Safari) completes
    only the first two. Fix: removed a leftover debug ``print(results)``.
    """
    person1 = _create_person(distinct_ids=["person1"], team_id=self.team.pk)
    _create_event(
        team=self.team,
        event="sign up",
        distinct_id="person1",
        properties={"key": "val", "$browser": "Chrome"},
        timestamp="2020-01-01T12:00:00Z",
    )
    _create_event(
        team=self.team,
        event="play movie",
        distinct_id="person1",
        properties={"key": "val", "$browser": "Chrome"},
        timestamp="2020-01-01T13:00:00Z",
    )
    _create_event(
        team=self.team,
        event="buy",
        distinct_id="person1",
        properties={"key": "val", "$browser": "Chrome"},
        timestamp="2020-01-01T15:00:00Z",
    )
    person2 = _create_person(distinct_ids=["person2"], team_id=self.team.pk)
    _create_event(
        team=self.team,
        event="sign up",
        distinct_id="person2",
        properties={"key": "val", "$browser": "Safari"},
        timestamp="2020-01-02T14:00:00Z",
    )
    _create_event(
        team=self.team,
        event="play movie",
        distinct_id="person2",
        properties={"key": "val", "$browser": "Safari"},
        timestamp="2020-01-02T16:00:00Z",
    )
    data = {
        "insight": INSIGHT_FUNNELS,
        "date_from": "2020-01-01",
        "date_to": "2020-01-08",
        "interval": "day",
        "funnel_window_days": 7,
        "funnel_step": 1,
        "events": [{"id": "sign up", "order": 0}, {"id": "play movie", "order": 1}, {"id": "buy", "order": 2},],
        "breakdown_type": "event",
        "breakdown": "$browser",
    }
    filter = Filter(data=data)
    # no breakdown filter: both persons reached step 1
    results = ClickhouseFunnelPersons(filter, self.team)._exec_query()
    self.assertCountEqual([val[0] for val in results], [person1.uuid, person2.uuid])
    results = ClickhouseFunnelPersons(
        filter.with_data({"funnel_step_breakdown": "Chrome"}), self.team
    )._exec_query()
    self.assertCountEqual([val[0] for val in results], [person1.uuid])
    results = ClickhouseFunnelPersons(
        filter.with_data({"funnel_step_breakdown": "Safari"}), self.team
    )._exec_query()
    self.assertCountEqual([val[0] for val in results], [person2.uuid])
    # comma-separated breakdown values select the union
    results = ClickhouseFunnelPersons(
        filter.with_data({"funnel_step_breakdown": "Safari, Chrome"}), self.team
    )._exec_query()
    self.assertCountEqual([val[0] for val in results], [person2.uuid, person1.uuid])
| 39.078603 | 119 | 0.568779 |
acf25e405fcf44a00430eab2a0ac3d6d38f6b080 | 22,743 | py | Python | ibis/utilities/utilities.py | shivaathreya/ibis | f99e3b7a677652a8a1c00a069e645d97682e839c | [
"Apache-2.0"
] | 50 | 2018-09-27T13:03:45.000Z | 2021-04-06T15:36:59.000Z | ibis/utilities/utilities.py | shivaathreya/ibis | f99e3b7a677652a8a1c00a069e645d97682e839c | [
"Apache-2.0"
] | null | null | null | ibis/utilities/utilities.py | shivaathreya/ibis | f99e3b7a677652a8a1c00a069e645d97682e839c | [
"Apache-2.0"
] | 14 | 2018-10-03T20:36:15.000Z | 2021-05-18T07:08:57.000Z | """Utility methods used for ibis."""
import os
import string
import subprocess
import itertools
import getpass
import re
import requests
from mako.template import Template
from ibis.custom_logging import get_logger
from ibis.utilities.oozie_helper import OozieAPi
try:
# available only on linux
from pwd import getpwuid
except ImportError:
pass
requests.packages.urllib3.disable_warnings()
class Utilities(object):
"""Provides additional functionality to ibis"""
def __init__(self, cfg_mgr):
    """Keep the config manager and build logger/oozie helpers from it."""
    self.cfg_mgr = cfg_mgr
    self.logger = get_logger(cfg_mgr)
    self.oozie_api = OozieAPi(cfg_mgr)
def get_lines_from_file(self, file_name):
    """Return the lines of ``file_name`` as a list.

    Fix: the error path previously returned ``''`` (a str) while the
    success path returned a list from ``readlines()``; callers now
    always receive a list ([] when the file cannot be opened).
    """
    try:
        with open(file_name, "r") as file_h:
            return file_h.readlines()
    except IOError:
        self.logger.error('Cannot open {file} .'.format(file=file_name))
        return []
def gen_job_properties(self, workflow_name, table_list, appl_id=None,
                       table=None):
    """Generate <workflow>_job.properties and the matching job-config XML.

    Fix: removed the dead ``if not appl_id: appl_id = None`` branch and
    the write-only ``status`` flag (the method always returned True).

    Args:
        workflow_name: workflow name without extension
        table_list: list of ingested table names (recorded as a comment)
        appl_id: automation appl id (kept for interface compatibility;
            not used by this method)
        table: optional it_table row supplying table/database/query props

    Returns:
        True (generation does not report partial failures)
    """
    job_properties = list(self.get_lines_from_file(self.cfg_mgr.job_prop_template))
    name_val = workflow_name.split('.')[0]
    file_name = os.path.join(self.cfg_mgr.files,
                             '{0}_job.properties'.format(name_val))
    wf_path = ('oozie.wf.application.path=${nameNode}' +
               self.cfg_mgr.oozie_workspace + workflow_name + '.xml\n')
    # SafeDict leaves any unknown {placeholders} from the template intact
    wf_path = string.Formatter().vformat(
        wf_path, (), SafeDict({'workflow_name': workflow_name}))
    job_properties.append(wf_path)
    if table:
        job_properties.append('source_table_name={0}\n'.format(table.table_name))
        job_properties.append('source_database_name={0}\n'.format(table.database))
        if table.query:
            job_properties.append('sql_query={0}\n'.format(table.query))
    with open(file_name, "wb+") as file_h:
        file_h.write(''.join(job_properties))
        file_h.write('workflowName={job}\n'.format(job=workflow_name))
        if table_list:
            file_h.write('#List of tables ingested: {0}\n'.format(
                ', '.join(table_list)))
    self.logger.info('Generated job properties file: {0}'.format(file_name))
    # the config XML is built from the properties *without* the
    # workflowName/table-list trailer, matching the original behavior
    self.gen_job_config_xml(name_val, job_properties)
    return True
def gen_job_config_xml(self, workflow_name, job_properties):
    """Render the oozie job-config XML used to start jobs via the REST API.

    Fix: properties are split with ``split('=', 1)`` so a value that
    itself contains '=' (e.g. a URL query string) no longer raises
    "too many values to unpack".
    """
    wf_props = {}
    for prop_line in self.clean_lines(job_properties):
        prop_name, prop_val = prop_line.split('=', 1)
        wf_props[prop_name] = prop_val
    # TODO: Move username to be a property
    wf_props["user.name"] = "fake_username"
    template = Template(filename=self.cfg_mgr.job_config_xml_template,
                        format_exceptions=True)
    xml = template.render(wf_props=wf_props)
    file_name = os.path.join(self.cfg_mgr.files,
                             '{0}_props_job.xml'.format(workflow_name))
    with open(file_name, "wb+") as file_h:
        file_h.write(xml)
    self.logger.info('Generated job config xml: {0}'.format(file_name))
def run_workflow(self, workflow_name):
    """Submit a workflow with ``oozie job -config ... -run``.

    Verifies the workflow XML exists in HDFS first. Returns True when
    oozie accepted the job, False otherwise.
    """
    config_file = os.path.join(
        self.cfg_mgr.saves, '{name}_job.properties'.format(name=workflow_name))
    xml_file = os.path.join(
        self.cfg_mgr.oozie_workspace, '{name}.xml'.format(name=workflow_name))
    # `hadoop fs -test -e` exits non-zero when the path is absent
    exists_cmd = ['hadoop', 'fs', '-test', '-e', xml_file]
    self.logger.info(" ".join(exists_cmd))
    if self.run_subprocess(exists_cmd) != 0:
        self.logger.error('XML file missing in HDFS: {0}'.format(xml_file))
        return False
    run_cmd = ['oozie', 'job', '-config', config_file, '-run']
    self.logger.info('Running: ' + ' '.join(run_cmd))
    proc = subprocess.Popen(run_cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, err = proc.communicate()
    if proc.returncode != 0:
        self.logger.error("Workflow {0} did not run".format(workflow_name))
        self.logger.error(err)
        return False
    self.logger.info(output)
    self.logger.info('Workflow {0} started - please check Hue'.format(workflow_name))
    return True
def run_xml_workflow(self, workflow_name):
    """kinit for the target env, then start the workflow via the oozie API.

    Returns True when the job was submitted, False otherwise.
    """
    config_file = os.path.join(
        self.cfg_mgr.saves, '{name}_props_job.xml'.format(name=workflow_name))
    env = self.cfg_mgr.for_env if self.cfg_mgr.for_env else self.cfg_mgr.env
    # ordered (substring, krb5 config) pairs; checked dev -> int -> prod
    env_table = [('dev', "/opt/app/kerberos/dev_krb5.conf"),
                 ('int', "/opt/app/kerberos/int_krb5.conf"),
                 ('prod', "/opt/app/kerberos/prod_krb5.conf")]
    # the keytab file name is identical for every environment
    keytab = 'fake.keytab'
    for tag, krb_conf in env_table:
        if tag in env:
            os.environ['KRB5_CONFIG'] = krb_conf
            break
    else:
        raise ValueError('Unrecognized --for-env value: {0}'.format(
            self.cfg_mgr.for_env))
    kinit_cmd = ["kinit", "-S",
                 "krbtgt/{0}.COM@{0}.COM".format(self.cfg_mgr.kerberos),
                 "fake_username@{0}.COM".format(self.cfg_mgr.kerberos),
                 "-k", "-t", "/opt/app/kerberos/{0}".format(keytab)]
    proc = subprocess.Popen(kinit_cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, err = proc.communicate()
    if proc.returncode != 0:
        self.logger.error('kinit failed')
        self.logger.error(output)
        self.logger.error(err)
        return False
    self.logger.info(output)
    self.logger.info('kinit successful')
    run = self.oozie_api.start_job(config_file)
    if run:
        self.logger.info("Job started. Please check HUE")
    else:
        self.logger.error("Job didn't run!")
    return run
def put_dry_workflow(self, workflow_name):
    """Copy <workflow>.xml into HDFS /tmp/ and open its permissions.

    Returns True when both the put and the chmod succeeded.
    """
    hdfs_dir = '/tmp/'
    local_path = os.path.join(self.cfg_mgr.files, workflow_name + '.xml')
    hdfs_path = os.path.join(hdfs_dir, workflow_name + '.xml')
    put_cmd = ['hadoop', 'fs', '-put', '-f', local_path, hdfs_dir]
    self.logger.info('Running: {0}'.format(" ".join(put_cmd)))
    ok = self.run_subprocess(put_cmd) == 0
    if ok:
        chmod_cmd = ['hadoop', 'fs', '-chmod', '-R', '777', hdfs_path]
        ok = self.run_subprocess(chmod_cmd) == 0
    if ok:
        self.logger.info('Put workflow to hdfs: {0}'.format(hdfs_path))
    return ok
def gen_dryrun_workflow(self, workflow_name):
    """Clone the workflow and its properties for a dry run via HDFS /tmp.

    Rewrites the oozie application path in the properties to point at a
    '<name>_dryrun.xml' copy, pushes that copy to HDFS, and returns
    (status, dryrun_workflow_name).
    """
    hdfs_tmp = '/tmp/'
    dry_name = workflow_name + '_dryrun'
    props_path = os.path.join(self.cfg_mgr.files,
                              '{0}_job.properties'.format(workflow_name))
    with open(props_path, 'r') as props_fh:
        prop_lines = props_fh.readlines()
    wf_file = self.cfg_mgr.oozie_workspace + workflow_name + '.xml'
    # point the application path at the temp copy; other lines pass through
    rewritten = [
        line.replace(wf_file, hdfs_tmp + dry_name + '.xml')
        if wf_file in line else line
        for line in prop_lines
    ]
    dry_props_name = '{0}_dryrun_job.properties'.format(workflow_name)
    dry_props_path = os.path.join(self.cfg_mgr.files, dry_props_name)
    with open(dry_props_path, 'wb') as dry_props_fh:
        dry_props_fh.write("".join(rewritten))
    self.logger.info(
        "Created temp properties for dryrun: {0}".format(dry_props_path))
    self.chmod_files([dry_props_name])
    with open(os.path.join(self.cfg_mgr.files, workflow_name + '.xml'),
              'r') as wf_fh:
        wf_txt = wf_fh.read()
    dry_wf_path = os.path.join(self.cfg_mgr.files, dry_name + '.xml')
    with open(dry_wf_path, 'wb') as dry_wf_fh:
        dry_wf_fh.write(wf_txt)
    self.logger.info(
        "Created temp workflow for dryrun: {0}".format(dry_wf_path))
    self.chmod_files([dry_name + '.xml'])
    status = self.put_dry_workflow(dry_name)
    return status, dry_name
def dryrun_workflow(self, workflow_name):
    """Validate a workflow with ``oozie job -dryrun``.

    A temp copy of the workflow is generated, validated, then removed
    from HDFS. Returns True on a successful dry run.
    """
    status = False
    try:
        ret, dry_name = self.gen_dryrun_workflow(workflow_name)
        if ret:
            workflow_name = dry_name
        self.logger.info('Dry running workflow: {0}.xml'.format(workflow_name))
        config_file = os.path.join(self.cfg_mgr.files,
                                   '{0}_job.properties'.format(workflow_name))
        cmd = ['oozie', 'job', '-config', config_file, '-dryrun']
        self.logger.info('Running: {0}'.format(" ".join(cmd)))
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        _, err = proc.communicate()
        # remove the HDFS temp copy whether or not the dry run passed
        self.rm_dry_workflow(dry_name)
        if proc.returncode != 0:
            self.logger.error('Dry run failed!')
            raise ValueError(err)
        self.logger.info('Dry run successful')
        status = True
    except ValueError as error:
        err_msg = error.message  # Python 2 idiom, consistent with this module
        err_msg += 'Workflow {workflow_xml}.xml is not valid'.format(
            workflow_xml=workflow_name)
        self.logger.error(err_msg)
    return status
def rm_dry_workflow(self, workflow_name):
    """Delete the temporary dry-run workflow XML from HDFS /tmp."""
    file_path = '/tmp/' + workflow_name + '.xml'
    proc = subprocess.Popen(['hadoop', 'fs', '-rm', file_path],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
        # failure to clean up is non-fatal; just warn
        self.logger.warning('hdfs rm {0} failed!'.format(file_path))
    else:
        self.logger.info('success: hdfs rm {0}'.format(file_path))
def gen_kornshell(self, workflow_name):
    """Write the korn-shell launcher script for automation-scheduled jobs.

    Renders the ksh template with the job name, save location, oozie
    base URL (the '/v2/' REST suffix stripped) and kerberos realm.
    Returns True once the script is written.
    """
    job_name = workflow_name.split('.')[0]
    oozie_url = self.cfg_mgr.oozie_url.replace('/v2/', '')
    output_file = os.path.join(self.cfg_mgr.files,
                               '{name}.ksh'.format(name=job_name))
    template = Template(filename=self.cfg_mgr.korn_shell_template,
                        format_exceptions=True)
    ksh_text = template.render(job_name=job_name,
                               saved_loc=self.cfg_mgr.saves,
                               oozie_url=oozie_url,
                               kerberos_realm=self.cfg_mgr.kerberos)
    with open(output_file, "wb") as file_out:
        file_out.write(ksh_text)
    self.logger.info('Generated kornshell script: {0}'.format(output_file))
    return True
@classmethod
def print_box_msg(cls, msg, border_char='#'):
    """Wrap ``msg`` in a bordered box for visually prominent logging.

    Lines longer than 80 characters are word-wrapped; continuation
    pieces are prefixed with a tab that later renders as three spaces.

    Args:
        msg: log string, possibly multi-line
        border_char: character used to draw the border
    """
    max_line_len = 80
    tab_width = 3          # a tab renders as this many spaces
    pad = ' ' * 4
    wrapped = []
    for raw_line in msg.splitlines():
        if len(raw_line) <= max_line_len:
            wrapped.append(raw_line)
            continue
        words = raw_line.split(' ')
        if len(words) == 1:
            # a single unbreakable token: keep it as-is
            wrapped.append(raw_line)
            continue
        pieces = []
        width = 0
        current = ''
        for word in words:
            if width < max_line_len:
                current += word + ' '
                width += len(word)
            else:
                # the first wrapped piece keeps no indent; the rest get a tab
                pieces.append(current if len(pieces) == 0 else '\t' + current)
                current = word + ' '
                width = len(word)
        if len(current) != 0 and len(current) < max_line_len:
            pieces.append('\t' + current)
        else:
            pieces.append(current)
        wrapped = wrapped + pieces
    # NOTE(review): the widest line is found *before* tab expansion and
    # only the winner is expanded — preserved from the original logic
    longest = len(max(wrapped, key=len).replace('\t', ' ' * tab_width))
    border = pad + border_char * (longest + 10) + '\n'
    blank = (pad + border_char + pad + ' ' * longest + pad +
             border_char + '\n')
    body = ''
    for line in wrapped:
        line = line.replace('\t', ' ' * tab_width)
        body += (pad + border_char + pad + line +
                 ' ' * (longest - len(line)) + pad + border_char + '\n')
    return '\n\n' + border + blank + body + blank + border
@classmethod
def pairwise(cls, iterable):
    """Iterate over (current, next) pairs of ``iterable``.

    Fix: ``itertools.izip`` exists only on Python 2; fall back to the
    builtin ``zip`` when it is absent so the helper is portable.
    """
    first, second = itertools.tee(iterable)
    next(second, None)
    zip_func = getattr(itertools, 'izip', zip)
    return zip_func(first, second)
def run_subprocess(self, command_list):
    """Run ``command_list`` as a child process and return its exit code.

    :param command_list: list of command tokens
    :return: integer return code of the finished process
    """
    child = subprocess.Popen(command_list)
    # wait() blocks until the child exits and returns its code
    return child.wait()
def chmod_files(self, files):
    """chmod 0774 each named file under cfg_mgr.files owned by this user.

    Args:
        files (list): file names relative to the cfg_mgr.files directory
    """
    mode = 0o774
    current_user = getpass.getuser()
    for file_name in files:
        path = os.path.join(self.cfg_mgr.files, file_name)
        # only the owner may chmod; skip files owned by someone else
        if current_user in Utilities.find_owner(path):
            os.chmod(path, mode)
def sort_tables_by_source(self, tables):
    """Group tables by the RDBMS named in their jdbc url.

    Fix: the original detected "no source matched" by comparing the
    enumerate index against ``len(sources.keys()) - 1``, which depends
    on dict iteration order; a ``for/else`` makes the intent explicit.
    Observable behavior is unchanged: one error log and an empty dict
    when any table's url matches no supported source.

    :param tables: list of table dicts with a 'jdbcurl' key
    :return: dict mapping source name -> list of tables, or {} on error
    """
    sources = {'teradata': [],
               'oracle': [],
               'sqlserver': [],
               'db2': [],
               'mysql': []}
    for table in tables:
        for key in sources:
            if 'jdbc:' + key in table['jdbcurl'].lower():
                sources[key].append(table)
                break
        else:
            # no supported source matched this table's url
            self.logger.error(
                'Sort by source can\'t group {table} with url, '
                '{jdbc}. Supported groupings, '
                '{keys}'.format(table=table['source_table_name'],
                                jdbc=table['jdbcurl'],
                                keys=sources.keys()))
            return {}
    return sources
def sort_tables_by_domain(self, tables):
    """Group tables by their 'domain' value.

    :param tables: list of table dicts with a 'domain' key
    :return: dict mapping domain -> list of tables in that domain
    """
    domains = {}
    for table in tables:
        domains.setdefault(table['domain'], []).append(table)
    return domains
def sort_tables_by_database(self, tables):
    """Group tables by their 'source_database_name' value.

    :param tables: list of table dicts
    :return: dict mapping database name -> list of tables in it
    """
    databases = {}
    for table in tables:
        databases.setdefault(table['source_database_name'], []).append(table)
    return databases
def sort_tables_by_schedule(self, tables):
    """Group tables by load-frequency prefix (first 3 chars of 'load').

    Any table with a missing/invalid 'load' value empties the result:
    the error is logged per bad table and {} is ultimately returned.

    :param tables: list of table dicts
    :return: dict mapping frequency -> list of tables, or {} on error
    """
    schedules = dict(
        (freq, []) for freq in self.cfg_mgr.allowed_frequencies.keys())
    for table in tables:
        try:
            frequency = table['load'][0:3]
            if frequency in schedules:
                schedules[frequency].append(table)
        except (KeyError, IndexError) as e:
            err_msg = ("Sorting tables by schedule, table {table} "
                       "has improper load value. Supported values "
                       "are {values} \n {e}")
            err_msg = err_msg.format(
                table=table['source_table_name'],
                values=self.cfg_mgr.allowed_frequencies.keys(), e=e)
            self.logger.error(err_msg)
            self.logger.error(
                "Error found in utilities.sort_tables_by_schedule - "
                "reason %s" % e.message)
            # clear the result; later valid tables are then ignored
            schedules = {}
    return schedules
@classmethod
def clean_lines(cls, lines):
    """Strip whitespace from each line and drop blank lines.

    Args:
        lines: list of raw lines, e.g. the result of readlines()
    """
    # re-split on real line boundaries in case entries contain newlines
    joined = ''.join(lines)
    return [line.strip() for line in joined.splitlines() if line.strip()]
@classmethod
def replace_special_chars(cls, value):
    """Remove every character that is not alphanumeric or underscore."""
    return re.sub(r'[^A-Za-z0-9_]', '', value)
@classmethod
def find_owner(cls, filename):
    """Return the username owning ``filename``.

    Args:
        filename: absolute path of the file to inspect
    """
    stat_info = os.stat(filename)
    return getpwuid(stat_info.st_uid).pw_name
class WorkflowTablesMapper(object):
    """Holds workflow and table names for printing status reports."""

    def __init__(self, table_id):
        """Start every workflow-name field at an 'unknown' placeholder."""
        placeholder = '--x--x--'
        self.table_id = table_id
        self.view_table_names = placeholder
        self.incr_wf = placeholder
        self.full_wf = placeholder
        self.is_subwf_wf = 'No'
class SafeDict(dict):
    """dict for str.format that leaves unknown {placeholders} untouched.

    By falsetru, found at http://stackoverflow.com/questions/17215400/
    python-format-string-unused-named-arguments
    """

    def __missing__(self, key):
        """Render a missing key back as its own literal placeholder."""
        return '{' + key + '}'
| 40.6125 | 79 | 0.543376 |
acf25f0ff2604c1681ef7052809342a720881ead | 1,391 | py | Python | hello.py | zorawar87/CAS | ef8e08649d1f16d616901eb753271efe3c7e8148 | [
"MIT"
] | null | null | null | hello.py | zorawar87/CAS | ef8e08649d1f16d616901eb753271efe3c7e8148 | [
"MIT"
] | null | null | null | hello.py | zorawar87/CAS | ef8e08649d1f16d616901eb753271efe3c7e8148 | [
"MIT"
] | null | null | null | from flask import Flask, url_for, render_template, request
from .TextAnalyser import TextAnalyser
import json
app = Flask(__name__, static_url_path='/static')
@app.route("/")
def index():
return "index page."
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
    """Render the hello template, greeting ``name`` when it is given."""
    return render_template('hello.html', name=name)
@app.route('/post/<int:post_id>')
def show_post(post_id):
    """Display the post with the given integer id."""
    # Flask's <int:...> converter guarantees post_id is an int here
    return 'Post {0:d}'.format(post_id)
@app.route('/path/<path:subpath>')
def show_path(subpath):
    """Echo back the captured sub-path (may contain slashes)."""
    return "Subpath: {0}".format(subpath)
@app.route('/cas/', methods=['POST', 'GET'])
def cas_app():
    """Analyse a submitted blog post and render the CAS analysis page.

    GET renders the empty form; POST runs TextAnalyser over the posted
    text and shows its top key phrase, score, and sample articles.

    Fixes: the analyser was constructed twice per POST (once just to
    print ``retrieve()``) and two debug ``print`` calls were left in;
    it is now built once and the prints are removed.
    """
    if request.method == "POST":
        blogpost = request.form["blogpost"]
        info = TextAnalyser(blogpost).getKeyInfo()
        return render_template(
            'cas-analysis.html',
            blogpost=blogpost,
            analysis={"keyphrase": info["keyPhrases"][0],
                      "score": info["score"] * 100},
            # placeholder related-article entries shown in the template
            articles=[
                {"score": 0.5, "description": "this is positive",
                 "link": "https://google.com", "website": "google.com"},
                {"score": 0.6, "description": "this is negative",
                 "link": "https://yahoo.com", "website": "yahoo.com"},
            ],
        )
    return render_template('cas-analysis.html')
| 33.119048 | 122 | 0.608196 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.