max_stars_repo_path
stringlengths
3
269
max_stars_repo_name
stringlengths
4
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.05M
score
float64
0.23
5.13
int_score
int64
0
5
src/clic/cloud.py
NathanRVance/clic
2
18000
#!/usr/bin/env python3
"""Cloud-provider abstraction for clic, plus the Google Compute Engine backend."""
from clic import nodes
import time
import os
import traceback  # fix: used by every except handler below but was never imported
import logging as loggingmod

logging = loggingmod.getLogger('cloud')
logging.setLevel(loggingmod.WARNING)


def getCloud():
    """Return the configured cloud backend (currently always Google Compute)."""
    return gcloud()


class abstract_cloud:
    """Interface that concrete cloud backends implement."""

    def __init__(self):
        pass

    def makeImage(self, instanceName, recreateInstance):
        pass

    def create(self, node):
        pass

    # fix: the following methods were missing `self`, so calling them on an
    # instance raised TypeError; they are now proper instance methods.
    def delete(self, node):
        pass

    def deleteDisk(self, diskName):
        pass

    def getDisks(self):
        # Return: [diskName, ...]
        pass

    def getSshKeys(self):
        # Return: [[keyUser, keyValue], ...]
        pass

    def setSshKeys(self, keys):
        # keys: [[keyUser, keyValue], ...]
        pass

    def nodesUp(self, running):
        # Return: [{'node' : node, 'name': name, 'running' : True|False, 'ip' : IP} ...]
        pass

    def getStartupScript(self):
        """Build the list of shell commands a freshly booted compute node runs.

        The commands align compute-node uids/gids with the head node's, then
        tunnel NFS over ssh and mount /home and /etc/slurm from the head node.
        """
        from pathlib import Path
        from pwd import getpwnam
        # First give every /home user a unique provisional uid/gid so the
        # follow-up renumbering cannot collide.
        cmds = ['index=2000; for user in `ls /home`; do usermod -o -u $index $user; groupmod -o -g $index $user; let "index += 1"; done']
        for path in Path('/home').iterdir():
            if path.is_dir():
                localUser = path.parts[-1]
                try:
                    uid = getpwnam(localUser).pw_uid
                    cmds.append('usermod -o -u {0} {1}'.format(uid, localUser))
                    gid = getpwnam(localUser).pw_gid
                    cmds.append('groupmod -o -g {0} {1}'.format(gid, localUser))
                except KeyError:
                    # User exists in /home but not in the local passwd db; skip.
                    continue
        import configparser
        config = configparser.ConfigParser()
        config.read('/etc/clic/clic.conf')
        user = config['Daemon']['user']
        hostname = os.popen('hostname -s').read().strip()
        if not config['Daemon'].getboolean('cloudHeadnode'):
            import ipgetter
            cmds.append('sudo clic-synchosts {0}:{1}'.format(hostname, ipgetter.myip()))
        # Port 6817 traffic is slurm compute to head, 6818 is slurm head to compute
        #cmds.append('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i /home/{0}/.ssh/id_rsa -fN -L 6817:localhost:6817 {0}@{1}'.format(user, hostname))
        #cmds.append('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i /home/{0}/.ssh/id_rsa -fN -R 6818:localhost:6818 {0}@{1}'.format(user, hostname))
        # NOTE(review): placement of the NFS tunnel relative to the
        # cloudHeadnode branch above is ambiguous in the original source;
        # it is applied unconditionally here -- confirm against upstream.
        cmds.append('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i /home/{0}/.ssh/id_rsa -fN -L 3049:localhost:2049 {0}@{1}'.format(user, hostname))
        cmds.append('sudo mount -t nfs4 -o port=3049,rw localhost:/home /home')
        cmds.append('if [ ! -d "/bind-root" ]; then sudo mkdir /bind-root; fi')
        cmds.append('sudo mount --bind / /bind-root')
        cmds.append('for user in `ls /home`; do sudo mount --bind /bind-root/home/$user/.ssh /home/$user/.ssh; done')
        cmds.append('sudo mount -t nfs4 -o port=3049,ro localhost:/etc/slurm /etc/slurm')
        cmds.append('sudo systemctl restart slurmd.service')
        return cmds


class gcloud(abstract_cloud):
    """Google Compute Engine backend.

    Docs: https://developers.google.com/resources/api-libraries/documentation/compute/v1/python/latest/
    """

    def __init__(self):
        import configparser
        config = configparser.ConfigParser()
        config.read('/etc/clic/clic.conf')
        settings = config['Cloud']
        self.project = settings['project']
        self.zone = settings['zone']
        self.image = settings['image']
        import googleapiclient.discovery
        # Must first do sudo gcloud auth application-default login
        self.api = googleapiclient.discovery.build('compute', 'v1')

    def isDone(self, operation):
        """Return True when the given GCE operation has reached status DONE."""
        from googleapiclient.errors import HttpError
        # Zone-scoped operations 404 on the global endpoint and vice versa;
        # try the zone endpoint first and fall back to the global one.
        try:
            return self.api.zoneOperations().get(project=self.project, zone=self.zone, operation=operation['name']).execute()['status'] == 'DONE'
        except HttpError:
            return self.api.globalOperations().get(project=self.project, operation=operation['name']).execute()['status'] == 'DONE'

    def wait(self, operation):
        """Block (polling once per second) until `operation` completes."""
        while True:
            if self.isDone(operation):
                break
            time.sleep(1)

    def makeImage(self, instanceName, recreateInstance=False):
        """Turn instanceName's boot disk into the configured image.

        Deletes the instance (preserving the disk), images the disk, and
        optionally recreates the instance afterwards.
        """
        diskName = [disk for disk in self.api.instances().get(project=self.project, zone=self.zone, instance=instanceName).execute()['disks'] if disk['boot']][0]['deviceName']
        print("Setting disk autodelete to False")
        self.wait(self.api.instances().setDiskAutoDelete(project=self.project, zone=self.zone, instance=instanceName, autoDelete=False, deviceName=diskName).execute())
        # Grab instance data to recreate it later
        machineType = self.api.instances().get(project=self.project, zone=self.zone, instance=instanceName).execute()['machineType']
        print("Deleting instance")
        self.wait(self.deleteName(instanceName))
        # Create the image
        self.diskToImage(diskName)
        if recreateInstance:
            print("Recreating instance")
            config = {'name': instanceName,
                      'machineType': machineType,
                      'disks': [
                          {
                              'boot': True,
                              'autoDelete': True,
                              'deviceName': diskName,
                              'source': 'projects/{0}/zones/{1}/disks/{2}'.format(self.project, self.zone, diskName)
                          }
                      ],
                      "serviceAccounts": [
                          {
                              "scopes": [
                                  "https://www.googleapis.com/auth/cloud-platform"
                              ]
                          }
                      ],
                      # Specify a network interface with NAT to access the public
                      # internet.
                      'networkInterfaces': [{
                          'network': 'global/networks/default',
                          'accessConfigs': [
                              {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
                          ]
                      }]
                      }
            self.wait(self.api.instances().insert(project=self.project, zone=self.zone, body=config).execute())

    def diskToImage(self, diskName):
        """Create the configured image (and image family) from diskName."""
        print("Creating image")
        self.wait(self.api.images().insert(project=self.project, body={
            'sourceDisk': 'zones/{0}/disks/{1}'.format(self.zone, diskName),
            'name': self.image,
            'family': self.image
        }).execute())

    def create(self, node):
        """Start an instance for `node` from the latest image in the family."""
        try:
            # Get the latest image
            image_response = self.api.images().getFromFamily(project=self.project, family=self.image).execute()
            source_disk_image = image_response['selfLink']
            machine_type = 'zones/{0}/machineTypes/n1-{1}-{2}'.format(self.zone, node.partition.mem, node.partition.cpus)
            config = {'name': node.name,
                      'machineType': machine_type,
                      'disks': [
                          {
                              'boot': True,
                              'autoDelete': True,
                              'initializeParams': {
                                  # 10% headroom over the partition's disk size.
                                  'diskSizeGb': int(node.partition.disk * 1.1),
                                  'sourceImage': source_disk_image,
                              }
                          }
                      ],
                      'metadata': {
                          'items': [
                              {
                                  'key': 'startup-script',
                                  'value': '#! /bin/bash\n{}'.format('\n'.join(self.getStartupScript()))
                              }
                          ]
                      },
                      # Specify a network interface with NAT to access the public
                      # internet.
                      'networkInterfaces': [{
                          'network': 'global/networks/default',
                          'accessConfigs': [
                              {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
                          ]
                      }]
                      }
            return self.api.instances().insert(project=self.project, zone=self.zone, body=config).execute()
        except Exception:
            logging.error(traceback.format_exc())

    def delete(self, node):
        return self.deleteName(node.name)

    def deleteName(self, name):
        """Best-effort instance deletion; logs and returns None on failure."""
        try:
            return self.api.instances().delete(project=self.project, zone=self.zone, instance=name).execute()
        except Exception:
            logging.error(traceback.format_exc())

    def deleteDisk(self, diskName):
        """Best-effort disk deletion; logs and returns None on failure."""
        try:
            return self.api.disks().delete(project=self.project, zone=self.zone, disk=diskName).execute()
        except Exception:
            logging.error(traceback.format_exc())

    def getDisks(self):
        """Return the names of all disks in the project's zone ([] on error)."""
        try:
            return [disk['name'] for disk in self.api.disks().list(project=self.project, zone=self.zone).execute().get('items', [])]
        except Exception:
            logging.error(traceback.format_exc())
            return []

    def getSshKeys(self):
        """Return project-wide ssh keys as [[user, key], ...] ([] on error)."""
        keys = []
        try:
            for key in next(value['value'] for value in self.api.projects().get(project=self.project).execute()['commonInstanceMetadata']['items'] if value['key'] == 'sshKeys').split('\n'):
                keys.append(key.split(':', 1))
        except Exception:
            logging.error(traceback.format_exc())
        return keys

    def setSshKeys(self, keys):
        """Replace the project-wide sshKeys metadata with `keys`."""
        try:
            current = self.api.projects().get(project=self.project).execute()['commonInstanceMetadata']
            formatKeys = [':'.join(key) for key in keys]
            next(value for value in current['items'] if value['key'] == 'sshKeys')['value'] = '\n'.join(formatKeys)
            self.wait(self.api.projects().setCommonInstanceMetadata(project=self.project, body=current).execute())
        except Exception:
            logging.error(traceback.format_exc())

    def nodesUp(self, running):
        """List instances; if `running` is truthy, only RUNNING ones.

        Returns [] on API failure (fix: previously returned None, unlike
        getDisks, which made callers crash when iterating the result).
        """
        try:
            allNodes = []
            for item in self.api.instances().list(project=self.project, zone=self.zone).execute().get('items', []):
                node = {'node': nodes.getNode(item['name']),
                        'name': item['name'],
                        'running': item['status'] == 'RUNNING'}
                if node['running']:
                    node['ip'] = item['networkInterfaces'][0]['accessConfigs'][0]['natIP']
                else:
                    node['ip'] = ''
                allNodes.append(node)
            if not running:
                return allNodes
            else:
                return [node for node in allNodes if node['running']]
        except Exception:
            logging.error(traceback.format_exc())
            return []


def main():
    """Command-line entry point for image-management operations."""
    import argparse
    parser = argparse.ArgumentParser(description='Execute cloud API commands')
    from clic import version
    parser.add_argument('-v', '--version', action='version', version=version.__version__)
    image = parser.add_argument_group()
    image.add_argument('--image', metavar='NAME', nargs=1, help='Create an image from NAME')
    image.add_argument('--recreate', action='store_true', help='Recreate NAME after creating an image')
    args = parser.parse_args()
    if args.image:
        getCloud().makeImage(args.image[0], args.recreate)
2.125
2
HyperUnmixing/visualization.py
mdbresh/HyperUnmixing
1
18001
import numpy as np
import pandas as pd
import ipywidgets as widgets
import matplotlib.pyplot as plt
from skimage.measure import label, regionprops, regionprops_table
from skimage.color import label2rgb


def Wav_2_Im(im, wn):
    '''
    Input a 3-D datacube and output a normalized slice at one wavenumber.

    Parameters
    ----------
    im : array-like image.
        Input data, shape (rows, cols, n_wavenumbers).
    wn : integer.
        Wavenumber; the axis is assumed to start at 750 cm^-1 with one
        slice per integer wavenumber, so slice index is wn - 750.

    Returns
    ----------
    slice : ndarray.
        One 2-D slice of the input, min-max normalized to [0, 1].
    '''
    img_norm = np.empty(im.shape, dtype=np.float32)
    # Normalize each wavenumber slice independently.
    # Fix: the original iterated np.linspace(0, N-1, num=N-1).astype(np.int),
    # which produced non-integer steps truncated to int (skipping/duplicating
    # slices), stored each result at normalized[i-1] (off-by-one misalignment),
    # and used np.int, which was removed in NumPy >= 1.24.
    for i in range(im.shape[2]):
        image = im[:, :, i]
        img_norm[:, :, i] = (image - np.min(image)) / (np.amax(image) - np.min(image))
    im_slice = img_norm[:, :, wn - 750]
    return im_slice


def AreaFraction(im, norm_im, image_size):
    '''
    Input test image, normalized NMF coefficients image, and image size.
    Outputs a dictionary of computed properties for regions of interest, a
    multidimensional array containing threshold masks, and a list of computed
    area fractions for the areas of interest in each threshold mask.

    Parameters
    ----------
    im : array-like image.
        Image slice to measure.
    norm_im : multidimensional array-like image
        Image of normalized NMF coefficients.
    image_size : integer.
        Size of the image.

    Returns
    ---------
    regions : dict.
        Dictionary of regions of interest and their computed properties.
    mask : multidimensional array-like image.
        Multidimensional array with each threshold mask image.
    area_frac : list.
        List of computed area fractions of DPPDTT.
    '''
    # Set up threshold masks (vectorized: equivalent to the original triple
    # loop assigning 1 where norm_im >= threshold and 0 otherwise).
    percents = np.round(np.arange(0.5, 1.0, 0.05), 2)  # array of thresholds
    mask = np.zeros((norm_im.shape[0], norm_im.shape[1], 10))  # ten tested thresholds
    for h in range(mask.shape[2]):
        mask[:, :, h] = (norm_im >= percents[h]).astype(mask.dtype)
    # Compute region properties of labeled images
    regions = {}
    props = ('area', 'major_axis_length', 'minor_axis_length', 'mean_intensity')
    for i in range(mask.shape[2]):
        labels = label(mask[:, :, i])
        regions[i] = pd.DataFrame(regionprops_table(labels, im, props))
    # Compute the area fractions
    area_frac = []
    for i in range(len(regions)):
        area_frac.append(regions[i]['area'].values / image_size**2)
    return regions, mask, area_frac


def interactive_hyperimage(image, w=(750, 1877, 1)):
    '''
    input:
        image: 3D Hyperspectral image
        w: wavenumbers, which is desired interval
           format is (starting wavenumber, ending wavenumber, step).
           Default is full spectrum, which is (750,1128,1)
    output:
        interactive 2D image of hyperspectral image at desired wavenumber
    '''
    def update(a):
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.imshow(image[:, :, a - 750])
        # raw string: '\m' in a normal string is an invalid escape sequence
        ax.set_title('Wavenumber ' + str(a) + r' $\mathregular{cm^{-1}}$', fontsize=24)
    return widgets.interact(update, a=w)
2.578125
3
velocileptors/Utils/loginterp.py
kokron/velocileptors
0
18002
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as interpolate
# NOTE(review): scipy.misc.derivative was deprecated in SciPy 1.10 and removed
# in 1.12 -- this import fails on modern SciPy; confirm the pinned version.
from scipy.misc import derivative
import inspect


def loginterp(x, y, yint = None, side = "both", lorder = 9, rorder = 9, lp = 1, rp = -2, ldx = 1e-6, rdx = 1e-6,\
              interp_min = -12, interp_max = 12, Nint = 10**5, verbose=False, option='B'):
    '''
    Extrapolate function by evaluating a log-index of left & right side.

    From <NAME>'s CLEFT code at
    https://github.com/modichirag/CLEFT/blob/master/qfuncpool.py

    The warning for divergent power laws on both ends is turned off. To turn
    back on uncomment lines 26-33.
    '''
    # Default interpolant: quintic spline through the tabulated (x, y).
    if yint is None:
        yint = interpolate(x, y, k = 5)
    if side == "both":
        side = "lr"

    # Make sure there is no zero crossing between the edge points
    # If so assume there can't be another crossing nearby
    if np.sign(y[lp]) == np.sign(y[lp-1]) and np.sign(y[lp]) == np.sign(y[lp+1]):
        l = lp
    else:
        l = lp + 2
    if np.sign(y[rp]) == np.sign(y[rp-1]) and np.sign(y[rp]) == np.sign(y[rp+1]):
        r = rp
    else:
        r = rp - 2

    # Effective power-law indices n = dlog(y)/dlog(x) at each edge, estimated
    # from a high-order finite-difference derivative of the spline.
    lneff = derivative(yint, x[l], dx = x[l]*ldx, order = lorder)*x[l]/y[l]
    rneff = derivative(yint, x[r], dx = x[r]*rdx, order = rorder)*x[r]/y[r]
    #print(lneff, rneff)

    # uncomment if you like warnings.
    #if verbose:
    #    if lneff < 0:
    #        print( 'In function - ', inspect.getouterframes( inspect.currentframe() )[2][3])
    #        print('WARNING: Runaway index on left side, bad interpolation. Left index = %0.3e at %0.3e'%(lneff, x[l]))
    #    if rneff > 0:
    #        print( 'In function - ', inspect.getouterframes( inspect.currentframe() )[2][3])
    #        print('WARNING: Runaway index on right side, bad interpolation. Reft index = %0.3e at %0.3e'%(rneff, x[r]))

    if option == 'A':
        # Option A: materialize power-law extensions on log-spaced grids and
        # fit one big spline through (extension + interior + extension).
        xl = np.logspace(interp_min, np.log10(x[l]), Nint)
        xr = np.logspace(np.log10(x[r]), interp_max, Nint)
        yl = y[l]*(xl/x[l])**lneff
        yr = y[r]*(xr/x[r])**rneff
        #print(xr/x[r])

        xint = x[l+1:r].copy()
        yint = y[l+1:r].copy()

        if side.find("l") > -1:
            xint = np.concatenate((xl, xint))
            yint = np.concatenate((yl, yint))
        if side.find("r") > -1:
            xint = np.concatenate((xint, xr))
            yint = np.concatenate((yint, yr))
        yint2 = interpolate(xint, yint, k = 5, ext=3)

    else:
        # Option B (default): piecewise closure -- power law beyond each edge,
        # spline (ext=3, i.e. boundary-value extrapolation) in the interior.
        yint2 = lambda xx: (xx <= x[l]) * y[l]*(xx/x[l])**lneff \
                         + (xx >= x[r]) * y[r]*(xx/x[r])**rneff \
                         + (xx > x[l]) * (xx < x[r]) * interpolate(x, y, k = 5, ext=3)(xx)

    return yint2
2.21875
2
src/modeling/calc_target_scale.py
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
44
18003
from typing import Tuple
import dataclasses
import numpy as np
import torch
from pathlib import Path
from l5kit.data import LocalDataManager, ChunkedDataset
import sys
import os
from tqdm import tqdm

sys.path.append(os.pardir)
sys.path.append(os.path.join(os.pardir, os.pardir))
from lib.evaluation.mask import load_mask_chopped
from lib.rasterization.rasterizer_builder import build_custom_rasterizer
from lib.dataset.faster_agent_dataset import FasterAgentDataset
from lib.utils.yaml_utils import save_yaml, load_yaml
from modeling.load_flag import load_flags, Flags


def calc_target_scale(agent_dataset, n_sample: int = 10000) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Estimate per-step target-position statistics over a dataset subsample.

    Draws ``n_sample`` evenly spaced indices, masks unavailable timesteps with
    NaN, and returns (abs-mean, abs-max, std) computed with NaN-aware
    reductions over the sampled agents.
    """
    sub_indices = np.linspace(0, len(agent_dataset) - 1, num=n_sample, dtype=np.int64)
    pos_list = []
    for i in tqdm(sub_indices):
        d = agent_dataset[i]
        pos = d["target_positions"]
        # Mask out timesteps that are not available so they don't bias stats.
        pos[~d["target_availabilities"].astype(bool)] = np.nan
        pos_list.append(pos)
    agents_pos = np.array(pos_list)
    target_scale_abs_mean = np.nanmean(np.abs(agents_pos), axis=0)
    target_scale_abs_max = np.nanmax(np.abs(agents_pos), axis=0)
    target_scale_std = np.nanstd(agents_pos, axis=0)
    return target_scale_abs_mean, target_scale_abs_max, target_scale_std


if __name__ == '__main__':
    mode = ""
    flags: Flags = load_flags(mode=mode)
    flags_dict = dataclasses.asdict(flags)
    cfg = load_yaml(flags.cfg_filepath)
    out_dir = Path(flags.out_dir)
    print(f"cfg {cfg}")
    os.makedirs(str(out_dir), exist_ok=True)
    print(f"flags: {flags_dict}")
    save_yaml(out_dir / 'flags.yaml', flags_dict)
    save_yaml(out_dir / 'cfg.yaml', cfg)
    debug = flags.debug

    # set env variable for data
    os.environ["L5KIT_DATA_FOLDER"] = flags.l5kit_data_folder
    dm = LocalDataManager(None)

    print("init dataset")
    train_cfg = cfg["train_data_loader"]
    valid_cfg = cfg["valid_data_loader"]

    # Build StubRasterizer for fast dataset access (no map rendering needed
    # for computing position statistics).
    cfg["raster_params"]["map_type"] = "stub_debug"
    rasterizer = build_custom_rasterizer(cfg, dm)
    print("rasterizer", rasterizer)

    train_path = "scenes/sample.zarr" if debug else train_cfg["key"]
    train_agents_mask = None
    if flags.validation_chopped:
        # Use chopped dataset to calc statistics...
        num_frames_to_chop = 100
        th_agent_prob = cfg["raster_params"]["filter_agents_threshold"]
        min_frame_future = 1
        num_frames_to_copy = num_frames_to_chop
        train_agents_mask = load_mask_chopped(
            dm.require(train_path), th_agent_prob, num_frames_to_copy, min_frame_future)
        print("train_path", train_path, "train_agents_mask", train_agents_mask.shape)
    train_zarr = ChunkedDataset(dm.require(train_path)).open(cached=False)
    print("train_zarr", type(train_zarr))
    print(f"Open Dataset {flags.pred_mode}...")
    train_agent_dataset = FasterAgentDataset(
        cfg, train_zarr, rasterizer, min_frame_history=flags.min_frame_history,
        min_frame_future=flags.min_frame_future, agents_mask=train_agents_mask
    )
    print("train_agent_dataset", len(train_agent_dataset))

    n_sample = 1_000_000  # Take 1M sample.
    target_scale_abs_mean, target_scale_abs_max, target_scale_std = calc_target_scale(train_agent_dataset, n_sample)

    # Cache the three statistic arrays next to the zarr dataset, keyed by the
    # filtering parameters so different configurations don't collide.
    chopped_str = "_chopped" if flags.validation_chopped else ""
    agent_prob = cfg["raster_params"]["filter_agents_threshold"]
    filename = f"target_scale_abs_mean_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
    cache_path = Path(train_zarr.path) / filename
    np.savez_compressed(cache_path, target_scale=target_scale_abs_mean)
    print("Saving to ", cache_path)

    filename = f"target_scale_abs_max_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
    cache_path = Path(train_zarr.path) / filename
    np.savez_compressed(cache_path, target_scale=target_scale_abs_max)
    print("Saving to ", cache_path)

    filename = f"target_scale_std_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
    cache_path = Path(train_zarr.path) / filename
    np.savez_compressed(cache_path, target_scale=target_scale_std)
    print("Saving to ", cache_path)

    print("target_scale_abs_mean", target_scale_abs_mean)
    print("target_scale_abs_max", target_scale_abs_max)
    print("target_scale_std", target_scale_std)
    import IPython; IPython.embed()
1.773438
2
examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py
dbstein/pybie2d
11
18004
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
plt.ion()
import pybie2d
"""
Demonstrate how to use the pybie2d package to solve an interior Laplace
(Neumann) problem on a panel-polygon domain.

This example demonstrates how to do this entirely using low-level routines,
to demonstrate both how to use these low level routines and to give you an
idea what is going on under the hood in the higher level routines.
"""

NG = 1000       # grid points per dimension for evaluation
h_max = 0.01    # maximum panel size on the boundary

# extract some functions for easy calling
squish = pybie2d.misc.curve_descriptions.squished_circle
PPB = pybie2d.boundaries.panel_polygon_boundary.panel_polygon_boundary.Panel_Polygon_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply

################################################################################
# define problem

# boundary: the unit square, refined dyadically toward the corners
boundary = PPB([0,1,1,0], [0,0,1,1], [h_max]*4, [True]*4, dyadic_levels=20, dyadic_base=3)
# solution: a harmonic function, used to manufacture boundary data
solution_func = lambda x, y: 2*x + y
bc = solution_func(boundary.x, boundary.y)
bcx = lambda x, y: 2.0*np.ones_like(x)
bcy = lambda x, y: 1.0*np.ones_like(x)
# Neumann data: normal derivative of the manufactured solution
bcn = lambda x, y, nx, ny: bcx(x, y)*nx + bcy(x, y)*ny

def err_plot(u):
    """Plot digits of accuracy of u against the known solution and print the max error."""
    # compute the error
    error = u - solution_func(gridp.xg, gridp.yg)
    digits = -np.log10(np.abs(error)+1e-16)
    mdigits = np.ma.array(digits)
    # plot the error as a function of space (only good in interior)
    fig, ax = plt.subplots(1,1)
    clf = ax.imshow(mdigits[:,::-1].T, extent=[0,1,0,1], cmap=mpl.cm.viridis_r)
    ax.set_aspect('equal')
    fig.colorbar(clf)
    print('Error: {:0.2e}'.format(np.abs(error).max()))

################################################################################
##### solve problem the hard way ###############################################
################################################################################

################################################################################
# find physical region
# (this implements a fast way to tell if points are in or out of the boundary)
# (and of course, for the squish boundary, we could easily figure out something
# faster, but this illustrates a general purpose routine)

gridp = Grid([0,1], NG, [0,1], NG, x_endpoints=[False,False], y_endpoints=[False,False])

################################################################################
# solve for the density

DLP = Laplace_Layer_Singular_Form(boundary, ifdipole=True)
# adjoint of the DLP, built by conjugating with the quadrature weights
SLPp = (DLP/boundary.weights).T*boundary.weights
A = 0.5*np.eye(boundary.N) + SLPp
tau = np.linalg.solve(A, bcn(boundary.x, boundary.y, boundary.normal_x, boundary.normal_y))

# fix the mean (Neumann solution is only determined up to a constant; pin it
# at the domain center using the known solution value there)
target = PointSet(x=np.array((0.5)),y=np.array((0.5)))
good_eval = Laplace_Layer_Apply(boundary, target=target, charge=tau)
correction = (2*0.5 + 0.5) - good_eval

################################################################################
# naive evaluation

u = Laplace_Layer_Apply(boundary, gridp, charge=tau)
u = gridp.reshape(u)
u += correction
err_plot(u)

################################################################################
# oversampled (interpolate the density to a finer boundary to reduce
# near-boundary quadrature error)

hmax = gridp.xg[1,0] - gridp.xg[0,0]
fbdy, IMAT = boundary.prepare_oversampling(hmax/6.0)
IMAT = sp.sparse.csr_matrix(IMAT)
ftau = IMAT.dot(tau)
u = Laplace_Layer_Apply(fbdy, gridp, charge=ftau)
u = gridp.reshape(u)
u += correction
err_plot(u)

# analytic solution on the grid, for reference
ua = 2*gridp.xg + gridp.yg
2.625
3
controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py
dbscoach/webots-soccer-sim-playground
0
18005
from math import ceil

from referee.consts import MATCH_TIME, TIME_STEP
from referee.referee import RCJSoccerReferee

# Number of simulation steps in 15 s (robot progress) and 10 s (ball progress).
robot_progress_steps = ceil(15 / (TIME_STEP / 1000.0))
ball_progress_steps = ceil(10 / (TIME_STEP / 1000.0))

referee = RCJSoccerReferee(
    match_time=MATCH_TIME,
    progress_check_steps=robot_progress_steps,
    progress_check_threshold=0.5,
    ball_progress_check_steps=ball_progress_steps,
    ball_progress_check_threshold=0.5,
)

# Main simulation loop: advance the world one step at a time, broadcast
# positions, and stop as soon as the referee declares the match over.
while True:
    if referee.step(TIME_STEP) == -1:
        break
    referee.emit_positions()
    if not referee.tick():
        break

# When end of match, pause simulator immediately
referee.simulationSetMode(referee.SIMULATION_MODE_PAUSE)
2.3125
2
backend/app.py
CMU-IDS-2020/fp-profiler
0
18006
from flask import Flask, request
import os
from subprocess import Popen, PIPE
import json
from prof_file_util import load_source, load_line_profile, load_graph_profile
from linewise_barchart import linewise_barchart
from valgrind import extract_valgrind_result
from mem_issue_visualize import mem_issue_visualize

# SECURITY NOTE(review): both routes compile and execute C code submitted in
# the request body. That is this tool's purpose, but it must only ever be
# deployed in a sandboxed/contained environment.
app = Flask(__name__)

@app.route('/upload-file', methods = ['POST'])
def hello():
    '''
    shall return a json dict
    if succeeds,
    {
        'error': 0,
        'vega_json': ...
        'node_json': ...
        'edge_json': ...
        ...
    }
    if fails,
    {
        'error': 1,
        'source': formatted source code,
        'error_message': the compile failure message
    }
    '''
    # Write the submitted C source to a fixed local file.
    code = request.get_json()['code']
    # print(code)
    local_path = 'temp.c'
    # TODO: hash file names to handle concurrency issues
    with open(local_path, 'w') as f:
        f.write(code)
    process = Popen(['wc', '-l', local_path], stdout=PIPE)
    (output, err) = process.communicate()
    exit_code = process.wait()
    # print(output)
    # with open('test.json') as f:
    #     s = json.load(f)
    ret_dict = {}
    '''
    Invoke compiler (if need) and profiler to generate the results.
    '''
    # Normalize formatting, then compile with profiling instrumentation (-pg).
    os.system('clang-format -i {}'.format(local_path))
    compile_retvalue = os.system('gcc -g -pg {} -o prog 1> gcc_output 2>&1'.format(local_path))
    # handle compiling error
    if compile_retvalue != 0:
        ret_dict['error'] = 1
        ret_dict['source'] = ''.join(list(open(local_path, 'r').readlines()))
        ret_dict['error_message'] = ''.join(list(open('gcc_output', 'r').readlines()))
        return ret_dict
    # Run the program to produce gmon.out, then extract line-wise and
    # call-graph profiles with gprof.
    os.system('./prog')
    os.system('ctags --fields=+ne -o - --sort=no {} 1> ctags_output 2>&1'.format(local_path))
    os.system('gprof --graph prog gmon.out 1> graph_file 2>&1')
    os.system('gprof -l prog gmon.out 1> linewise_file 2>&1')
    '''
    Now we have the outputs. Visualize and pass it back to the frontend.
    '''
    # for debug purpose. Only linux can host grof so far.
    ret_dict['error'] = 0
    if os.path.isfile('linewise_file') and os.path.getsize('linewise_file') > 0\
        and os.path.isfile('graph_file') and os.path.getsize('graph_file') > 0:
        df = load_line_profile(local_path, 'linewise_file')
        chart = linewise_barchart(df)
        # chart.save('new.json')
        '''
        TODO: Maybe the temporary files should be cleared or stored somewhere
        serving as history data.
        '''
        ret_dict['vega_json'] = json.loads(chart.to_json())
        graph_dct = load_graph_profile('graph_file')
        if graph_dct:
            for k, v in graph_dct.items():
                ret_dict[k] = v
    else:
        # Fallback (non-Linux dev machines): serve a canned chart.
        ret_dict['vega_json'] = json.load(open('test.json', 'r'))
    # print(uninitialised_buffer, invalid_write_buffer, mem_leak_dic)
    return ret_dict

@app.route('/mem-profile', methods = ['POST'])
def mem_profile():
    '''
    shall return a json dict
    if succeeds,
    {
        'error': 0,
        'vega_json': ...
        ...
    }
    if fails,
    {
        'error': 1,
        'source': formatted source code,
        'error_message': the compile failure message
    }
    '''
    code = request.get_json()['code']
    # print(code)
    local_path = 'temp.c'
    # TODO: hash file names to handle concurrency issues
    with open(local_path, 'w') as f:
        f.write(code)
    process = Popen(['wc', '-l', local_path], stdout=PIPE)
    (output, err) = process.communicate()
    exit_code = process.wait()
    # print(output)
    # with open('test.json') as f:
    #     s = json.load(f)
    ret_dict = {}
    '''
    Invoke compiler (if need) and profiler to generate the results.
    '''
    os.system('clang-format -i {}'.format(local_path))
    compile_retvalue = os.system('gcc -pedantic -g {} -o exec 1> gcc_output 2>&1'.format(local_path))
    if compile_retvalue != 0:
        ret_dict['error'] = 1
        ret_dict['source'] = ''.join(list(open(local_path, 'r').readlines()))
        ret_dict['error_message'] = ''.join(list(open('gcc_output', 'r').readlines()))
        return ret_dict
    # Two valgrind passes: default (uninitialised reads / invalid writes) and
    # --leak-check=full (memory leaks).
    os.system('valgrind ./exec > valgrind.txt 2>&1')
    uninitialised_buffer, invalid_write_buffer = extract_valgrind_result('other', 'valgrind.txt')
    os.system('valgrind --leak-check=full ./exec > valgrind_leak.txt 2>&1')
    mem_leak_dic = extract_valgrind_result('memory_leak', 'valgrind_leak.txt')
    ret_dict['error'] = 0
    vega_chart = mem_issue_visualize(local_path, uninitialised_buffer, invalid_write_buffer, mem_leak_dic)
    ret_dict['vega_json'] = json.loads(vega_chart.to_json())
    return ret_dict
2.1875
2
py/book/ShortestSubarrayLength.py
danyfang/SourceCode
0
18007
'''
Leetcode problem No 862 Shortest Subarray with Sum at Least K
Solution written by <NAME> on 1 July, 2018
'''
import collections


class Solution(object):
    def shortestSubarray(self, A, K):
        """
        Return the length of the shortest contiguous subarray of A whose sum
        is at least K, or -1 if none exists.

        Monotonic-deque scan over prefix sums; O(n) time, O(n) space.

        :type A: List[int]
        :type K: int
        :rtype: int
        """
        n = len(A)
        # B[i] = sum of A[0:i] (prefix sums).
        B = [0] * (n + 1)
        for i in range(n):
            B[i+1] = B[i] + A[i]
        d = collections.deque()
        ans = n + 1  # sentinel: longer than any real answer
        for i in range(n+1):
            # Pop fronts that already satisfy the target sum -- each gives a
            # candidate subarray ending at i, and can never improve later.
            while d and B[i] - B[d[0]] >= K:
                ans = min(ans, i-d.popleft())
            # Keep prefix sums strictly increasing: a larger-or-equal earlier
            # prefix can never start a shorter qualifying subarray than i.
            while d and B[i] <= B[d[-1]]:
                d.pop()
            d.append(i)
        return ans if ans <= n else -1


def main():
    """Ad-hoc smoke tests, printed for manual inspection."""
    s = Solution()
    print(s.shortestSubarray([2,-1,2], 3))
    print(s.shortestSubarray([1,2], 4))
    print(s.shortestSubarray([1], 1))
    print(s.shortestSubarray([1,2,3,-5,4,-7,5,-8,6,-9,7,8,-4], 5))  #1
    print(s.shortestSubarray([1,2,-5,3,-5,4,-7,5,-8,6,-9,7,8,-4], 5))


# Fix: main() previously ran unconditionally, so merely importing this module
# printed the test output; guard it so it only runs as a script.
if __name__ == '__main__':
    main()
3.34375
3
djangocms_baseplugins/contact/models.py
benzkji/djangocms-baseplugins
2
18008
import time

from ckeditor.fields import RichTextField
from django.db import models
# NOTE(review): ugettext_lazy was removed in Django 4.0 (use gettext_lazy) --
# confirm the Django version this package is pinned to.
from django.utils.translation import ugettext_lazy as _
from requests import ConnectionError

from djangocms_baseplugins.baseplugin.models import AbstractBasePlugin
from djangocms_baseplugins.baseplugin.utils import check_migration_modules_needed

check_migration_modules_needed('contact')


class ContactBase(AbstractBasePlugin):
    """Abstract CMS plugin model holding contact details and map coordinates."""
    website = models.URLField(_("Website"), blank=True, default='')
    email = models.EmailField(_("Email"), blank=True, default='')
    phone = models.CharField(_("Phone"), max_length=64, blank=True, default='')
    fax = models.CharField(_("Fax"), max_length=64, blank=True, default='')
    body = RichTextField(_("Text"), blank=True, default='')
    address = models.TextField(_('Address'), default='', blank=True)
    # Address string fed to the geocoder; may differ from the display address.
    geocoding_address = models.CharField(
        _('Address for the map'),
        max_length=64,
        default='',
        blank=True,
    )
    lat = models.FloatField(blank=True, default=0, null=True)
    lng = models.FloatField(blank=True, default=0, null=True)
    geo_error = models.BooleanField(_("Probleme mit der Adresse?"), default=False)

    class Meta:
        abstract = True

    def __str__(self):
        text = str(_("Contact / Subsidiary"))
        if self.geo_error:
            text = "%s (%s)" % (text, _("Coordinates Error!"))
        return self.add_hidden_flag(text)


class Contact(ContactBase):
    def save(self, *args, **kwargs):
        """
        Geocode ``geocoding_address`` (via the optional ``geocoder`` package)
        before saving, setting ``lat``/``lng``/``geo_error`` accordingly.
        here for now. may end in a metaclass, if we haz time to do this
        """
        # geocoder is an optional dependency; without it, save normally.
        try:
            import geocoder
        except ImportError:
            return super(Contact, self).save(*args, **kwargs)
        # Fetch the previously stored row (if any) so we only re-geocode when
        # the geocoding address actually changed.
        try:
            from_db = Contact.objects.get(id=self.id)
        except self.DoesNotExist:
            from_db = Contact()
        if self.geocoding_address:
            if not self.lat or not from_db.geocoding_address == self.geocoding_address:
                g = None
                try:
                    g = geocoder.komoot(self.geocoding_address)
                    # crude rate limiting for the public komoot endpoint
                    time.sleep(2)
                except ConnectionError:
                    pass
                if g and g.ok:
                    self.lat = g.latlng[0]
                    self.lng = g.latlng[1]
                    self.geo_error = False
                else:
                    self.geo_error = True
            if not self.lat:
                # print "no latlng found: %s" % self
                self.geo_error = True
        else:
            # No geocoding address: clear coordinates and any stale error.
            # NOTE(review): indentation of this branch is ambiguous in the
            # flattened source -- confirm against the upstream repository.
            self.geo_error = False
            self.lat = 0
            self.lng = 0
        return super(Contact, self).save(*args, **kwargs)
1.914063
2
Level1/Lessons76501/minari-76501.py
StudyForCoding/ProgrammersLevel
0
18009
def solution(absolutes, signs):
    """Sum `absolutes`, adding each value whose matching entry in `signs` is
    truthy and subtracting it otherwise.

    Fix: the original tested `signs[i] is True`, an identity comparison that
    misclassifies truthy values that are not the exact `True` singleton
    (e.g. 1, or numpy bools) as negative; plain truthiness is correct.
    """
    answer = 0
    for value, positive in zip(absolutes, signs):
        if positive:
            answer += int(value)
        else:
            answer -= int(value)
    return answer

# Approach notes (translated):
# 1. loop over len(absolutes); add when the sign is truthy, subtract otherwise
# 2. could also build on sum(absolutes)
3.640625
4
pyexcel/__init__.py
quis/pyexcel
0
18010
""" pyexcel ~~~~~~~~~~~~~~~~~~~ **pyexcel** is a wrapper library to read, manipulate and write data in different excel formats: csv, ods, xls, xlsx and xlsm. It does not support formulas, styles and charts. :copyright: (c) 2014-2017 by Onni Software Ltd. :license: New BSD License, see LICENSE for more details """ # flake8: noqa from .cookbook import ( merge_csv_to_a_book, merge_all_to_a_book, split_a_book, extract_a_sheet_from_a_book, ) from .core import ( get_array, iget_array, get_dict, get_records, iget_records, get_book_dict, get_sheet, get_book, iget_book, save_as, isave_as, save_book_as, isave_book_as, ) from .book import Book from .sheet import Sheet from .internal.garbagecollector import free_resources from .deprecated import ( load_book, load_book_from_memory, load, load_from_memory, load_from_dict, load_from_records, Reader, SeriesReader, ColumnSeriesReader, BookReader, ) from .__version__ import __version__, __author__
1.632813
2
tests/resources/selenium/test_nfc.py
Avi-Labs/taurus
1,743
18011
# coding=utf-8 import logging import random import string import sys import unittest from time import time, sleep import apiritif import os import re from selenium import webdriver from selenium.common.exceptions import NoSuchElementException, TimeoutException from selenium.webdriver.common.by import By from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import Select from selenium.webdriver.support import expected_conditions as econd from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.common.keys import Keys from bzt.resources.selenium_extras import waiter, get_locator class TestSc1(unittest.TestCase): def setUp(self): self.vars = {} timeout = 2.0 options = webdriver.FirefoxOptions() profile = webdriver.FirefoxProfile() profile.set_preference('webdriver.log.file', '/somewhere/webdriver.log') options.set_capability('unhandledPromptBehavior', 'ignore') self.driver = webdriver.Firefox(profile, options=options) self.driver.implicitly_wait(timeout) apiritif.put_into_thread_store(timeout=timeout, func_mode=False, driver=self.driver, windows={}, scenario_name='sc1') def _1_httpsblazedemocomsetup1(self): with apiritif.smart_transaction('https://blazedemo.com/setup1'): self.driver.get('https://blazedemo.com/setup1') def _2_setup2(self): with apiritif.smart_transaction('setup2'): self.driver.get('https://blazedemo.com/setup2') waiter() def _3_httpsblazedemocommain1(self): with apiritif.smart_transaction('https://blazedemo.com/main1'): self.driver.get('https://blazedemo.com/main1') def _4_main2(self): with apiritif.smart_transaction('main2'): self.driver.get('https://blazedemo.com/main2') waiter() def _5_httpsblazedemocomteardown1(self): with apiritif.smart_transaction('https://blazedemo.com/teardown1'): self.driver.get('https://blazedemo.com/teardown1') def _6_teardown2(self): with apiritif.smart_transaction('teardown2'): self.driver.get('https://blazedemo.com/teardown2') waiter() def 
test_sc1(self): try: self._1_httpsblazedemocomsetup1() self._2_setup2() self._3_httpsblazedemocommain1() self._4_main2() finally: apiritif.set_stage("teardown") # can't be interrupted self._5_httpsblazedemocomteardown1() self._6_teardown2() def tearDown(self): if self.driver: self.driver.quit()
2.140625
2
src/onegov/search/dsl.py
politbuero-kampagnen/onegov-cloud
0
18012
<reponame>politbuero-kampagnen/onegov-cloud<filename>src/onegov/search/dsl.py from elasticsearch_dsl import Search as BaseSearch from elasticsearch_dsl.response import Hit as BaseHit from elasticsearch_dsl.response import Response as BaseResponse def type_from_hit(hit): return hit.meta.index.split('-')[-2] class Search(BaseSearch): """ Extends elastichsearch_dsl's search object with ORM integration. Works exactly the same as the original, but the results it returns offer additional methods to query the SQLAlchemy models behind the results (if any). """ def __init__(self, *args, **kwargs): # get the session and mapping if possilbe (not provided during cloning) self.session = kwargs.pop('session', None) self.mappings = kwargs.pop('mappings', None) super().__init__(*args, **kwargs) # bind responses to the orm self._response_class = Response.bind( self.session, self.mappings, self.explain) @property def explain(self): return self._extra.get('explain', False) def _clone(self): search = super()._clone() search.session = self.session search.mappings = self.mappings return search def _get_result(self, *args, **kwargs): result = super()._get_result(*args, **kwargs) result.__class__ = Hit.bind( session=self.session, model=self.mappings[type_from_hit(result)].model ) return result class Response(BaseResponse): """ Extends the default response (list of results) with additional methods to query the SQLAlchemy models behind the results. """ @classmethod def bind(cls, session, mappings, explain): class BoundResponse(cls): pass BoundResponse.session = session BoundResponse.mappings = mappings BoundResponse.explain = explain return BoundResponse def hits_by_type(self, type): for hit in self.hits: if type_from_hit(hit) == type: yield hit def query(self, type): """ Returns an SQLAlchemy query for the given type. You must provide a type, because a query can't consist of multiple unrelated tables. If no results match the type, None is returned. 
""" hits = list(self.hits_by_type(type)) if not hits: return None model = self.mappings[type].model query = self.session.query(model) model_ids = (h.meta.id for h in hits) query = query.filter(getattr(model, model.es_id).in_(model_ids)) return query def load(self): """ Loads all results by querying the SQLAlchemy session in the order they were returned by elasticsearch. Note that the resulting lists may include None values, since we are might get elasticsearch results for which we do not have a model on the database (the data is then out of sync). """ positions = {} types = set() # put the types into buckets and store the original position... for ix, hit in enumerate(self.hits): type = type_from_hit(hit) positions[(type, str(hit.meta.id))] = ix types.add(type) results = [None] * len(positions) # ...so we can query the database once per type and not once per result # this has the potential of resulting in fewer queries for type in types: for result in self.query(type): object_id = str(getattr(result, result.es_id)) ix = positions[(type, object_id)] if self.explain: ex = self.hits[ix].meta.explanation result.explanation = { 'raw': ex.__dict__, 'score': self.hits[ix].meta.score, 'term-frequency': explanation_value( ex, 'termFreq' ), 'inverse-document-frequency': explanation_value( ex, 'idf' ), 'field-norm': explanation_value( ex, 'fieldNorm' ) } results[ix] = result return results def explanation_value(explanation, text): """ Gets the value from the explanation for descriptions starting with the given text. """ if explanation.description.startswith(text): return { 'description': explanation.description, 'value': explanation.value } for detail in getattr(explanation, 'details', []): result = explanation_value(detail, text) if result: return result class Hit(BaseHit): """ Extends a single result with additional methods to query the SQLAlchemy models behind the results. 
""" @classmethod def bind(cls, model, session): class BoundHit(cls): pass BoundHit.model = model BoundHit.session = session return BoundHit def query(self): """ Returns the SQLAlchemy query for this result. """ query = self.session.query(self.model) model_id = getattr(self.model, self.model.es_id) query = query.filter(model_id == self.meta.id) return query def load(self): """ Loads this result from the SQLAlchemy session. """ return self.query().one()
2.4375
2
DistributedStorageBenchmarkTool/EchoHandler.py
shadoobie/dbench
0
18013
<filename>DistributedStorageBenchmarkTool/EchoHandler.py from SocketServer import BaseRequestHandler, TCPServer from DistributedStorageBenchmarkTool.StampyMcGetTheLog import StampyMcGetTheLog # from sets import Set import re class EchoHandler(BaseRequestHandler): name = None server = None chunkSizeWriteTimes = [] chunkSizeSet = set() def __init__(self, request, client_address, server): self.server = server self.name = "EchoHandlerFor client " + str(client_address) self.server.flood("EchoHandler names " + self.name + " has been instantiated.") BaseRequestHandler.__init__(self, request, client_address, server) def handle(self): self.server.flood(self.name + " handle() invoked.") while True: receivedData = self.request.recv(8192) self.server.flood("EchoHandler " + self.name + " receivedData = " + str(receivedData)) if not receivedData: break self.request.sendall(receivedData) self.request.close() self.server.flood(self.name + " handel() has completed.") def lookForStuff(self, parsedData): '''this whole lookForStuff approach is wrong. please forgive me for the code the ensues. 
i did it wrong.''' lifeSpan = self.getLifeSpan(parsedData) maxFileSizeString = self.getMaxFileSizeString(parsedData) self.gatherChunkSizeAndWriteTimeMessages(parsedData) if lifeSpan != None and maxFileSizeString != None: maxFileSize = int(maxFileSizeString) for aChunkSize in self.chunkSizeSet: self.evaluateForRolloverCompliant(lifeSpan, maxFileSize, aChunkSize) def getClientName(self, parsedData): clientName = None if "clientName:" in parsedData: clientName = parsedData[1] return clientName def getLifeSpan(self, parsedData): lifeSpan = None if "lifeSpan:" in parsedData: lifeSpan = parsedData[2] else: lifeSpan = self.lifeSpan return lifeSpan def getMaxFileSizeString(self, parsedData): maxFileSize = None if "maxFileSize:" in parsedData: maxFileSize = parsedData[9] return maxFileSize def gatherChunkSizeAndWriteTimeMessages(self, parsedData): if "writeTime:" in parsedData: self.chunkSizeWriteTimes.append({'chunkSize':int(parsedData[3]), 'writeTime':float(parsedData[1])}) self.chunkSizeSet.add(int(parsedData[3])) def getAverageTimeBetweenWritesForChunkSize(self, chunkSize): '''the first draft of this method probably not correct.''' average = sum(d['writeTime'] for d in self.chunkSizeWriteTimes) / len(self.chunkSizeWriteTimes) return average def evaluateForRolloverCompliant(self, lifeSpan, maxFileSize, chunkSize): numberOfSecondsRunning = lifeSpan howManySecondsBetweenEachChunkWrite = self.getAverageTimeBetweenWritesForChunkSize(chunkSize) numberOfChunksPerFile = maxFileSize / chunkSize numberOfSecondsPerFile = howManySecondsBetweenEachChunkWrite * numberOfChunksPerFile estimatedTotalFiles = numberOfSecondsRunning / numberOfSecondsPerFile if estimatedTotalFiles <= 2: self.server.flood(self.name + " says hey there I'm complaining that this test run will only roll over the data file an estimated " + str(estimatedTotalFiles) + " number of files.") else: self.server.flood(self.name + " says it looks like we will have an estimated number of data files = " + 
str(estimatedTotalFiles)) def parseLine(self,line): '''leverage regular expression to parse on space.''' parsedLine = re.split(r'\s',line) return parsedLine if __name__ == '__main__': serv = TCPServer(('', 20000), EchoHandler) serv.serve_forever()
2.359375
2
bitten/queue.py
SpamExperts/bitten
0
18014
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2010 Edgewall Software # Copyright (C) 2005-2007 <NAME> <<EMAIL>> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://bitten.edgewall.org/wiki/License. """Implements the scheduling of builds for a project. This module provides the functionality for scheduling builds for a specific Trac environment. It is used by both the build master and the web interface to get the list of required builds (revisions not built yet). Furthermore, the `BuildQueue` class is used by the build master to determine the next pending build, and to match build slaves against configured target platforms. """ from itertools import ifilter import re import time from trac.util.datefmt import to_timestamp from trac.util import pretty_timedelta, format_datetime from trac.attachment import Attachment from bitten.model import BuildConfig, TargetPlatform, Build, BuildStep from bitten.util.repository import get_repos __docformat__ = 'restructuredtext en' def collect_changes(config, authname=None): """Collect all changes for a build configuration that either have already been built, or still need to be built. This function is a generator that yields ``(platform, rev, build)`` tuples, where ``platform`` is a `TargetPlatform` object, ``rev`` is the identifier of the changeset, and ``build`` is a `Build` object or `None`. 
:param config: the build configuration :param authname: the logged in user :param db: a database connection (optional) """ env = config.env repos_name, repos, repos_path = get_repos(env, config.path, authname) with env.db_query as db: try: node = repos.get_node(repos_path) except Exception, e: env.log.warn('Error accessing path %r for configuration %r', repos_path, config.name, exc_info=True) return for path, rev, chg in node.get_history(): # Don't follow moves/copies if path != repos.normalize_path(repos_path): break # Stay within the limits of the build config if config.min_rev and repos.rev_older_than(rev, config.min_rev): break if config.max_rev and repos.rev_older_than(config.max_rev, rev): continue # Make sure the repository directory isn't empty at this # revision old_node = repos.get_node(path, rev) is_empty = True for entry in old_node.get_entries(): is_empty = False break if is_empty: continue # For every target platform, check whether there's a build # of this revision for platform in TargetPlatform.select(env, config.name): builds = list(Build.select(env, config.name, rev, platform.id)) if builds: build = builds[0] else: build = None yield platform, rev, build class BuildQueue(object): """Enapsulates the build queue of an environment. A build queue manages the the registration of build slaves and detection of repository revisions that need to be built. """ def __init__(self, env, build_all=False, stabilize_wait=0, timeout=0): """Create the build queue. :param env: the Trac environment :param build_all: whether older revisions should be built :param stabilize_wait: The time in seconds to wait before considering the repository stable to create a build in the queue. 
:param timeout: the time in seconds after which an in-progress build should be considered orphaned, and reset to pending state """ self.env = env self.log = env.log self.build_all = build_all self.stabilize_wait = stabilize_wait self.timeout = timeout # Build scheduling def get_build_for_slave(self, name, properties): """Check whether one of the pending builds can be built by the build slave. :param name: the name of the slave :type name: `basestring` :param properties: the slave configuration :type properties: `dict` :return: the allocated build, or `None` if no build was found :rtype: `Build` """ self.log.debug('Checking for pending builds...') self.reset_orphaned_builds() # Iterate through pending builds by descending revision timestamp, to # avoid the first configuration/platform getting all the builds platforms = [p.id for p in self.match_slave(name, properties)] builds_to_delete = [] build_found = False for build in Build.select(self.env, status=Build.PENDING): config_path = BuildConfig.fetch(self.env, name=build.config).path _name, repos, _path = get_repos(self.env, config_path, None) if self.should_delete_build(build, repos): self.log.info('Scheduling build %d for deletion', build.id) builds_to_delete.append(build) elif build.platform in platforms: build_found = True break if not build_found: self.log.debug('No pending builds.') build = None # delete any obsolete builds for build_to_delete in builds_to_delete: build_to_delete.delete() if build: build.slave = name build.slave_info.update(properties) build.status = Build.IN_PROGRESS build.update() return build def match_slave(self, name, properties): """Match a build slave against available target platforms. 
:param name: the name of the slave :type name: `basestring` :param properties: the slave configuration :type properties: `dict` :return: the list of platforms the slave matched """ platforms = [] for config in BuildConfig.select(self.env): for platform in TargetPlatform.select(self.env, config=config.name): match = True for propname, pattern in ifilter(None, platform.rules): try: propvalue = properties.get(propname) if not propvalue or not re.match(pattern, propvalue, re.I): match = False break except re.error: self.log.error('Invalid platform matching pattern "%s"', pattern, exc_info=True) match = False break if match: self.log.debug('Slave %r matched target platform %r of ' 'build configuration %r', name, platform.name, config.name) platforms.append(platform) if not platforms: self.log.warning('Slave %r matched none of the target platforms', name) return platforms def populate(self): """Add a build for the next change on each build configuration to the queue. The next change is the latest repository check-in for which there isn't a corresponding build on each target platform. Repeatedly calling this method will eventually result in the entire change history of the build configuration being in the build queue. 
""" builds = [] for config in BuildConfig.select(self.env): platforms = [] for platform, rev, build in collect_changes(config): if not self.build_all and platform.id in platforms: # We've seen this platform already, so these are older # builds that should only be built if built_all=True self.log.debug('Ignoring older revisions for configuration ' '%r on %r', config.name, platform.name) break platforms.append(platform.id) if build is None: self.log.info('Enqueuing build of configuration "%s" at ' 'revision [%s] on %s', config.name, rev, platform.name) _repos_name, repos, _repos_path = get_repos( self.env, config.path, None) rev_time = to_timestamp(repos.get_changeset(rev).date) age = int(time.time()) - rev_time if self.stabilize_wait and age < self.stabilize_wait: self.log.info('Delaying build of revision %s until %s ' 'seconds pass. Current age is: %s ' 'seconds' % (rev, self.stabilize_wait, age)) continue build = Build(self.env, config=config.name, platform=platform.id, rev=str(rev), rev_time=rev_time) builds.append(build) for build in builds: try: build.insert() except Exception, e: # really only want to catch IntegrityErrors raised when # a second slave attempts to add builds with the same # (config, platform, rev) as an existing build. self.log.info('Failed to insert build of configuration "%s" ' 'at revision [%s] on platform [%s]: %s', build.config, build.rev, build.platform, e) raise def reset_orphaned_builds(self): """Reset all in-progress builds to ``PENDING`` state if they've been running so long that the configured timeout has been reached. This is used to cleanup after slaves that have unexpectedly cancelled a build without notifying the master, or are for some other reason not reporting back status updates. 
""" if not self.timeout: # If no timeout is set, none of the in-progress builds can be # considered orphaned return with self.env.db_transaction as db: now = int(time.time()) for build in Build.select(self.env, status=Build.IN_PROGRESS): if now - build.last_activity < self.timeout: # This build has not reached the timeout yet, assume it's still # being executed continue self.log.info('Orphaning build %d. Last activity was %s (%s)' % \ (build.id, format_datetime(build.last_activity), pretty_timedelta(build.last_activity))) build.status = Build.PENDING build.slave = None build.slave_info = {} build.started = 0 build.stopped = 0 build.last_activity = 0 for step in list(BuildStep.select(self.env, build=build.id)): step.delete() build.update() Attachment.delete_all(self.env, 'build', build.resource.id) #commit def should_delete_build(self, build, repos): config = BuildConfig.fetch(self.env, build.config) config_name = config and config.name \ or 'unknown config "%s"' % build.config platform = TargetPlatform.fetch(self.env, build.platform) # Platform may or may not exist anymore - get safe name for logging platform_name = platform and platform.name \ or 'unknown platform "%s"' % build.platform # Drop build if platform no longer exists if not platform: self.log.info('Dropping build of configuration "%s" at ' 'revision [%s] on %s because the platform no longer ' 'exists', config.name, build.rev, platform_name) return True # Ignore pending builds for deactived build configs if not (config and config.active): self.log.info('Dropping build of configuration "%s" at ' 'revision [%s] on %s because the configuration is ' 'deactivated', config_name, build.rev, platform_name) return True # Stay within the revision limits of the build config if (config.min_rev and repos.rev_older_than(build.rev, config.min_rev)) \ or (config.max_rev and repos.rev_older_than(config.max_rev, build.rev)): self.log.info('Dropping build of configuration "%s" at revision [%s] on ' '"%s" because it is 
outside of the revision range of the ' 'configuration', config.name, build.rev, platform_name) return True # If not 'build_all', drop if a more recent revision is available if not self.build_all and \ len(list(Build.select(self.env, config=build.config, min_rev_time=build.rev_time, platform=build.platform))) > 1: self.log.info('Dropping build of configuration "%s" at revision [%s] ' 'on "%s" because a more recent build exists', config.name, build.rev, platform_name) return True return False
2.375
2
dynamic-programming/Python/0120-triangle.py
lemonnader/LeetCode-Solution-Well-Formed
1
18015
from typing import List class Solution: def minimumTotal(self, triangle: List[List[int]]) -> int: size = len(triangle) if size == 0: return 0 dp = [0] * size for i in range(size): dp[i] = triangle[size - 1][i] for i in range(size - 2, - 1, -1): for j in range(i + 1): dp[j] = min(dp[j], dp[j + 1]) + triangle[i][j] return dp[0]
3.21875
3
regparser/tree/xml_parser/tree_utils.py
pkfec/regulations-parser
26
18016
<reponame>pkfec/regulations-parser # -*- coding: utf-8 -*- from __future__ import unicode_literals from copy import deepcopy from functools import wraps from itertools import chain from lxml import etree from six.moves.html_parser import HTMLParser from regparser.tree.priority_stack import PriorityStack def prepend_parts(parts_prefix, n): """ Recursively preprend parts_prefix to the parts of the node n. Parts is a list of markers that indicates where you are in the regulation text. """ n.label = parts_prefix + n.label for c in n.children: prepend_parts(parts_prefix, c) return n class NodeStack(PriorityStack): """ The NodeStack aids our construction of a struct.Node tree. We process xml one paragraph at a time; using a priority stack allows us to insert items at their proper depth and unwind the stack (collecting children) as necessary""" def unwind(self): """ Unwind the stack, collapsing sub-paragraphs that are on the stack into the children of the previous level. """ children = self.pop() parts_prefix = self.peek_last()[1].label children = [prepend_parts(parts_prefix, c[1]) for c in children] self.peek_last()[1].children = children def collapse(self): """After all of the nodes have been inserted at their proper levels, collapse them into a single root node""" while self.size() > 1: self.unwind() return self.peek_last()[1] def split_text(text, tokens): """ Given a body of text that contains tokens, splice the text along those tokens. """ starts = [text.find(t) for t in tokens] if not starts or starts[0] != 0: starts.insert(0, 0) slices = zip(starts, starts[1:]) texts = [text[i[0]:i[1]] for i in slices] + [text[starts[-1]:]] return texts def _combine_with_space(prev_text, next_text, add_space_if_needed): """Logic to determine where to add spaces to XML. 
Generally this is just as matter of checking for space characters, but there are some outliers""" prev_text, next_text = prev_text or "", next_text or "" prev_char, next_char = prev_text[-1:], next_text[:1] needs_space = (not prev_char.isspace() and not next_char.isspace() and next_char and prev_char not in u'([/<—-' and next_char not in u').;,]>/—-') if add_space_if_needed and needs_space: return prev_text + " " + next_text else: return prev_text + next_text def replace_xml_node_with_text(node, text): """There are some complications w/ lxml when determining where to add the replacement text. Account for all of that here.""" parent, prev = node.getparent(), node.getprevious() if prev is not None: prev.tail = (prev.tail or '') + text else: parent.text = (parent.text or '') + text parent.remove(node) def replace_xpath(xpath): """Decorator to convert all elements matching the provided xpath in to plain text. This'll convert the wrapped function into a new function which will search for the provided xpath and replace all matches""" def decorator(fn): @wraps(fn) def wrapped(node, add_spaces): for element in node.xpath(xpath): text = fn(element) text = _combine_with_space(text, element.tail, add_spaces) replace_xml_node_with_text(element, text) return wrapped return decorator @replace_xpath(".//E[@T='52' or @T='54']") def subscript_to_plaintext(element): return "_{{{0}}}".format(element.text) @replace_xpath(".//E[@T='51' or @T='53']|.//SU[not(@footnote)]") def superscript_to_plaintext(element): return "^{{{0}}}".format(element.text) @replace_xpath(".//SU[@footnote]") def footnotes_to_plaintext(element): footnote = element.attrib['footnote'] footnote = footnote.replace('(', r'\(').replace(')', r'\)') return u"[^{0}]({1})".format(element.text, footnote) def get_node_text(node, add_spaces=False): """ Extract all the text from an XML node (including the text of it's children). 
""" node = deepcopy(node) subscript_to_plaintext(node, add_spaces) superscript_to_plaintext(node, add_spaces) footnotes_to_plaintext(node, add_spaces) parts = [node.text] + list( chain(*([c.text, c.tail] for c in node.getchildren()))) final_text = '' for part in filter(bool, parts): final_text = _combine_with_space(final_text, part, add_spaces) return final_text.strip() _tag_black_list = ('PRTPAGE', ) def get_node_text_tags_preserved(xml_node): """Get the body of an XML node as a string, avoiding a specific blacklist of bad tags.""" xml_node = deepcopy(xml_node) etree.strip_tags(xml_node, *_tag_black_list) # Remove the wrapping tag node_text = xml_node.text or '' node_text += ''.join(etree.tounicode(child) for child in xml_node) node_text = HTMLParser().unescape(node_text) return node_text
2.875
3
aiida/orm/implementation/querybuilder.py
PercivalN/aiida-core
1
18017
<filename>aiida/orm/implementation/querybuilder.py # -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### """Backend query implementation classes""" from __future__ import division from __future__ import print_function from __future__ import absolute_import import abc import six from aiida.common import exceptions from aiida.common.lang import abstractclassmethod, type_check from aiida.common.exceptions import InputValidationError __all__ = ('BackendQueryBuilder',) @six.add_metaclass(abc.ABCMeta) class BackendQueryBuilder(object): """Backend query builder interface""" # pylint: disable=invalid-name,too-many-public-methods,useless-object-inheritance outer_to_inner_schema = None inner_to_outer_schema = None def __init__(self, backend): """ :param backend: the backend """ from . import backends type_check(backend, backends.Backend) self._backend = backend self.inner_to_outer_schema = dict() self.outer_to_inner_schema = dict() @abc.abstractmethod def Node(self): """ Decorated as a property, returns the implementation for DbNode. It needs to return a subclass of sqlalchemy.Base, which means that for different ORM's a corresponding dummy-model must be written. """ @abc.abstractmethod def Link(self): """ A property, decorated with @property. Returns the implementation for the DbLink """ @abc.abstractmethod def Computer(self): """ A property, decorated with @property. Returns the implementation for the Computer """ @abc.abstractmethod def User(self): """ A property, decorated with @property. 
Returns the implementation for the User """ @abc.abstractmethod def Group(self): """ A property, decorated with @property. Returns the implementation for the Group """ @abc.abstractmethod def AuthInfo(self): """ A property, decorated with @property. Returns the implementation for the AuthInfo """ @abc.abstractmethod def Comment(self): """ A property, decorated with @property. Returns the implementation for the Comment """ @abc.abstractmethod def Log(self): """ A property, decorated with @property. Returns the implementation for the Log """ @abc.abstractmethod def table_groups_nodes(self): """ A property, decorated with @property. Returns the implementation for the many-to-many relationship between group and nodes. """ @property def AiidaNode(self): """ A property, decorated with @property. Returns the implementation for the AiiDA-class for Node """ from aiida.orm import Node return Node @abc.abstractmethod def get_session(self): """ :returns: a valid session, an instance of sqlalchemy.orm.session.Session """ @abc.abstractmethod def modify_expansions(self, alias, expansions): """ Modify names of projections if ** was specified. This is important for the schema having attributes in a different table. """ @abstractclassmethod def get_filter_expr_from_attributes(cls, operator, value, attr_key, column=None, column_name=None, alias=None): # pylint: disable=too-many-arguments """ Returns an valid SQLAlchemy expression. :param operator: The operator provided by the user ('==', '>', ...) :param value: The value to compare with, e.g. (5.0, 'foo', ['a','b']) :param str attr_key: The path to that attribute as a tuple of values. I.e. if that attribute I want to filter by is the 2nd element in a list stored under the key 'mylist', this is ('mylist', '2'). :param column: Optional, an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or :param str column_name: The name of the column, and the backend should get the InstrumentedAttribute. :param alias: The aliased class. 
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression """ @classmethod def get_corresponding_properties(cls, entity_table, given_properties, mapper): """ This method returns a list of updated properties for a given list of properties. If there is no update for the property, the given property is returned in the list. """ if entity_table in mapper.keys(): res = list() for given_property in given_properties: res.append(cls.get_corresponding_property(entity_table, given_property, mapper)) return res return given_properties @classmethod def get_corresponding_property(cls, entity_table, given_property, mapper): """ This method returns an updated property for a given a property. If there is no update for the property, the given property is returned. """ try: # Get the mapping for the specific entity_table property_mapping = mapper[entity_table] try: # Get the mapping for the specific property return property_mapping[given_property] except KeyError: # If there is no mapping, the property remains unchanged return given_property except KeyError: # If it doesn't exist, it means that the given_property remains v return given_property @classmethod def get_filter_expr_from_column(cls, operator, value, column): """ A method that returns an valid SQLAlchemy expression. :param operator: The operator provided by the user ('==', '>', ...) :param value: The value to compare with, e.g. 
(5.0, 'foo', ['a','b']) :param column: an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or :returns: An instance of sqlalchemy.sql.elements.BinaryExpression """ # Label is used because it is what is returned for the # 'state' column by the hybrid_column construct # Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed # pylint: disable=no-name-in-module,import-error from sqlalchemy.sql.elements import Cast, Label from sqlalchemy.orm.attributes import InstrumentedAttribute, QueryableAttribute from sqlalchemy.sql.expression import ColumnClause from sqlalchemy.types import String if not isinstance(column, (Cast, InstrumentedAttribute, QueryableAttribute, Label, ColumnClause)): raise TypeError('column ({}) {} is not a valid column'.format(type(column), column)) database_entity = column if operator == '==': expr = database_entity == value elif operator == '>': expr = database_entity > value elif operator == '<': expr = database_entity < value elif operator == '>=': expr = database_entity >= value elif operator == '<=': expr = database_entity <= value elif operator == 'like': # the like operator expects a string, so we cast to avoid problems # with fields like UUID, which don't support the like operator expr = database_entity.cast(String).like(value) elif operator == 'ilike': expr = database_entity.ilike(value) elif operator == 'in': expr = database_entity.in_(value) else: raise InputValidationError('Unknown operator {} for filters on columns'.format(operator)) return expr @abc.abstractmethod def get_projectable_attribute(self, alias, column_name, attrpath, cast=None, **kwargs): pass @abc.abstractmethod def get_aiida_res(self, key, res): """ Some instance returned by ORM (django or SA) need to be converted to Aiida instances (eg nodes) :param key: the key that this entry would be returned with :param res: the result returned by the query :returns: an aiida-compatible instance """ @abc.abstractmethod def yield_per(self, query, batch_size): """ 
:param int batch_size: Number of rows to yield per step Yields *count* rows at a time :returns: a generator """ @abc.abstractmethod def count(self, query): """ :returns: the number of results """ @abc.abstractmethod def first(self, query): """ Executes query in the backend asking for one instance. :returns: One row of aiida results """ @abc.abstractmethod def iterall(self, query, batch_size, tag_to_index_dict): """ :return: An iterator over all the results of a list of lists. """ @abc.abstractmethod def iterdict(self, query, batch_size, tag_to_projected_properties_dict, tag_to_alias_map): """ :returns: An iterator over all the results of a list of dictionaries. """ @abc.abstractmethod def get_column_names(self, alias): """ Return the column names of the given table (alias). """ def get_column(self, colname, alias): # pylint: disable=no-self-use """ Return the column for a given projection. """ try: return getattr(alias, colname) except AttributeError: raise exceptions.InputValidationError("{} is not a column of {}\n" "Valid columns are:\n" "{}".format( colname, alias, '\n'.join(alias._sa_class_manager.mapper.c.keys()) # pylint: disable=protected-access ))
1.898438
2
qiskit/util.py
alejomonbar/qiskit-terra
0
18018
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=wildcard-import,unused-wildcard-import """Common utilities for Qiskit.""" # Deprecated: for backwards compatibility to be removed in a future release from qiskit.utils import *
1.164063
1
examples/plot_spirals.py
zblz/gammapy
0
18019
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Plot Milky Way spiral arm models.

Draws the four arms of both the Vallee and the Faucher spiral models on
one figure, adds the Galactic bar of the Vallee model, and saves the
result as a PDF.
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gammapy.astro.population.spatial import ValleeSpiral, FaucherSpiral

vallee = ValleeSpiral()
faucher = FaucherSpiral()

# Galactocentric radii at which each arm is evaluated.
radius = np.arange(2.1, 20, 0.1)

for arm in range(4):
    # Vallee arm: solid line, labelled with the arm name.
    x, y = vallee.xy_position(radius=radius, spiralarm_index=arm)
    plt.plot(x, y, label=vallee.spiralarms[arm])

    # Matching Faucher arm: dash-dotted line, prefixed label.
    x, y = faucher.xy_position(radius=radius, spiralarm_index=arm)
    plt.plot(x, y, ls='-.', label='Faucher ' + faucher.spiralarms[arm])

# Galactic bar from the Vallee model.
plt.plot(vallee.bar['x'], vallee.bar['y'])

plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.legend(ncol=2)

filename = 'valee_spiral.pdf'
print('Writing {0}'.format(filename))
plt.savefig(filename)
2.25
2
pytc/fitters/bayesian.py
jharman25/pytc
20
18020
__description__ = \
"""
Fitter subclass for performing bayesian (MCMC) fits.
"""
__author__ = "<NAME>"
__date__ = "2017-05-10"

from .base import Fitter

import emcee, corner
import numpy as np
import scipy.optimize as optimize

import multiprocessing

class BayesianFitter(Fitter):
    """
    Fit model parameters by sampling the posterior with emcee's ensemble
    MCMC sampler.  Priors are uniform inside the parameter bounds; the
    point estimate reported is the posterior mean, with the standard
    deviation and 95% credibility interval taken from the samples.
    """

    def __init__(self,num_walkers=100,initial_walker_spread=1e-4,ml_guess=True,
                 num_steps=100,burn_in=0.1,num_threads=1):
        """
        Initialize the bayesian fitter.

        Parameters
        ----------
        num_walkers : int > 0
            how many markov chains to have in the analysis
        initial_walker_spread : float
            each walker is initialized with parameters sampled from normal
            distributions with mean equal to the initial guess and a standard
            deviation of guess*initial_walker_spread
        ml_guess : bool
            if true, do an ML optimization to get the initial guess
        num_steps : int
            number of steps to run the markov chains
        burn_in : float between 0 and 1
            fraction of samples to discard from the start of the run
        num_threads : int or `"max"`
            number of threads to use.  if `"max"`, use the total number of
            cpus. [NOT YET IMPLEMENTED]

        Raises
        ------
        ValueError
            if num_threads is neither "max" nor a positive integer
        NotImplementedError
            if num_threads != 1 (multithreading not implemented yet)
        """

        Fitter.__init__(self)

        self._num_walkers = num_walkers
        self._initial_walker_spread = initial_walker_spread
        self._ml_guess = ml_guess
        self._num_steps = num_steps
        self._burn_in = burn_in
        self._num_threads = num_threads

        if self._num_threads == "max":
            self._num_threads = multiprocessing.cpu_count()

        # BUGFIX: the original test was
        #     if not type(self._num_threads) == int and self._num_threads > 0:
        # which, because of the missing parentheses, (a) never rejected
        # non-positive integers such as 0 or -1 and (b) raised a confusing
        # TypeError instead of ValueError for strings, since "s" > 0 is an
        # invalid comparison in python 3.
        if not (isinstance(self._num_threads, int) and self._num_threads > 0):
            err = "num_threads must be 'max' or a positive integer\n"
            raise ValueError(err)

        if self._num_threads != 1:
            err = "multithreading has not yet been (fully) implemented.\n"
            raise NotImplementedError(err)

        self._success = None

        self.fit_type = "bayesian"

    def ln_prior(self,param):
        """
        Log prior of fit parameters.  Priors are uniform between bounds and
        set to -np.inf outside of bounds.

        Parameters
        ----------
        param : array of floats
            parameters to fit

        Returns
        -------
        float value for log of priors.
        """

        # If a parameter falls outside of the bounds, make the prior -infinity
        if np.sum(param < self._bounds[0,:]) > 0 or np.sum(param > self._bounds[1,:]) > 0:
            return -np.inf

        # otherwise, uniform
        return 0.0

    def ln_prob(self,param):
        """
        Posterior probability of model parameters.

        Parameters
        ----------
        param : array of floats
            parameters to fit

        Returns
        -------
        float value for log posterior probability
        """

        # Calculate prior.  If not finite, this solution has an -infinity log
        # likelihood
        ln_prior = self.ln_prior(param)
        if not np.isfinite(ln_prior):
            return -np.inf

        # Calculate likelihood.  If not finite, this solution has an -infinity
        # log likelihood
        ln_like = self.ln_like(param)
        if not np.isfinite(ln_like):
            return -np.inf

        # log posterior is log prior plus log likelihood
        return ln_prior + ln_like

    def fit(self,model,parameters,bounds,y_obs,y_err=None,param_names=None):
        """
        Fit the parameters.

        Parameters
        ----------
        model : callable
            model to fit.  model should take "parameters" as its only argument.
            this should (usually) be GlobalFit._y_calc
        parameters : array of floats
            parameters to be optimized.  usually constructed by
            GlobalFit._prep_fit
        bounds : list
            list of two lists containing lower and upper bounds
        y_obs : array of floats
            observations in an concatenated array
        y_err : array of floats or None
            standard deviation of each observation.  if None, each observation
            is assigned an error of 1/num_obs
        param_names : array of str
            names of parameters.  If None, parameters assigned names p0,p1,..pN
        """

        self._model = model
        self._y_obs = y_obs

        # Convert the bounds (list of lower and upper lists) into a 2d numpy array
        self._bounds = np.array(bounds)

        # If no error is specified, assign the error as 1/N, identical for all
        # points
        self._y_err = y_err
        if y_err is None:
            self._y_err = np.array([1/len(self._y_obs) for i in range(len(self._y_obs))])

        if param_names is None:
            self._param_names = ["p{}".format(i) for i in range(len(parameters))]
        else:
            self._param_names = param_names[:]

        # Make initial guess (ML or just whatever the parameters sent in were)
        if self._ml_guess:
            # NOTE(review): the sign flip is harmless here because
            # least_squares minimizes the sum of *squared* residuals.
            fn = lambda *args: -self.weighted_residuals(*args)
            ml_fit = optimize.least_squares(fn,x0=parameters,bounds=self._bounds)
            self._initial_guess = np.copy(ml_fit.x)
        else:
            self._initial_guess = np.copy(parameters)

        # Create walker positions.  Size of perturbation in parameter depends
        # on the scale of the parameter itself.
        perturb_size = self._initial_guess*self._initial_walker_spread

        ndim = len(parameters)
        pos = [self._initial_guess + np.random.randn(ndim)*perturb_size
               for i in range(self._num_walkers)]

        # Sample using walkers
        self._fit_result = emcee.EnsembleSampler(self._num_walkers, ndim, self.ln_prob,
                                                 threads=self._num_threads)
        self._fit_result.run_mcmc(pos, self._num_steps)

        # Create list of samples, discarding the burn-in fraction
        to_discard = int(round(self._burn_in*self._num_steps,0))
        self._samples = self._fit_result.chain[:,to_discard:,:].reshape((-1,ndim))
        self._lnprob = self._fit_result.lnprobability[:,:].reshape(-1)

        # Get mean and standard deviation
        self._estimate = np.mean(self._samples,axis=0)
        self._stdev = np.std(self._samples,axis=0)

        # Calculate 95% confidence intervals
        self._ninetyfive = []
        lower = int(round(0.025*self._samples.shape[0],0))
        upper = int(round(0.975*self._samples.shape[0],0))
        for i in range(self._samples.shape[1]):
            nf = np.sort(self._samples[:,i])
            self._ninetyfive.append([nf[lower],nf[upper]])
        self._ninetyfive = np.array(self._ninetyfive)

        self._success = True

    @property
    def fit_info(self):
        """
        Information about the Bayesian run.
        """

        output = {}

        output["Num walkers"] = self._num_walkers
        output["Initial walker spread"] = self._initial_walker_spread
        output["Use ML guess"] = self._ml_guess
        output["Num steps"] = self._num_steps
        output["Burn in"] = self._burn_in
        output["Final sample number"] = len(self._samples[:,0])
        output["Num threads"] = self._num_threads

        return output

    @property
    def samples(self):
        """
        Bayesian samples (flattened post-burn-in walker chain).
        """

        return self._samples
2.765625
3
code/taskB/models.py
nft-appraiser/nft-appraiser-api
0
18021
from django.db import models


class TaskB_table(models.Model):
    """Database row pairing an uploaded image with its predicted price."""

    # Uploaded image; files land under MEDIA_ROOT/taskB/.
    img = models.ImageField(upload_to='taskB/', default='defo')
    # Price predicted for this image.
    # NOTE(review): presumably filled in by the appraisal model -- confirm
    # against the view code that creates these rows.
    pred_price = models.FloatField()
1.804688
2
DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py
Py-Himanshu-Patel/Learn-Python
0
18022
from inspect import isclass, isabstract, getmembers

import autos


def isconcrete(obj):
    """Return True when *obj* is a class that is not abstract."""
    return isclass(obj) and not isabstract(obj)


class AutoFactory:
    """Simple factory that builds vehicle instances from the concrete
    classes exported by the ``autos`` module."""

    # Maps car model name -> class for the car.
    # NOTE(review): this is a *class-level* dict, so it is shared by every
    # AutoFactory instance and re-populated by each __init__.  Kept as-is
    # for backward compatibility with any code reading AutoFactory.vehicles
    # directly, but consider making it an instance attribute.
    vehicles = {}

    def __init__(self):
        self.load_autos()

    def load_autos(self):
        """Populate ``vehicles`` with every concrete AbstractAuto subclass
        found in the ``autos`` module."""
        for name, member in getmembers(autos, isconcrete):
            # isconcrete() already guarantees ``member`` is a class, so the
            # original redundant isclass() re-check is dropped; plain item
            # assignment replaces the awkward update([[name, member]]) call.
            if issubclass(member, autos.AbstractAuto):
                self.vehicles[name] = member

    def create_instance(self, carname):
        """Instantiate *carname*; fall back to a NullCar for unknown models."""
        if carname in self.vehicles:
            return self.vehicles[carname]()
        return autos.NullCar(carname)
3.078125
3
Costa Rican Household Poverty Level Prediction/tens.py
hautan/train_tf
0
18023
# -*- coding: utf-8 -*-
# We must always import the relevant libraries for our problem at hand. NumPy and TensorFlow are required for this example.
# https://www.kaggle.com/c/costa-rican-household-poverty-prediction/data#_=_
# Exploratory TF-1.x estimator script for the Kaggle "Costa Rican Household
# Poverty Level Prediction" competition.  Flat, order-dependent script:
# load -> impute -> normalize -> build feature columns -> train/evaluate
# linear and DNN estimators.  Commented-out lines are kept as a record of
# the data exploration.
import numpy as np
np.set_printoptions(threshold='nan')
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd

def toInt(x):
    # Map the dataset's 'yes'/'no' strings to 1/0; pass anything else through.
    if x == 'yes':
        return 1
    else:
        if x == 'no':
            return 0
        else:
            return x

# Load the training data and impute missing values with column means.
costa_rica_household = pd.read_csv('data/train.csv')
#x1 = costa_rica_household.describe()
#x1["v2a1"]
costa_rica_household.head()
list(costa_rica_household.dtypes)
#costa_rica_household = costa_rica_household.fillna(0)
costa_rica_household = costa_rica_household.fillna(costa_rica_household.mean())
#costa_rica_household["idhogar"] = costa_rica_household["idhogar"].apply(lambda x: int(x, 16))
#costa_rica_household["dependency"] = costa_rica_household["dependency"].apply(lambda x: toInt(x))
#costa_rica_household["edjefe"] = costa_rica_household["edjefe"].apply(lambda x: toInt(x))//edjefa
#costa_rica_household.loc[costa_rica_household['dependency'] == "'<='"]
#v1 = costa_rica_household[costa_rica_household['dependency'].apply(lambda x: type(x) == str)]['dependency']
#col_name = costa_rica_household.columns
#print(list(col_name))
#costa_rica_household[["age", "SQBage", "agesq", "r4h1", "r4h2"]]

# Numeric feature columns (excluding the r4* household-member counts).
cols_to_norm = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1',
                'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad',
                'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc',
                'paredfibras', 'paredother', 'pisomoscer', 'pisocemento', 'pisoother',
                'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso',
                'techocane', 'techootro', 'cielorazo', 'abastaguadentro', 'abastaguafuera',
                'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1',
                'sanitario2', 'sanitario3', 'sanitario5', 'sanitario6', 'energcocinar1',
                'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1',
                'elimbasu2', 'elimbasu3', 'elimbasu4', 'elimbasu5', 'elimbasu6',
                'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3',
                'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female', 'estadocivil1',
                'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5',
                'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
                'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6',
                'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10',
                'parentesco11', 'parentesco12', 'hogar_nin', 'hogar_adul',
                'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1', 'instlevel2',
                'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7',
                'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
                'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer',
                'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2',
                'lugar3', 'lugar4', 'lugar5', 'lugar6', 'area1', 'area2',
                'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin',
                'SQBovercrowding', 'SQBdependency', 'SQBmeaned', 'agesq']
# Household-member count columns, later bucketized/crossed.
cat_cols_to_norm = ['r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3']
# Union of the two lists above; these get min-max scaled below.
cols_of_interest = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1',
                    'r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3',
                    'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad',
                    'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc',
                    'paredfibras', 'paredother', 'pisomoscer', 'pisocemento', 'pisoother',
                    'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso',
                    'techocane', 'techootro', 'cielorazo', 'abastaguadentro', 'abastaguafuera',
                    'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1',
                    'sanitario2', 'sanitario3', 'sanitario5', 'sanitario6', 'energcocinar1',
                    'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1',
                    'elimbasu2', 'elimbasu3', 'elimbasu4', 'elimbasu5', 'elimbasu6',
                    'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3',
                    'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female', 'estadocivil1',
                    'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5',
                    'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
                    'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6',
                    'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10',
                    'parentesco11', 'parentesco12', 'hogar_nin', 'hogar_adul',
                    'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1', 'instlevel2',
                    'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7',
                    'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
                    'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer',
                    'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2',
                    'lugar3', 'lugar4', 'lugar5', 'lugar6', 'area1', 'area2',
                    'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin',
                    'SQBovercrowding', 'SQBdependency', 'SQBmeaned', 'agesq']
#costa_rica_household[cols_to_norm] = costa_rica_household[cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
#costa_rica_household[cat_cols_to_norm] = costa_rica_household[cat_cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
# Min-max scale every feature of interest to [0, 1].
costa_rica_household[cols_of_interest] = costa_rica_household[cols_of_interest].apply(lambda x: (x - x.min())/(x.max() - x.min()))

# One numeric feature column per scaled feature.
feat_cols = []
for col_name in cols_to_norm:
    col_name = tf.feature_column.numeric_column(col_name)
    feat_cols.append(col_name)

# Bucketize and cross the r4* member-count columns.
age_range_count = [1,2,3,4,5,7]
r4h1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h1'), boundaries=age_range_count)
r4h2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h2'), boundaries=age_range_count)
r4h3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h3'), boundaries=age_range_count)
crossed_r4h = tf.feature_column.crossed_column([r4h1_bucket, r4h2_bucket, r4h3_bucket], 100)
#fc = [r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h]
r4m1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m1'), boundaries=age_range_count)
r4m2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m2'), boundaries=age_range_count)
r4m3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m3'), boundaries=age_range_count)
crossed_r4m = tf.feature_column.crossed_column([r4m1_bucket, r4m2_bucket, r4m3_bucket], 100)
r4t1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t1'), boundaries=age_range_count)
r4t2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t2'), boundaries=age_range_count)
r4t3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t3'), boundaries=age_range_count)
crossed_r4t = tf.feature_column.crossed_column([r4t1_bucket, r4t2_bucket, r4t3_bucket], 100)
feat_cols.extend([r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h,
                  r4m1_bucket, r4m2_bucket, r4m3_bucket, crossed_r4m,
                  r4t1_bucket, r4t2_bucket, r4t3_bucket, crossed_r4t])
len(feat_cols)
feat_cols[138]

# Linear classifier over the feature columns.
# NOTE(review): Target takes values 1-4 in this competition, but
# n_classes=4 expects labels 0-3 -- verify label encoding before training.
estimator = tf.estimator.LinearClassifier(feature_columns=feat_cols, n_classes=4)
#costa_rica_household[(costa_rica_household.Target == 4)]
# Drop identifier / non-numeric columns from the feature matrix.
x_data = costa_rica_household.drop('Id', axis=1).drop('edjefa', axis=1).drop('idhogar', axis=1).drop('dependency', axis=1).drop('Target', axis=1)
#x_data['idhogar']
#x_data.describe()
#x_data.head()
labels = costa_rica_household['Target']
labels.head()

from sklearn.model_selection import train_test_split
X_train, X_eval, y_train, y_eval = train_test_split(x_data, labels, test_size=0.3, random_state=101)
print(X_train.shape, y_eval.shape)

# Train and evaluate the linear estimator.
input_func = tf.estimator.inputs.pandas_input_fn(x=X_train, y=y_train, batch_size=10, num_epochs=100, shuffle=True)
estimator.train(input_fn=input_func,steps=1000)
eval_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, y=y_eval, batch_size=10, num_epochs=1, shuffle=False)
eval_metrics = estimator.evaluate(input_fn=eval_input_func)
print('Eval metrics')
print(eval_metrics)

# Collect per-row predictions on the evaluation split.
pred_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, shuffle=False)
predictions = []
for predict in estimator.predict(input_fn=pred_input_func):
    predictions.append(predict)
predictions

#categorical_columun_voc = tf.feature_column.embedding_column(categorical_columun_voc, 4)
# DNN baseline over the same columns.
# NOTE(review): n_classes=2 here disagrees with the 4-class linear model
# above -- looks like a leftover from experimentation; confirm intent.
dnn_classifier = tf.estimator.DNNClassifier(hidden_units=[10, 10, 10], feature_columns=feat_cols, n_classes=2)
dnn_classifier.train(input_fn=input_func,steps=1000)
dnn_eval_metrics = dnn_classifier.evaluate(input_fn=eval_input_func)
dnn_eval_metrics
3.625
4
DTL_tests/unittests/test_api.py
rocktavious/DevToolsLib
1
18024
# Unit tests for the DTL.api package.  Each TestCase pins the exact
# behaviour of one public API area; the expected literals below encode
# the current contract and should not be edited casually.
import os
import time
import unittest

from DTL.api import *


class TestCaseApiUtils(unittest.TestCase):
    # Tests for the apiUtils helper module (synthesize, wildcardToRe,
    # getClassName, BitTracker).

    def setUp(self):
        # synthesize() is expected to generate the attribute plus
        # getter/setter accessors checked in test_synthesize.
        apiUtils.synthesize(self, 'mySynthesizeVar', None)
        self.bit = apiUtils.BitTracker.getBit(self)

    def test_wildcardToRe(self):
        # Windows wildcard path -> case-insensitive regex; forward and back
        # slashes must normalize to the same pattern.
        self.assertEquals(apiUtils.wildcardToRe('c:\CIG\main\*.*'), '(?i)c\\:\\\\CIG\\\\main\\\\[^\\\\]*\\.[^\\\\]*$')
        self.assertEquals(apiUtils.wildcardToRe('c:\CIG\main\*.*'), apiUtils.wildcardToRe('c:/CIG/main/*.*'))

    def test_synthesize(self):
        self.assertIn('_mySynthesizeVar', self.__dict__)
        self.assertTrue(hasattr(self, 'mySynthesizeVar'))
        self.assertTrue(hasattr(self, 'getMySynthesizeVar'))
        self.assertTrue(hasattr(self, 'setMySynthesizeVar'))
        self.assertEqual(self.getMySynthesizeVar(), self.mySynthesizeVar)

    def test_getClassName(self):
        self.assertEqual(apiUtils.getClassName(self), 'TestCaseApiUtils')

    def test_bittracker(self):
        # The bit assigned to an object must be stable across calls.
        self.assertEqual(apiUtils.BitTracker.getBit(self), self.bit)


class TestCaseDotifyDict(unittest.TestCase):
    # DotifyDict: nested dicts addressable with attribute/dotted-key access.

    def setUp(self):
        self.dotifydict = DotifyDict({'one':{'two':{'three':'value'}}})

    def test_dotifydict(self):
        self.assertEquals(self.dotifydict.one.two, {'three':'value'})
        self.dotifydict.one.two.update({'three':3,'four':4})
        self.assertEquals(self.dotifydict.one.two.four, 4)
        self.assertEquals(self.dotifydict.one, self.dotifydict.one)
        self.assertIn('two.three', (self.dotifydict.one))
        # repr must round-trip through eval (checked again below).
        self.assertEquals(str(self.dotifydict), "DotifyDict(datadict={'one': DotifyDict(datadict={'two': DotifyDict(datadict={'four': 4, 'three': 3})})})")
        self.assertEquals(self.dotifydict.one.two, eval(str(self.dotifydict.one.two)))


class TestCasePath(unittest.TestCase):
    # Path: path wrapper with separator normalization and join().

    def setUp(self):
        self.filepath = Path.getTempPath()

    def test_path(self):
        temp_path = Path.getTempPath()
        self.assertEquals(self.filepath, temp_path)
        self.assertEquals(self.filepath.name, temp_path.name)
        self.assertEquals(self.filepath.parent, temp_path.parent)
        self.assertIn(self.filepath.parent.parent.name, self.filepath)
        # Mixed separators must compare equal to the os.path.join form,
        # including when used inside a set (hashing consistency).
        myPathSepTest = Path('c:\\Users/krockman/documents').join('mytest')
        self.assertEquals(myPathSepTest, os.path.join('c:','Users','krockman','documents','mytest'))
        self.assertEquals({'TestKey', myPathSepTest},{'TestKey',os.path.join('c:','Users','krockman','documents','mytest')})


class TestCaseDocument(unittest.TestCase):
    # Document: dict-like object that can persist itself to its filepath.

    def setUp(self):
        self.doc = Document({'Testing':'min'})
        self.doc.filepath = Path.getTempPath().join('document.dat')

    def test_document(self):
        self.assertEquals(self.doc.filepath, Path.getTempPath().join('document.dat'))
        # repr/eval round-trip, then save() must create the backing file.
        self.assertEquals(self.doc, eval(str(self.doc)))
        self.doc.save()
        self.assertTrue(self.doc.filepath.exists())

    def tearDown(self):
        # Clean up the temp file written by test_document.
        self.doc.filepath.remove()


class TestCaseVersion(unittest.TestCase):
    # Version: comparable against tuples, strings, and its own repr.

    def setUp(self):
        self.version = Version('2.0.5.Beta')

    def test_version(self):
        self.assertEquals(self.version,(2,0,5,'Beta'))
        self.assertEquals(self.version,'2.0.5.Beta')
        self.assertEquals(self.version,eval(str(self.version)))
        self.version.update({'status':VersionStatus.Gold})
        self.assertNotEquals(self.version,(2,0,5,'Beta'))


class TestCaseDecorators(unittest.TestCase):
    # Smoke tests for the decorator helpers; these only verify the wrapped
    # methods run, not their output.

    @Safe
    def test_safe(self):
        # NOTE(review): presumably @Safe swallows the ZeroDivisionError so
        # the test passes -- confirm against the decorator implementation.
        1/0

    @Timer
    def test_timer(self, timer):
        # Five 2-second laps; slow by design to exercise lap timing.
        for i in range(5):
            time.sleep(2)
            timer.newLap(i)

    @Profile
    def test_profile(self):
        for i in range(5):
            (1 / 20 * 5 - 10 + 15) == 1


def main():
    unittest.main(verbosity=2)

if __name__ == '__main__':
    main()
2.53125
3
src/yellow_ball/src/ball.py
AndyHUI711/ELEC3210-Group7
1
18025
#!/usr/bin/env python
"""Yellow-ball follower node.

Subscribes to the camera image, segments yellow blobs in HSV space, and
publishes Twist commands that drive the robot toward the ball whenever
manual (laser) mode is switched off.
"""
import numpy as np
import cv2
import math
import rospy
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Bool
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist

bridge = CvBridge()
# True while the laser/manual mode is active; ball following is disabled then.
laser_scan_on = True

def auto_mode_callback(msg):
    """Record the current laser-switch state from /vrep/laser_switch."""
    global laser_scan_on
    laser_scan_on = msg.data

def image_callback(msg):
    """Detect the yellow ball in one camera frame and publish a chase command.

    For every yellow contour found, a proportional-ish controller maps the
    centroid offset from the desired position to linear/angular velocity.
    """
    global cv2_img
    try:
        cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
        cv2_img = cv2.flip(cv2_img, 1)
        hsv = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2HSV)
        # HSV band for "yellow"; tuned for the V-REP simulator lighting.
        lower_yellow = np.array([20, 100, 100])
        upper_yellow = np.array([30, 255, 255])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        # find ball
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) < 1 or laser_scan_on:
            return
        # IMPROVEMENT: create the publisher once per frame, not once per
        # contour as the original did.
        pub = rospy.Publisher('/vrep/cmd_vel', Twist, queue_size=10)
        # locate ball
        for c in contours:
            M = cv2.moments(c)
            # BUGFIX: degenerate (zero-area) contours have m00 == 0, which
            # crashed the original with ZeroDivisionError; skip them.
            if M["m00"] == 0:
                continue
            cX = float(M["m10"]/M["m00"])
            cY = float(M["m01"]/M["m00"])
            rX = int(M["m10"]/M["m00"])
            rY = int(M["m01"]/M["m00"])
            # Approximate ball radius from the contour area.
            radius = int(math.sqrt(cv2.contourArea(c)/math.pi))
            h, w = cv2_img.shape[:2]
            # Desired centroid: horizontally centered, near the bottom edge.
            (ideal_X, ideal_Y) = (w/2, h-(20 + radius))
            verticle_diff = cY-ideal_Y
            angle_diff = cX-ideal_X
            twist = Twist()
            # linear: drive forward/backward depending on vertical offset
            if verticle_diff <= -50:
                twist.linear.x = 1.1
            elif (verticle_diff > -50) & (verticle_diff < 0):
                twist.linear.x = 0.5
            elif verticle_diff >= 20:
                twist.linear.x = -0.6
            elif (verticle_diff < 20) & (verticle_diff > 5):
                twist.linear.x = -0.3
            else:
                twist.linear.x = 0
            # angular: turn toward the ball depending on horizontal offset
            if angle_diff >= 30:
                twist.angular.z = -1
            elif (angle_diff < 30) & (angle_diff > 10):
                twist.angular.z = -0.5
            elif angle_diff <= -30:
                twist.angular.z = 1
            elif (angle_diff > -30) & (angle_diff < -10):
                twist.angular.z = 0.5
            else:
                twist.angular.z = 0
            pub.publish(twist)
            # Debug overlay (never displayed/published; kept from original).
            copy_img = cv2_img.copy()
            cv2.drawContours(copy_img, contours, -1, (0, 0, 255), 2)
            cv2.circle(copy_img, (rX, rY), 3, (255, 0, 0), -1)
            cv2.putText(copy_img, "centroid", (rX - 25, rY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
    except CvBridgeError as err:
        print(err)

def main():
    """Start the node and wire up the subscriptions."""
    rospy.init_node('ball', anonymous=True)
    rospy.Subscriber('/vrep/laser_switch', Bool, auto_mode_callback)
    rospy.Subscriber('/vrep/image', Image, image_callback)
    rospy.spin()

if __name__ == '__main__':
    main()
2.359375
2
dlkit/json_/authentication/queries.py
UOC/dlkit
2
18026
"""JSON implementations of authentication queries.""" # pylint: disable=no-init # Numerous classes don't require __init__. # pylint: disable=too-many-public-methods,too-few-public-methods # Number of methods are defined in specification # pylint: disable=protected-access # Access to protected methods allowed in package json package scope # pylint: disable=too-many-ancestors # Inheritance defined in specification from .. import utilities from ..osid import queries as osid_queries from ..primitives import Id from ..utilities import get_registry from dlkit.abstract_osid.authentication import queries as abc_authentication_queries from dlkit.abstract_osid.osid import errors class AgentQuery(abc_authentication_queries.AgentQuery, osid_queries.OsidObjectQuery): """This is the query for searching agents. Each method specifies an ``AND`` term while multiple invocations of the same method produce a nested ``OR``. The following example returns agents whose display name begins with "Tom" and whose "login name" is "tom" or "tjcoppet" in an agent record specified by ``companyAgentType``. 
Agent Query query = session.getAgentQuery(); query.matchDisplayName("Tom*", wildcardStringMatchType, true); companyAgentQuery = query.getAgentQueryRecord(companyAgentType); companyAgentQuery.matchLoginName("tom"); companyAgentQuery = query.getAgentQueryRecord(companyAgentType); companyAgentQuery.matchLoginName("tjcoppet"); AgentList agentList = session.getAgentsByQuery(query); """ def __init__(self, runtime): self._namespace = 'authentication.Agent' self._runtime = runtime record_type_data_sets = get_registry('AGENT_RECORD_TYPES', runtime) self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_queries.OsidObjectQuery.__init__(self, runtime) @utilities.arguments_not_none def match_resource_id(self, agency_id, match): """Sets the resource ``Id`` for this query. arg: agency_id (osid.id.Id): a resource ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``agency_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.match_avatar_id self._add_match('resourceId', str(agency_id), match) def clear_resource_id_terms(self): """Clears the resource ``Id`` terms. *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id self._clear_terms('resourceId') resource_id_terms = property(fdel=clear_resource_id_terms) def supports_resource_query(self): """Tests if a ``ResourceQuery`` is available. return: (boolean) - ``true`` if a resource query is available, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def get_resource_query(self): """Gets the query for a resource. Multiple retrievals produce a nested ``OR`` term. 
return: (osid.resource.ResourceQuery) - the resource query raise: Unimplemented - ``supports_resource_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_query()`` is ``true``.* """ raise errors.Unimplemented() resource_query = property(fget=get_resource_query) @utilities.arguments_not_none def match_any_resource(self, match): """Matches agents with any resource. arg: match (boolean): ``true`` if to match agents with a resource, ``false`` to match agents with no resource *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def clear_resource_terms(self): """Clears the resource terms. *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() resource_terms = property(fdel=clear_resource_terms) @utilities.arguments_not_none def match_agency_id(self, agency_id, match): """Sets the agency ``Id`` for this query. arg: agency_id (osid.id.Id): an agency ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for negative match raise: NullArgument - ``agency_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.match_bin_id self._add_match('assignedAgencyIds', str(agency_id), match) def clear_agency_id_terms(self): """Clears the agency ``Id`` terms. *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms self._clear_terms('assignedAgencyIds') agency_id_terms = property(fdel=clear_agency_id_terms) def supports_agency_query(self): """Tests if an ``AgencyQuery`` is available. return: (boolean) - ``true`` if an agency query is available, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def get_agency_query(self): """Gets the query for an agency. Multiple retrievals produce a nested ``OR`` term. 
return: (osid.authentication.AgencyQuery) - the agency query raise: Unimplemented - ``supports_agency_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_agency_query()`` is ``true``.* """ raise errors.Unimplemented() agency_query = property(fget=get_agency_query) def clear_agency_terms(self): """Clears the agency terms. *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.clear_group_terms self._clear_terms('agency') agency_terms = property(fdel=clear_agency_terms) @utilities.arguments_not_none def get_agent_query_record(self, agent_record_type): """Gets the agent query record corresponding to the given ``Agent`` record ``Type``. Multiple retrievals produce a nested ``OR`` term. arg: agent_record_type (osid.type.Type): an agent record type return: (osid.authentication.records.AgentQueryRecord) - the agent query record raise: NullArgument - ``agent_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(agent_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented()
2.640625
3
PyStellar/stellar/Git/service/git_commit_service.py
psgstellar/Stellar
3
18027
import requests
import dateutil.parser
import pytz

from Git.dao.git_dao import GitOwnerRepo


class GitCommitCheckService:
    """Fetch commit history from a public GitHub repository."""
    # (original docstring was Korean: "Github Public 저장소 커밋 기록 가져오기")

    @classmethod
    def git_public_request(cls, request):
        """Request the commit log for ``owner/repo`` via the GitHub API.

        Reads ``owner``, ``repo``, ``token`` (and optionally ``since`` /
        ``until``) from ``request.GET``.  Returns a list of dicts with
        ``username``/``message``/``date``/``url`` keys, an error marker
        list when the API response is not a list, or ``None`` when the
        repository has no commits in the window.
        """
        owner = request.GET['owner']
        repo = request.GET['repo']
        token = request.GET['token']

        # IMPROVEMENT: build the optional time window once instead of the
        # original four near-identical request branches.
        window = ''
        if request.GET.get('since', ''):
            since = request.GET['since']
            window += f'&since={since}'
        if request.GET.get('until', ''):
            until = request.GET['until']
            window += f'&until={until}'
        r = requests.get(
            f'https://api.github.com/repos/{owner}/{repo}/commits?my_client_id={owner}{window}',
            headers={'Authorization': 'token ' + token})
        data = r.json()

        commit_json = None
        # Scratch fields: [username, message, date, url].  Values persist
        # between commits, matching the original behaviour when a field is
        # missing from a commit entry.
        commit_info = [None] * 4
        # BUGFIX(idiom): isinstance/truthiness replace the fragile
        # str(type(data)) == "<class 'list'>" and str(data) != '[]' checks.
        if isinstance(data, list):
            if data:
                local_timezone = pytz.timezone('Asia/Seoul')
                commit_json = []
                for entry in data:
                    for k, v in entry.items():
                        if k == 'commit':
                            commit_info[1] = v['message']
                            # Convert the UTC commit timestamp to Seoul time.
                            commit_info[2] = (dateutil.parser.parse(v['author']['date'])).replace(tzinfo=pytz.utc).astimezone(local_timezone)
                        elif k == 'author':
                            # NOTE(review): GitHub may return author=None for
                            # unlinked commits; this would raise, as it did in
                            # the original -- confirm desired handling.
                            commit_info[0] = v['login']
                        elif k == 'html_url':
                            commit_info[3] = v
                    commit_json.append({'username': commit_info[0], 'message': commit_info[1], 'date': commit_info[2], 'url': commit_info[3]})
        else:
            # Non-list payload means the token or repo info was rejected.
            commit_json = [{'username': owner, 'message': 'Fault Token Info OR Repo Info', 'date': None, 'url': None}]
        return commit_json

    @classmethod
    def git_commit_insert(cls, commit_list):
        """Persist the fetched commit list to the database."""
        # (original docstring was Korean: "깃 커밋 리스트를 디비에 저장")
        list_tuple = [tuple(commit.values()) for commit in commit_list]
        insert_commit = GitOwnerRepo()
        return insert_commit.insert_git_commit(list_tuple)
2.453125
2
apps/jetbrains/jetbrains.py
HansKlokkenspel/knausj_talon
0
18028
"""Talon voice-control bindings for JetBrains IDEs.

Talks to a JetBrains IDE over the voice-command HTTP plugin: each IDE
listens on a fixed local port and authenticates requests with a nonce
file written to the temp or home directory.
"""
import os
import os.path
import requests
import time
from pathlib import Path
from talon import ctrl, ui, Module, Context, actions, clip
import tempfile

# Courtesy of https://github.com/anonfunc/talon-user/blob/master/apps/jetbrains.py

# Commands replayed by `extend_action`.
extendCommands = []

# Each IDE gets its own port, as otherwise you wouldn't be able
# to run two at the same time and switch between them.
# Note that MPS and IntelliJ ultimate will conflict...
port_mapping = {
    "com.google.android.studio": 8652,
    "com.jetbrains.AppCode": 8655,
    "com.jetbrains.CLion": 8657,
    "com.jetbrains.datagrip": 8664,
    "com.jetbrains.goland-EAP": 8659,
    "com.jetbrains.goland": 8659,
    "com.jetbrains.intellij-EAP": 8653,
    "com.jetbrains.intellij.ce": 8654,
    "com.jetbrains.intellij": 8653,
    "com.jetbrains.PhpStorm": 8662,
    "com.jetbrains.pycharm": 8658,
    "com.jetbrains.rider": 8660,
    "com.jetbrains.rubymine": 8661,
    "com.jetbrains.WebStorm": 8663,
    "google-android-studio": 8652,
    "idea64.exe": 8653,
    "IntelliJ IDEA": 8653,
    "jetbrains-appcode": 8655,
    "jetbrains-clion": 8657,
    "jetbrains-datagrip": 8664,
    "jetbrains-goland-eap": 8659,
    "jetbrains-goland": 8659,
    "jetbrains-idea-ce": 8654,
    "jetbrains-idea-eap": 8653,
    "jetbrains-idea": 8653,
    "jetbrains-phpstorm": 8662,
    "jetbrains-pycharm-ce": 8658,
    "jetbrains-pycharm": 8658,
    "jetbrains-rider": 8660,
    "jetbrains-rubymine": 8661,
    "jetbrains-studio": 8652,
    "jetbrains-webstorm": 8663,
    "PyCharm": 8658,
    "pycharm64.exe": 8658,
    "webstorm64.exe": 8663,
}

# Spoken verb -> IDE command(s) appended after a selection is made.
select_verbs_map = {
    "clear": ["action EditorBackSpace"],
    "collapse": ["action CollapseRegion"],
    "comment": ["action CommentByLineComment"],
    "copy": ["action EditorCopy"],
    "cut": ["action EditorCut"],
    "drag down": ["action MoveLineDown"],
    "drag up": ["action MoveLineUp"],
    "expand": ["action ExpandRegion"],
    "indent": ["action EditorIndentLineOrSelection"],
    "refactor": ["action Refactorings.QuickListPopupAction"],
    "rename": ["action RenameElement"],
    "replace": ["action EditorPaste"],
    "select": [],
    "unindent": ["action EditorUnindentSelection"],
}

# Spoken verb -> IDE command(s) appended after a movement.
movement_verbs_map = {
    "fix": ["action ShowIntentionActions"],
    "go": [],
    "paste": ["action EditorPaste"],
}


def set_extend(*commands):
    """Return a callback that records *commands* for `extend_action`."""
    def set_inner(_):
        global extendCommands
        extendCommands = commands
    return set_inner


def _get_nonce(port, file_prefix):
    """Read the IDE plugin's auth nonce from tmp or home; None if absent."""
    file_name = file_prefix + str(port)
    try:
        with open(os.path.join(tempfile.gettempdir(), file_name), "r") as fh:
            return fh.read()
    except FileNotFoundError:
        try:
            home = str(Path.home())
            with open(os.path.join(home, file_name), "r") as fh:
                return fh.read()
        except FileNotFoundError:
            print(f"Could not find {file_name} in tmp or home")
            return None
    except IOError as e:
        print(e)
        return None


def send_idea_command(cmd):
    """Send one command string to the active IDE over its HTTP port."""
    print("Sending {}".format(cmd))
    active_app = ui.active_app()
    bundle = active_app.bundle or active_app.name
    port = port_mapping.get(bundle, None)
    nonce = _get_nonce(port, ".vcidea_") or _get_nonce(port, "vcidea_")
    print(f"sending {bundle} {port} {nonce}")
    if port and nonce:
        response = requests.get(
            "http://localhost:{}/{}/{}".format(port, nonce, cmd),
            timeout=(0.05, 3.05),
        )
        response.raise_for_status()
        return response.text


def get_idea_location():
    """Return the caret position reported by the IDE as [line, column]."""
    return send_idea_command("location").split()


def idea_commands(commands):
    """Run a comma-separated command string, remembering it for `extend`."""
    command_list = commands.split(",")
    print("executing jetbrains", commands)
    global extendCommands
    extendCommands = command_list
    for cmd in command_list:
        if cmd:
            send_idea_command(cmd.strip())
            time.sleep(0.1)


ctx = Context()
mod = Module()
mod.list("select_verbs", desc="Verbs for selecting in the IDE")
mod.list("movement_verbs", desc="Verbs for navigating the IDE")


@mod.action_class
class Actions:
    def idea(commands: str):
        """Send a command to Jetbrains product"""
        idea_commands(commands)

    def idea_select(select_verb: str, commands: str):
        """Do a select command, then the specified commands"""
        command_list = ",".join(commands.split(",") + select_verbs_map[select_verb])
        print(command_list)
        idea_commands(command_list)

    def idea_movement(movement_verb: str, commands: str):
        """Do a select movement, then the specified commands"""
        command_list = ",".join(commands.split(",") + movement_verbs_map[movement_verb])
        print(command_list)
        idea_commands(command_list)

    def idea_grab(times: int):
        """Copies specified number of words to the left"""
        old_clip = clip.get()
        try:
            original_line, original_column = get_idea_location()
            for _ in range(times):
                send_idea_command("action EditorSelectWord")
            send_idea_command("action EditorCopy")
            send_idea_command("goto {} {}".format(original_line, original_column))
            send_idea_command("action EditorPaste")
        finally:
            # Always restore the user's clipboard, even if the IDE call fails.
            clip.set(old_clip)
        global extendCommands
        extendCommands = []

    def extend_action(number: str):
        """Repeat previous actions up to number of times"""
        global extendCommands
        count = max(int(number), 1)
        for _ in range(count):
            for cmd in extendCommands:
                send_idea_command(cmd)

    def set_extended_actions(commands: str):
        """Adds specified commands to the list of commands to repeat"""
        # BUG FIX: the original called set_extend(...) and discarded the
        # returned closure, so extendCommands was never updated.
        global extendCommands
        extendCommands = commands.split(",")


ctx.matches = r"""
app: /jetbrains/
app: IntelliJ IDEA
app: idea64.exe
app: PyCharm
app: PyCharm64.exe
app: pycharm64.exe
app: webstorm64.exe
"""


@ctx.action_class("user")
class user_actions:
    def tab_jump(number: int):
        if number < 10:
            actions.user.idea("action GoToTab{}".format(number))

    def perform_selection_action(verb: str):
        """Performs selection action defined for context"""
        # BUG FIX: map values are command strings; the original invoked
        # them as callables (act()), which raised TypeError.
        for act in select_verbs_map[verb]:
            send_idea_command(act)

    def perform_movement_action(verb: str):
        """Performs movement action defined for context"""
        # BUG FIX: same as perform_selection_action — send the command
        # string instead of calling it.
        for act in movement_verbs_map[verb]:
            send_idea_command(act)

    def select_next_occurrence(verbs: str, text: str):
        actions.user.idea_select(verbs, "find next {}".format(text))

    def select_previous_occurrence(verbs: str, text: str):
        actions.user.idea_select(verbs, "find prev {}".format(text))

    def move_next_occurrence(verbs: str, text: str):
        actions.user.idea_movement(
            verbs, "find next {}, action EditorRight".format(text)
        )

    def move_previous_occurrence(verbs: str, text: str):
        # Consistency fix: this is a movement, so use idea_movement like
        # move_next_occurrence (the original used idea_select).
        actions.user.idea_movement(
            verbs, "find prev {}, action EditorRight".format(text)
        )

    def go_to_line(verb: str, line: int):
        actions.user.idea_movement(verb, "goto {} 0".format(line))

    def go_to_line_end(verb: str, line: int):
        actions.user.idea_movement(verb, "goto {} 9999".format(line))

    def select_word(verb: str):
        actions.user.idea_select(verb, "action EditorSelectWord")

    def select_whole_line(verb: str, line: int):
        actions.user.idea_select(
            verb, "goto {} 0, action EditorSelectLine".format(line)
        )

    def select_current_line(verb: str):
        actions.user.idea_select(
            verb, "action EditorLineStart, action EditorLineEndWithSelection"
        )

    def select_line(verb: str, line: int):
        actions.user.idea_select(
            verb,
            "goto {} 0, action EditorLineStart, action EditorLineEndWithSelection".format(
                line
            ),
        )

    def select_until_line(verb: str, line: int):
        actions.user.idea_select(verb, "extend {}".format(line))

    def select_range(verb: str, line_start: int, line_end: int):
        actions.user.idea_select(verb, "range {} {}".format(line_start, line_end))

    def select_way_left(verb: str):
        actions.user.idea_select(verb, "action EditorLineStartWithSelection")

    def select_way_right(verb: str):
        actions.user.idea_select(verb, "action EditorLineEndWithSelection")

    def select_way_up(verb: str):
        actions.user.idea_select(verb, "action EditorTextStartWithSelection")

    def select_way_down(verb: str):
        actions.user.idea_select(verb, "action EditorTextEndWithSelection")

    def select_camel_left(verb: str):
        actions.user.idea_select(
            verb, "action EditorPreviousWordInDifferentHumpsModeWithSelection"
        )

    def select_camel_right(verb: str):
        actions.user.idea_select(
            verb, "action EditorNextWordInDifferentHumpsModeWithSelection"
        )

    def select_all(verb: str):
        actions.user.idea_select(verb, "action $SelectAll")

    def select_left(verb: str):
        actions.user.idea_select(verb, "action EditorLeftWithSelection")

    def select_right(verb: str):
        actions.user.idea_select(verb, "action EditorRightWithSelection")

    def select_up(verb: str):
        actions.user.idea_select(verb, "action EditorUpWithSelection")

    def select_down(verb: str):
        actions.user.idea_select(verb, "action EditorDownWithSelection")

    def select_word_left(verb: str):
        actions.user.idea_select(verb, "action EditorPreviousWordWithSelection")

    def select_word_right(verb: str):
        actions.user.idea_select(verb, "action EditorNextWordWithSelection")

    def move_camel_left(verb: str):
        actions.user.idea_movement(
            verb, "action EditorPreviousWordInDifferentHumpsMode"
        )

    def move_camel_right(verb: str):
        actions.user.idea_movement(verb, "action EditorNextWordInDifferentHumpsMode")

    def line_clone(line: int):
        actions.user.idea("clone {}".format(line))


# BUG FIX: the list names must match the mod.list declarations above
# ("select_verbs"/"movement_verbs"); the original registered
# "user.selection_verbs"/"user.navigation_verbs", which don't exist.
ctx.lists["user.select_verbs"] = select_verbs_map.keys()
ctx.lists["user.movement_verbs"] = movement_verbs_map.keys()
2.21875
2
be/model/db_conn.py
CharlesDDDD/bookstore
0
18029
from be.table.user import User
from be.table.user_store import User_Store
from be.table.store import Store


class DBConn:
    """Existence checks against the bookstore tables."""

    def user_id_exist(self, user_id):
        """Return True if a user row with *user_id* exists."""
        # first() yields the row or None; a simple identity test replaces
        # the original four-line if/else.
        return User.query.filter(User.user_id == user_id).first() is not None

    def book_id_exist(self, store_id, book_id):
        """Return True if *store_id* stocks a book with *book_id*."""
        row = Store.query.filter(Store.store_id == store_id,
                                 Store.book_id == book_id).first()
        return row is not None

    def store_id_exist(self, store_id):
        """Return True if a store with *store_id* has an owner record."""
        return User_Store.query.filter(
            User_Store.store_id == store_id).first() is not None
2.5625
3
lib/roi_data_rel/fast_rcnn_rel.py
champon1020/TRACE
34
18030
# Adapted by <NAME>, 2019 # # Based on Detectron.pytorch/lib/roi_data/fast_rcnn.py # Original license text: # -------------------------------------------------------- # Copyright (c) 2017-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """Construct minibatches for Fast R-CNN training. Handles the minibatch blobs that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc. are handled by their respecitive roi_data modules. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import numpy.random as npr import logging from core.config import cfg import utils_rel.boxes_rel as box_utils_rel import utils.blob as blob_utils import utils.fpn as fpn_utils logger = logging.getLogger(__name__) def add_rel_blobs(blobs, im_scales, roidb): """Add blobs needed for training Fast R-CNN style models.""" # Sample training RoIs from each image and append them to the blob lists for im_i, entry in enumerate(roidb): frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i) for k, v in frcn_blobs.items(): blobs[k].append(v) # Concat the training blob lists into tensors for k, v in blobs.items(): if isinstance(v, list) and len(v) > 0: blobs[k] = np.concatenate(v) if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS: _add_rel_multilevel_rois(blobs) return True def _sample_pairs(roidb, im_scale, batch_idx): """Generate a random sample of RoIs comprising foreground and background examples. 
""" fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION) # need much more pairs since it's quadratic max_pair_overlaps = roidb['max_pair_overlaps'] if cfg.MODEL.MULTI_RELATION: prd_gt_overlaps = roidb['prd_gt_overlaps'].toarray() prd_class_num = prd_gt_overlaps.shape[1] gt_pair_inds, gt_pair_class = np.where(prd_gt_overlaps > 1.0 - 1e-4) fg_pair_inds, fg_pair_class = np.where((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) & (prd_gt_overlaps <= 1.0 - 1e-4)) hash_gt_pair_inds = prd_class_num * gt_pair_inds + gt_pair_class hash_fg_pair_inds = prd_class_num * fg_pair_inds + fg_pair_class fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, hash_gt_pair_inds.size + hash_fg_pair_inds.size) if hash_fg_pair_inds.size > 0 and fg_pairs_per_this_image > hash_gt_pair_inds.size: hash_fg_pair_inds = npr.choice( hash_fg_pair_inds, size=(fg_pairs_per_this_image - hash_gt_pair_inds.size), replace=False) hash_fg_pair_inds = np.append(hash_fg_pair_inds, hash_gt_pair_inds) elif fg_pairs_per_this_image <= hash_gt_pair_inds.size: hash_gt_pair_inds = npr.choice( hash_gt_pair_inds, size=fg_pairs_per_this_image, replace=False) hash_fg_pair_inds = hash_gt_pair_inds else: hash_fg_pair_inds = hash_gt_pair_inds blob_dict = {} if cfg.MODEL.USE_BG: bg_pair_inds, bg_pair_class_inds = np.where((prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI)) hash_bg_pair_inds = prd_class_num * bg_pair_inds + bg_pair_class_inds bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, hash_bg_pair_inds.size) if hash_bg_pair_inds.size > 0: hash_bg_pair_inds = npr.choice( hash_bg_pair_inds, size=bg_pairs_per_this_image, replace=False) hash_keep_pair_inds = np.append(hash_fg_pair_inds, hash_bg_pair_inds) multi_prd_labels = np.zeros(hash_keep_pair_inds.size, dtype=np.int32) multi_prd_labels[:hash_fg_pair_inds.size] = 1.0 #fg_multi_prd_labels keep_pair_inds = 
np.append(hash_fg_pair_inds // prd_class_num, hash_bg_pair_inds // prd_class_num) keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num, hash_bg_pair_inds % prd_class_num) else: multi_prd_labels = np.ones(fg_multi_prd_labels.size, dtype=np.int32) #fg_multi_prd_labels keep_pair_inds = np.append(hash_fg_pair_inds // prd_class_num) keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num) blob_dict['multi_prd_labels_int32'] = multi_prd_labels.astype(np.int32, copy=False) blob_dict['keep_pair_class_int32'] = keep_pair_class.astype(np.int32, copy=False) blob_dict['fg_size'] = np.array([hash_fg_pair_inds.size], dtype=np.int32) else: gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0] fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps <= 1.0 - 1e-4))[0] fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size) # Sample foreground regions without replacement if fg_pair_inds.size > 0 and fg_pairs_per_this_image > gt_pair_inds.size: fg_pair_inds = npr.choice( fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False) fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds) elif fg_pairs_per_this_image <= gt_pair_inds.size: gt_pair_inds = npr.choice( gt_pair_inds, size=fg_pairs_per_this_image, replace=False) fg_pair_inds = gt_pair_inds else: fg_pair_inds = gt_pair_inds # Label is the class each RoI has max overlap with fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds] blob_dict = dict( fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False)) if cfg.MODEL.USE_BG: bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size) # Sample foreground regions without replacement if 
bg_pair_inds.size > 0: bg_pair_inds = npr.choice( bg_pair_inds, size=bg_pairs_per_this_image, replace=False) # logger.info('{} : {}'.format(fg_pair_inds.size, bg_pair_inds.size)) keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds) all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32) all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1 else: keep_pair_inds = fg_pair_inds all_prd_labels = fg_prd_labels blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False) blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds] sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds] sampled_all_boxes = roidb['all_boxes'] det_labels = roidb['det_labels'] sampled_sbj_inds = roidb['sbj_id'][keep_pair_inds] sampled_obj_inds = roidb['obj_id'][keep_pair_inds] # Scale rois and format as (batch_idx, x1, y1, x2, y2) sampled_sbj_rois = sampled_sbj_boxes * im_scale sampled_obj_rois = sampled_obj_boxes * im_scale sampled_all_rois = sampled_all_boxes * im_scale repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1)) all_boxes_repeated_batch_idx = batch_idx * blob_utils.ones((sampled_all_boxes.shape[0], 1)) sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois)) sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois)) sampled_all_rois = np.hstack((all_boxes_repeated_batch_idx, sampled_all_rois)) int_repeated_batch_idx = batch_idx * np.ones((keep_pair_inds.shape[0], 1), dtype=np.int) blob_dict['sbj_inds'] = np.hstack((repeated_batch_idx, sampled_sbj_inds.reshape(-1, 1))) blob_dict['obj_inds'] = np.hstack((repeated_batch_idx, sampled_obj_inds.reshape(-1, 1))) blob_dict['sbj_rois'] = sampled_sbj_rois blob_dict['obj_rois'] = sampled_obj_rois blob_dict['det_rois'] = sampled_all_rois blob_dict['det_labels'] = det_labels sampled_rel_rois = 
box_utils_rel.rois_union(sampled_sbj_rois, sampled_obj_rois) blob_dict['rel_rois'] = sampled_rel_rois if cfg.MODEL.USE_SPATIAL_FEAT: sampled_spt_feat = box_utils_rel.get_spt_features( sampled_sbj_boxes, sampled_obj_boxes, roidb['width'], roidb['height']) blob_dict['spt_feat'] = sampled_spt_feat if cfg.MODEL.USE_FREQ_BIAS: sbj_labels = roidb['max_sbj_classes'][keep_pair_inds] obj_labels = roidb['max_obj_classes'][keep_pair_inds] blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False) blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False) if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS: nodes_per_image = cfg.MODEL.NODE_SAMPLE_SIZE max_sbj_overlaps = roidb['max_sbj_overlaps'] max_obj_overlaps = roidb['max_obj_overlaps'] # sbj # Here a naturally existing assumption is, each positive sbj should have at least one positive obj sbj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0] sbj_pos_obj_pos_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0] sbj_pos_obj_neg_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_obj_overlaps < cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0] if sbj_pos_pair_pos_inds.size > 0: sbj_pos_pair_pos_inds = npr.choice( sbj_pos_pair_pos_inds, size=int(min(nodes_per_image, sbj_pos_pair_pos_inds.size)), replace=False) if sbj_pos_obj_pos_pair_neg_inds.size > 0: sbj_pos_obj_pos_pair_neg_inds = npr.choice( sbj_pos_obj_pos_pair_neg_inds, size=int(min(nodes_per_image, sbj_pos_obj_pos_pair_neg_inds.size)), replace=False) sbj_pos_pair_neg_inds = sbj_pos_obj_pos_pair_neg_inds if nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size > 0 and sbj_pos_obj_neg_pair_neg_inds.size > 0: sbj_pos_obj_neg_pair_neg_inds = npr.choice( sbj_pos_obj_neg_pair_neg_inds, 
size=int(min(nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size, sbj_pos_obj_neg_pair_neg_inds.size)), replace=False) sbj_pos_pair_neg_inds = np.append(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds) sbj_pos_inds = np.append(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds) binary_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32) binary_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = 1 blob_dict['binary_labels_sbj_pos_int32'] = binary_labels_sbj_pos.astype(np.int32, copy=False) prd_pos_labels_sbj_pos = roidb['max_prd_classes'][sbj_pos_pair_pos_inds] prd_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32) prd_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = prd_pos_labels_sbj_pos + 1 blob_dict['prd_labels_sbj_pos_int32'] = prd_labels_sbj_pos.astype(np.int32, copy=False) sbj_labels_sbj_pos = roidb['max_sbj_classes'][sbj_pos_inds] + 1 # 1. set all obj labels > 0 obj_labels_sbj_pos = roidb['max_obj_classes'][sbj_pos_inds] + 1 # 2. find those negative obj max_obj_overlaps_sbj_pos = roidb['max_obj_overlaps'][sbj_pos_inds] obj_neg_inds_sbj_pos = np.where(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)[0] obj_labels_sbj_pos[obj_neg_inds_sbj_pos] = 0 blob_dict['sbj_labels_sbj_pos_int32'] = sbj_labels_sbj_pos.astype(np.int32, copy=False) blob_dict['obj_labels_sbj_pos_int32'] = obj_labels_sbj_pos.astype(np.int32, copy=False) # this is for freq bias in RelDN blob_dict['sbj_labels_sbj_pos_fg_int32'] = roidb['max_sbj_classes'][sbj_pos_inds].astype(np.int32, copy=False) blob_dict['obj_labels_sbj_pos_fg_int32'] = roidb['max_obj_classes'][sbj_pos_inds].astype(np.int32, copy=False) sampled_sbj_boxes_sbj_pos = roidb['sbj_boxes'][sbj_pos_inds] sampled_obj_boxes_sbj_pos = roidb['obj_boxes'][sbj_pos_inds] # Scale rois and format as (batch_idx, x1, y1, x2, y2) sampled_sbj_rois_sbj_pos = sampled_sbj_boxes_sbj_pos * im_scale sampled_obj_rois_sbj_pos = sampled_obj_boxes_sbj_pos * im_scale repeated_batch_idx = batch_idx * blob_utils.ones((sbj_pos_inds.shape[0], 1)) 
sampled_sbj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_sbj_pos)) sampled_obj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_sbj_pos)) blob_dict['sbj_rois_sbj_pos'] = sampled_sbj_rois_sbj_pos blob_dict['obj_rois_sbj_pos'] = sampled_obj_rois_sbj_pos sampled_rel_rois_sbj_pos = box_utils_rel.rois_union(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos) blob_dict['rel_rois_sbj_pos'] = sampled_rel_rois_sbj_pos _, inds_unique_sbj_pos, inds_reverse_sbj_pos = np.unique( sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True, axis=0) assert inds_reverse_sbj_pos.shape[0] == sampled_sbj_rois_sbj_pos.shape[0] blob_dict['inds_unique_sbj_pos'] = inds_unique_sbj_pos blob_dict['inds_reverse_sbj_pos'] = inds_reverse_sbj_pos if cfg.MODEL.USE_SPATIAL_FEAT: sampled_spt_feat_sbj_pos = box_utils_rel.get_spt_features( sampled_sbj_boxes_sbj_pos, sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height']) blob_dict['spt_feat_sbj_pos'] = sampled_spt_feat_sbj_pos # obj # Here a naturally existing assumption is, each positive obj should have at least one positive sbj obj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0] obj_pos_sbj_pos_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0] obj_pos_sbj_neg_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_sbj_overlaps < cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0] if obj_pos_pair_pos_inds.size > 0: obj_pos_pair_pos_inds = npr.choice( obj_pos_pair_pos_inds, size=int(min(nodes_per_image, obj_pos_pair_pos_inds.size)), replace=False) if obj_pos_sbj_pos_pair_neg_inds.size > 0: obj_pos_sbj_pos_pair_neg_inds = npr.choice( obj_pos_sbj_pos_pair_neg_inds, size=int(min(nodes_per_image, obj_pos_sbj_pos_pair_neg_inds.size)), replace=False) obj_pos_pair_neg_inds = obj_pos_sbj_pos_pair_neg_inds if nodes_per_image - 
obj_pos_sbj_pos_pair_neg_inds.size > 0 and obj_pos_sbj_neg_pair_neg_inds.size: obj_pos_sbj_neg_pair_neg_inds = npr.choice( obj_pos_sbj_neg_pair_neg_inds, size=int(min(nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size, obj_pos_sbj_neg_pair_neg_inds.size)), replace=False) obj_pos_pair_neg_inds = np.append(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds) obj_pos_inds = np.append(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds) binary_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32) binary_labels_obj_pos[:obj_pos_pair_pos_inds.size] = 1 blob_dict['binary_labels_obj_pos_int32'] = binary_labels_obj_pos.astype(np.int32, copy=False) prd_pos_labels_obj_pos = roidb['max_prd_classes'][obj_pos_pair_pos_inds] prd_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32) prd_labels_obj_pos[:obj_pos_pair_pos_inds.size] = prd_pos_labels_obj_pos + 1 blob_dict['prd_labels_obj_pos_int32'] = prd_labels_obj_pos.astype(np.int32, copy=False) obj_labels_obj_pos = roidb['max_obj_classes'][obj_pos_inds] + 1 # 1. set all sbj labels > 0 sbj_labels_obj_pos = roidb['max_sbj_classes'][obj_pos_inds] + 1 # 2. 
find those negative sbj max_sbj_overlaps_obj_pos = roidb['max_sbj_overlaps'][obj_pos_inds] sbj_neg_inds_obj_pos = np.where(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)[0] sbj_labels_obj_pos[sbj_neg_inds_obj_pos] = 0 blob_dict['sbj_labels_obj_pos_int32'] = sbj_labels_obj_pos.astype(np.int32, copy=False) blob_dict['obj_labels_obj_pos_int32'] = obj_labels_obj_pos.astype(np.int32, copy=False) # this is for freq bias in RelDN blob_dict['sbj_labels_obj_pos_fg_int32'] = roidb['max_sbj_classes'][obj_pos_inds].astype(np.int32, copy=False) blob_dict['obj_labels_obj_pos_fg_int32'] = roidb['max_obj_classes'][obj_pos_inds].astype(np.int32, copy=False) sampled_sbj_boxes_obj_pos = roidb['sbj_boxes'][obj_pos_inds] sampled_obj_boxes_obj_pos = roidb['obj_boxes'][obj_pos_inds] # Scale rois and format as (batch_idx, x1, y1, x2, y2) sampled_sbj_rois_obj_pos = sampled_sbj_boxes_obj_pos * im_scale sampled_obj_rois_obj_pos = sampled_obj_boxes_obj_pos * im_scale repeated_batch_idx = batch_idx * blob_utils.ones((obj_pos_inds.shape[0], 1)) sampled_sbj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_obj_pos)) sampled_obj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_obj_pos)) blob_dict['sbj_rois_obj_pos'] = sampled_sbj_rois_obj_pos blob_dict['obj_rois_obj_pos'] = sampled_obj_rois_obj_pos sampled_rel_rois_obj_pos = box_utils_rel.rois_union(sampled_sbj_rois_obj_pos, sampled_obj_rois_obj_pos) blob_dict['rel_rois_obj_pos'] = sampled_rel_rois_obj_pos _, inds_unique_obj_pos, inds_reverse_obj_pos = np.unique( sampled_obj_rois_obj_pos, return_index=True, return_inverse=True, axis=0) assert inds_reverse_obj_pos.shape[0] == sampled_obj_rois_obj_pos.shape[0] blob_dict['inds_unique_obj_pos'] = inds_unique_obj_pos blob_dict['inds_reverse_obj_pos'] = inds_reverse_obj_pos if cfg.MODEL.USE_SPATIAL_FEAT: sampled_spt_feat_obj_pos = box_utils_rel.get_spt_features( sampled_sbj_boxes_obj_pos, sampled_obj_boxes_obj_pos, roidb['width'], roidb['height']) 
blob_dict['spt_feat_obj_pos'] = sampled_spt_feat_obj_pos return blob_dict def _add_rel_multilevel_rois(blobs): """By default training RoIs are added for a single feature map level only. When using FPN, the RoIs must be distributed over different FPN levels according the level assignment heuristic (see: modeling.FPN. map_rois_to_fpn_levels). """ lvl_min = cfg.FPN.ROI_MIN_LEVEL lvl_max = cfg.FPN.ROI_MAX_LEVEL def _distribute_rois_over_fpn_levels(rois_blob_names): """Distribute rois over the different FPN levels.""" # Get target level for each roi # Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take # the box coordinates from columns 1:5 lowest_target_lvls = None for rois_blob_name in rois_blob_names: target_lvls = fpn_utils.map_rois_to_fpn_levels( blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max) if lowest_target_lvls is None: lowest_target_lvls = target_lvls else: lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls) for rois_blob_name in rois_blob_names: # Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl> fpn_utils.add_multilevel_roi_blobs( blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min, lvl_max) _distribute_rois_over_fpn_levels(['sbj_rois']) _distribute_rois_over_fpn_levels(['obj_rois']) _distribute_rois_over_fpn_levels(['rel_rois']) _distribute_rois_over_fpn_levels(['det_rois']) if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS: _distribute_rois_over_fpn_levels(['sbj_rois_sbj_pos']) _distribute_rois_over_fpn_levels(['obj_rois_sbj_pos']) _distribute_rois_over_fpn_levels(['rel_rois_sbj_pos']) _distribute_rois_over_fpn_levels(['sbj_rois_obj_pos']) _distribute_rois_over_fpn_levels(['obj_rois_obj_pos']) _distribute_rois_over_fpn_levels(['rel_rois_obj_pos'])
1.734375
2
src/mysql/tables.py
katerina7479/sooty-shearwater
0
18031
import time
import re

from src.core.tables import Table, MigrationTable
from src.core.constraints import Index


class MysqlTable(Table):
    """MySQL-specific implementation of the generic ``Table`` base class.

    All SQL text is produced by ``self.commands`` (a dialect helper supplied
    by the base class) and run through ``self.execute``.
    """

    @staticmethod
    def _join_cols(cols):
        '''Join and backtick-escape a list of column names.'''
        return ', '.join(['`%s`' % i for i in cols])

    @staticmethod
    def _join_conditionals(row_dict):
        '''Create a joined conditional statement for updates.

        Returns an escaped string of `key`=val, `key`='val' for the
        dictionary. Numbers are emitted bare, strings single-quoted.
        '''
        equalities = []
        for key, val in row_dict.items():
            temp = '`{}`='.format(key)
            if isinstance(val, (int, float)):
                temp += '{}'.format(val)
            elif isinstance(val, str):
                temp += '\'{}\''.format(val)
            else:
                raise TypeError('Value %s, type %s not recognised as a number or string' % (val, type(val)))
            equalities.append(temp)
        return ', '.join(equalities)

    @staticmethod
    def _qualify(table, cols):
        '''Qualify each column with its table, join and escape the list.'''
        return ', '.join(['`{}`.`{}`'.format(table, c) for c in cols])

    @staticmethod
    def _equals(cols, new_table, new_cols):
        '''Qualify, join and equate: `col`=`new_table`.`new_col`, ...'''
        return ', '.join('`{}`=`{}`.`{}`'.format(cols[i], new_table, new_cols[i])
                         for i in range(len(cols)))

    def insert_row(self, row_dict):
        """Add a row to the table; return the new row id."""
        sql = self.commands.insert_row(
            self.name,
            self._join_cols(row_dict.keys()),
            self._join_values(row_dict.values())
        )
        self.execute(sql)
        return self.db.last_row

    def get_column_definition(self, column_name):
        '''Get the sql column definition

        Selects the column type, and YES or NO from the column,
        IS NULLABLE. That's enough information to re-create the column.
        '''
        sql = self.commands.column_definition(self.db.name, self.name, column_name)
        ans = self.execute(sql)[0]
        if ans[1] == 'NO':
            return '{} NOT NULL'.format(ans[0])
        else:
            return ans[0]

    def rename_column(self, old_name, new_name):
        '''Rename a column, preserving its existing type definition.'''
        self.execute(self.commands.rename_column(
            self.name, old_name, new_name,
            self.get_column_definition(old_name))
        )

    @property
    def create_statement(self):
        """Get the table create statement, whitespace-normalised."""
        query = self.commands.get_table_create_statement(self.name)
        if self.db.table_exists(self.name):
            statement = self.execute(query)[0][1]
            # Collapse runs of whitespace so statements compare cleanly
            # (raw string avoids the invalid-escape lint on '\s').
            statement = re.sub(r'\s+', ' ', statement)
            return statement
        raise ValueError('Table does not exist, no create statement')

    @property
    def indexes(self):
        """Return list of Index objects for this table."""
        indexes = self.execute(self.commands.get_indexes(self.name))
        return [Index(tup[0], tup[2], tup[1], tup[4]) for tup in indexes]


class MySqlMigrationTable(MysqlTable, MigrationTable):
    """Shadow table used for online schema migrations (triggers + rename)."""

    def create_from_source(self):
        """Create new table like source_table."""
        create_statement = self.source.create_statement.replace(
            'CREATE TABLE `{}`'.format(self.source.name),
            'CREATE TABLE `{}`'
        )
        self.create_from_statement(create_statement)

    def _trigger_name(self, method_type):
        'Create a trigger name, truncated to the backend name-length limit.'
        name = 'migration_trigger_{}_{}'.format(method_type, self.source.name)
        return name[:self.db.config['MAX_LENGTH_NAME']]

    def create_insert_trigger(self):
        '''Set insert Triggers.

        'NEW' and 'OLD' are mysql references
        see https://dev.mysql.com/doc/refman/5.0/en/create-trigger.html
        '''
        sql = self.commands.insert_trigger(
            self._trigger_name('insert'),
            self.source.name,
            self.name,
            self._join_cols(self.intersection.dest_columns),
            self._qualify('NEW', self.intersection.origin_columns))
        # BUG FIX: removed leftover debugging code (`import pdb;
        # pdb.set_trace(); print(sql)`) that halted every caller here.
        self.execute(sql)

    def create_delete_trigger(self):
        '''Set delete triggers

        'NEW' and 'OLD' are mysql references
        see https://dev.mysql.com/doc/refman/5.0/en/create-trigger.html
        '''
        sql = self.commands.delete_trigger(
            self._trigger_name('delete'),
            self.source.name,
            self.name,
            self.primary_key_column)
        self.execute(sql)

    def create_update_trigger(self):
        '''Set update triggers

        'NEW' and 'OLD' are mysql references
        see https://dev.mysql.com/doc/refman/5.0/en/create-trigger.html
        '''
        sql = self.commands.update_trigger(
            self._trigger_name('update'),
            self.source.name,
            self.name,
            self._equals(self.intersection.dest_columns, 'NEW',
                         self.intersection.origin_columns),
            self.primary_key_column
        )
        self.execute(sql)

    def rename_tables(self):
        '''Atomically swap source/migration tables; return True on success.'''
        self.delete_triggers()
        retries = 0
        source_name, archive_name, migrate_name = self.source.name, self.source.archive_name, self.name
        while True:
            try:
                self.execute(self.commands.rename_table(source_name, archive_name, migrate_name))
                break
            except Exception as e:
                retries += 1
                if retries > self.db.config['MAX_RENAME_RETRIES']:
                    self.create_triggers()
                    return False
                # TODO: make sure this is a Lock wait timeout error before retrying
                print('Rename retry %d, error: %s' % (retries, e))
                # BUG FIX: `self.db.donfig` was a typo for `self.db.config`,
                # raising AttributeError on every retry path.
                time.sleep(self.db.config['RETRY_SLEEP_TIME'])
        # NOTE(review): `self.archive_name` vs the `self.source.archive_name`
        # captured above — confirm MigrationTable defines its own archive_name.
        self.name, self.source.name = self.source.name, self.archive_name
        print("Rename complete!")
        return True
2.671875
3
CvZoneCompetition.py
MoranLeven/CvZomeCompetition
0
18032
import cv2
import numpy as np
from time import sleep
import random

length_min = 80   # Minimum width of a valid bounding rectangle
height_min = 80   # Minimum height of a valid bounding rectangle
offset = 6        # Pixel tolerance around the counting line
pos_linha = 550   # y-coordinate of the counting line
delay = 60        # FPS of video
detect = []       # Centres of candidate vehicles awaiting line-crossing
cars = 0          # Running vehicle count


def paste_center(x, y, w, h):
    """Return the integer centre point of the bounding box (x, y, w, h)."""
    x1 = int(w / 2)
    y1 = int(h / 2)
    cx = x + x1
    cy = y + y1
    return cx, cy


cap = cv2.VideoCapture("DRONE-SURVEILLANCE-CONTEST-VIDEO.mp4")
cap.set(3, 500)  # frame width
cap.set(4, 500)  # frame height
subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame1 = cap.read()
    time = float(1 / delay)
    sleep(time)

    # Foreground extraction: grayscale -> blur -> background subtraction
    gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3, 3), 10)
    img_sub = subtractor.apply(blur)
    dilate = cv2.dilate(img_sub, np.ones((5, 5)))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    dilated = cv2.morphologyEx(dilate, cv2.MORPH_CLOSE, kernel)
    dilated = cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, kernel)
    contour, h = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.line(frame1, (25, pos_linha), (1900, pos_linha), (255, 0, 0), 3)

    for (i, c) in enumerate(contour):
        (x, y, w, h) = cv2.boundingRect(c)
        validate_contour = (w >= length_min) and (h >= height_min)
        if not validate_contour:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        center = paste_center(x, y, w, h)
        detect.append(center)
        # BUG FIX: colour was (0, 0.255) — a typo for red (0, 0, 255) in BGR
        cv2.circle(frame1, center, 4, (0, 0, 255), -1)
        cv2.putText(frame1, str(random.randint(1, 200)), (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    # BUG FIX: iterate over a copy — removing from `detect` while iterating
    # it skips the element after each removal, undercounting vehicles.
    for (x, y) in detect[:]:
        if y < (pos_linha + offset) and y > (pos_linha - offset):
            cars += 1
            cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (0, 127, 255), 3)
            cv2.putText(frame1, str(random.randint(1, 200)), (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
            detect.remove((x, y))
            print("car is detected:" + str(cars))

    cv2.putText(frame1, "Moran 11", (850, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5)
    cv2.putText(frame1, str(cars), (1700, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5)
    cv2.imshow("Surveillance Video", frame1)

    if cv2.waitKey(10) == 27:  # ESC quits
        break

cv2.destroyAllWindows()
cap.release()
2.390625
2
setup.py
jiinus/django-db-prefix
11
18033
<gh_stars>10-100 # -*- coding: utf-8 -*- import os.path from distutils.core import setup def read(fname): with open(os.path.join(os.path.dirname(__file__), fname)) as f: return f.read() setup( name='django-db-prefix', version='1.0', keywords='django database', author=u'<NAME> <<EMAIL>>, <NAME> <<EMAIL>>', packages=['django_db_prefix'], url='https://github.com/denilsonsa/django-db-prefix', license='BSD licence, see LICENCE', description='Allow specification of a global, per-app or per-model database table name prefix.', long_description=read('README.md'), requires=[ 'Django', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Topic :: Database', ] )
1.53125
2
pyleecan/Methods/Slot/HoleUD/build_geometry.py
mjfwest/pyleecan
1
18034
# -*- coding: utf-8 -*- from numpy import arcsin, arctan, cos, exp, array, angle, pi from numpy import imag as np_imag from scipy.optimize import fsolve from ....Classes.Segment import Segment from ....Classes.SurfLine import SurfLine from ....Classes.Arc1 import Arc1 from ....Methods import ParentMissingError from ....Functions.labels import HOLEV_LAB, HOLEM_LAB def build_geometry(self, alpha=0, delta=0, is_simplified=False): """Compute the curve (Segment) needed to plot the Hole. The ending point of a curve is the starting point of the next curve in the list Parameters ---------- self : HoleUD A HoleUD object alpha : float Angle to rotate the slot (Default value = 0) [rad] delta : complex Complex to translate the slot (Default value = 0) is_simplified : bool True to avoid line superposition (not used) Returns ------- surf_list: list List of SurfLine needed to draw the Hole """ surf_list = self.surf_list # Get correct label for surfaces lam_label = self.parent.get_label() R_id, surf_type = self.get_R_id() vent_label = lam_label + "_" + surf_type + "_R" + str(R_id) + "-T" mag_label = lam_label + "_" + HOLEM_LAB + "_R" + str(R_id) + "-T" # Update surface labels hole_id = 0 mag_id = 0 for surf in surf_list: if HOLEM_LAB in surf.label: key = "magnet_" + str(mag_id) if key in self.magnet_dict and self.magnet_dict[key] is not None: surf.label = mag_label + str(mag_id) + "-S0" mag_id += 1 else: # Magnet disabled or not defined surf.label = vent_label + str(hole_id) + "-S0" hole_id += 1 elif HOLEV_LAB in surf.label: surf.label = vent_label + str(hole_id) + "-S0" hole_id += 1 # Apply the transformations return_list = list() for surf in surf_list: return_list.append(surf.copy()) return_list[-1].rotate(alpha) return_list[-1].translate(delta) return return_list
2.546875
3
tardis/tardis_portal/auth/localdb_auth.py
nrmay/mytardis
0
18035
'''
Local DB Authentication module.

.. moduleauthor:: <NAME> <<EMAIL>>
'''
import logging

from django.contrib.auth.models import User, Group
from django.contrib.auth.backends import ModelBackend

from tardis.tardis_portal.auth.interfaces import AuthProvider, GroupProvider, UserProvider

logger = logging.getLogger(__name__)


auth_key = u'localdb'
auth_display_name = u'Local DB'

_modelBackend = ModelBackend()


class DjangoAuthBackend(AuthProvider):
    """Authenticate against Django's Model Backend.
    """

    def authenticate(self, request):
        """authenticate a user, this expect the user will be using
        form based auth and the *username* and *password* will be
        passed in as **POST** variables.

        :param request: a HTTP Request instance
        :type request: :class:`django.http.HttpRequest`
        """
        username = request.POST['username']
        password = request.POST['password']

        if not username or not password:
            return None

        return _modelBackend.authenticate(username, password)

    def get_user(self, user_id):
        """Return the User with the given username, or None if missing."""
        try:
            user = User.objects.get(username=user_id)
        except User.DoesNotExist:
            user = None
        return user


class DjangoGroupProvider(GroupProvider):
    name = u'django_group'

    def getGroups(self, user):
        """return an iteration of the available group ids for *user*.
        """
        groups = user.groups.all()
        return [g.id for g in groups]

    def getGroupById(self, id):
        """return the group associated with the id::

            {"id": 123, "display": "Group Name"}

        or None if no such group exists.
        """
        # BUG FIX: Group.objects.get raises DoesNotExist rather than
        # returning a falsy value, so the old `if groupObj:` check was
        # dead code and a missing id raised instead of returning None.
        try:
            groupObj = Group.objects.get(id=id)
        except Group.DoesNotExist:
            return None
        return {'id': id, 'display': groupObj.name}

    def searchGroups(self, **filter):
        """Return groups matching the given filter kwargs, with members."""
        result = []
        groups = Group.objects.filter(**filter)
        for g in groups:
            users = [u.username for u in User.objects.filter(groups=g)]
            result += [{'id': g.id,
                        'display': g.name,
                        'members': users}]
        return result


class DjangoUserProvider(UserProvider):
    name = u'django_user'

    def getUserById(self, id):
        """
        return the user dictionary in the format of::

            {"id": 123,
             "first_name": "John",
             "last_name": "Smith",
             "email": "<EMAIL>"}

        or None if the username does not exist.
        """
        try:
            userObj = User.objects.get(username=id)
            return {'id': id, 'first_name': userObj.first_name,
                    'last_name': userObj.last_name,
                    'email': userObj.email}
        except User.DoesNotExist:
            return None


django_user = DjangoUserProvider.name
django_group = DjangoGroupProvider.name
2.765625
3
src/app.py
hubmapconsortium/search-api
0
18036
import os
import time
from pathlib import Path
from flask import Flask, jsonify, abort, request, Response, Request
import concurrent.futures
import threading
import requests
import logging
import ast
from urllib.parse import urlparse
from flask import current_app as app
from urllib3.exceptions import InsecureRequestWarning
from yaml import safe_load

# Local modules
from elasticsearch.indexer import Indexer
from libs.assay_type import AssayType

# HuBMAP commons
from hubmap_commons.hm_auth import AuthHelper

# Set logging format and level (default is warning)
# All the API logging is forwarded to the uWSGI server and gets written into the log file `uwsgo-entity-api.log`
# Log rotation is handled via logrotate on the host system with a configuration file
# Do NOT handle log file and rotation via the Python logging to avoid issues with multi-worker processes
logging.basicConfig(format='[%(asctime)s] %(levelname)s in %(module)s:%(lineno)d: %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)

# Specify the absolute path of the instance folder and use the config file relative to the instance path
app = Flask(__name__, instance_path=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'instance'), instance_relative_config=True)
app.config.from_pyfile('app.cfg')

# load the index configurations and set the default
INDICES = safe_load((Path(__file__).absolute().parent / 'instance/search-config.yaml').read_text())
DEFAULT_INDEX_WITHOUT_PREFIX = INDICES['default_index']

logger.debug("############ INDICES config LOADED")
logger.debug(INDICES)

# Remove trailing slash / from URL base to avoid "//" caused by config with trailing slash
DEFAULT_ELASTICSEARCH_URL = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')
DEFAULT_ENTITY_API_URL = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['document_source_endpoint'].strip('/')

# Suppress InsecureRequestWarning warning when requesting status on https with ssl cert verify disabled
requests.packages.urllib3.disable_warnings(category = InsecureRequestWarning)


####################################################################################################
## Register error handlers
####################################################################################################

# Error handler for 400 Bad Request with custom error message
@app.errorhandler(400)
def http_bad_request(e):
    return jsonify(error=str(e)), 400

# Error handler for 401 Unauthorized with custom error message
@app.errorhandler(401)
def http_unauthorized(e):
    return jsonify(error=str(e)), 401

# Error handler for 403 Forbidden with custom error message
@app.errorhandler(403)
def http_forbidden(e):
    return jsonify(error=str(e)), 403

# Error handler for 500 Internal Server Error with custom error message
@app.errorhandler(500)
def http_internal_server_error(e):
    return jsonify(error=str(e)), 500


####################################################################################################
## AuthHelper initialization
####################################################################################################

# Initialize AuthHelper class and ensure singleton
try:
    if AuthHelper.isInitialized() == False:
        auth_helper_instance = AuthHelper.create(app.config['APP_CLIENT_ID'], app.config['APP_CLIENT_SECRET'])

        logger.info("Initialized AuthHelper class successfully :)")
    else:
        auth_helper_instance = AuthHelper.instance()
except Exception:
    msg = "Failed to initialize the AuthHelper class"
    # Log the full stack trace, prepend a line with our message
    logger.exception(msg)


####################################################################################################
## Default route
####################################################################################################

@app.route('/', methods = ['GET'])
def index():
    # Simple liveness check / landing message
    return "Hello! This is HuBMAP Search API service :)"


####################################################################################################
## Assay type API
####################################################################################################

# List known assay type names, optionally filtered by primary/non-primary;
# `simple=true` returns bare names instead of full JSON descriptions.
@app.route('/assaytype', methods = ['GET'])
def assaytypes():
    primary = None
    simple = False
    for key, val in request.args.items():
        if key == 'primary':
            primary = val.lower() == "true"
        elif key == 'simple':
            simple = val.lower() == "true"
        else:
            abort(400, f'invalid request parameter {key}')

    if primary is None:
        name_l = [name for name in AssayType.iter_names()]
    else:
        name_l = [name for name in AssayType.iter_names(primary=primary)]

    if simple:
        return jsonify(result=name_l)
    else:
        return jsonify(result=[AssayType(name).to_json() for name in name_l])

# Resolve a single assay type, either from the URL path (GET) or from a
# JSON body {"name": ...} (POST).
@app.route('/assaytype/<name>', methods = ['GET'])
@app.route('/assayname', methods = ['POST'])
def assayname(name=None):
    if name is None:
        request_json_required(request)
        try:
            name = request.json['name']
        except Exception:
            abort(400, 'request contains no "name" field')
    try:
        return jsonify(AssayType(name).to_json())
    except Exception as e:
        abort(400, str(e))


####################################################################################################
## API
####################################################################################################

# Both HTTP GET and HTTP POST can be used to execute search with body against ElasticSearch REST API.
# general search uses the DEFAULT_INDEX
@app.route('/search', methods = ['GET', 'POST'])
def search():
    # Always expect a json body
    request_json_required(request)

    logger.info("======search with no index provided======")
    logger.info ("default_index: " + DEFAULT_INDEX_WITHOUT_PREFIX)

    # Determine the target real index in Elasticsearch to be searched against
    # Use the DEFAULT_INDEX_WITHOUT_PREFIX since /search doesn't take any index
    target_index = get_target_index(request, DEFAULT_INDEX_WITHOUT_PREFIX)

    # get URL for that index
    es_url = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')

    # Return the elasticsearch resulting json data as json string
    return execute_query('_search', request, target_index, es_url)


# Both HTTP GET and HTTP POST can be used to execute search with body against ElasticSearch REST API.
# Note: the index in URL is not the real index in Elasticsearch, it's that index without prefix
@app.route('/<index_without_prefix>/search', methods = ['GET', 'POST'])
def search_by_index(index_without_prefix):
    # Always expect a json body
    request_json_required(request)

    # Make sure the requested index in URL is valid
    validate_index(index_without_prefix)

    logger.info("======requested index_without_prefix======")
    logger.info(index_without_prefix)

    # Determine the target real index in Elasticsearch to be searched against
    target_index = get_target_index(request, index_without_prefix)

    # get URL for that index
    es_url = INDICES['indices'][index_without_prefix]['elasticsearch']['url'].strip('/')

    # Return the elasticsearch resulting json data as json string
    return execute_query('_search', request, target_index, es_url)


# HTTP GET can be used to execute search with body against ElasticSearch REST API.
@app.route('/count', methods = ['GET'])
def count():
    # Always expect a json body
    request_json_required(request)

    logger.info("======count with no index provided======")

    # Determine the target real index in Elasticsearch to be searched against
    target_index = get_target_index(request, DEFAULT_INDEX_WITHOUT_PREFIX)

    # get URL for that index
    es_url = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')

    # Return the elasticsearch resulting json data as json string
    return execute_query('_count', request, target_index, es_url)


# HTTP GET can be used to execute search with body against ElasticSearch REST API.
# Note: the index in URL is not the real index in Elasticsearch, it's that index without prefix
@app.route('/<index_without_prefix>/count', methods = ['GET'])
def count_by_index(index_without_prefix):
    # Always expect a json body
    request_json_required(request)

    # Make sure the requested index in URL is valid
    validate_index(index_without_prefix)

    logger.info("======requested index_without_prefix======")
    logger.info(index_without_prefix)

    # Determine the target real index in Elasticsearch to be searched against
    target_index = get_target_index(request, index_without_prefix)

    # get URL for that index
    es_url = INDICES['indices'][index_without_prefix]['elasticsearch']['url'].strip('/')

    # Return the elasticsearch resulting json data as json string
    return execute_query('_count', request, target_index, es_url)


# Get a list of indices
@app.route('/indices', methods = ['GET'])
def indices():
    # Return the resulting json data as json string
    result = {
        "indices": get_filtered_indices()
    }

    return jsonify(result)


# Get the status of Elasticsearch cluster by calling the health API
# This shows the connection status and the cluster health status (if connected)
@app.route('/status', methods = ['GET'])
def status():
    response_data = {
        # Use strip() to remove leading and trailing spaces, newlines, and tabs
        'version': ((Path(__file__).absolute().parent.parent / 'VERSION').read_text()).strip(),
        'build': ((Path(__file__).absolute().parent.parent / 'BUILD').read_text()).strip(),
        'elasticsearch_connection': False
    }

    target_url = DEFAULT_ELASTICSEARCH_URL + '/_cluster/health'
    #target_url = app.config['ELASTICSEARCH_URL'] + '/_cluster/health'
    resp = requests.get(url = target_url)

    if resp.status_code == 200:
        response_data['elasticsearch_connection'] = True

        # If connected, we also get the cluster health status
        status_dict = resp.json()
        # Add new key
        response_data['elasticsearch_status'] = status_dict['status']

    return jsonify(response_data)


# This reindex function will also reindex Collection and Upload
# in addition to the Dataset, Donor, Sample entities
@app.route('/reindex/<uuid>', methods=['PUT'])
def reindex(uuid):
    # Reindex individual document doesn't require the token to belong
    # to the HuBMAP-Data-Admin group
    # since this is being used by entity-api and ingest-api too
    token = get_user_token(request.headers)

    try:
        indexer = init_indexer(token)
        # Reindex runs in a background thread; the 202 below is returned
        # immediately while the work proceeds.
        threading.Thread(target=indexer.reindex, args=[uuid]).start()
        # indexer.reindex(uuid) # for non-thread

        logger.info(f"Started to reindex uuid: {uuid}")
    except Exception as e:
        logger.exception(e)
        internal_server_error(e)

    return f"Request of reindexing {uuid} accepted", 202


# Live reindex without first deleting and recreating the indices
# This just deletes the old document and add the latest document of each entity (if still available)
@app.route('/reindex-all', methods=['PUT'])
def reindex_all():
    # The token needs to belong to the HuBMAP-Data-Admin group
    # to be able to trigger a live reindex for all documents
    token = get_user_token(request.headers, admin_access_required = True)
    saved_request = request.headers
    logger.debug(saved_request)

    try:
        indexer = init_indexer(token)
        threading.Thread(target=reindex_all_uuids, args=[indexer, token]).start()

        logger.info('Started live reindex all')
    except Exception as e:
        logger.exception(e)
        internal_server_error(e)

    return 'Request of live reindex all documents accepted', 202


####################################################################################################
## Internal Functions Used By API
####################################################################################################

# Throws error for 400 Bad Request with message
def bad_request_error(err_msg):
    abort(400, description = err_msg)

# Throws error for 401 Unauthorized with message
def unauthorized_error(err_msg):
    abort(401, description = err_msg)

# Throws error for 403 Forbidden with message
def forbidden_error(err_msg):
    abort(403, description = err_msg)

# Throws error for 500 Internal Server Error with message
def internal_server_error(err_msg):
    abort(500, description = err_msg)


# Get user information dict based on the http request(headers)
# `group_required` is a boolean, when True, 'hmgroupids' is in the output
def get_user_info_for_access_check(request, group_required):
    return auth_helper_instance.getUserInfoUsingRequest(request, group_required)


"""
Parse the token from Authorization header

Parameters
----------
request_headers: request.headers
    The http request headers
admin_access_required : bool
    If the token is required to belong to the HuBMAP-Data-Admin group, default to False

Returns
-------
str
    The token string if valid
"""
def get_user_token(request_headers, admin_access_required = False):
    # Get user token from Authorization header
    # getAuthorizationTokens() also handles MAuthorization header but we are not using that here
    try:
        user_token = auth_helper_instance.getAuthorizationTokens(request_headers)
    except Exception:
        msg = "Failed to parse the Authorization token by calling commons.auth_helper.getAuthorizationTokens()"
        # Log the full stack trace, prepend a line with our message
        logger.exception(msg)
        internal_server_error(msg)

    # The user_token is flask.Response on error
    if isinstance(user_token, Response):
        # The Response.data returns binary string, need to decode
        unauthorized_error(user_token.data.decode())

    if admin_access_required:
        # By now the token is already a valid token
        # But we also need to ensure the user belongs to HuBMAP-Data-Admin group
        # in order to execute the live reindex-all
        # Return a 403 response if the user doesn't belong to HuBMAP-Data-Admin group
        # NOTE(review): this checks the flask global `request`, not the
        # request_headers argument — confirm that is intentional.
        if not user_in_hubmap_data_admin_group(request):
            forbidden_error("Access not granted")

    return user_token


"""
Check if the user with token belongs to the HuBMAP-Data-Admin group

Parameters
----------
request : flask.request
    The flask http request object that containing the Authorization header
    with a valid Globus nexus token for checking group information

Returns
-------
bool
    True if the user belongs to HuBMAP-Data-Admin group, otherwise False
"""
def user_in_hubmap_data_admin_group(request):
    try:
        # The property 'hmgroupids' is ALWAYS in the output with using get_user_info()
        # when the token in request is a nexus_token
        user_info = get_user_info(request)
        hubmap_data_admin_group_uuid = auth_helper_instance.groupNameToId('HuBMAP-Data-Admin')['uuid']
    except Exception as e:
        # Log the full stack trace, prepend a line with our message
        logger.exception(e)

        # If the token is not a nexus token, no group information available
        # The commons.hm_auth.AuthCache would return a Response with 500 error message
        # We treat such cases as the user not in the HuBMAP-Data-Admin group
        return False

    return (hubmap_data_admin_group_uuid in user_info['hmgroupids'])


"""
Get user information dict based on the http request(headers)
The result will be used by the trigger methods

Parameters
----------
request : Flask request object
    The Flask request passed from the API endpoint

Returns
-------
dict
    A dict containing all the user info

    {
        "scope": "urn:globus:auth:scope:nexus.api.globus.org:groups",
        "name": "<NAME>",
        "iss": "https://auth.globus.org",
        "client_id": "21f293b0-5fa5-4ee1-9e0e-3cf88bd70114",
        "active": True,
        "nbf": 1603761442,
        "token_type": "Bearer",
        "aud": ["nexus.api.globus.org", "21f293b0-5fa5-4ee1-9e0e-3cf88bd70114"],
        "iat": 1603761442,
        "dependent_tokens_cache_id": "af2d5979090a97536619e8fbad1ebd0afa875c880a0d8058cddf510fc288555c",
        "exp": 1603934242,
        "sub": "c0f8907a-ec78-48a7-9c85-7da995b05446",
        "email": "<EMAIL>",
        "username": "<EMAIL>",
        "hmscopes": ["urn:globus:auth:scope:nexus.api.globus.org:groups"],
    }
"""
def get_user_info(request):
    # `group_required` is a boolean, when True, 'hmgroupids' is in the output
    user_info = auth_helper_instance.getUserInfoUsingRequest(request, True)

    logger.debug("======get_user_info()======")
    logger.debug(user_info)

    # It returns error response when:
    # - invalid header or token
    # - token is valid but not nexus token, can't find group info
    if isinstance(user_info, Response):
        # Bubble up the actual error message from commons
        # The Response.data returns binary string, need to decode
        msg = user_info.get_data().decode()
        # Log the full stack trace, prepend a line with our message
        logger.exception(msg)
        raise Exception(msg)

    return user_info


# Always expect a json body
def request_json_required(request):
    if not request.is_json:
        bad_request_error("A JSON body and appropriate Content-Type header are required")


# We'll need to verify the requested index in URL is valid
def validate_index(index_without_prefix):
    separator = ','
    #indices = get_filtered_indices()
    indices = INDICES['indices'].keys()

    if index_without_prefix not in indices:
        bad_request_error(f"Invalid index name. Use one of the following: {separator.join(indices)}")


# Determine the target real index in Elasticsearch bases on the request header and given index (without prefix)
# The Authorization header with globus token is optional
# Case #1: Authorization header is missing, default to use the `hm_public_<index_without_prefix>`.
# Case #2: Authorization header with valid token, but the member doesn't belong to the HuBMAP-Read group, direct the call to `hm_public_<index_without_prefix>`.
# Case #3: Authorization header presents but with invalid or expired token, return 401 (if someone is sending a token, they might be expecting more than public stuff).
# Case #4: Authorization header presents with a valid token that has the group access, direct the call to `hm_consortium_<index_without_prefix>`.
def get_target_index(request, index_without_prefix):
    # Case #1 and #2: default to the public index
    target_index = INDICES['indices'][index_without_prefix]['public']

    # Keys in request.headers are case insensitive
    if 'Authorization' in request.headers:
        # user_info is a dict
        user_info = get_user_info_for_access_check(request, True)

        logger.info("======user_info======")
        logger.info(user_info)

        # Case #3
        if isinstance(user_info, Response):
            # Notify the client with 401 error message
            unauthorized_error("The globus token in the HTTP 'Authorization: Bearer <globus-token>' header is either invalid or expired.")
        # Otherwise, we check user_info['hmgroupids'] list
        # Key 'hmgroupids' presents only when group_required is True
        else:
            # Case #4
            if app.config['GLOBUS_HUBMAP_READ_GROUP_UUID'] in user_info['hmgroupids']:
                #target_index = app.config['PRIVATE_INDEX_PREFIX'] + index_without_prefix
                target_index = INDICES['indices'][index_without_prefix]['private']

    return target_index


# Make a call to Elasticsearch
def execute_query(query_against, request, index, es_url, query=None):
    supported_query_against = ['_search', '_count']
    separator = ','

    if query_against not in supported_query_against:
        bad_request_error(f"Query against '{query_against}' is not supported by Search API. Use one of the following: {separator.join(supported_query_against)}")

    # Determine the target real index in Elasticsearch to be searched against
    #index = get_target_index(request, index_without_prefix)
    #target_url = app.config['ELASTICSEARCH_URL'] + '/' + target_index + '/' + query_against
    #es_url = INDICES['indices'][index_without_prefix]['elasticsearch']['url'].strip('/')
    logger.debug('es_url')
    logger.debug(es_url)
    logger.debug(type(es_url))

    # use the index es connection
    target_url = es_url + '/' + index + '/' + query_against
    logger.debug("Target url: " + target_url)

    if query is None:
        # Parse incoming json string into json data(python dict object)
        json_data = request.get_json()
        # All we need to do is to simply pass the search json to elasticsearch
        # The request json may contain "access_group" in this case
        # Will also pass through the query string in URL
        target_url = target_url + get_query_string(request.url)
        # Make a request with json data
        # The use of json parameter converts python dict to json string and adds content-type: application/json automatically
    else:
        json_data = query

    logger.debug(json_data)

    resp = requests.post(url=target_url, json=json_data)
    logger.debug("==========response==========")
    logger.debug(resp)
    try:
        # Return the elasticsearch resulting json data as json string
        return jsonify(resp.json())
    except Exception as e:
        logger.debug(e)
        # Re-raise preserving the original traceback
        raise
    # NOTE: removed unreachable `return jsonify(resp)` that followed the
    # try/except (every path above either returns or raises).


# Get the query string from original request
def get_query_string(url):
    query_string = ''
    parsed_url = urlparse(url)

    logger.debug("======parsed_url======")
    logger.debug(parsed_url)

    # Add the ? at beginning of the query string if not empty
    # BUG FIX: the condition was inverted (`if not parsed_url.query:`),
    # which produced '?' for URLs WITHOUT a query string and '' for URLs
    # WITH one, so request query parameters were never forwarded to
    # Elasticsearch.
    if parsed_url.query:
        query_string = '?' + parsed_url.query

    return query_string


# Get a list of entity uuids via entity-api for a given entity type:
# Collection, Donor, Sample, Dataset, Submission. Case-insensitive.
def get_uuids_by_entity_type(entity_type, token):
    """Return the list of uuids known to entity-api for `entity_type`.

    `entity_type` is case-insensitive. Aborts with 500 on a non-200 response.
    """
    entity_type = entity_type.lower()
    request_headers = create_request_headers_for_auth(token)
    # Use different entity-api endpoint for Collection
    if entity_type == 'collection':
        #url = app.config['ENTITY_API_URL'] + "/collections?property=uuid"
        url = DEFAULT_ENTITY_API_URL + "/collections?property=uuid"
    else:
        #url = app.config['ENTITY_API_URL'] + "/" + entity_type + "/entities?property=uuid"
        url = DEFAULT_ENTITY_API_URL + "/" + entity_type + "/entities?property=uuid"
    # NOTE(review): verify=False disables TLS verification — presumably for
    # internal/dev endpoints; confirm this is intentional for production.
    response = requests.get(url, headers = request_headers, verify = False)
    if response.status_code != 200:
        internal_server_error("get_uuids_by_entity_type() failed to make a request to entity-api for entity type: " + entity_type)
    uuids_list = response.json()
    return uuids_list


# Create a dict with HTTP Authorization header with Bearer token
def create_request_headers_for_auth(token):
    """Build the `Authorization: Bearer <token>` header dict."""
    auth_header_name = 'Authorization'
    auth_scheme = 'Bearer'
    headers_dict = {
        # Don't forget the space between scheme and the token value
        auth_header_name: auth_scheme + ' ' + token
    }
    return headers_dict


def get_uuids_from_es(index, es_url):
    """Page through an Elasticsearch index and return every document _id.

    Uses from/size pagination in steps of 10,000 (the default ES result
    window cap) until `hits.total.value` documents have been collected.
    """
    uuids = []
    size = 10_000
    query = {
        "size": size,
        "from": len(uuids),
        "_source": ["_id"],
        "query": {
            "bool": {
                "must": [],
                "filter": [
                    {
                        "match_all": {}
                    }
                ],
                "should": [],
                "must_not": []
            }
        }
    }
    end_of_list = False
    while not end_of_list:
        logger.debug("Searching ES for uuids...")
        logger.debug(es_url)
        resp = execute_query('_search', None, index, es_url, query)
        logger.debug('Got a response from ES...')
        ret_obj = resp.get_json()
        uuids.extend(hit['_id'] for hit in ret_obj.get('hits').get('hits'))
        total = ret_obj.get('hits').get('total').get('value')
        # Stop once we've gathered all reported hits; otherwise advance the page
        if total <= len(uuids):
            end_of_list = True
        else:
            query['from'] = len(uuids)
    return uuids


def init_indexer(token):
    """Construct the Indexer with app credentials and the caller's token."""
    return Indexer(
        INDICES,
        app.config['APP_CLIENT_ID'],
        app.config['APP_CLIENT_SECRET'],
        token
    )


def reindex_all_uuids(indexer, token):
    """Live full reindex: sync Elasticsearch with neo4j via entity-api.

    Deletes ES documents no longer present in neo4j, then reindexes
    collections, uploads, and donor trees concurrently. All failures are
    logged (not raised) since this runs as a background operation.
    """
    with app.app_context():
        try:
            logger.info("############# Reindex Live Started #############")
            start = time.time()
            # Make calls to entity-api to get a list of uuids for each entity type
            donor_uuids_list = get_uuids_by_entity_type("donor", token)
            sample_uuids_list = get_uuids_by_entity_type("sample", token)
            dataset_uuids_list = get_uuids_by_entity_type("dataset", token)
            upload_uuids_list = get_uuids_by_entity_type("upload", token)
            public_collection_uuids_list = get_uuids_by_entity_type("collection", token)
            logger.debug("merging sets into a one list...")
            # Merge into a big list that with no duplicates
            all_entities_uuids = set(donor_uuids_list + sample_uuids_list + dataset_uuids_list + upload_uuids_list + public_collection_uuids_list)
            es_uuids = []
            #for index in ast.literal_eval(app.config['INDICES']).keys():
            logger.debug("looping through the indices...")
            logger.debug(INDICES['indices'].keys())
            index_names = get_all_indice_names()
            logger.debug(index_names)
            # Collect every uuid currently present in every (public+private) ES index
            for index in index_names.keys():
                all_indices = index_names[index]
                # get URL for that index
                es_url = INDICES['indices'][index]['elasticsearch']['url'].strip('/')
                for actual_index in all_indices:
                    es_uuids.extend(get_uuids_from_es(actual_index, es_url))
            es_uuids = set(es_uuids)
            logger.debug("looping through the UUIDs...")
            # Remove entities found in Elasticserach but no longer in neo4j
            for uuid in es_uuids:
                if uuid not in all_entities_uuids:
                    logger.debug(f"Entity of uuid: {uuid} found in Elasticserach but no longer in neo4j. Delete it from Elasticserach.")
                    indexer.delete(uuid)
            logger.debug("Starting multi-thread reindexing ...")
            # Reindex in multi-treading mode for:
            # - each public collection
            # - each upload, only add to the hm_consortium_entities index (private index of the default)
            # - each donor and its descendants in the tree
            futures_list = []
            results = []
            with concurrent.futures.ThreadPoolExecutor() as executor:
                public_collection_futures_list = [executor.submit(indexer.index_public_collection, uuid, reindex = True) for uuid in public_collection_uuids_list]
                upload_futures_list = [executor.submit(indexer.index_upload, uuid, reindex = True) for uuid in upload_uuids_list]
                donor_futures_list = [executor.submit(indexer.index_tree, uuid) for uuid in donor_uuids_list]
                # Append the above three lists into one
                futures_list = public_collection_futures_list + upload_futures_list + donor_futures_list
                # Drain results as they complete (also surfaces worker exceptions)
                for f in concurrent.futures.as_completed(futures_list):
                    logger.debug(f.result())
            end = time.time()
            logger.info(f"############# Live Reindex-All Completed. Total time used: {end - start} seconds. #############")
        except Exception as e:
            logger.error(e)


# Gets a list of actually public and private indice names
def get_all_indice_names():
    """Map each logical index key to its [public, private] real index names."""
    all_names = {}
    try:
        indices = INDICES['indices'].keys()
        for i in indices:
            index_info = {}
            index_names = []
            public_index = INDICES['indices'][i]['public']
            private_index = INDICES['indices'][i]['private']
            index_names.append(public_index)
            index_names.append(private_index)
            index_info[i] = index_names
            all_names.update(index_info)
    except Exception as e:
        raise e
    return all_names


# Get a list of filtered Elasticsearch indices to expose to end users without the prefix
def get_filtered_indices():
    """Return the logical index keys declared in the yml configuration."""
    # just get all the defined index keys from the yml file
    indices = INDICES['indices'].keys()
    return list(indices)


# For local development/testing
if __name__ == "__main__":
    try:
        app.run(host='0.0.0.0', port="5005")
    except Exception as e:
        print("Error during starting debug server.")
        print(str(e))
        logger.error(e, exc_info=True)
        print("Error during startup check the log file for further information")
1.898438
2
wordpress/apps.py
2e2a/django-wordpress
1
18037
from django.apps import AppConfig


class WordpressAppConfig(AppConfig):
    """Django application configuration for the ``wordpress`` app."""

    # App label / import path of the application.
    name = 'wordpress'
    # Primary-key field class used for models that don't declare one.
    default_auto_field = 'django.db.models.AutoField'
1.46875
1
libs/libgmp/libgmp.py
wrobelda/craft-blueprints-kde
14
18038
<gh_stars>10-100 # -*- coding: utf-8 -*- # Copyright 2018 <NAME> <<EMAIL>> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
import info


class subinfo(info.infoclass):
    """Craft blueprint metadata for GNU GMP (arbitrary-precision arithmetic)."""

    def setTargets(self):
        # Single pinned release, verified against its published SHA256 digest.
        # NOTE(review): CraftHash is injected into the blueprint namespace by
        # the Craft runtime, not imported here.
        self.targets["6.1.2"] = "https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2"
        self.targetDigests['6.1.2'] = (['5275bb04f4863a13516b2f39392ac5e272f5e1bb8057b18aec1c9b79d73d8fb2'], CraftHash.HashAlgorithm.SHA256)
        self.targetInstSrc["6.1.2"] = "gmp-6.1.2"
        self.defaultTarget = "6.1.2"

    def setDependencies(self):
        self.runtimeDependencies["virtual/base"] = None
        # msys provides the POSIX shell toolchain needed by the autotools build
        if CraftCore.compiler.isMinGW():
            self.buildDependencies["dev-utils/msys"] = None


from Package.AutoToolsPackageBase import *
from Package.VirtualPackageBase import *


class PackageAutoTools(AutoToolsPackageBase):
    """Autotools-based build of GMP (shared libs incl. the C++ bindings)."""

    def __init__(self, **args):
        AutoToolsPackageBase.__init__(self)
        self.subinfo.options.package.withCompiler = False
        self.subinfo.options.configure.args = "--disable-static --enable-shared --enable-cxx "
        # GMP's build does not support out-of-source (shadow) builds here
        self.subinfo.options.useShadowBuild = False


# GMP cannot be built with MSVC; fall back to a no-op virtual package there.
if not CraftCore.compiler.isMSVC():
    class Package(PackageAutoTools):
        def __init__(self):
            PackageAutoTools.__init__(self)
else:
    class Package(VirtualPackageBase):
        def __init__(self):
            VirtualPackageBase.__init__(self)
1.390625
1
scluster/aws_create_resources.py
dorgun/ncluster
0
18039
#!/usr/bin/env python # # Creates resources # This script creates VPC/security group/keypair if not already present import logging import os import sys import time from . import aws_util as u from . import util DRYRUN = False DEBUG = True # Names of Amazon resources that are created. These settings are fixed across # all runs, and correspond to resources created once per user per region. PUBLIC_TCP_RANGES = [ 22, # ssh (8888, 8899), # ipython notebook ports 6379, # redis port (6006, 6016) # tensorboard ports ] PUBLIC_UDP_RANGES = [(60000, 61000)] # mosh ports logger = logging.getLogger(__name__) def network_setup(): """Creates VPC if it doesn't already exists, configures it for public internet access, returns vpc, subnet, security_group""" ec2 = u.get_ec2_resource() client = u.get_ec2_client() existing_vpcs = u.get_vpc_dict() zones = u.get_zones() # create VPC from scratch. Remove this if default VPC works well enough. vpc_name = u.get_vpc_name() if u.get_vpc_name() in existing_vpcs: logger.info("Reusing VPC " + vpc_name) vpc = existing_vpcs[vpc_name] else: logger.info("Creating VPC " + vpc_name) vpc = ec2.create_vpc(CidrBlock='192.168.0.0/16') # enable DNS on the VPC local_response = vpc.modify_attribute(EnableDnsHostnames={"Value": True}) assert u.is_good_response(local_response) local_response = vpc.modify_attribute(EnableDnsSupport={"Value": True}) assert u.is_good_response(local_response) vpc.create_tags(Tags=u.create_name_tags(vpc_name)) vpc.wait_until_available() gateways = u.get_gateway_dict(vpc) gateway_name = u.get_gateway_name() if gateway_name in gateways: logger.info("Reusing gateways " + gateway_name) else: logger.info("Creating internet gateway " + gateway_name) ig = ec2.create_internet_gateway() ig.attach_to_vpc(VpcId=vpc.id) ig.create_tags(Tags=u.create_name_tags(gateway_name)) # check that attachment succeeded attach_state = u.extract_attr_for_match(ig.attachments, State=-1, VpcId=vpc.id) assert attach_state == 'available', "vpc %s is in state %s" 
% (vpc.id, attach_state) route_table = vpc.create_route_table() route_table_name = u.get_route_table_name() route_table.create_tags(Tags=u.create_name_tags(route_table_name)) dest_cidr = '0.0.0.0/0' route_table.create_route(DestinationCidrBlock=dest_cidr, GatewayId=ig.id) assert len(zones) <= 16 # for cidr/20 to fit into cidr/16 ip = 0 for zone in zones: cidr_block = '192.168.%d.0/20' % (ip,) ip += 16 logging.info("Creating subnet %s in zone %s" % (cidr_block, zone)) subnet = vpc.create_subnet(CidrBlock=cidr_block, AvailabilityZone=zone) subnet.create_tags(Tags=[{'Key': 'Name', 'Value': f'{vpc_name}-subnet'}, {'Key': 'Region', 'Value': zone}]) local_response = client.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet.id) assert u.is_good_response(local_response) u.wait_until_available(subnet) assert subnet.map_public_ip_on_launch, "Subnet doesn't enable public IP by default, why?" route_table.associate_with_subnet(SubnetId=subnet.id) existing_security_groups = u.get_security_group_dict(vpc.id) security_group_name = u.get_security_group_name() if security_group_name in existing_security_groups: logger.info("Reusing security group " + security_group_name) security_group = existing_security_groups[security_group_name] assert security_group.vpc_id == vpc.id, f"Found security group {security_group} " \ f"attached to {security_group.vpc_id} but expected {vpc.id}" else: logging.info("Creating security group " + security_group_name) security_group = ec2.create_security_group( GroupName=security_group_name, Description=security_group_name, VpcId=vpc.id) cidr_ip = os.environ.get('SCLUSTER_SECURITY_GROUP_CidrIp', '0.0.0.0/0') security_group.create_tags(Tags=u.create_name_tags(security_group_name)) # allow ICMP access for public ping security_group.authorize_ingress( CidrIp='0.0.0.0/0', IpProtocol='icmp', FromPort=-1, ToPort=-1 ) # open public ports # always include SSH port which is required for basic functionality assert 22 in PUBLIC_TCP_RANGES, 
"Must enable SSH access" for port in PUBLIC_TCP_RANGES: if util.is_iterable(port): assert len(port) == 2 from_port, to_port = port else: from_port, to_port = port, port response = security_group.authorize_ingress( IpProtocol="tcp", CidrIp=cidr_ip, FromPort=from_port, ToPort=to_port ) assert u.is_good_response(response) for port in PUBLIC_UDP_RANGES: if util.is_iterable(port): assert len(port) == 2 from_port, to_port = port else: from_port, to_port = port, port response = security_group.authorize_ingress(IpProtocol="udp", CidrIp=cidr_ip, FromPort=from_port, ToPort=to_port) assert u.is_good_response(response) return vpc, security_group def keypair_setup(): """Creates keypair if necessary, saves private key locally, returns contents of private key file.""" os.system('mkdir -p ' + u.PRIVATE_KEY_LOCATION) keypair_name = u.get_keypair_name() keypair = u.get_keypair_dict().get(keypair_name, None) keypair_fn = u.get_keypair_fn() if keypair: print("Reusing keypair " + keypair_name) # check that local pem file exists and is readable assert os.path.exists( keypair_fn), "Keypair %s exists, but corresponding .pem file %s is not found, delete keypair %s through " \ "console and run again to recreate keypair/.pem together" % ( keypair_name, keypair_fn, keypair_name) keypair_contents = open(keypair_fn).read() assert len(keypair_contents) > 0 else: print("Creating keypair " + keypair_name) ec2 = u.get_ec2_resource() assert not os.path.exists( keypair_fn), "previous keypair exists, delete it with 'sudo rm %s' and also delete corresponding " \ "keypair through console" % (keypair_fn) keypair = ec2.create_key_pair(KeyName=keypair_name) open(keypair_fn, 'w').write(keypair.key_material) os.system('chmod 400 ' + keypair_fn) return keypair def placement_group_setup(group_name): """Creates placement_group group if necessary. 
Returns True if new placement_group group was created, False otherwise.""" existing_placement_groups = u.get_placement_group_dict() group = existing_placement_groups.get(group_name, None) if group: assert group.state == 'available' assert group.strategy == 'cluster' print("Reusing group ", group.name) return group print("Creating group " + group_name) ec2 = u.get_ec2_resource() group = ec2.create_placement_group(GroupName=group_name, Strategy='cluster') return group def create_resources(): logger.info(f"Creating {u.get_prefix()} resources in region {u.get_region()}") vpc, security_group = network_setup() keypair_setup() # saves private key locally to keypair_fn # create EFS efss = u.get_efs_dict() efs_name = u.get_efs_name() efs_id = efss.get(efs_name, '') if not efs_id: logger.info("Creating EFS " + efs_name) efs_id = u.create_efs(efs_name) else: logger.info("Reusing EFS " + efs_name) efs_client = u.get_efs_client() # create mount target for each subnet in the VPC # added retries because efs is not immediately available max_failures = 10 retry_interval_sec = 1 for subnet in vpc.subnets.all(): for retry_attempt in range(max_failures): try: sys.stdout.write("Creating efs mount target for %s ... " % (subnet.availability_zone,)) sys.stdout.flush() response = efs_client.create_mount_target( FileSystemId=efs_id, SubnetId=subnet.id, SecurityGroups=[security_group.id] ) if u.is_good_response(response): logger.info("success") break except Exception as e: if 'already exists' in str(e): # ignore "already exists" errors logger.info('already exists') break # Takes couple of seconds for EFS to come online, with # errors like this: # Creating efs mount target for us-east-1f ... 
Failed with An error occurred (IncorrectFileSystemLifeCycleState) when calling the CreateMountTarget operation: None, retrying in 1 sec logger.info("Got %s, retrying in %s sec" % (str(e), retry_interval_sec)) time.sleep(retry_interval_sec) else: logger.info("Giving up.") if __name__ == '__main__': create_resources()
2.453125
2
ex005-antecessorSucessor/005.py
KaiqueCassal/cursoEmVideoPython
1
18040
# Lê um número inteiro e mostra o seu antecessor e o seu sucessor.
valor = int(input('Digite um número inteiro: '))
antecessor, sucessor = valor - 1, valor + 1
print(f'O número: {valor}\nO antecessor: {antecessor}\nO sucessor: {sucessor}')
3.640625
4
homeassistant/components/ihc/binary_sensor.py
jasperro/core
7
18041
"""Support for IHC binary sensors."""
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import CONF_TYPE

from . import IHC_CONTROLLER, IHC_INFO
from .const import CONF_INVERTING
from .ihcdevice import IHCDevice


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the IHC binary sensor platform.

    Builds one IHCBinarySensor per discovered device dict; does nothing
    when called without discovery info.
    """
    if discovery_info is None:
        return
    devices = []
    for name, device in discovery_info.items():
        ihc_id = device["ihc_id"]
        product_cfg = device["product_cfg"]
        product = device["product"]
        # Find controller that corresponds with device id
        ctrl_id = device["ctrl_id"]
        # NOTE(review): the key expression was redacted to "<KEY>" in this copy
        # of the source — presumably it combines a domain prefix with ctrl_id;
        # confirm against the upstream homeassistant ihc component.
        ihc_key = f"<KEY>"
        info = hass.data[ihc_key][IHC_INFO]
        ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
        sensor = IHCBinarySensor(
            ihc_controller,
            name,
            ihc_id,
            info,
            product_cfg.get(CONF_TYPE),
            product_cfg[CONF_INVERTING],
            product,
        )
        devices.append(sensor)
    add_entities(devices)


class IHCBinarySensor(IHCDevice, BinarySensorDevice):
    """IHC Binary Sensor.

    The associated IHC resource can be any in or output from a IHC product
    or function block, but it must be a boolean ON/OFF resources.
    """

    def __init__(
        self,
        ihc_controller,
        name,
        ihc_id: int,
        info: bool,
        sensor_type: str,
        inverting: bool,
        product=None,
    ) -> None:
        """Initialize the IHC binary sensor."""
        super().__init__(ihc_controller, name, ihc_id, info, product)
        # Last boolean value pushed by the controller; None until first update.
        self._state = None
        self._sensor_type = sensor_type
        # When True, the reported state is the negation of the IHC resource value.
        self.inverting = inverting

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._sensor_type

    @property
    def is_on(self):
        """Return true if the binary sensor is on/open."""
        return self._state

    def on_ihc_change(self, ihc_id, value):
        """IHC resource has changed."""
        if self.inverting:
            self._state = not value
        else:
            self._state = value
        self.schedule_update_ha_state()
2.421875
2
script/TuneLR.py
yipeiw/parameter_server
0
18042
<gh_stars>0 #!/usr/bin/env python import os.path as path import sys tmpDir = '../config/tmp/' logDir = '../config/tmp/log/' conffile = sys.argv[1] runfile=sys.argv[2] lr = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] fout = open(runfile, 'w') fout.write("#!/bin/bash\n\n\n") fws = {} confname = path.splitext(path.basename(conffile))[0] loglist = confname+'.meta.log' fl = open(loglist, 'w') for i in range(0, len(lr)): filename = confname+'_'+str(lr[i]) tmpfile = path.join(tmpDir, filename+'.conf') logfile = path.join(logDir, filename + '.txt') fws[i] = open(tmpfile, 'w') fout.write("echo \""+"./local.sh 1 4 "+tmpfile + " 2>"+logfile+'\"\n\n') fout.write("./local.sh 1 4 "+tmpfile + " 2>"+logfile+'\n\n\n') fl.write(logfile+'\n') fout.close() fl.close() for line in open(conffile): if line.find("eta")==0: for i in range(0, len(lr)): output = "eta: "+str(lr[i]) + '\n' fws[i].write(output) else: for i in range(0, len(lr)): fws[i].write(line) for i in range(0, len(lr)): fws[i].close()
2.234375
2
pc.py
Omar8345/tic-tac-toe
0
18043
# Tic Tac Toe Game
# Original repository: (https://github.com/Omar8345/tic-tac-toe)
# Author: <NAME>
# Date: 08/02/2022
# Version: 1.0
# Description: Tic Tac Toe Game made using Python Tkinter (Open Source)
# The human plays X; the computer answers with a random O.
#
# Fixes relative to the original:
#  - `if ['text'] == " "` compared a literal list (b4 was never a candidate)
#  - `if b5 == " "` compared the widget itself (b5 was never a candidate)
#  - the draw check compared a Button widget to "X"/"O" and could never fire,
#    so a full board crashed in random.choice([]) instead of declaring a draw
#  - removed unused imports (itertools.tee, time, numpy.empty) — the whole
#    file is in view, so they are provably unused
import tkinter
import random
from tkinter import messagebox
from time import sleep

# Index triples of the 8 winning lines on the 3x3 board (row-major order).
_WIN_LINES = [
    (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
    (0, 4, 8), (2, 4, 6),              # diagonals
]

try:
    # Tkinter
    window = tkinter.Tk()
    window.title("Tic Tac Toe")
    window.resizable(0, 0)  # fixed-size window
    # Window icon
    window.iconbitmap("img\\XO.ico")

    def _make_button(grid_row, grid_column):
        """Create one board cell and place it on the grid."""
        btn = tkinter.Button(window, text=" ", font=('Times 26 bold'),
                             height=4, width=8)
        # Bind the button itself into its own callback.
        btn.config(command=lambda: btn_click(btn))
        btn.grid(row=grid_row, column=grid_column)
        return btn

    # Board cells in row-major order; rows start at grid row 1 as before.
    buttons = [_make_button(r + 1, c) for r in range(3) for c in range(3)]
    b1, b2, b3, b4, b5, b6, b7, b8, b9 = buttons

    def _line_won(mark):
        """True when any winning line is fully occupied by `mark`."""
        return any(all(buttons[i]['text'] == mark for i in line)
                   for line in _WIN_LINES)

    def _finish(console_msg, box_msg):
        """Announce the result and close the game window."""
        print(console_msg)
        tkinter.messagebox.showinfo("Winner", box_msg)
        window.destroy()

    def btn_click(button):
        """Handle a human move, then let the computer reply."""
        button.config(state=tkinter.DISABLED)
        if button['text'] == " ":
            button.config(text="X")
            button.config(bg="red")

        if _line_won("X"):
            _finish("X wins", "X wins")
            return

        # Computer's turn: pick a random empty cell (now includes b4 and b5).
        empty = [b for b in buttons if b['text'] == " "]
        if not empty:
            # Board full with no X win -> draw (previously unreachable).
            _finish("Draw", "Draw, Game Over!")
            return
        pick = random.choice(empty)
        pick.config(text="O", state=tkinter.DISABLED, bg="blue")

        if _line_won("O"):
            _finish("O wins", "O wins")
            return
        if not any(b['text'] == " " for b in buttons):
            _finish("Draw", "Draw, Game Over!")
except Exception:
    # Original swallowed all setup errors (e.g. missing icon file) silently;
    # that best-effort behavior is preserved deliberately.
    pass


def run_game():
    window.mainloop()


if __name__ == "__main__":
    run_game()
else:
    # Original verbose notice printed when imported as a module; text and
    # pacing are preserved byte-for-byte.
    _NOTICE = [
        ("If you run the game using the launcher.py or launcher.exe.", 1),
        ('Ignore this message, thank you.', 0),
        ('------------------------------------------------------', 0),
        ("Error: This is a module and not a script.", 2),
        ("Please run this module as a script.", 2),
        ("If you actually did run it as a script, please report this bug.", 2),
        ("Raise an issue on GitHub. More details:", 2),
        ("__name__ != __main__", 2),
        (" __name__ does not equal __main__ and this was made to prevent errors.", 2),
        ("If you are a developer and you are seeing this message, please report this bug and (if possible, more details).", 2),
        ("If you are not a developer and you are seeing this message, please report the details gaven above.", 2),
        ("Thank you.", 2),
        ("<NAME>", 2),
        ("Hope you in good health. Stay safe.", 1),
    ]
    for _msg, _delay in _NOTICE:
        print(_msg)
        if _delay:
            sleep(_delay)
3.75
4
nvtabular/utils.py
deepyaman/NVTabular
0
18044
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings

# Optional dependencies: degrade gracefully when unavailable.
try:
    from numba import cuda
except ImportError:
    cuda = None
try:
    import psutil
except ImportError:
    psutil = None


def _pynvml_mem_size(kind="total", index=0):
    """Query device memory (bytes) for GPU `index` via NVML.

    `kind` must be "free" or "total"; anything else raises ValueError.
    """
    import pynvml

    pynvml.nvmlInit()
    if kind not in ("free", "total"):
        raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
    mem_info = pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(index))
    size = int(mem_info.free) if kind == "free" else int(mem_info.total)
    pynvml.nvmlShutdown()
    return size


def device_mem_size(kind="total", cpu=False):
    """Return free or total memory in bytes for the GPU (or host when cpu=True)."""
    # CPU mode: prefer psutil; otherwise warn and assume 1GB.
    if cpu and psutil:
        vm = psutil.virtual_memory()
        if kind == "total":
            return vm.total
        if kind == "free":
            return vm.free
    elif cpu:
        warnings.warn("Please install psutil for full cpu=True support.")
        # Assume 1GB of memory
        return int(1e9)

    if kind not in ("free", "total"):
        raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
    try:
        # numba context reports (free, total)
        which = 0 if kind == "free" else 1
        return int(cuda.current_context().get_memory_info()[which])
    except NotImplementedError:
        if kind == "free":
            # Not using NVML "free" memory, because it will not include RMM-managed memory
            warnings.warn("get_memory_info is not supported. Using total device memory from NVML.")
        return _pynvml_mem_size(kind="total", index=0)


def get_rmm_size(size):
    """Round `size` down to the nearest multiple of 256 bytes (RMM alignment)."""
    return size - size % 256
1.90625
2
tests/segmentation/segmanagetest.py
j-h-m/Media-Journaling-Tool
0
18045
import unittest
from maskgen import image_wrap
import numpy
from maskgen.segmentation.segmanage import select_region,segmentation_classification,convert_color
from tests.test_support import TestSupport


class SegManageTestCase(TestSupport):
    """Unit tests for maskgen.segmentation.segmanage helpers."""

    def test_select_region(self):
        """Selecting by an explicit color keeps only matching pixels (alpha=255)."""
        img = numpy.zeros((500,500,3),dtype='uint8')
        img_wrapper = image_wrap.ImageWrapper(img)
        selector = numpy.zeros((500, 500, 3), dtype='uint8')
        # Two distinctly colored patches; only the first matches the query color.
        selector[30:40,30:40,:] = [200,200,100]
        selector[130:140, 130:140, :] = [100, 200, 100]
        selector_wrapper = image_wrap.ImageWrapper(selector)
        result,rcolor = select_region(img_wrapper,selector_wrapper,convert_color('[200,200,100]'))
        result = result.to_array()
        # Matching patch is opaque, non-matching patch transparent.
        self.assertTrue(numpy.all(result[30:40,30:40,3] == 255))
        self.assertTrue(numpy.all(result[130:140, 130:140, 3] == 0))
        self.assertEquals(rcolor,[200,200,100])

    def test_select_region_anycolor(self):
        """Without a color argument, select_region picks one region exclusively."""
        img = numpy.zeros((500, 500, 3), dtype='uint8')
        img_wrapper = image_wrap.ImageWrapper(img)
        selector = numpy.zeros((500, 500, 3), dtype='uint8')
        selector[30:40, 30:40, :] = [200, 200, 100]
        selector[130:140, 130:140, :] = [100, 200, 100]
        selector_wrapper = image_wrap.ImageWrapper(selector)
        result,color = select_region(img_wrapper, selector_wrapper)
        result = result.to_array()
        # The two patches must end up with different alpha values (one selected).
        self.assertTrue(numpy.all(result[30:40, 30:40, 3] != result[130:140, 130:140, 3]))

    def test_segmentation_classification(self):
        """Colors map to classification labels via the csv fixture's directory."""
        import os
        filelocation = self.locateFile('./tests/data/classifications.csv')
        self.assertEquals(segmentation_classification(os.path.dirname(filelocation),[100,100,200]),'other')
        self.assertEquals(segmentation_classification(os.path.dirname(filelocation), [200,100,200]), 'house')


if __name__ == '__main__':
    unittest.main()
2.46875
2
aimacode/tests/test_text.py
juandarr/AIND-planning
0
18046
<reponame>juandarr/AIND-planning import pytest import os import random from text import * # noqa from utils import isclose, DataFile def test_unigram_text_model(): flatland = DataFile("EN-text/flatland.txt").read() wordseq = words(flatland) P = UnigramTextModel(wordseq) s, p = viterbi_segment('itiseasytoreadwordswithoutspaces', P) assert s == [ 'it', 'is', 'easy', 'to', 'read', 'words', 'without', 'spaces'] def test_shift_encoding(): code = shift_encode("This is a secret message.", 17) assert code == 'Kyzj zj r jvtivk dvjjrxv.' def test_shift_decoding(): flatland = DataFile("EN-text/flatland.txt").read() ring = ShiftDecoder(flatland) msg = ring.decode('Kyzj zj r jvtivk dvjjrxv.') assert msg == 'This is a secret message.' def test_rot13_encoding(): code = rot13('Hello, world!') assert code == 'Uryyb, jbeyq!' def test_rot13_decoding(): flatland = DataFile("EN-text/flatland.txt").read() ring = ShiftDecoder(flatland) msg = ring.decode(rot13('Hello, world!')) assert msg == 'Hello, world!' def test_counting_probability_distribution(): D = CountingProbDist() for i in range(10000): D.add(random.choice('123456')) ps = [D[n] for n in '123456'] assert 1 / 7 <= min(ps) <= max(ps) <= 1 / 5 def test_ngram_models(): flatland = DataFile("EN-text/flatland.txt").read() wordseq = words(flatland) P1 = UnigramTextModel(wordseq) P2 = NgramTextModel(2, wordseq) P3 = NgramTextModel(3, wordseq) # The most frequent entries in each model assert P1.top(10) == [(2081, 'the'), (1479, 'of'), (1021, 'and'), (1008, 'to'), (850, 'a'), (722, 'i'), (640, 'in'), (478, 'that'), (399, 'is'), (348, 'you')] assert P2.top(10) == [(368, ('of', 'the')), (152, ('to', 'the')), (152, ('in', 'the')), (86, ('of', 'a')), (80, ('it', 'is')), (71, ('by', 'the')), (68, ('for', 'the')), (68, ('and', 'the')), (62, ('on', 'the')), (60, ('to', 'be'))] assert P3.top(10) == [(30, ('a', 'straight', 'line')), (19, ('of', 'three', 'dimensions')), (16, ('the', 'sense', 'of')), (13, ('by', 'the', 'sense')), (13, ('as', 'well', 
'as')), (12, ('of', 'the', 'circles')), (12, ('of', 'sight', 'recognition')), (11, ('the', 'number', 'of')), (11, ('that', 'i', 'had')), (11, ('so', 'as', 'to'))] assert isclose(P1['the'], 0.0611, rel_tol=0.001) assert isclose(P2['of', 'the'], 0.0108, rel_tol=0.01) assert isclose(P3['', '', 'but'], 0.0, rel_tol=0.001) assert isclose(P3['', '', 'but'], 0.0, rel_tol=0.001) assert isclose(P3['so', 'as', 'to'], 0.000323, rel_tol=0.001) assert P2.cond_prob.get(('went',)) is None assert P3.cond_prob['in', 'order'].dictionary == {'to': 6} def test_ir_system(): from collections import namedtuple Results = namedtuple('IRResults', ['score', 'url']) uc = UnixConsultant() def verify_query(query, expected): assert len(expected) == len(query) for expected, (score, d) in zip(expected, query): doc = uc.documents[d] assert "{0:.2f}".format( expected.score) == "{0:.2f}".format(score * 100) assert os.path.basename(expected.url) == os.path.basename(doc.url) return True q1 = uc.query("how do I remove a file") assert verify_query(q1, [ Results(76.83, "aimacode-data/MAN/rm.txt"), Results(67.83, "aimacode-data/MAN/tar.txt"), Results(67.79, "aimacode-data/MAN/cp.txt"), Results(66.58, "aimacode-data/MAN/zip.txt"), Results(64.58, "aimacode-data/MAN/gzip.txt"), Results(63.74, "aimacode-data/MAN/pine.txt"), Results(62.95, "aimacode-data/MAN/shred.txt"), Results(57.46, "aimacode-data/MAN/pico.txt"), Results(43.38, "aimacode-data/MAN/login.txt"), Results(41.93, "aimacode-data/MAN/ln.txt"), ]) q2 = uc.query("how do I delete a file") assert verify_query(q2, [ Results(75.47, "aimacode-data/MAN/diff.txt"), Results(69.12, "aimacode-data/MAN/pine.txt"), Results(63.56, "aimacode-data/MAN/tar.txt"), Results(60.63, "aimacode-data/MAN/zip.txt"), Results(57.46, "aimacode-data/MAN/pico.txt"), Results(51.28, "aimacode-data/MAN/shred.txt"), Results(26.72, "aimacode-data/MAN/tr.txt"), ]) q3 = uc.query("email") assert verify_query(q3, [ Results(18.39, "aimacode-data/MAN/pine.txt"), Results(12.01, 
"aimacode-data/MAN/info.txt"), Results(9.89, "aimacode-data/MAN/pico.txt"), Results(8.73, "aimacode-data/MAN/grep.txt"), Results(8.07, "aimacode-data/MAN/zip.txt"), ]) q4 = uc.query("word count for files") assert verify_query(q4, [ Results(128.15, "aimacode-data/MAN/grep.txt"), Results(94.20, "aimacode-data/MAN/find.txt"), Results(81.71, "aimacode-data/MAN/du.txt"), Results(55.45, "aimacode-data/MAN/ps.txt"), Results(53.42, "aimacode-data/MAN/more.txt"), Results(42.00, "aimacode-data/MAN/dd.txt"), Results(12.85, "aimacode-data/MAN/who.txt"), ]) q5 = uc.query("learn: date") assert verify_query(q5, []) q6 = uc.query("2003") assert verify_query(q6, [ Results(14.58, "aimacode-data/MAN/pine.txt"), Results(11.62, "aimacode-data/MAN/jar.txt"), ]) def test_words(): assert words("``EGAD!'' Edgar cried.") == ['egad', 'edgar', 'cried'] def test_canonicalize(): assert canonicalize("``EGAD!'' Edgar cried.") == 'egad edgar cried' def test_translate(): text = 'orange apple lemon ' func = lambda x: ('s ' + x) if x ==' ' else x assert translate(text, func) == 'oranges apples lemons ' def test_bigrams(): assert bigrams('this') == ['th', 'hi', 'is'] assert bigrams(['this', 'is', 'a', 'test']) == [['this', 'is'], ['is', 'a'], ['a', 'test']] # TODO: for .ipynb """ >>> P1.samples(20) 'you thought known but were insides of see in depend by us dodecahedrons just but i words are instead degrees' >>> P2.samples(20) 'flatland well then can anything else more into the total destruction and circles teach others confine women must be added' >>> P3.samples(20) 'flatland by edwin a abbott 1884 to the wake of a certificate from nature herself proving the equal sided triangle' """ if __name__ == '__main__': pytest.main()
2.421875
2
implementations/python3/pysatl/apdu_tool.py
sebastien-riou/SATL
4
18047
import re import argparse import os import sys import logging import traceback import pysatl class EtsiTs101955(object): COMMENT_MARKER = "REM" COMMAND_MARKER = "CMD" RESET_MARKER = "RST" INIT_MARKER = "INI" OFF_MARKER = "OFF" def __init__(self, cmdHandler): self._cmdHandler = cmdHandler def runStream(self, scriptStream, *, line_cnt = 0): lineBuf = "" for line in scriptStream: line_cnt += 1 if line in ["\n", "\r"]: line = "" elif len(line): while line[-1] in ["\n", "\r"]: # remove end of line characters line = line[:-1] if 0 == len(line): continue if 0 == len(line): continue lineBreak = line[-1] == "\\" if lineBreak: lineBuf += line[:-1] continue line = lineBuf + line lineBuf = "" logging.debug("line %4d: '%s'" % (line_cnt, line)) if 0 == len(line): continue if line.startswith(EtsiTs101955.COMMENT_MARKER): self._cmdHandler.comment(line[len(EtsiTs101955.COMMENT_MARKER):]) continue tokens = line.split() if tokens[0] == EtsiTs101955.RESET_MARKER: self._cmdHandler.reset() elif tokens[0] == EtsiTs101955.OFF_MARKER: self._cmdHandler.off() elif tokens[0] == EtsiTs101955.INIT_MARKER: datstr = line[len(tokens[0]):] dat = pysatl.Utils.ba(datstr) self._cmdHandler.init(dat) elif tokens[0] == EtsiTs101955.COMMAND_MARKER: params = line[len(tokens[0]):] cmd_params_pattern = re.compile(r"(.*)\[(.*)\]\s*\((.*)\)") matchRes = cmd_params_pattern.match(params) if matchRes is not None: capdustr = matchRes.group(1) leDatStr = matchRes.group(2).replace(" ","").replace("\t","").lower() swStr = matchRes.group(3).replace(" ","").replace("\t","").lower() else: cmd_params_pattern = re.compile(r"(.*)\s*\((.*)\)") matchRes = cmd_params_pattern.match(params) capdustr = matchRes.group(1) leDatStr = "" swStr = matchRes.group(2) swStr = swStr.replace(" ","").replace("\t","").lower() capdu = pysatl.CAPDU.from_hexstr(capdustr) rapdu = self._cmdHandler.apdu(capdu,leDatStr,swStr) swlist = swStr.split(",") swMatch = False for sw in swlist: swMatch |= rapdu.matchSW(sw) if not swMatch: raise 
Exception("RAPDU does not match any of the expected status word") if not rapdu.matchDATA(leDatStr): raise Exception("RAPDU does not match expected outgoing data") else: raise Exception("line %d, syntax not supported: '%s'"%(line_cnt,line)) def runFile(scriptFile, apduHandler): tool = EtsiTs101955(apduHandler) with open(scriptFile) as script: tool.runStream(script) class CmdHandler(object): """Base class for command handlers""" def __init__(self): pass def apdu(self, capdu, leDatStr="", swStr=""): dat = pysatl.Utils.ba(leDatStr.replace('x','0')) sw=0 swStr = swStr.split(",")[0] for i in range(0,len(swStr)): d = swStr[i] sw = (sw << 4) | int(d,16) sw1=sw >> 8 sw2=sw & 0xFF rapdu = pysatl.RAPDU(SW1=sw1,SW2=sw2, DATA=dat) line = "CMD " header_len = 4 lc=len(capdu.DATA) if lc: header_len = 5 if lc>255: header_len = 7 else: header_len = 5 if capdu.LE>256: header_len = 7 dat = capdu.to_ba() line += pysatl.Utils.hexstr(dat[:header_len]) if len(capdu.DATA) > 0: line += " \\\n " dat = capdu.DATA while len(dat) > 16: line += pysatl.Utils.hexstr(dat[0:16]) + " \\\n " dat = dat[16:] line += pysatl.Utils.hexstr(dat) if len(rapdu.DATA) > 0: line += " \\\n [" dat = rapdu.DATA while len(dat) > 16: line += pysatl.Utils.hexstr(dat[0:16]) + " \\\n " dat = dat[16:] line += pysatl.Utils.hexstr(dat) line += " ] \\\n" elif capdu.LE > 0: line += " []" line += " ("+ pysatl.Utils.hexstr(rapdu.swBytes()) +")" logging.info(line) return rapdu def reset(self): logging.info("RST") def init(self, dat): logging.info("INIT "+pysatl.Utils.hexstr(dat)) def off(self): logging.info("OFF") def comment(self, msg): logging.info("REM %s" % (msg)) class ApduTool(object): """ETSI TS 101 955 script player""" def __init__(self, argv): scriptname = os.path.basename(__file__) parser = argparse.ArgumentParser(scriptname) #TODO: pass argv to parser. 
levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL') parser.add_argument('--log-level', default='INFO', choices=levels) parser.add_argument('--script', default="stdin", help='path to script', type=str) options = parser.parse_args() root = logging.getLogger() root.setLevel(options.log_level) if options.script == "stdin": player = EtsiTs101955(CmdHandler()) player.runStream(sys.stdin) else: EtsiTs101955.runFile(options.script,CmdHandler()) if __name__ == "__main__": ApduTool(sys.argv)
2.375
2
host-software/easyhid.py
kavka1983/key
1
18048
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2017 <EMAIL> # Licensed under the MIT license (http://opensource.org/licenses/MIT) import cffi import ctypes.util import platform ffi = cffi.FFI() ffi.cdef(""" struct hid_device_info { char *path; unsigned short vendor_id; unsigned short product_id; wchar_t *serial_number; unsigned short release_number; wchar_t *manufacturer_string; wchar_t *product_string; unsigned short usage_page; unsigned short usage; int interface_number; struct hid_device_info *next; }; typedef struct hid_device_ hid_device; int hid_init(void); int hid_exit(void); struct hid_device_info* hid_enumerate(unsigned short, unsigned short); void hid_free_enumeration (struct hid_device_info *devs); hid_device* hid_open (unsigned short vendor_id, unsigned short product_id, const wchar_t *serial_number); hid_device* hid_open_path (const char *path); int hid_write (hid_device *device, const unsigned char *data, size_t length); int hid_read_timeout (hid_device *dev, unsigned char *data, size_t length, int milliseconds); int hid_read (hid_device *device, unsigned char *data, size_t length); int hid_set_nonblocking (hid_device *device, int nonblock); int hid_send_feature_report (hid_device *device, const unsigned char *data, size_t length); int hid_get_feature_report (hid_device *device, unsigned char *data, size_t length); void hid_close (hid_device *device); int hid_get_manufacturer_string (hid_device *device, wchar_t *string, size_t maxlen); int hid_get_product_string (hid_device *device, wchar_t *string, size_t maxlen); int hid_get_serial_number_string (hid_device *device, wchar_t *string, size_t maxlen); int hid_get_indexed_string (hid_device *device, int string_index, wchar_t *string, size_t maxlen); const wchar_t* hid_error (hid_device *device); """) if "Windows" in platform.platform(): try: hidapi = ffi.dlopen('hidapi.dll') except: hidapi = ffi.dlopen(ctypes.util.find_library('hidapi.dll')) else: try: hidapi = ffi.dlopen('hidapi-libusb') 
except: hidapi = ffi.dlopen(ctypes.util.find_library('hidapi-libusb')) def _c_to_py_str(val): if val == ffi.NULL: return None new_val = ffi.string(val) if type(new_val) == bytes or type(new_val) == bytearray: return new_val.decode("utf-8") else: return new_val class HIDException(Exception): pass class Device: def __init__(self, cdata): """ """ if cdata == ffi.NULL: raise TypeError self.path = _c_to_py_str(cdata.path) self.vendor_id = cdata.vendor_id self.product_id = cdata.product_id self.release_number = cdata.release_number self.manufacturer_string = _c_to_py_str(cdata.manufacturer_string) self.product_string = _c_to_py_str(cdata.product_string) self.serial_number = _c_to_py_str(cdata.serial_number) self.usage_page = cdata.usage_page self.usage = cdata.usage self.interface_number = cdata.interface_number self._device = None self._is_open = False def __del__(self): self.close() def open(self): if self._is_open: raise HIDException("Failed to open device: Device already open") path = self.path.encode('utf-8') dev = hidapi.hid_open_path(path) if dev: self._is_open = True self._device = dev else: raise HIDException("Failed to open device") def close(self): """ Closes the hid device """ if self._is_open: self._is_open = False hidapi.hid_close(self._device) def description(self): return self.info.description() def write(self, data, report_id=0): """ Writes `bytes` to the hid device. """ if not self._is_open: raise HIDException("Device not open") write_data = bytearray([report_id]) + bytearray(data) cdata = ffi.new("const unsigned char[]", bytes(write_data)) num_written = hidapi.hid_write(self._device, cdata, len(write_data)) if num_written < 0: raise HIDException("Failed to write to HID device: " + str(num_written)) else: return num_written def read(self, size=64, timeout=None): """ Read from the hid device. Returns bytes read or None if no bytes read. 
size: number of bytes to read timeout: length to wait in milliseconds """ if not self._is_open: raise HIDException("Device not open") data = [0] * size cdata = ffi.new("unsigned char[]", data) bytes_read = 0 if timeout == None: bytes_read = hidapi.hid_read(self._device, cdata, len(cdata)) else: bytes_read = hidapi.hid_read_timeout(self._device, cdata, len(cdata), timeout) if bytes_read < 0: raise HIDException("Failed to read from HID device: " + str(bytes_read)) elif bytes_read == 0: return None else: return bytearray(cdata) def set_nonblocking(self, enable_nonblocking): if not self._is_open: raise HIDException("Device not open") if type(enable_nonblocking) != bool: raise TypeError hidapi.hid_set_nonblocking(self._device, enable_nonblocking) def is_open(self): return _is_open def is_connected(self): """ Checks if the USB device is still connected """ if self._is_open: err = hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0) if err == -1: return False else: return True else: en = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=self.path) if len(en) == 0: return False else: return True # int hid_send_feature_report (hid_device *device, const unsigned char *data, size_t length); # def send_feature_report(self, data): # cdata = ffi.new("const unsigned char[]", data) # hidapi.hid_send_feature_report(self._device, cdata, length) # pass # def get_feature_report(self, size=64): # hid_data = bytes([report_id]) + bytes(data) # cdata = ffi.new("unsigned char[]", data) # hidapi.hid_send_feature_report(self._device, cdata, length) # pass def get_error(self): err_str = hidapi.hid_error(self._device) if err_str == ffi.NULL: return None else: return ffi.string(err_str) def _get_prod_string_common(self, hid_fn): max_len = 128 str_buf = ffi.new("wchar_t[]", bytearray(max_len).decode('utf-8')) ret = hid_fn(self._device, str_buf, max_len) if ret < 0: raise HIDException(self._device.get_error()) else: assert(ret == 0) return ffi.string(str_buf) # Probably don't need 
these excpet for get_indexed_string, since they won't # change from the values found in the enumeration def get_manufacture_string(self): """ Get the manufacturer string of the device from its device descriptor """ return self._get_prod_string_common(hidapi.hid_get_manufacturer_string) def get_product_string(self): """ Get the product string of the device from its device descriptor """ return self._get_prod_string_common(hidapi.hid_get_product_string) def get_serial_number(self): """ Get the serial number string of the device from its device descriptor """ return self._get_prod_string_common(hidapi.hid_get_serial_number_string) def get_indexed_string(self, index): """ Get the string with the given index from the device """ max_len = 128 str_buf = ffi.new("wchar_t[]", str(bytearray(max_len))) ret = hidapi.hid_get_indexed_string(self._device, index, str_buf, max_len) if ret < 0: raise HIDException(self._device.get_error()) elif ret == 0: return None else: return ffi.string(str_buf).encode('utf-8') def description(self): return \ """Device: {} | {:x}:{:x} | {} | {} | {} release_number: {} usage_page: {} usage: {} interface_number: {}\ """.format(self.path, self.vendor_id, self.product_id, self.manufacturer_string, self.product_string, self.serial_number, self.release_number, self.usage_page, self.usage, self.interface_number ) class Enumeration: def __init__(self, vid=0, pid=0): self.device_list = _hid_enumerate(vid, pid) def show(self): for dev in self.device_list: print(dev.description()) def find(self, vid=None, pid=None, serial=None, interface=None, \ path=None, release_number=None, manufacturer=None, product=None, usage=None, usage_page=None): """ Attempts to open a device in the HID enumeration list. This function is only away of devices that were present when the object was created. 
""" result = [] for dev in self.device_list: if vid and dev.vendor_id != vid: continue if pid and dev.product_id != pid: continue if serial and dev.serial_number != serial: continue if interface and dev.interface_number != interface: continue if path and dev.path != path: continue if manufacturer and dev.manufacturer_string != manufacturer: continue if product and dev.product_string != product: continue if release_number and dev.release_number != release_number: continue if usage and dev.usage != usage: continue if usage_page and dev.usage_page != usage_page: continue result.append(dev) return result def _hid_enumerate(vendor_id=0, product_id=0): """ Enumerates all the hid devices for VID:PID. Returns a list of `DeviceInfo`. If vid is 0, then match any vendor id. Similarly, if pid is 0, match any product id. If both are zero, enumerate all HID devices. """ start = hidapi.hid_enumerate(vendor_id, product_id) result = [] cur = ffi.new("struct hid_device_info*"); cur = start # Copy everything into python list while cur != ffi.NULL: result.append(Device(cur)) cur = cur.next # Free the C memory hidapi.hid_free_enumeration(start) return result # def hid_open(vendor_id, product_id, serial=None): # """ # """ # if serial == None: # serial = ffi.NULL # else: # if type(serial) == bytes or type(serial) == bytearray: # serial = serial.decode('utf-8') # serial = ffi.new("wchar_t[]", serial) # dev = hidapi.hid_open(vendor_id, product_id, serial) # if dev: # return Device(dev) # else: # None if __name__ == "__main__": # Examples from easyhid import Enumeration # Stores an enumertion of all the connected USB HID devices en = Enumeration() # return a list of devices based on the search parameters devices = en.find(manufacturer="Company", product="Widget", interface=3) # print a description of the devices found for dev in devices: print(dev.description()) # open a device dev.open() # write some bytes to the device dev.write(bytearray([0, 1, 2, 3])) # read some bytes print(dev.read()) 
# close a device dev.close()
2.0625
2
mqttVec.py
Hamlet3000/mqttVec
0
18049
#!/usr/bin/env python3 import anki_vector import paho.mqtt.client as mqtt import time ############################################################################### def main(): voltage = 0 batlevel = 0 charging = 0 docked = 0 status = "error" ltime = time.strftime("%d.%m.%Y %H:%M:%S") try: # Connect to Vector and get battery info with anki_vector.Robot(behavior_control_level=None, cache_animation_lists=False) as robot: battery_state = robot.get_battery_state() voltage = battery_state.battery_volts batlevel = battery_state.battery_level charging = battery_state.is_charging docked = battery_state.is_on_charger_platform status = get_status(robot) except: print("couldn't connect to Vector") # In the openHAB channel, use a jsonpath transform to get specific values like this: JSONPATH:$..voltage data = {} data['robots'] = [] data['robots'].append({ 'name': 'Vector Green', 'voltage': voltage, 'batlevel': batlevel, 'charging': charging, 'docked': docked, 'time': ltime, 'status': status }) # Configure and publish data to mqtt do_mqtt(data) ############################################################################### def get_status(robot): status = "error" if robot.status.are_motors_moving: status = "Vector is moving" if robot.status.are_wheels_moving: status = "Vector's wheels are moving" if robot.status.is_animating: status = "Vector is animating" if robot.status.is_being_held: status = "Vector is being held" if robot.status.is_button_pressed: status = "Vector's button was button pressed" if robot.status.is_carrying_block: status = "Vector is carrying his block" if robot.status.is_charging: status = "Vector is currently charging" if robot.status.is_cliff_detected: status = "Vector has detected a cliff" if robot.status.is_docking_to_marker: status = "Vector has found a marker and is docking to it" if robot.status.is_falling: status = "Vector is falling" if robot.status.is_head_in_pos: status = "Vector's head is in position" if robot.status.is_in_calm_power_mode: status = 
"Vector is in calm power mode" if robot.status.is_lift_in_pos: status = "Vector's arm is in position" if robot.status.is_on_charger: status = "Vector is on the charger" if robot.status.is_pathing: status = "Vector is traversing a path" if robot.status.is_picked_up: status = "Vector is picked up" if robot.status.is_robot_moving: status = "Vector is in motion" return status ############################################################################### def on_publish(client, userdata, mid): print("Message published to broker") ############################################################################### def do_mqtt(data): # define variables for MQTT MQTT_HOST = "192.168.0.7" MQTT_TOPIC = "Vector" MQTT_PORT = 1883 MQTT_KEEPALIVE_INTERVAL = 20 MQTT_USER = "YOUR_MQTT_USER" MQTT_PW = "<PASSWORD>" # Convert it to text? Not sure why I did this but it works. Yay, 1am programming. MQTT_MSG = str(data) # Initiate MQTT Client mqttc = mqtt.Client() # Set username and password for the Broker mqttc.username_pw_set(MQTT_USER, MQTT_PW) # Register publish callback function #mqttc.on_publish = on_publish # Connect with MQTT Broker mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL) # Publish message to MQTT Broker mqttc.publish(MQTT_TOPIC,MQTT_MSG) # Disconnect from MQTT_Broker mqttc.disconnect() ############################################################################### if __name__ == "__main__": main()
2.9375
3
examples/scanner_ibeacon_example.py
hbcho/beacontools1
0
18050
import time from beacontools import BeaconScanner, IBeaconFilter def callback(bt_addr, rssi, packet, additional_info): print("<%s, %d> %s %s" % (bt_addr, rssi, packet, additional_info)) # scan for all iBeacon advertisements from beacons with the specified uuid scanner = BeaconScanner(callback, device_filter=IBeaconFilter(uuid="e2c56db5-dffb-48d2-b060-d0f5a71096e0") ) scanner.start() time.sleep(10) scanner.stop()
2.9375
3
desafio61.py
rafarbop/Python
0
18051
# Desafio 61 Curso em Video Python # By Rafabr from estrutura_modelo import cabecalho, rodape cabecalho(61, "Termos de uma Progressão Aritmética - II") while True: try: p0 = float(input('Digite o Termo inicial da PA: ')) r = float(input('Digite a razão da PA: ')) except ValueError: print('Voçe digitou um valor indevido!\n') continue break n = 1 print() while (n <= 10): print(f'Termo {n}:'.ljust(10) + f'{p0 + (n-1)*r}') n += 1 rodape()
3.84375
4
setup.py
leandron/steinlib
4
18052
from setuptools import setup tests_require = [ 'cov-core', 'mock', 'nose2', ] setup(name='steinlib', version='0.1', description='Python bindings for Steinlib format.', url='http://github.com/leandron/steinlib', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=['steinlib'], tests_require=tests_require, test_suite='nose2.collector.collector', zip_safe=False)
1.109375
1
usdzconvert/usdStageWithFbx.py
summertriangle-dev/usdzconvert-docker
3
18053
from pxr import * import os, os.path import numpy import re import usdUtils import math import imp usdStageWithFbxLoaded = True try: imp.find_module('fbx') import fbx except ImportError: usdUtils.printError("Failed to import fbx module. Please install FBX Python bindings from http://www.autodesk.com/fbx and add path to FBX Python SDK to your PYTHONPATH") usdStageWithFbxLoaded = False class ConvertError(Exception): pass def printErrorAndExit(message): usdUtils.printError(message) raise ConvertError() def GfMatrix4dWithFbxMatrix(m): return Gf.Matrix4d( m[0][0], m[0][1], m[0][2], m[0][3], m[1][0], m[1][1], m[1][2], m[1][3], m[2][0], m[2][1], m[2][2], m[2][3], m[3][0], m[3][1], m[3][2], m[3][3]) def getFbxNodeTransforms(fbxNode): return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform()) def getFbxNodeGeometricTransform(fbxNode): # geometry transform is an additional transform for geometry # it is relative to the node transform # this transform is not distributing to the children nodes in scene graph translation = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot) rotation = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot) scale = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot) return fbx.FbxAMatrix(translation, rotation, scale) def convertUVTransformFromFBX(translation, scale, rotation): # from FBX to Blender scale[0] = 1.0 / scale[0] scale[1] = 1.0 / scale[1] rotation = -rotation # Blender: Tuv = T * R * S # USD: Tuv = S * R * T scaleMatrix = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], 1, 1)) inverseScaleMatrix = Gf.Matrix4d(Gf.Vec4d(1.0 / scale[0], 1.0 / scale[1], 1, 1)) rotationMatrix = Gf.Matrix4d( math.cos(rotation), math.sin(rotation), 0, 0, -math.sin(rotation), math.cos(rotation), 0, 0, 0, 0, 1, 0, 0, 0, 0, 1) inverseRotationMatrix = rotationMatrix.GetTranspose() translateMatrix = Gf.Matrix4d(1) translateMatrix.SetTranslate(Gf.Vec3d(translation[0], translation[1], 0)) # translate matrix from Blender to USD transform = scaleMatrix * 
rotationMatrix * translateMatrix * inverseRotationMatrix * inverseScaleMatrix translation3d = transform.ExtractTranslation() translation[0] = translation3d[0] translation[1] = translation3d[1] return translation, scale, math.degrees(rotation) class FbxNodeManager(usdUtils.NodeManager): def __init__(self, value=None): usdUtils.NodeManager.__init__(self) def overrideGetName(self, fbxNode): return usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1]) def overrideGetChildren(self, fbxNode): children = [] for childIdx in xrange(fbxNode.GetChildCount()): children.append(fbxNode.GetChild(childIdx)) return children def overrideGetLocalTransformGfMatrix4d(self, fbxNode): return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform()) def overrideGetWorldTransformGfMatrix4d(self, fbxNode): return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform()) def overrideGetParent(self, fbxNode): return fbxNode.GetParent() class AnimProperty: def __init__(self, fbxAnimLayer, fbxProperty, timeSpans): self.fbxAnimLayer = fbxAnimLayer self.fbxProperty = fbxProperty self.timeSpans = timeSpans class FbxConverter: def __init__(self, fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose): self.verbose = verbose self.legacyModifier = legacyModifier self.copyTextures = copyTextures self.searchPaths = searchPaths self.asset = usdUtils.Asset(usdPath) self.usdStage = None self.usdMaterials = {} self.nodeId = 0 self.nodePaths = {} self.fbxSkinToSkin = {} self.startAnimationTime = 0 self.stopAnimationTime = 0 self.skeletonByNode = {} # collect skinned mesh to construct later self.blendShapeByNode = {} # collect blend shapes to construct later self.copiedTextures = {} # avoid copying textures more then once self.extent = [[], []] self.fbxScene = None filenameFull = fbxPath.split('/')[-1] self.srcFolder = fbxPath[:len(fbxPath)-len(filenameFull)] filenameFull = usdPath.split('/')[-1] self.dstFolder = usdPath[:len(usdPath)-len(filenameFull)] self.loadFbxScene(fbxPath) 
self.fps = fbx.FbxTime.GetFrameRate(fbx.FbxTime.GetGlobalTimeMode()) self.asset.setFPS(self.fps) self.nodeManager = FbxNodeManager() self.skinning = usdUtils.Skinning(self.nodeManager) self.shapeBlending = usdUtils.ShapeBlending() def loadFbxScene(self, fbxPath): fbxManager = fbx.FbxManager.Create() if not fbxManager: printErrorAndExit("failed to create FBX manager object") self.fbxManager = fbxManager fbxIOSettings = fbx.FbxIOSettings.Create(fbxManager, fbx.IOSROOT) fbxManager.SetIOSettings(fbxIOSettings) fbxImporter = fbx.FbxImporter.Create(fbxManager, "") result = fbxImporter.Initialize(fbxPath, -1, fbxManager.GetIOSettings()) if not result: printErrorAndExit("failed to initialize FbxImporter object") if fbxImporter.IsFBX(): fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_MATERIAL, True) fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_TEXTURE, True) fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_EMBEDDED, True) fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_SHAPE, True) fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GOBO, True) fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_ANIMATION, True) fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GLOBAL_SETTINGS, True) self.fbxScene = fbx.FbxScene.Create(fbxManager, "") result = fbxImporter.Import(self.fbxScene) fbxImporter.Destroy() if not result: printErrorAndExit("failed to load FBX scene") def getTextureProperties(self, materialProperty): if materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId)) > 0: fbxFileTexture = materialProperty.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId), 0) texCoordSet = 'st' if fbxFileTexture.UVSet is not None: texCoordSet = str(fbxFileTexture.UVSet.Get()) if texCoordSet == '' or texCoordSet == 'default': texCoordSet = 'st' else: texCoordSet = usdUtils.makeValidIdentifier(texCoordSet) wrapS = usdUtils.WrapMode.repeat wrapT = usdUtils.WrapMode.repeat if fbxFileTexture.GetWrapModeU() == fbx.FbxTexture.eClamp: 
wrapS = usdUtils.WrapMode.clamp if fbxFileTexture.GetWrapModeV() == fbx.FbxTexture.eClamp: wrapT = usdUtils.WrapMode.clamp # texture transform mapTransform = None translation = [fbxFileTexture.GetTranslationU(), fbxFileTexture.GetTranslationV()] scale = [fbxFileTexture.GetScaleU(), fbxFileTexture.GetScaleV()] rotation = fbxFileTexture.GetRotationW() if (translation[0] != 0 or translation[1] != 0 or scale[0] != 1 or scale[1] != 1 or rotation != 0): (translation, scale, rotation) = convertUVTransformFromFBX(translation, scale, rotation) mapTransform = usdUtils.MapTransform(translation, scale, rotation) return fbxFileTexture.GetFileName(), texCoordSet, wrapS, wrapT, mapTransform elif materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxLayeredTexture.ClassId)) > 0: pass return '', 'st', usdUtils.WrapMode.repeat, usdUtils.WrapMode.repeat, None def processMaterialProperty(self, input, propertyName, property, factorProperty, channels, material, fbxMaterial): value = None factor = float(factorProperty.Get()) if factorProperty is not None else None if property is not None: if channels == 'rgb': value = [property.Get()[0], property.Get()[1], property.Get()[2]] else: if input == usdUtils.InputName.opacity: transparency = property.Get()[0] if factor is not None: transparency = transparency * factor factor = None value = 1.0 - transparency else: value = float(property.Get()[0]) srcTextureFilename = '' # source texture filename on drive textureFilename = '' # valid for USD materialProperty = fbxMaterial.FindProperty(propertyName) if materialProperty.IsValid(): srcTextureFilename, texCoordSet, wrapS, wrapT, mapTransform = self.getTextureProperties(materialProperty) srcTextureFilename = usdUtils.resolvePath(srcTextureFilename, self.srcFolder, self.searchPaths) textureFilename = usdUtils.makeValidPath(srcTextureFilename) if textureFilename != '' and (self.copyTextures or srcTextureFilename != textureFilename): if srcTextureFilename in self.copiedTextures: 
textureFilename = self.copiedTextures[srcTextureFilename] else: newTextureFilename = 'textures/' + os.path.basename(textureFilename) # do not rewrite the texture with same basename subfolderIdx = 0 while newTextureFilename in self.copiedTextures.values(): newTextureFilename = 'textures/' + str(subfolderIdx) + '/' + os.path.basename(textureFilename) subfolderIdx += 1 usdUtils.copy(srcTextureFilename, self.dstFolder + newTextureFilename, self.verbose) self.copiedTextures[srcTextureFilename] = newTextureFilename textureFilename = newTextureFilename if textureFilename != '': scale = None if factor is not None: if channels == 'rgb': scale = [factor, factor, factor] else: scale = factor material.inputs[input] = usdUtils.Map(channels, textureFilename, value, texCoordSet, wrapS, wrapT, scale, mapTransform) else: if value is not None: if factor is not None: if channels == 'rgb': material.inputs[input] = [value[0] * factor, value[1] * factor, value[2] * factor] else: material.inputs[input] = value * factor else: material.inputs[input] = value def processMaterials(self): for i in range(self.fbxScene.GetMaterialCount()): fbxMaterial = self.fbxScene.GetMaterial(i) material = usdUtils.Material(fbxMaterial.GetName().split(":")[-1]) normalMap = fbxMaterial.NormalMap if hasattr(fbxMaterial, 'NormalMap') else None self.processMaterialProperty(usdUtils.InputName.normal, fbx.FbxSurfaceMaterial.sNormalMap, normalMap, None, 'rgb', material, fbxMaterial) diffuse = fbxMaterial.Diffuse if hasattr(fbxMaterial, 'Diffuse') else None diffuseFactor = fbxMaterial.DiffuseFactor if hasattr(fbxMaterial, 'DiffuseFactor') else None self.processMaterialProperty(usdUtils.InputName.diffuseColor, fbx.FbxSurfaceMaterial.sDiffuse, diffuse, diffuseFactor, 'rgb', material, fbxMaterial) transparentColor = fbxMaterial.TransparentColor if hasattr(fbxMaterial, 'TransparentColor') else None transparencyFactor = fbxMaterial.TransparencyFactor if hasattr(fbxMaterial, 'TransparencyFactor') else None 
self.processMaterialProperty(usdUtils.InputName.opacity, fbx.FbxSurfaceMaterial.sTransparentColor, transparentColor, transparencyFactor, 'a', material, fbxMaterial) emissive = fbxMaterial.Emissive if hasattr(fbxMaterial, 'Emissive') else None emissiveFactor = fbxMaterial.EmissiveFactor if hasattr(fbxMaterial, 'EmissiveFactor') else None self.processMaterialProperty(usdUtils.InputName.emissiveColor, fbx.FbxSurfaceMaterial.sEmissive, emissive, emissiveFactor, 'rgb', material, fbxMaterial) ambient = fbxMaterial.Ambient if hasattr(fbxMaterial, 'Ambient') else None ambientFactor = fbxMaterial.AmbientFactor if hasattr(fbxMaterial, 'AmbientFactor') else None self.processMaterialProperty(usdUtils.InputName.occlusion, fbx.FbxSurfaceMaterial.sAmbient, ambient, ambientFactor, 'r', material, fbxMaterial) # 'metallic', 'roughness' ? usdMaterial = material.makeUsdMaterial(self.asset) if self.legacyModifier is not None: self.legacyModifier.opacityAndDiffuseOneTexture(material) self.usdMaterials[fbxMaterial.GetName()] = usdMaterial def prepareAnimations(self): animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId)) if animStacksCount < 1: if self.verbose: print 'No animation found' return fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), 0) timeSpan = fbxAnimStack.GetLocalTimeSpan() self.startAnimationTime = timeSpan.GetStart().GetSecondDouble() self.stopAnimationTime = timeSpan.GetStop().GetSecondDouble() self.asset.extentTime(self.startAnimationTime) self.asset.extentTime(self.stopAnimationTime) def processControlPoints(self, fbxMesh, usdMesh): points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()] extent = Gf.Range3f() for point in points: extent.UnionWith(point) usdMesh.CreatePointsAttr(points) usdMesh.CreateExtentAttr([Gf.Vec3f(extent.GetMin()), Gf.Vec3f(extent.GetMax())]) if not any(self.extent): self.extent[0] = extent.GetMin() self.extent[1] = extent.GetMax() 
else: for i in range(3): self.extent[0][i] = min(self.extent[0][i], extent.GetMin()[i]) self.extent[1][i] = max(self.extent[1][i], extent.GetMax()[i]) def getVec3fArrayWithLayerElements(self, elements, fbxLayerElements): elementsArray = fbxLayerElements.GetDirectArray() for i in xrange(elementsArray.GetCount()): element = elementsArray.GetAt(i) elements.append(Gf.Vec3f(element[0], element[1], element[2])) def getIndicesWithLayerElements(self, fbxMesh, fbxLayerElements): mappingMode = fbxLayerElements.GetMappingMode() referenceMode = fbxLayerElements.GetReferenceMode() indexToDirect = ( referenceMode == fbx.FbxLayerElement.eIndexToDirect or referenceMode == fbx.FbxLayerElement.eIndex) indices = [] if mappingMode == fbx.FbxLayerElement.eByControlPoint: if indexToDirect: for contorlPointIdx in xrange(fbxMesh.GetControlPointsCount()): indices.append(fbxLayerElements.GetIndexArray().GetAt(contorlPointIdx)) elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex: pointIdx = 0 for polygonIdx in xrange(fbxMesh.GetPolygonCount()): for vertexIdx in xrange(fbxMesh.GetPolygonSize(polygonIdx)): if indexToDirect: indices.append(fbxLayerElements.GetIndexArray().GetAt(pointIdx)) else: indices.append(pointIdx) pointIdx += 1 elif mappingMode == fbx.FbxLayerElement.eByPolygon: for polygonIdx in xrange(fbxMesh.GetPolygonCount()): if indexToDirect: indices.append(fbxLayerElements.GetIndexArray().GetAt(polygonIdx)) else: indices.append(polygonIdx) return indices def getInterpolationWithLayerElements(self, fbxLayerElements): mappingMode = fbxLayerElements.GetMappingMode() if mappingMode == fbx.FbxLayerElement.eByControlPoint: return UsdGeom.Tokens.vertex elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex: return UsdGeom.Tokens.faceVarying elif mappingMode == fbx.FbxLayerElement.eByPolygon: return UsdGeom.Tokens.uniform elif mappingMode == fbx.FbxLayerElement.eAllSame: return UsdGeom.Tokens.constant elif mappingMode == fbx.FbxLayerElement.eByEdge: usdUtils.printWarning("Mapping 
mode eByEdge for layer elements is not supported.") return '' def processNormals(self, fbxMesh, usdMesh, vertexIndices): for layerIdx in xrange(fbxMesh.GetLayerCount()): fbxLayerNormals = fbxMesh.GetLayer(layerIdx).GetNormals() if fbxLayerNormals is None: continue normals = [] self.getVec3fArrayWithLayerElements(normals, fbxLayerNormals) if not any(normals): continue indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerNormals) interpolation = self.getInterpolationWithLayerElements(fbxLayerNormals) normalPrimvar = usdMesh.CreatePrimvar('normals', Sdf.ValueTypeNames.Normal3fArray, interpolation) normalPrimvar.Set(normals) if len(indices) != 0: normalPrimvar.SetIndices(Vt.IntArray(indices)) break # normals can be in one layer only def processUVs(self, fbxMesh, usdMesh, vertexIndices): for layerIdx in xrange(fbxMesh.GetLayerCount()): fbxLayerUVs = fbxMesh.GetLayer(layerIdx).GetUVs() # get diffuse texture uv-s if fbxLayerUVs is None: continue uvs = [] uvArray = fbxLayerUVs.GetDirectArray() for i in xrange(uvArray.GetCount()): uv = uvArray.GetAt(i) uvs.append(Gf.Vec2f(uv[0], uv[1])) if not any(uvs): continue indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerUVs) interpolation = self.getInterpolationWithLayerElements(fbxLayerUVs) texCoordSet = 'st' uvSets = fbxMesh.GetLayer(layerIdx).GetUVSets() if len(uvSets) > 0: fbxLayerElementUV = fbxMesh.GetLayer(layerIdx).GetUVSets()[0] texCoordSet = str(fbxLayerElementUV.GetName()) if layerIdx == 0 or texCoordSet == '' or texCoordSet == 'default': texCoordSet = 'st' else: texCoordSet = usdUtils.makeValidIdentifier(texCoordSet) uvPrimvar = usdMesh.CreatePrimvar(texCoordSet, Sdf.ValueTypeNames.Float2Array, interpolation) uvPrimvar.Set(uvs) if len(indices) != 0: uvPrimvar.SetIndices(Vt.IntArray(indices)) def processVertexColors(self, fbxMesh, usdMesh, vertexIndices): for layerIdx in xrange(fbxMesh.GetLayerCount()): fbxLayerColors = fbxMesh.GetLayer(layerIdx).GetVertexColors() if fbxLayerColors is None: continue 
colors = [] colorArray = fbxLayerColors.GetDirectArray() for i in xrange(colorArray.GetCount()): fbxColor = colorArray.GetAt(i) colors.append(Gf.Vec3f(fbxColor.mRed, fbxColor.mGreen, fbxColor.mBlue)) if not any(colors): continue indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerColors) interpolation = self.getInterpolationWithLayerElements(fbxLayerColors) displayColorPrimvar = usdMesh.CreateDisplayColorPrimvar(interpolation) displayColorPrimvar.Set(colors) if len(indices) != 0: displayColorPrimvar.SetIndices(Vt.IntArray(indices)) break # vertex colors can be in one layer only def applySkinning(self, fbxNode, fbxSkin, usdMesh, indices): skin = self.fbxSkinToSkin[fbxSkin] skeleton = skin.skeleton maxPointIndex = 0 for clusterIdx in range(fbxSkin.GetClusterCount()): fbxCluster = fbxSkin.GetCluster(clusterIdx) for i in range(fbxCluster.GetControlPointIndicesCount()): pointIndex = fbxCluster.GetControlPointIndices()[i] if maxPointIndex < pointIndex: maxPointIndex = pointIndex vertexCount = maxPointIndex + 1 # should be equal to number of vertices: max(indices) + 1 jointIndicesPacked = [[] for i in range(vertexCount)] weightsPacked = [[] for i in range(vertexCount)] for clusterIdx in range(fbxSkin.GetClusterCount()): fbxCluster = fbxSkin.GetCluster(clusterIdx) for i in range(fbxCluster.GetControlPointIndicesCount()): pointIndex = fbxCluster.GetControlPointIndices()[i] jointIndicesPacked[pointIndex].append(skin.remapIndex(clusterIdx)) weightsPacked[pointIndex].append(float(fbxCluster.GetControlPointWeights()[i])) components = 0 for indicesPerVertex in jointIndicesPacked: if components < len(indicesPerVertex): components = len(indicesPerVertex) jointIndices = [0] * vertexCount * components weights = [float(0)] * vertexCount * components for i in range(vertexCount): indicesPerVertex = jointIndicesPacked[i] for j in range(len(indicesPerVertex)): jointIndices[i * components + j] = indicesPerVertex[j] weights[i * components + j] = weightsPacked[i][j] weights = 
Vt.FloatArray(weights) UsdSkel.NormalizeWeights(weights, components) usdSkelBinding = UsdSkel.BindingAPI(usdMesh) usdSkelBinding.CreateJointIndicesPrimvar(False, components).Set(jointIndices) usdSkelBinding.CreateJointWeightsPrimvar(False, components).Set(weights) bindTransformWasNotFound = True bindTransform = Gf.Matrix4d(1) for i in range(self.fbxScene.GetPoseCount()): fbxPose = self.fbxScene.GetPose(i) if fbxPose is None: continue nodeIndex = fbxPose.Find(fbxNode) if nodeIndex > -1 and (fbxPose.IsBindPose() or not fbxPose.IsLocalMatrix(nodeIndex)): bindTransform = GfMatrix4dWithFbxMatrix(fbxPose.GetMatrix(nodeIndex)) bindTransformWasNotFound = False break if bindTransformWasNotFound and fbxSkin.GetClusterCount() > 0: if self.verbose: usdUtils.printWarning("can't find a bind pose for mesh " + fbxNode.GetName() + ". Trying to calculate.") # FBX stores bind transform matrix for the skin in each cluster # get it from the first one fbxCluster = fbxSkin.GetCluster(0) fbxBindTransform = fbx.FbxAMatrix() fbxBindTransform = fbxCluster.GetTransformMatrix(fbxBindTransform) bindTransform = GfMatrix4dWithFbxMatrix(fbxBindTransform) bindTransform = GfMatrix4dWithFbxMatrix(getFbxNodeGeometricTransform(fbxNode)) * bindTransform usdSkelBinding.CreateGeomBindTransformAttr(bindTransform) usdSkelBinding.CreateSkeletonRel().AddTarget(skeleton.usdSkeleton.GetPath()) if self.legacyModifier is not None: self.legacyModifier.addSkelAnimToMesh(usdMesh, skeleton) def bindRigidDeformation(self, fbxNode, usdMesh, skeleton): bindTransform = GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform()) bindTransform = GfMatrix4dWithFbxMatrix(getFbxNodeGeometricTransform(fbxNode)) * bindTransform skeleton.bindRigidDeformation(fbxNode, usdMesh, GfMatrix4dWithFbxMatrix(bindTransform)) if self.legacyModifier is not None: self.legacyModifier.addSkelAnimToMesh(usdMesh, skeleton) def bindMaterials(self, fbxMesh, usdMesh): for layerIdx in xrange(fbxMesh.GetLayerCount()): fbxLayerMaterials = 
fbxMesh.GetLayer(layerIdx).GetMaterials() if not fbxLayerMaterials: continue # looks like there is a bug in FBX SDK: # GetDirectArray() does not work if .GetCount() has not been called materialsCount = fbxLayerMaterials.GetDirectArray().GetCount() if fbxLayerMaterials.GetIndexArray().GetCount() > 1 and fbxLayerMaterials.GetMappingMode() == fbx.FbxLayerElement.eByPolygon: # subsets subsets = [[] for i in range(materialsCount)] for polygonIdx in range(fbxLayerMaterials.GetIndexArray().GetCount()): materialIndex = fbxLayerMaterials.GetIndexArray().GetAt(polygonIdx) subsets[materialIndex].append(polygonIdx) bindingAPI = UsdShade.MaterialBindingAPI(usdMesh) for materialIndex in range(materialsCount): facesCount = len(subsets[materialIndex]) if facesCount > 0: fbxMaterial = fbxLayerMaterials.GetDirectArray().GetAt(materialIndex) materialName = usdUtils.makeValidIdentifier(fbxMaterial.GetName()) subsetName = materialName + '_subset' if self.verbose: print ' subset:', subsetName, 'faces:', facesCount usdSubset = UsdShade.MaterialBindingAPI.CreateMaterialBindSubset(bindingAPI, subsetName, Vt.IntArray(subsets[materialIndex])) usdMaterial = self.usdMaterials[fbxMaterial.GetName()] UsdShade.MaterialBindingAPI(usdSubset).Bind(usdMaterial) elif fbxLayerMaterials.GetIndexArray().GetCount() > 0: # one material for whole mesh fbxMaterial = fbxLayerMaterials.GetDirectArray().GetAt(0) if fbxMaterial is not None and fbxMaterial.GetName() in self.usdMaterials: usdMaterial = self.usdMaterials[fbxMaterial.GetName()] UsdShade.Material.Bind(usdMaterial, usdMesh.GetPrim()) def getFbxMesh(self, fbxNode): fbxNodeAttribute = fbxNode.GetNodeAttribute() if fbxNodeAttribute: fbxAttributeType = fbxNodeAttribute.GetAttributeType() if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType): return fbxNodeAttribute return None def getFbxSkin(self, fbxNode): fbxMesh = self.getFbxMesh(fbxNode) if fbxMesh is not None and 
fbxMesh.GetDeformerCount(fbx.FbxDeformer.eSkin) > 0: return fbxMesh.GetDeformer(0, fbx.FbxDeformer.eSkin) return None def getFbxBlenShape(self, fbxNode): fbxMesh = self.getFbxMesh(fbxNode) if fbxMesh is not None and fbxMesh.GetDeformerCount(fbx.FbxDeformer.eBlendShape) > 0: return fbxMesh.GetDeformer(0, fbx.FbxDeformer.eBlendShape) return None def processMesh(self, fbxNode, newPath, underSkeleton, indent): usdMesh = UsdGeom.Mesh.Define(self.usdStage, newPath) fbxMesh = fbxNode.GetNodeAttribute() if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType(): fbxMesh = fbxMesh.GetBaseMesh() else: usdMesh.CreateSubdivisionSchemeAttr(UsdGeom.Tokens.none) indices = [] faceVertexCounts = [] for polygonIdx in xrange(fbxMesh.GetPolygonCount()): polygonSize = fbxMesh.GetPolygonSize(polygonIdx) faceVertexCounts.append(polygonSize) for polygonVertexIdx in xrange(polygonSize): index = fbxMesh.GetPolygonVertex(polygonIdx, polygonVertexIdx) indices.append(index) usdMesh.CreateFaceVertexCountsAttr(faceVertexCounts) usdMesh.CreateFaceVertexIndicesAttr(indices) # positions, normals, texture coordinates self.processControlPoints(fbxMesh, usdMesh) self.processNormals(fbxMesh, usdMesh, indices) self.processUVs(fbxMesh, usdMesh, indices) self.processVertexColors(fbxMesh, usdMesh, indices) fbxSkin = self.getFbxSkin(fbxNode) if fbxSkin is not None: self.applySkinning(fbxNode, fbxSkin, usdMesh, indices) elif underSkeleton is not None: self.bindRigidDeformation(fbxNode, usdMesh, underSkeleton) if self.verbose: type = 'Mesh' if fbxSkin is not None: type = 'Skinned mesh' elif underSkeleton is not None: type = 'Rigid skinned mesh' print indent + type + ': ' + fbxNode.GetName() self.bindMaterials(fbxMesh, usdMesh) return usdMesh def addTranslateOpIfNotEmpty(self, prim, op, name = ''): if op != fbx.FbxVector4(0, 0, 0, 1): prim.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2])) def addInvertTranslateOpIfNotEmpty(self, prim, op, name = ''): if op != fbx.FbxVector4(0, 
0, 0, -1): prim.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, name, True) def addRotationOpIfNotEmpty(self, prim, op, name = '', idRotation = None): if idRotation is None: idRotation = fbx.FbxVector4(0, 0, 0, 1) if op != idRotation: prim.AddRotateXYZOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2])) def addScalingOpIfNotEmpty(self, prim, op, name = '', idScaling = None): if idScaling is None: idScaling = fbx.FbxVector4(1, 1, 1, 1) if op != idScaling: prim.AddScaleOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2])) def getXformOp(self, usdGeom, type): ops = usdGeom.GetOrderedXformOps() for op in ops: # find operation without suffix if op.GetOpType() == type and len(op.GetName().split(':')) == 2: return op op = None if type == UsdGeom.XformOp.TypeTranslate: op = usdGeom.AddTranslateOp() elif type == UsdGeom.XformOp.TypeRotateXYZ: op = usdGeom.AddRotateXYZOp() if type == UsdGeom.XformOp.TypeOrient: op = usdGeom.AddOrientOp() if type == UsdGeom.XformOp.TypeScale: op = usdGeom.AddScaleOp() if op is not None: opNames = [ "xformOp:translate", "xformOp:translate:rotationOffset", "xformOp:translate:rotationPivot", "xformOp:rotateXYZ:preRotation", "xformOp:rotateXYZ", "xformOp:rotateXYZ:postRotation", "!invert!xformOp:translate:rotationPivot", "xformOp:translate:scalingOffset", "xformOp:translate:scalingPivot", "xformOp:scale", "!invert!xformOp:translate:scalingPivot", ] ops = usdGeom.GetOrderedXformOps() newOps = [] for opName in opNames: checkInverse = False if opName[0:8] == '!invert!': opName = opName[8:] checkInverse = True for operation in ops: if operation.GetName() == opName and operation.IsInverseOp() == checkInverse: newOps.append(operation) break usdGeom.SetXformOpOrder(newOps) return op def setNodeTransforms(self, node, prim): t = fbx.FbxVector4(node.LclTranslation.Get()) ro = node.GetRotationOffset(fbx.FbxNode.eSourcePivot) rp = node.GetRotationPivot(fbx.FbxNode.eSourcePivot) preRotation = 
node.GetPreRotation(fbx.FbxNode.eSourcePivot) r = fbx.FbxVector4(node.LclRotation.Get()) postRotation = node.GetPostRotation(fbx.FbxNode.eSourcePivot) so = node.GetScalingOffset(fbx.FbxNode.eSourcePivot) sp = node.GetScalingPivot(fbx.FbxNode.eSourcePivot) s = fbx.FbxVector4(node.LclScaling.Get()) # set translation self.addTranslateOpIfNotEmpty(prim, t) # set rotation offset, pivot and pre-post rotation ops self.addTranslateOpIfNotEmpty(prim, ro, "rotationOffset") self.addTranslateOpIfNotEmpty(prim, rp, "rotationPivot") self.addRotationOpIfNotEmpty(prim, preRotation, "preRotation") self.addRotationOpIfNotEmpty(prim, r) self.addRotationOpIfNotEmpty(prim, postRotation, "postRotation") self.addInvertTranslateOpIfNotEmpty(prim, -rp, "rotationPivot") # set scaling offset & pivot self.addTranslateOpIfNotEmpty(prim, so, "scalingOffset") self.addTranslateOpIfNotEmpty(prim, sp, "scalingPivot") self.addScalingOpIfNotEmpty(prim, s) self.addInvertTranslateOpIfNotEmpty(prim, -rp, "scalingPivot") def hasGeometricTransform(self, fbxNode): if (fbx.FbxVector4(0, 0, 0, 1) != fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot) or fbx.FbxVector4(0, 0, 0, 1) != fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot) or fbx.FbxVector4(1, 1, 1, 1) != fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)): return True return False def setGeometricTransform(self, fbxNode, prim): gt = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot) gr = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot) gs = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot) self.addTranslateOpIfNotEmpty(prim, gt, "geometricTranslation") self.addRotationOpIfNotEmpty(prim, gr, "geometricRotation") self.addScalingOpIfNotEmpty(prim, gs, "geometricScaling") def processSkeletalAnimation(self, skeletonIdx): skeleton = self.skinning.skeletons[skeletonIdx] framesCount = int((self.stopAnimationTime - self.startAnimationTime) * self.fps + 0.5) + 1 startFrame = int(self.startAnimationTime * self.fps + 0.5) if 
framesCount == 1: if self.verbose: print ' no skeletal animation' return animationName = self.asset.getAnimationsPath() + '/' + 'SkelAnimation' if skeletonIdx > 0: animationName += '_' + str(skeletonIdx) if self.verbose: print 'Animation:', animationName usdSkelAnim = UsdSkel.Animation.Define(self.usdStage, animationName) translateAttr = usdSkelAnim.CreateTranslationsAttr() rotateAttr = usdSkelAnim.CreateRotationsAttr() scaleAttr = usdSkelAnim.CreateScalesAttr() jointPaths = [] for fbxNode in skeleton.joints: jointPaths.append(skeleton.jointPaths[fbxNode]) fbxAnimEvaluator = self.fbxScene.GetAnimationEvaluator() for frame in range(framesCount): time = frame / self.fps + self.startAnimationTime translations = [] rotations = [] scales = [] for fbxNode in skeleton.joints: fbxTime = fbx.FbxTime() fbxTime.SetSecondDouble(time) fbxMatrix = fbxAnimEvaluator.GetNodeLocalTransform(fbxNode, fbxTime) translation = fbxMatrix.GetT() q = fbxMatrix.GetQ() rotation = Gf.Quatf(float(q[3]), Gf.Vec3f(float(q[0]), float(q[1]), float(q[2]))) scale = fbxMatrix.GetS() translations.append([translation[0], translation[1], translation[2]]) rotations.append(rotation) scales.append([scale[0], scale[1], scale[2]]) translateAttr.Set(translations, Usd.TimeCode(frame + startFrame)) rotateAttr.Set(rotations, Usd.TimeCode(frame + startFrame)) scaleAttr.Set(scales, Usd.TimeCode(frame + startFrame)) usdSkelAnim.CreateJointsAttr(jointPaths) skeleton.setSkeletalAnimation(usdSkelAnim) def processNodeTransformAnimation(self, fbxNode, fbxProperty, fbxAnimCurveNode, usdGeom): fbxTimeSpan = fbx.FbxTimeSpan() fbxAnimCurveNode.GetAnimationInterval(fbxTimeSpan) startTime = fbxTimeSpan.GetStart().GetSecondDouble() stopTime = fbxTimeSpan.GetStop().GetSecondDouble() framesCount = int((stopTime - startTime) * self.fps + 0.5) + 1 if framesCount < 1: return startFrame = int(startTime * self.fps + 0.5) isTranslation = False isRotation = False isScale = False channelName = str(fbxProperty.GetName()).strip() if 
channelName == 'Lcl Translation': isTranslation = True elif channelName == 'Lcl Rotation': isRotation = True elif channelName == 'Lcl Scaling': isScale = True else: if self.verbose: print 'Warnig: animation channel"', channelName, '"is not supported.' fbxAnimEvaluator = self.fbxScene.GetAnimationEvaluator() # TODO: for linear curves use key frames only for frame in range(startFrame, startFrame + framesCount): time = frame / self.fps + startTime timeCode = self.asset.toTimeCode(time, True) fbxTime = fbx.FbxTime() fbxTime.SetSecondDouble(time) if isTranslation: op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeTranslate) v = fbxNode.EvaluateLocalTranslation(fbxTime) op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2]))) elif isRotation: op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeRotateXYZ) v = fbxNode.EvaluateLocalRotation(fbxTime) op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2]))) elif isScale: op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeScale) v = fbxNode.EvaluateLocalScaling(fbxTime) op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2]))) def findAnimationProperties(self, fbxObject): animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId)) if animStacksCount < 1: return [] animProperties = [] for animStackIdx in range(animStacksCount): fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), animStackIdx) for layerIdx in range(fbxAnimStack.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId))): fbxAnimLayer = fbxAnimStack.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), layerIdx) for curveNodeIdx in range(fbxAnimLayer.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId))): fbxAnimCurveNode = fbxAnimLayer.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId), curveNodeIdx) fbxTimeSpan = fbx.FbxTimeSpan() 
fbxAnimCurveNode.GetAnimationInterval(fbxTimeSpan) for propertyIdx in range(fbxAnimCurveNode.GetDstPropertyCount()): fbxProperty = fbxAnimCurveNode.GetDstProperty(propertyIdx) if fbxProperty.GetFbxObject() == fbxObject: animProperty = AnimProperty(fbxAnimLayer, fbxProperty, fbxTimeSpan) animProperties.append(animProperty) return animProperties def processNodeAnimations(self, fbxNode, usdGeom): animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId)) if animStacksCount < 1: return for animStackIdx in range(animStacksCount): fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), animStackIdx) for layerIdx in range(fbxAnimStack.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId))): fbxAnimLayer = fbxAnimStack.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), layerIdx) for curveNodeIdx in range(fbxAnimLayer.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId))): fbxAnimCurveNode = fbxAnimLayer.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId), curveNodeIdx) for propertyIdx in range(fbxAnimCurveNode.GetDstPropertyCount()): fbxProperty = fbxAnimCurveNode.GetDstProperty(propertyIdx) fbxObject = fbxProperty.GetFbxObject() if fbxObject == fbxNode: self.processNodeTransformAnimation(fbxNode, fbxProperty, fbxAnimCurveNode, usdGeom) def processNode(self, fbxNode, path, underSkeleton, indent): nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1]) newPath = path + '/' + nodeName if newPath in self.nodePaths: newPath = newPath + str(self.nodeId) self.nodeId = self.nodeId + 1 fbxAttributeType = fbx.FbxNodeAttribute.eNone fbxNodeAttribute = fbxNode.GetNodeAttribute() if fbxNodeAttribute: fbxAttributeType = fbxNodeAttribute.GetAttributeType() if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType: if fbxNodeAttribute.IsSkeletonRoot(): skeleton = self.skinning.findSkeletonByRoot(fbxNode) if skeleton is None: 
skeleton = self.skinning.findSkeletonByJoint(fbxNode) if skeleton is not None: skeleton.makeUsdSkeleton(self.usdStage, newPath, self.nodeManager) if self.verbose: print indent + "SkelRoot:", nodeName underSkeleton = skeleton if underSkeleton and self.getFbxMesh(fbxNode) is not None: self.skeletonByNode[fbxNode] = underSkeleton elif self.getFbxSkin(fbxNode) is not None: self.skeletonByNode[fbxNode] = None elif self.getFbxBlenShape(fbxNode) is not None: usdNode = self.prepareBlendShape(fbxNode, newPath) self.setNodeTransforms(fbxNode, usdNode) self.processNodeAnimations(fbxNode, usdNode) else: # if we have a geometric transformation we shouldn't propagate it to node's children usdNode = None hasGeometricTransform = self.hasGeometricTransform(fbxNode) if underSkeleton is None and hasGeometricTransform and underSkeleton is None: usdNode = UsdGeom.Xform.Define(self.usdStage, newPath) geometryPath = newPath + '/' + nodeName + '_geometry' else: geometryPath = newPath usdGeometry = None if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType): usdGeometry = self.processMesh(fbxNode, geometryPath, underSkeleton, indent) if underSkeleton is None: if usdGeometry is None: usdGeometry = UsdGeom.Xform.Define(self.usdStage, geometryPath) self.nodePaths[newPath] = newPath if hasGeometricTransform: self.setNodeTransforms(fbxNode, usdNode) self.setGeometricTransform(fbxNode, usdGeometry) self.processNodeAnimations(fbxNode, usdNode) else: self.setNodeTransforms(fbxNode, usdGeometry) self.processNodeAnimations(fbxNode, usdGeometry) # process child nodes recursively if underSkeleton is not None: newPath = path # keep meshes directly under SkelRoot scope for childIdx in xrange(fbxNode.GetChildCount()): self.processNode(fbxNode.GetChild(childIdx), newPath, underSkeleton, indent + ' ') def populateSkeletons(self, fbxNode): fbxNodeAttribute = fbxNode.GetNodeAttribute() if fbxNodeAttribute: fbxAttributeType = fbxNodeAttribute.GetAttributeType() 
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType: if fbxNodeAttribute.IsSkeletonRoot(): self.skinning.createSkeleton(fbxNode) for childIdx in xrange(fbxNode.GetChildCount()): self.populateSkeletons(fbxNode.GetChild(childIdx)) def findSkelRoot(self, fbxNode): fbxNodeAttribute = fbxNode.GetNodeAttribute() if fbxNodeAttribute: fbxAttributeType = fbxNodeAttribute.GetAttributeType() if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType: if fbxNodeAttribute.IsSkeletonRoot(): return fbxNode fbxParentNode = fbxNode.GetParent() if fbxParentNode is not None: return self.findSkelRoot(fbxParentNode) return None def populateSkins(self, fbxNode): fbxNodeAttribute = fbxNode.GetNodeAttribute() if fbxNodeAttribute: fbxAttributeType = fbxNodeAttribute.GetAttributeType() if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType): fbxMesh = fbxNode.GetNodeAttribute() for i in range(fbxMesh.GetDeformerCount(fbx.FbxDeformer.eSkin)): fbxSkin = fbxMesh.GetDeformer(i, fbx.FbxDeformer.eSkin) # try to find skeleton root (.eSkeleton) in parent nodes root = self.findSkelRoot(fbxSkin.GetCluster(0).GetLink()) if fbxSkin.GetClusterCount() > 0 else None skin = usdUtils.Skin(root) for clusterIdx in range(fbxSkin.GetClusterCount()): fbxCluster = fbxSkin.GetCluster(clusterIdx) fbxJointNode = fbxCluster.GetLink() skin.joints.append(fbxJointNode) linkWorldTransform = fbx.FbxAMatrix() linkWorldTransform = fbxCluster.GetTransformLinkMatrix(linkWorldTransform) skin.bindMatrices[fbxJointNode] = GfMatrix4dWithFbxMatrix(linkWorldTransform) self.skinning.skins.append(skin) self.fbxSkinToSkin[fbxSkin] = skin for childIdx in xrange(fbxNode.GetChildCount()): self.populateSkins(fbxNode.GetChild(childIdx)) def processSkinning(self): self.populateSkeletons(self.fbxScene.GetRootNode()) self.populateSkins(self.fbxScene.GetRootNode()) self.skinning.createSkeletonsFromSkins() if self.verbose: if len(self.skinning.skeletons) > 0: print " Found skeletons:", 
len(self.skinning.skeletons), "with", len(self.skinning.skins), "skin(s)" def processSkinnedMeshes(self): for fbxNode, skeleton in self.skeletonByNode.iteritems(): fbxSkin = self.getFbxSkin(fbxNode) if skeleton is None: if fbxSkin is None: continue skin = self.fbxSkinToSkin[fbxSkin] skeleton = skin.skeleton nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1]) newPath = skeleton.sdfPath + '/' + nodeName if newPath in self.nodePaths: newPath = newPath + str(self.nodeId) self.nodeId = self.nodeId + 1 self.nodePaths[newPath] = newPath self.processMesh(fbxNode, newPath, skeleton, '') def processSkeletalAnimations(self): for skeletonIdx in range(len(self.skinning.skeletons)): self.processSkeletalAnimation(skeletonIdx) def prepareBlendShape(self, fbxNode, path): fbxBlendShape = self.getFbxBlenShape(fbxNode) blendShape = self.shapeBlending.createBlendShape(0) self.blendShapeByNode[fbxNode] = blendShape return blendShape.makeUsdSkeleton(self.usdStage, path) def processBlendShapes(self): for fbxNode, blendShape in self.blendShapeByNode.iteritems(): nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1]) newPath = blendShape.sdfPath + '/' + nodeName if newPath in self.nodePaths: newPath = newPath + str(self.nodeId) self.nodeId = self.nodeId + 1 self.nodePaths[newPath] = newPath usdMesh = self.processMesh(fbxNode, newPath, None, '') fbxMesh = fbxNode.GetNodeAttribute() if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType(): fbxMesh = fbxMesh.GetBaseMesh() points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()] blendShapes = [] blendShapeTargets = [] index = 0; fbxBlendShape = self.getFbxBlenShape(fbxNode) for i in range(fbxBlendShape.GetBlendShapeChannelCount()): fbxBlendShapeChannel = fbxBlendShape.GetBlendShapeChannel(i) for j in range(fbxBlendShapeChannel.GetTargetShapeCount()): fbxShape = fbxBlendShapeChannel.GetTargetShape(j) blendShapeName = "blendShape" + str(index) index += 1 blendShapeTarget = newPath + 
"/" + blendShapeName blendShapeName = self.asset.makeUniqueBlendShapeName(blendShapeName, newPath) blendShapes.append(blendShapeName) blendShapeTargets.append(blendShapeTarget) usdBlendShape = UsdSkel.BlendShape.Define(self.usdStage, blendShapeTarget) if fbxShape.GetControlPointsCount(): offsets = [] pointIndices = [] for k in range(fbxShape.GetControlPointsCount()): point = fbxShape.GetControlPointAt(k) if points[k][0] - point[0] != 0 or points[k][1] - point[1] or points[k][2] - point[2]: offsets.append(Gf.Vec3f(point[0] - points[k][0], point[1] - points[k][1], point[2] - points[k][2])) pointIndices.append(k) usdBlendShape.CreateOffsetsAttr(offsets) usdBlendShape.CreatePointIndicesAttr(pointIndices) usdSkelBlendShapeBinding = UsdSkel.BindingAPI(usdMesh) usdSkelBlendShapeBinding.CreateBlendShapesAttr(blendShapes) usdSkelBlendShapeBinding.CreateBlendShapeTargetsRel().SetTargets(blendShapeTargets) UsdSkel.BindingAPI.Apply(usdMesh.GetPrim()); blendShape.addBlendShapeList(blendShapes) def processBlendShapeAnimations(self): framesCount = int((self.stopAnimationTime - self.startAnimationTime) * self.fps + 0.5) + 1 startFrame = int(self.startAnimationTime * self.fps + 0.5) if framesCount == 1: return blendShapeIdx = 0 for fbxNode, blendShape in self.blendShapeByNode.iteritems(): fbxBlendShape = self.getFbxBlenShape(fbxNode) animationName = self.asset.getAnimationsPath() + '/' + 'BlenShapeAnim' if blendShapeIdx > 0: animationName += '_' + str(blendShapeIdx) if self.verbose: print 'Animation:', animationName blendShapeIdx += 1 usdSkelAnim = UsdSkel.Animation.Define(self.usdStage, animationName) attr = usdSkelAnim.CreateBlendShapeWeightsAttr() for frame in range(framesCount): time = frame / self.fps + self.startAnimationTime values = [] for i in range(fbxBlendShape.GetBlendShapeChannelCount()): fbxBlendShapeChannel = fbxBlendShape.GetBlendShapeChannel(i) animProperties = self.findAnimationProperties(fbxBlendShapeChannel) for animProperty in animProperties: #channelName = 
str(fbxProperty.GetName()).strip() fbxMesh = fbxNode.GetNodeAttribute() if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType(): fbxMesh = fbxMesh.GetBaseMesh() fbxTime = fbx.FbxTime() fbxTime.SetSecondDouble(time) fbxAnimCurve = animProperty.fbxProperty.GetCurve(animProperty.fbxAnimLayer) values.append(fbxAnimCurve.Evaluate(fbxTime)[0] / 100.0) # in percent attr.Set(time = Usd.TimeCode(frame + startFrame), value = values) blendShape.setSkeletalAnimation(usdSkelAnim) self.shapeBlending.flush() def makeUsdStage(self): self.usdStage = self.asset.makeUsdStage() # axis system for USD should be Y-up, odd-forward, and right-handed sceneAxisSystem = self.fbxScene.GetGlobalSettings().GetAxisSystem() axisSystem = fbx.FbxAxisSystem(fbx.FbxAxisSystem.EUpVector(fbx.FbxAxisSystem.eYAxis), fbx.FbxAxisSystem.EFrontVector(fbx.FbxAxisSystem.eParityOdd), fbx.FbxAxisSystem.ECoordSystem(fbx.FbxAxisSystem.eRightHanded)) if sceneAxisSystem != axisSystem: if self.verbose: print(" converting to Y-up, odd-forward, and right-handed axis system") axisSystem.ConvertScene(self.fbxScene) systemUnit = self.fbxScene.GetGlobalSettings().GetSystemUnit() if systemUnit != fbx.FbxSystemUnit.cm: # cm is default for USD and FBX fbxMetersPerUnit = 0.01 metersPerUnit = systemUnit.GetScaleFactor() * fbxMetersPerUnit if self.legacyModifier is not None and self.legacyModifier.getMetersPerUnit() == 0: self.legacyModifier.setMetersPerUnit(metersPerUnit) else: self.usdStage.SetMetadata("metersPerUnit", metersPerUnit) self.processMaterials() self.processSkinning() self.prepareAnimations() self.processNode(self.fbxScene.GetRootNode(), self.asset.getGeomPath(), None, '') self.processSkeletalAnimations() self.processSkinnedMeshes() self.processBlendShapes() self.processBlendShapeAnimations() self.asset.finalize() return self.usdStage def usdStageWithFbx(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose): if usdStageWithFbxLoaded == False: return None try: fbxConverter = 
FbxConverter(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose) return fbxConverter.makeUsdStage() except ConvertError: return None except: raise return None
2.21875
2
modules/ghautoknit/EmbeddedConstraint.py
fstwn/ghautokn
2
18054
# PYTHON STANDARD LIBRARY IMPORTS ----------------------------------------------
from __future__ import absolute_import
from __future__ import division

# LOCAL MODULE IMPORTS ---------------------------------------------------------
from ghautoknit.StoredConstraint import StoredConstraint

# ALL LIST ---------------------------------------------------------------------
__all__ = [
    "EmbeddedConstraint"
]

# ACTUAL CLASS -----------------------------------------------------------------
class EmbeddedConstraint(object):
    """
    Class for representing an autoknit constraint in relation to the model.
    The chain is only stored as vertex indices.
    """

    def __init__(self, chain, value, radius):
        """Create a new autoknit Constraint.

        Parameters
        ----------
        chain : list of int
            Vertex indices describing the constraint chain.
        value : float
            Time value of the constraint.
        radius : float
            Radius of the constraint.
        """
        self._set_chain(chain)
        self._set_value(value)
        self._set_radius(radius)

    def ToString(self):
        """Return a readable description (RhinoCommon display convention)."""
        name = "Autoknit EmbeddedConstraint"
        data = "({}, {}, {})".format(self.Chain, self.Value, self.Radius)
        return name + data

    # BASE PROPERTIES ----------------------------------------------------------

    # CHAIN PROPERTY -----------------------------------------------------------

    def _get_chain(self):
        return self._chain

    def _set_chain(self, chain):
        # isinstance() replaces the original `type(chain) != list` check.
        if not isinstance(chain, list):
            raise RuntimeError("Expected list of vertex indices as chain!")
        try:
            # Build a new list instead of mutating the caller's list in
            # place (the original wrote converted ints back into `chain`).
            chain = [int(item) for item in chain]
        except (TypeError, ValueError):
            raise RuntimeError("Some of the indices in the given chain " + \
                               "failed to convert to integers!")
        self._chain = chain

    Chain = property(_get_chain, _set_chain, None,
                     "The chain of points of the constraint.")

    # TIME VALUE PROPERTY ------------------------------------------------------

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        try:
            value = float(value)
        # `except X as e` replaces the Python-2-only `except X, e` syntax;
        # it works on IronPython 2.7 and keeps the file Python-3 compatible.
        except Exception as e:
            raise RuntimeError("Failed to set time value for constraint " + \
                               "{} // {}".format(str(self), e))
        self._value = value

    Value = property(_get_value, _set_value, None,
                     "The time value of the constraint.")

    # RADIUS PROPERTY ----------------------------------------------------------

    def _get_radius(self):
        return self._radius

    def _set_radius(self, radius):
        try:
            radius = float(radius)
        except Exception as e:
            raise RuntimeError("Failed to set radius for constraint " + \
                               "{} // {}".format(str(self), e))
        self._radius = radius

    Radius = property(_get_radius, _set_radius, None,
                      "The radius of the constraint.")

    # CONVERT CONSTRAINT FOR STORAGE -------------------------------------------

    def _get_storable(self):
        # Storable form: (chain length, time value, radius).
        count = len(self.Chain)
        storable = (count, self.Value, self.Radius)
        return storable

    Storable = property(_get_storable, None, None,
                        "A storable version of this constraint.")

# MAIN -------------------------------------------------------------------------
if __name__ == '__main__':
    pass
2.5
2
vr/server/tests/test_build.py
isabella232/vr.server
0
18055
import tempfile

import pytest
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from django.core.files import File

from vr.server import models
from vr.server.tests import randurl
from vr.common.utils import randchars

# BUG FIX: the original assigned ``pytestmark`` twice; the second
# assignment silently replaced the first, so the 'postgresql' fixture
# mark was lost.  A list applies both marks to every test in the module.
pytestmark = [
    pytest.mark.usefixtures('postgresql'),
    pytest.mark.usefixtures('gridfs'),
]


def _make_build(status):
    """Create and save a Build (with a fresh App and file) with `status`."""
    app = models.App(name=randchars(), repo_url=randurl(), repo_type='hg')
    app.save()
    with somefile() as f:
        build = models.Build(
            app=app,
            tag='blah',
            start_time=timezone.now() - relativedelta(minutes=2),
            end_time=timezone.now() - relativedelta(minutes=1),
            file=File(f),
            status=status,
        )
        build.save()
    return build


def test_build_usable(gridfs):
    # A completed build whose status is 'success' is usable.
    assert _make_build('success').is_usable() is True


def test_build_unusable_status(gridfs):
    # A build with an empty status must not be usable.
    assert _make_build('').is_usable() is False


class somefile():
    """Context manager yielding a throwaway temporary file."""

    def __enter__(self):
        self.file = tempfile.NamedTemporaryFile()
        return self.file

    def __exit__(self, exc_type, exc_value, tb):
        # Closing a NamedTemporaryFile also deletes it from disk.
        self.file.close()
2.203125
2
apps/api/v1/pagination.py
asmuratbek/oobamarket
0
18056
# Pagination policies for the v1 API, built on DRF's page-number style.
# Each class sets a default page size, lets clients override it with the
# ``page_size`` query parameter, and caps the override at ``max_page_size``.
# NOTE(review): LimitOffsetPagination is imported but unused here --
# confirm nothing else relies on re-importing it from this module.
from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination


class CategoryLimitPagination(PageNumberPagination):
    """Pagination for category listings: 20 per page, max 40."""
    page_size = 20
    page_size_query_param = 'page_size'
    max_page_size = 40


class ProductLimitPagination(PageNumberPagination):
    """Pagination for product listings: 20 per page, max 40."""
    page_size = 20
    page_size_query_param = 'page_size'
    max_page_size = 40


class ShopLimitPagination(PageNumberPagination):
    """Pagination for shop listings: 21 per page (3-column grid), max 42."""
    page_size = 21
    page_size_query_param = 'page_size'
    max_page_size = 42


class ShopProductsLimitPagination(PageNumberPagination):
    """Pagination for products within a shop: 24 per page, max 42."""
    page_size = 24
    page_size_query_param = 'page_size'
    max_page_size = 42
2.375
2
GraphOfDocs_Representation/graph_algos.py
imis-lab/book-chapter
0
18057
import time
import json
import traceback
import numpy as np
from statistics import mean
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
# NOTE(review): time, json and mean appear unused in this module --
# confirm before removing.

class GraphAlgos:
    """
    Wrapper class which handles the Neo4j Graph Data Science algorithms
    more efficiently, by abstracting the repeating Cypher-query code.

    Each instance builds an anonymous-graph projection string in
    ``__init__``; the algorithm methods splice it into a ``CALL gds.*``
    query and run it through the shared ``database`` handle.
    """
    database = None # Static variable shared across objects.

    def __init__(self, database, start, relationship, end = None, orientation = 'NATURAL', rel_weight = None):
        # Initialize the static variable and class member.
        if GraphAlgos.database is None:
            GraphAlgos.database = database
        # Initialize the optional parameter.
        end = end if end is not None else start
        # Construct the projection of the anonymous graph.
        # Doubled braces ({{ / }}) are f-string escapes for literal braces.
        self.graph_projection = (
            f'{{nodeProjection: ["{start}", "{end}"], '
            'relationshipProjection: {'
            f'{relationship}: {{'
            f'type: "{relationship}", '
            f'orientation: "{orientation}"'
        )
        # If the relationship weight property exists, then set it.
        if rel_weight is not None:
            self.graph_projection += f', properties: "{rel_weight}"'
        # Add two right brackets to complete the query.
        self.graph_projection += '}}'

    def pagerank(self, write_property, max_iterations = 20, damping_factor = 0.85):
        """Run PageRank and write scores to `write_property` on each node."""
        setup = (f'{self.graph_projection}, '
            f'writeProperty: "{write_property}", '
            f'maxIterations: {max_iterations}, '
            f'dampingFactor: {damping_factor}}}'
        )
        GraphAlgos.database.execute(f'CALL gds.pageRank.write({setup})', 'w')

    def nodeSimilarity(self, write_property, write_relationship, cutoff = 0.5, top_k = 10):
        """Run node similarity; writes scores and similarity relationships."""
        setup = (f'{self.graph_projection}, '
            f'writeProperty: "{write_property}", '
            f'writeRelationshipType: "{write_relationship}", '
            f'similarityCutoff: {cutoff}, '
            f'topK: {top_k}}}'
        )
        GraphAlgos.database.execute(f'CALL gds.nodeSimilarity.write({setup})', 'w')

    def louvain(self, write_property, max_levels = 10, max_iterations = 10):
        """Run Louvain community detection; writes community ids."""
        setup = (f'{self.graph_projection}, '
            f'writeProperty: "{write_property}", '
            f'maxLevels: {max_levels}, '
            f'maxIterations: {max_iterations}}}'
        )
        GraphAlgos.database.execute(f'CALL gds.louvain.write({setup})', 'w')

    def node2vec(self, write_property, embedding_dim = 100, iterations = 1,
                 walk_length = 80, walks_per_node = 10, window_size = 10,
                 walk_buffer_size = 1000):
        """Run node2vec; writes embedding vectors to `write_property`."""
        setup = (f'{self.graph_projection}, '
            f'writeProperty: "{write_property}", '
            f'embeddingDimension: {embedding_dim}, '
            f'iterations: {iterations}, '
            f'walkLength: {walk_length}, '
            f'walksPerNode: {walks_per_node}, '
            f'windowSize: {window_size}, '
            f'walkBufferSize: {walk_buffer_size}}}'
        )
        GraphAlgos.database.execute(f'CALL gds.alpha.node2vec.write({setup})', 'w')

    def graphSage(self, write_property, rel_weight = None, embedding_dim = 64,
                  epochs = 1, max_iterations = 10, aggregator = 'mean',
                  activation_function = 'sigmoid'):
        """Train a GraphSAGE model and write its embeddings."""
        # The community edition of the Neo4j Graph Data Science Library allows only one model to be stored in the database.
        model_exists = GraphAlgos.database.execute('CALL gds.beta.model.exists("graphSage") YIELD exists', 'r')[0][0]
        if model_exists: # then drop the model from the database.
            GraphAlgos.database.execute('CALL gds.beta.model.drop("graphSage")', 'r')
        train_setup = (f'{self.graph_projection}, '
            f'embeddingDimension: {embedding_dim}, '
            f'epochs: {epochs}, '
            f'modelName: "graphSage", '
            f'maxIterations: {max_iterations}, '
            f'aggregator: "{aggregator}", '
            f'activationFunction: "{activation_function}", '
            'degreeAsProperty: True'
        )
        # If the relationship weight property exists, then set it.
        if rel_weight is not None:
            train_setup += f', relationshipWeightProperty: "{rel_weight}"'
        # Add a right bracket to complete the query.
        train_setup += '}'
        write_setup = (f'{self.graph_projection}, '
            f'writeProperty: "{write_property}", '
            f'modelName: "graphSage"}}'
        )
        GraphAlgos.database.execute(f'CALL gds.beta.graphSage.train({train_setup})', 'w')
        GraphAlgos.database.execute(f'CALL gds.beta.graphSage.write({write_setup})', 'w')

    def fastRP(self, write_property, rel_weight = None, embedding_dim = 100, iterations = 10):
        """Run FastRP; writes embedding vectors to `write_property`."""
        # Construct the iteration weights vector, its first element is 0.0 and the rest are 1.0.
        # The length of the vector determines the amount of iterations by the algorithm.
        iteration_weights = [0.0] + [1.0] * (iterations - 1)
        setup = (f'{self.graph_projection}, '
            f'writeProperty: "{write_property}", '
            f'embeddingDimension: {embedding_dim}, '
            f'iterationWeights: {iteration_weights}'
        )
        # If the relationship weight property exists, then set it.
        if rel_weight is not None:
            setup += f', relationshipWeightProperty: "{rel_weight}"'
        # Add a right bracket to complete the query.
        setup += '}'
        GraphAlgos.database.execute(f'CALL gds.fastRP.write({setup})', 'w')

    @staticmethod
    def get_embeddings(write_property):
        """Return (embedding, assignee) rows for issues that carry the
        `write_property` embedding."""
        query = (
            'MATCH (p:Person)-[:is_assigned_to]->(i:Issue) '
            f'WHERE EXISTS(i.{write_property}) '
            f'RETURN i.{write_property}, p.uname AS assignee'
        )
        return GraphAlgos.database.execute(query, 'r')

    @staticmethod
    def write_word_embeddings_to_csv(write_property, filepath):
        """Dump every Word node's embedding to a CSV file at `filepath`."""
        query = (
            f'MATCH (w:Word) WHERE EXISTS(w.{write_property}) '
            f'RETURN w.key, w.{write_property}'
        )
        with open(filepath, 'w', encoding = 'utf-8-sig', errors = 'ignore') as file:
            file.write('idx,word,embedding\n')
            for i, (word, embedding) in enumerate(GraphAlgos.database.execute(query, 'r')):
                file.write(f'{i},{word},"{embedding}"\n')

    @staticmethod
    def train_classifier(embeddings):
        """Train/evaluate a logistic-regression assignee classifier and
        print its classification report."""
        # Unpack the embeddings and the assignees in X and Y separately.
        X, y = map(list, zip(*embeddings))
        # Transform y using the Label Encoder.
        y = preprocessing.LabelEncoder().fit_transform(y)
        # Split our dataset into train and test.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 42)
        # Construct the classifier.
        LR = LogisticRegression(random_state = 0, multi_class = 'multinomial')
        # Train the classifier.
        LR.fit(X_train, y_train)
        # Predict the values.
        y_pred = LR.predict(X_test)
        # Print the classification report.
        print(classification_report(y_test, y_pred, labels = np.unique(y_pred)))

    # These methods enable the use of this class in a with statement.
    def __enter__(self):
        return self

    # Automatic cleanup of the created graph of this class.
    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            traceback.print_exception(exc_type, exc_value, tb)
2.671875
3
Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py
rtobar/askapsoft
1
18058
# regression tests with gridders taking w-term into account
# some fixed parameters are given in wtermtest_template.in

from synthprogrunner import *

def analyseResult(spr, checkWeights=True):
    '''
       spr - synthesis program runner (to run imageStats)

       throws exceptions if something is wrong, otherwise just returns
    '''
    # NOTE(review): math, sinProjection, getDistance and
    # SynthesisProgramRunner come from the star-import above -- confirm
    # against synthprogrunner.
    # Source offset in degrees (0.006 rad converted).
    src_offset = 0.006/math.pi*180.
    psf_peak=[-172.5,-45]
    # Expected source position: psf centre shifted by the simulated offset.
    true_peak=sinProjection(psf_peak,src_offset,src_offset)
    stats = spr.imageStats('image.field1.restored')
    print "Statistics for restored image: ",stats
    disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
    # 8 arcsec is one image cell; larger offsets mean a mispositioned source.
    if disterr > 8:
       raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    if abs(stats['peak']-1.)>0.1:
       raise RuntimeError, "Peak flux in the image is notably different from 1 Jy, F=%f" % stats['peak']
    stats = spr.imageStats('image.field1')
    print "Statistics for modelimage: ",stats
    disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
    if disterr > 8:
       raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    stats = spr.imageStats('psf.field1')
    print "Statistics for psf image: ",stats
    # The PSF peak should sit at the (fixed) pointing centre.
    disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
    if disterr > 8:
       raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    stats = spr.imageStats('psf.image.field1')
    print "Statistics for preconditioned psf image: ",stats
    disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
    if disterr > 8:
       raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    # Preconditioned PSF is normalised, so the tolerance is tighter here.
    if abs(stats['peak']-1.)>0.01:
       raise RuntimeError, "Peak flux in the preconditioned psf image is notably different from 1.0, F=%f" % stats['peak']
    if checkWeights:
       stats = spr.imageStats('weights.field1')
       print "Statistics for weight image: ",stats
       # peak == rms == median implies a flat (constant) weight image.
       if abs(stats['rms']-stats['peak'])>0.1 or abs(stats['rms']-stats['median'])>0.1 or abs(stats['peak']-stats['median'])>0.1:
          raise RuntimeError, "Weight image is expected to be constant for WProject and WStack gridders"
    stats = spr.imageStats('residual.field1')
    print "Statistics for residual image: ",stats
    if stats['rms']>0.01 or abs(stats['median'])>0.0001:
       raise RuntimeError, "Residual image has too high rms or median. Please verify"

# Simulate the dataset once, then image it with each gridder under test.
spr = SynthesisProgramRunner(template_parset = 'wtermtest_template.in')
spr.runSimulator()

# Case 1: WProject gridder.
spr.addToParset("Cimager.gridder = WProject")
spr.runImager()
analyseResult(spr)

# Case 2: WStack gridder (parset reset between runs).
spr.initParset()
spr.addToParset("Cimager.gridder = WStack")
spr.runImager()
analyseResult(spr)

# Case 3: WProject with snapshot imaging; weight image is not expected to
# be constant here, hence checkWeights=False.
spr.initParset()
spr.addToParset("Cimager.gridder = WProject")
spr.addToParset("Cimager.gridder.snapshotimaging = true")
spr.addToParset("Cimager.gridder.snapshotimaging.wtolerance = 500")
spr.runImager()
analyseResult(spr,False)
2.046875
2
manage/fuzzytranslation.py
Acidburn0zzz/browser-update
2
18059
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 12 14:21:31 2016

@author: TH

Notebook-style maintenance script (Spyder "#%%" cells) for renaming
gettext msgids across the project's .po/.mo files after source-string
changes, plus cells for extracting strings and uploading to Crowdin.
Cells are meant to be run interactively and share the ``mappings``
global built by the preceding "pairs" cell.
"""
#%%
import polib
#%%
# The ``pairs`` block lists old/new string pairs: each pair is two
# consecutive lines (old msgid, then its replacement), pairs separated
# by a blank line.
#old (translated) string
#new renamed string
pairs="""
An initiative by web designers to inform users about browser-updates
An initiative by websites to inform users to update their web browser

If you are on a computer that is maintained by an admin and you cannot install a new browser, ask your admin about it.
Ask your admin to update your browser if you cannot install updates yourself.

blaasdasdfsdaf
faselsdfsadf""";
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
# Apply the mappings to the German update.po only (one-off test run).
po = polib.pofile('lang/de_DE/LC_MESSAGES/update.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
    #print(entry.msgid)
    if entry.msgid in mappings:
        print("replacing", entry.msgid[:10], "with",mappings[entry.msgid][:10])
        entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/de_DE/LC_MESSAGES/update.mo')
#%%
pairs="""aaa
bbb

Subtle
Unobtrusive

bla
fasel"""
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
po = polib.pofile('lang/de_DE/LC_MESSAGES/site.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
    #print(entry.msgid)
    if entry.msgid in mappings:
        print("replacing", entry.msgid[:10], "with",mappings[entry.msgid][:10])
        entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/de_DE/LC_MESSAGES/site.mo')
#%%
# Dump the template catalogue for inspection.
pot = polib.pofile('lang/update.pot')
for entry in pot:
    print (entry.msgid, entry.msgstr)
#%%
#%% display old translations
po = polib.pofile('lang/de_DE/LC_MESSAGES/update.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
    print(entry.msgid)
#%%
#%% getting files
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
# Keep only the locale code, e.g. 'de_DE' from 'lang/de_DE/LC_MESSAGES/'.
paths=[p[5:10] for p in paths]
paths
#%% updating all site.po
# NOTE(review): these loops reuse whichever ``mappings`` cell ran last.
for p in paths:
    print("updating %s"%p)
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/site.po'%p)
    except OSError:
        print("no file found")
        continue
    valid_entries = [e for e in po if not e.obsolete]
    for entry in valid_entries:
        #print(entry.msgid)
        if entry.msgid in mappings:
            print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
            entry.msgid=mappings[entry.msgid]
    po.save()
    po.save_as_mofile('lang/%s/LC_MESSAGES/site.mo'%p)
#%% updating all update.po
for p in paths:
    print("updating %s"%p)
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)
    except OSError:
        print("no file found")
        continue
    valid_entries = [e for e in po if not e.obsolete]
    for entry in valid_entries:
        #print(entry.msgid)
        if entry.msgid in mappings:
            print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
            entry.msgid=mappings[entry.msgid]
    po.save()
    po.save_as_mofile('lang/%s/LC_MESSAGES/update.mo'%p)
#%%
pairs="""aaa
bbb

Optionally include up to two placeholders "%s" which will be replaced with the browser version and contents of the link tag. Example: "Your browser (%s) is old. Please &lt;a%s&gtupdate&lt;/a&gt;"
Optionally include up to two placeholders "%s" which will be replaced with the browser version and contents of the link tag. Example: "Your browser (%s) is old. 
Please &lt;a%s&gt;update&lt;/a&gt;"

bla
fasel"""
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
paths
#%% updating all site.po
for p in paths:
    print("customize %s"%p)
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/customize.po'%p)
    except OSError:
        print("no file found")
        continue
    valid_entries = [e for e in po if not e.obsolete]
    for entry in valid_entries:
        #print(entry.msgid)
        if entry.msgid in mappings:
            print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
            entry.msgid=mappings[entry.msgid]
    po.save()
    po.save_as_mofile('lang/%s/LC_MESSAGES/customize.mo'%p)
#%% extract strings
import subprocess
subprocess.call(['xgettext', "header.php", "footer.php", "update-browser.php", "--keyword=T_gettext", "--keyword=T_", "--keyword=T_ngettext:1,2", "--from-code=utf-8", "--package-name=browser-update-update", "--language=PHP", "--output=lang/update.pot"])
#%% extract site strings
import subprocess
subprocess.call(['xgettext', "blog.php", "stat.php", "index.php", "contact.php", "update.testing.php", "--keyword=T_gettext", "--keyword=T_", "--keyword=T_ngettext:1,2", "--from-code=utf-8", "--package-name=browser-update-site", "--language=PHP", "--output=lang/site.pot"])
#%% extract customize strings
import subprocess
subprocess.call(['xgettext', "customize.php", "--keyword=T_gettext", "--keyword=T_", "--keyword=T_ngettext:1,2", "--from-code=utf-8", "--package-name=browser-update-customize", "--language=PHP", "--output=lang/customize.pot"])
#%% upload new sources for translations
import subprocess
subprocess.call(['crowdin-cli-py', 'upload', 'sources'])
#subprocess.call(['java', '-jar', 'manage\crowdin-cli.jar', 'upload', 'sources','--config','manage\crowdin.yaml'])
#subprocess.call(['java', '-jar', 'manage\crowdin-cli.jar', 'upload', 'sources'])
2
scripts/test_template.py
1466899531/auto_api_test
16
18060
# -*- coding:utf-8 -*-
"""
@File    : test_template
@Author  : Chen
@Contact : <EMAIL>
@Date    : 2021/1/20 20:09
@Desc    : Template test class exercising the add/upd/get/del API helpers.
"""
# imports
import pytest
import requests
from time import sleep
from api.template_api import TemplateAPI
from tools.get_log import GetLog
from tools.read_file import read_json
import allure

# module-level logger
log = GetLog.get_log()


@allure.feature('测试类模板')
# BUG FIX: the original used ``@pytest.skip("...")``, which *calls*
# pytest.skip() while the module is being imported -- pytest forbids
# calling skip outside of a test, so collection errors out instead of
# skipping the class.  ``pytest.mark.skip`` is the decorator form that
# skips the whole class as intended.
@pytest.mark.skip(reason="参考模板, 不执行")
class TestTemplate:
    """Reference template: one test per CRUD endpoint, data-driven via
    read_json(); not meant to run as-is."""

    # Shared requests.Session for every test in the class.
    session = None

    # class-level setup
    @classmethod
    def setup_class(cls):
        cls.session = requests.Session()  # initialise the session object
        cls.template = TemplateAPI()

    # class-level teardown
    @classmethod
    def teardown_class(cls):
        cls.session.close()

    @classmethod
    def setup(cls):
        # Throttle between tests to avoid hammering the API.
        sleep(1.5)

    # test methods
    @allure.story("测试方法模板-add")
    @pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_add"))
    def test_add(self, attr1, attr2, success, expect):
        # call the add API
        response = self.template.api_add(self.session, attr1, attr2)
        # log the status code
        log.info("添加功能-状态码为: {}".format(response.status_code))
        # assert on the status code
        assert response.status_code == expect, "状态码断言失败"

    @allure.story("测试方法模板-upd")
    @pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_upd"))
    def test_upd(self, attr1, attr2, success, expect):
        # call the update API
        response = self.template.api_upd(self.session, attr1, attr2)
        # log the status code
        log.info("修改功能-状态码为: {}".format(response.status_code))
        # assert on the status code
        assert response.status_code == expect, "状态码断言失败"

    @allure.story("测试方法模板-get")
    @pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_get"))
    def test_get(self, attr1, attr2, success, expect):
        # call the query API
        response = self.template.api_get(self.session, attr1, attr2)
        # log the status code
        log.info("查询功能-状态码为: {}".format(response.status_code))
        # assert on the status code
        assert response.status_code == expect, "状态码断言失败"

    @allure.story("测试方法模板-del")
    @pytest.mark.parametrize(("uid", "success", "expect"), read_json("test_del"))
    def test_del(self, uid, success, expect):
        # call the delete API
        response = self.template.api_del(self.session, uid)
        # log the status code
        log.info("删除功能-状态码为: {}".format(response.status_code))
        # assert on the status code
        assert response.status_code == expect, "状态码断言失败"
2.3125
2
dockerfilegenerator/generator.py
ccurcanu/aws-serverless-dockerfile-generator
2
18061
# -*- coding: utf-8 -*-

import botocore.exceptions
import logging

import dockerfilegenerator.lib.constants as constants
import dockerfilegenerator.lib.exceptions as exceptions
import dockerfilegenerator.lib.versions as versions
import dockerfilegenerator.lib.jsonstore as jsonstore
import dockerfilegenerator.lib.s3store as s3store
import dockerfilegenerator.lib.github as github

logger = logging.getLogger()

# Maps each tracked tool name to the callable resolving its latest version.
TRACKED_TOOLS = {
    "terraform": versions.get_latest_hashicorp_terraform_version,
    "packer": versions.get_latest_hashicorp_packer_version,
    "go": versions.get_latest_golango_go_version
}


class UtilsMixin:
    """Version bookkeeping shared by the Lambda entry point.

    Expects the host class to provide ``self.dockerfile``.
    """

    @property
    def tools_current_versions(self):
        """dict: tool name -> version currently recorded in the Dockerfile.

        Computed lazily and cached on the instance.
        """
        if getattr(self, "_tools_current_versions", None) is None:
            self._tools_current_versions = {
                tool_name: self.dockerfile.version(tool_name)
                for tool_name in self.dockerfile.json}
        return self._tools_current_versions

    @property
    def tools_next_versions(self):
        """dict: tool name -> latest upstream version (network lookups).

        Computed lazily and cached on the instance.
        """
        if getattr(self, "_tools_next_versions", None) is None:
            self._tools_next_versions = {
                tool_name: TRACKED_TOOLS[tool_name]()
                for tool_name in TRACKED_TOOLS}
        return self._tools_next_versions

    def update_dockerfile_versions(self):
        """Sync tracked tool versions into the Dockerfile store.

        Returns True when at least one tool version changed (also bumping
        the Dockerfile's own version), False otherwise.
        """
        dockerfile_changed = False
        for tool in self.tools_current_versions:
            # Pinned versions are never auto-updated.
            if self.dockerfile.force_version(tool):
                logger.info("Update versions: %s has force_version", tool)
                continue
            # The Dockerfile repo itself is versioned separately.
            if tool == self.dockerfile.dockerfile_repo_name:
                continue
            current_version = self.tools_current_versions[tool]
            next_version = self.tools_next_versions.get(tool, None)
            if next_version is None:
                logger.info("Update versions: %s has no next version", tool)
                continue
            if current_version == next_version:
                logger.info(
                    "Update versions: %s has no changed version", tool)
                continue
            self.dockerfile.set_version(tool, next_version)
            logger.info("Update versions: %s has next version %s",
                        tool, next_version)
            dockerfile_changed = True
        if dockerfile_changed:
            self.dockerfile.set_next_version_dockerfile()
        return dockerfile_changed


class DockerfileGeneratorLambda(UtilsMixin):
    """Checks tracked tools for new releases; when something changed,
    commits a regenerated Dockerfile/README to GitHub and snapshots the
    state to S3."""

    def __init__(self):
        self.s3bucket = s3store.get_s3_bucket_manager()
        self.dockerfile_repo = github.get_github_repository(
            constants.DOCKERFILE_GITHUB_REPO)
        self.dockerfile = jsonstore.get_dockerfile(self.dockerfile_repo)
        self._internal_state = None
        self.exit_code = 0

    @property
    def internal_state(self):
        """ Get the state from AWS S3 json file, or use the one
            from Github, if there is none."""
        if self._internal_state is None:
            internal_state = self.s3bucket.read_object(
                constants.INTERNAL_STATE_FILE)
            if internal_state is None:
                logger.info("Internal state: No state from S3")
                # Seed S3 from the GitHub copy on first run.
                internal_state = self.dockerfile.dump
                self.save_state_to_s3(internal_state)
            self._internal_state = jsonstore.Store(internal_state)
        return self._internal_state

    def update_files_on_github(self):
        """Render the Dockerfile/README templates with the current
        versions and commit them (plus the state file) in one commit."""
        template_dockerfile = self.dockerfile_repo.get_file_contents(
            constants.TEMPLATE_GITHUB_DOCKERFILE_PATH)
        template_readme = self.dockerfile_repo.get_file_contents(
            constants.TEMPLATE_GITHUB_README_PATH)
        commit_msg = self.dockerfile.update_summary(self.internal_state)
        commit_files = [
            (constants.INTERNAL_STATE_FILE, self.dockerfile.dump),
            ("Dockerfile", template_dockerfile.format(
                **self.dockerfile.template_variables)),
            ("README.md", template_readme.format(
                **self.dockerfile.template_variables))]
        logger.info("Updating files on Github with message:\n\t%s",
                    commit_msg)
        self.dockerfile_repo.commit(commit_files, commit_msg)

    def save_state_to_s3(self, content):
        """Upload `content` as the internal-state object; wraps any
        failure in LambdaException."""
        try:
            logger.info("Saving state to S3")
            self.s3bucket.write_object(constants.INTERNAL_STATE_FILE, content)
        # The original caught (ClientError, Exception); Exception already
        # subsumes ClientError, so the simpler clause is equivalent.
        except Exception as e:
            raise exceptions.LambdaException(
                "Error: Uploading object to s3 bucket: %s" % (str(e)))

    def main(self):
        """Run one update cycle; returns the process exit code (0)."""
        # NOTE(review): the save is kept inside the changed-branch, as the
        # collapsed original reads -- state only needs persisting when the
        # Dockerfile actually changed.  Confirm against upstream history.
        if self.update_dockerfile_versions():
            self.update_files_on_github()
            self.save_state_to_s3(self.dockerfile.dump)
        return self.exit_code


# Making Lambda Service happy.
# BUG FIX: AWS Lambda always invokes the handler with (event, context);
# the original zero-argument signature raised TypeError on every
# invocation.  Defaults keep direct zero-arg calls working too.
def lambda_handler(event=None, context=None):
    return DockerfileGeneratorLambda().main()
1.929688
2
ajustes_UM/tesis/main/urls.py
abelgonzalez/ajustes
1
18062
<gh_stars>1-10 from django.conf.urls import patterns, url from main import views urlpatterns = patterns('', url(r'^$', views.inicio, name='inicio'), url(r'^acerca/', views.acerca, name='acerca'), url(r'^contacto/', views.contacto, name='contacto'), url(r'^autenticar/', views.autenticar, name='autenticar'), url(r'^cerrar_sesion/', views.cerrar_sesion, name='cerrar_sesion'), url(r'^tiempo/', views.tiempo, name='tiempo'), url(r'^perfil/(?P<usuario>\d+)/$', views.perfil, name='perfil'), url(r'^imprimir_ajuste/', views.imprimir_ajuste, name='imprimir_ajuste'), url(r'^imprimir_ajusteId/(?P<ajusteEstudianteId>\d+)/$', views.imprimir_ajusteId, name='imprimir_ajusteId'), url(r'^imprimir_expediente/', views.imprimir_expediente, name='imprimir_expediente'), url(r'^imprimir_expedienteId/(?P<expedienteEstudianteId>\d+)/$', views.imprimir_expedienteId, name='imprimir_expedienteId'), )
1.976563
2
python/0011. maxArea.py
whtahy/leetcode
1
18063
class Solution: def maxArea(self, ls): n = len(ls) - 1 v, left, right = [], 0, n while 0 <= left < right <= n: h = min(ls[left], ls[right]) v += [h * (right - left)] while ls[left] <= h and left < right: left += 1 while ls[right] <= h and left < right: right -= 1 return max(v)
2.984375
3
test/test_views.py
Nemoden/Simblin
53
18064
# -*- coding: utf-8 -*- """ Simblin Test Views ~~~~~~~~~~~~~~~~~~ Test the different views of the blogging application. :copyright: (c) 2010 by <NAME>. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import datetime import flask from simblin.extensions import db from simblin.models import Post, Tag, Category, post_tags, post_categories, Admin from nose.tools import assert_equal, assert_true, assert_false from test import TestCase class ViewTestCase(TestCase): """Base TestClass for views""" def register(self, username, password, password2='', email=''): """Helper function to register a user""" return self.client.post('/register', data=dict( username=username, password=password, password2=<PASSWORD>, email=email, ), follow_redirects=True) def login(self, username, password): """Helper function to login""" return self.client.post('/login', data=dict( username=username, password=password ), follow_redirects=True) def register_and_login(self, username, password): """Register and login in one go""" self.register(username, password, password) self.login(username, password) def logout(self): """Helper function to logout""" return self.client.get('/logout', follow_redirects=True) def add_post(self, title, markup='', comments_allowed=None, visible=None, tags='', categories=[]): """Helper functions to create a blog post""" data=dict( title=title, markup=markup, tags=tags, action='Publish', ) if comments_allowed is not None: data['comments_allowed'] = True if visible is not None: data['visible'] = True # Mimic select form fields for i, category_id in enumerate(categories): data['category-%d' % i] = category_id return self.client.post('/compose', data=data, follow_redirects=True) def update_post(self, slug, title, markup='', comments_allowed=None, visible=None, tags=None, categories=[]): """Helper functions to create a blog post""" data=dict( title=title, markup=markup, tags=tags, action='Update', ) if comments_allowed is not None: 
data['comments_allowed'] = True if visible is not None: data['visible'] = True # Mimic select form fields for i, category_id in enumerate(categories): data['category-%d' % i] = category_id return self.client.post('/update/%s' % slug, data=data, follow_redirects=True) def delete_post(self, slug): """Helper function to delete a blog post""" return self.client.post('/_delete/%s' % slug, data=dict(next=''), follow_redirects=True) def add_category(self, name): """Register category in the database and return its id""" return flask.json.loads( self.client.post('/_add_category', data=dict(name=name)).data)['id'] def delete_category(self, id): return self.client.post('/_delete_category', data=dict(id=id)) class TestRegistration(ViewTestCase): def test_validation(self): """Test form validation""" self.clear_db() rv = self.register('', 'password') assert 'You have to enter a username' in rv.data rv = self.register('britney spears', '') assert 'You have to enter a password' in rv.data rv = self.register('barney', 'abv', 'abc') assert 'Passwords must match' in rv.data def test_registration(self): """Test successful registration and automatic login""" self.clear_db() with self.client: rv = self.register('barney', 'abc', 'abc') assert 'You are the new master of this blog' in rv.data assert flask.session['logged_in'] def test_reregistration(self): """Test that only one admin can exist at a time and reregistration with new credentials only works when logged in""" self.clear_db() rv = self.register('barney', 'abc', 'abc') self.logout() rv = self.register('barney', 'abc', 'abc') assert 'There can only be one admin' in rv.data self.login('barney', 'abc') rv = self.register('moe', 'ugly', 'ugly') # clears the admin rv = self.register('moe', 'ugly', 'ugly') assert 'You are the new master of this blog' in rv.data assert_equal(Admin.query.count(), 1) class TestLogin(ViewTestCase): def test_validation(self): """Test form validation""" self.clear_db() self.register('barney', 'abc', 'abc') 
rv = self.login('borney', 'abc') assert 'Invalid username' in rv.data rv = self.login('barney', 'abd') assert 'Invalid password' in rv.data def test_login_logout(self): """Test logging in and out""" self.clear_db() self.register('barney', 'abc', 'abc') with self.client: rv = self.login('barney', 'abc') assert 'You have been successfully logged in' in rv.data assert flask.session['logged_in'] rv = self.logout() assert 'You have been successfully logged out' in rv.data assert 'logged_in' not in flask.session class TestPost(ViewTestCase): """Tags and categories are tested alongside""" def test_validation(self): """Check if form validation and validation in general works""" self.clear_db() self.register_and_login('barney', 'abc') rv = self.add_post(title='', markup='a', tags='b') assert 'You must provide a title' in rv.data rv = self.update_post(title='a', markup='', tags='', slug='999x00') assert 'Invalid slug' in rv.data rv = self.add_post(title='a', markup='', tags='') assert 'New post was successfully posted' in rv.data def test_creation(self): """Test the blog post's fields' correctness after adding an post and test proper category association""" self.clear_db() self.register_and_login('barney', 'abc') title = "My post" markup = "# Title" tags = "django, franz und bertha,vil/bil" category1_id = self.add_category('cool') category2_id = self.add_category('cooler') self.add_post(title=title, markup=markup, tags=tags, categories=[category1_id, category1_id, category2_id]) post = Post.query.get(1) post_tagnames = [tag.name for tag in post.tags] category_names = [x.name for x in post.categories] assert_equal(post.id, 1) assert_equal(post.title, title) assert_equal(post.markup, markup) assert_false(post.comments_allowed) assert_false(post.visible) assert_equal(post.slug, 'my-post') assert '<h1>Title</h1>' in post.html assert_equal(post.datetime.date(), datetime.date.today()) assert_equal(sorted(post_tagnames), sorted(['django','franz-und-bertha','vil-bil'])) 
assert_equal(sorted(category_names), sorted(['cool', 'cooler'])) assert_equal(Tag.query.count(), 3) assert_equal(Category.query.count(), 2) assert_equal(db.session.query(post_tags).count(), 3) # Expect only two mappings although the mapping to category1 # has been added twice assert_equal(db.session.query(post_categories).count(), 2) # Add another post self.add_post(title=post.title, tags=['django'], comments_allowed=True, visible=True) post2 = Post.query.get(2) assert_equal(post2.title, post.title) assert_true(post2.comments_allowed) assert_true(post2.visible) assert_equal(post2.slug, post.slug + '-2') assert_equal(post2.categories, []) assert_equal(Tag.query.count(), 3) return post def test_updating(self): """Test the blog post's fields' correctness after updating a post and test the proper creation and automatic tidying of tags and tag mappings and category associations""" post = self.test_creation() datetime = post.datetime self.update_post(title='cool', markup='## Title', slug=post.slug, tags=['django'], comments_allowed=True, visible=True) updated_post = Post.query.get(1) assert_equal(updated_post.title, 'cool') assert_equal(updated_post.markup, '## Title') assert_true(updated_post.comments_allowed) assert_true(updated_post.visible) assert_equal(updated_post.slug, 'cool') assert '<h2>Title</h2>' in updated_post.html assert_equal(updated_post.datetime, datetime) assert_equal([x.name for x in updated_post.tags], ['django']) # Expect two rows in the posts table because two posts were # created and one updated. Expect only one row in the tags table # because only 'django' is used as a tag. 
assert_equal(Post.query.count(), 2) assert_equal(Tag.query.count(), 1) # Because there are two post with a tag expect two rows # in the post_tag association table assert_equal(db.session.query(post_tags).count(), 2) # Because there is no post in a category anymore expect not rows # in the post_categories association table assert_equal(db.session.query(post_categories).count(), 0) def test_deletion(self): """Test the deletion of a blog post and the accompanying deletion of tags""" self.clear_db() self.register_and_login('barney', 'abc') self.add_post(title='Title', markup='', tags='cool') posts = Post.query.all() tags = Tag.query.all() assert_equal(len(posts), 1) assert_equal(len(tags), 1) rv = self.delete_post(slug='idontexist') assert 'No such post' in rv.data rv = self.delete_post(slug='title') assert 'Post deleted' in rv.data posts = Post.query.all() tags = Tag.query.all() assert_equal(len(posts), 0) assert_equal(len(tags), 0) def test_singleview(self): """Test the displaying of one blog post""" self.clear_db() self.register_and_login('barney', 'abc') self.add_post(title='Title', markup='', visible=True) rv = self.client.get('/post/title') self.assert_200(rv) assert 'Title' in rv.data self.add_post(title='Title2', visible=None) rv = self.client.get('/post/title2') self.assert_200(rv) assert 'Title2' in rv.data self.logout() rv = self.client.get('/post/title') self.assert_200(rv) assert 'Title' in rv.data rv = self.client.get('/post/title2') self.assert_404(rv) def test_multipleview(self): """Test the displaying of multiple blog posts on home page""" self.clear_db() self.register_and_login('barney', 'abc') self.add_post(title='Title', markup='', visible=True) self.add_post(title='Title2', visible=None) self.logout() rv = self.client.get('/') self.assert_200(rv) assert 'Title' in rv.data assert 'Title2' not in rv.data class TestArchives(ViewTestCase): def test_archives_page(self): """Test the displaying of the archives page""" self.clear_db() rv = 
self.client.get('/archives/') self.assert_200(rv) def test_month_view(self): """Test the displaying of the month view""" self.clear_db() self.register_and_login('barney', 'abc') post = Post('the chronic 2001', visible=False) post.datetime = datetime.datetime(1999, 11, 16) db.session.add(post) db.session.commit() rv = self.client.get('/1999/11/') self.assert_200(rv) assert 'the chronic 2001' in rv.data rv = self.client.get('/7777/12/') assert 'No entries here so far' in rv.data rv = self.client.get('/1999/14/') self.assert_404(rv) self.logout() rv = self.client.get('/1999/11/') self.assert_200(rv) assert 'No entries here so far' in rv.data class TestTag(ViewTestCase): def test_view(self): """Test the displaying of the tag view""" self.clear_db() self.register_and_login('barney', 'abc') tag = Tag('drdre') db.session.add(tag) db.session.commit() post = Post('the chronic 2001', visible=True) post2 = Post('the chronic 2002', visible=False) post._tags = [tag] post2._tags = [tag] db.session.add(post) db.session.add(post2) db.session.commit() rv = self.client.get('/tag/drdre/') self.assert_200(rv) assert 'the chronic 2001' in rv.data rv = self.client.get('/tag/bobbybrown/') self.assert_404(rv) self.logout() rv = self.client.get('/tag/drdre/') self.assert_200(rv) assert 'the chronic 2001' in rv.data assert 'the chronic 2002' not in rv.data class TestCategory(ViewTestCase): def test_view(self): """Test the displaying of the category view""" self.clear_db() self.register_and_login('barney', 'abc') category = Category('drdre') db.session.add(category) db.session.commit() post = Post('the chronic', visible=True) post2 = Post('the chrinoc', visible=False) post._categories = [category] post2._categories = [category] db.session.add(post) db.session.add(post2) db.session.commit() rv = self.client.get('/category/drdre/') self.assert_200(rv) assert 'the chronic' in rv.data rv = self.client.get('/category/sugeknight/') self.assert_404(rv) self.logout() rv = 
self.client.get('/category/drdre/') self.assert_200(rv) assert 'the chronic' in rv.data assert 'the chrinoc' not in rv.data rv = self.client.get('/uncategorized/') self.assert_200(rv) assert 'Uncategorized posts' in rv.data post2 = Post('dancing in the moonlight') db.session.add(post2) db.session.commit() rv = self.client.get('/uncategorized/') self.assert_200(rv) assert 'dancing in the moonlight' in rv.data def test_deletion_view(self): """Test if deletion works properly""" self.clear_db() self.register_and_login('barney', 'abc') category = Category('drdre') db.session.add(category) db.session.commit() assert_equal(Category.query.count(), 1) rv = self.delete_category(1) print rv assert_equal(Category.query.count(), 0)
2.265625
2
freeclimb/models/message_result.py
FreeClimbAPI/python-sdk
0
18065
# coding: utf-8 """ FreeClimb API FreeClimb is a cloud-based application programming interface (API) that puts the power of the Vail platform in your hands. FreeClimb simplifies the process of creating applications that can use a full range of telephony features without requiring specialized or on-site telephony equipment. Using the FreeClimb REST API to write applications is easy! You have the option to use the language of your choice or hit the API directly. Your application can execute a command by issuing a RESTful request to the FreeClimb API. The base URL to send HTTP requests to the FreeClimb REST API is: /apiserver. FreeClimb authenticates and processes your request. # noqa: E501 The version of the OpenAPI document: 1.0.0 Contact: <EMAIL> Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from freeclimb.configuration import Configuration class MessageResult(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'uri': 'str', 'date_created': 'str', 'date_updated': 'str', 'revision': 'int', 'account_id': 'str', 'message_id': 'str', 'status': 'str', '_from': 'str', 'to': 'str', 'text': 'str', 'direction': 'str', 'notification_url': 'str' } attribute_map = { 'uri': 'uri', 'date_created': 'dateCreated', 'date_updated': 'dateUpdated', 'revision': 'revision', 'account_id': 'accountId', 'message_id': 'messageId', 'status': 'status', '_from': 'from', 'to': 'to', 'text': 'text', 'direction': 'direction', 'notification_url': 'notificationUrl' } def __init__(self, uri=None, date_created=None, date_updated=None, revision=None, account_id=None, message_id=None, status=None, _from=None, to=None, text=None, direction=None, notification_url=None, local_vars_configuration=None): # noqa: E501 """MessageResult - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._uri = None self._date_created = None self._date_updated = None self._revision = None self._account_id = None self._message_id = None self._status = None self.__from = None self._to = None self._text = None self._direction = None self._notification_url = None self.discriminator = None if uri is not None: self.uri = uri if date_created is not None: self.date_created = date_created if date_updated is not None: self.date_updated = date_updated if revision is not None: self.revision = revision if account_id is not None: self.account_id = account_id if message_id is not None: self.message_id = message_id if status is not None: self.status = status if _from is not None: self._from = _from if to is not None: self.to = to if text is not None: self.text = text if direction is not None: self.direction = direction if notification_url is not None: self.notification_url = notification_url @property def uri(self): """Gets the uri of this MessageResult. 
# noqa: E501 The URI for this resource, relative to /apiserver. # noqa: E501 :return: The uri of this MessageResult. # noqa: E501 :rtype: str """ return self._uri @uri.setter def uri(self, uri): """Sets the uri of this MessageResult. The URI for this resource, relative to /apiserver. # noqa: E501 :param uri: The uri of this MessageResult. # noqa: E501 :type: str """ self._uri = uri @property def date_created(self): """Gets the date_created of this MessageResult. # noqa: E501 The date that this resource was created (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501 :return: The date_created of this MessageResult. # noqa: E501 :rtype: str """ return self._date_created @date_created.setter def date_created(self, date_created): """Sets the date_created of this MessageResult. The date that this resource was created (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501 :param date_created: The date_created of this MessageResult. # noqa: E501 :type: str """ self._date_created = date_created @property def date_updated(self): """Gets the date_updated of this MessageResult. # noqa: E501 The date that this resource was last updated (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501 :return: The date_updated of this MessageResult. # noqa: E501 :rtype: str """ return self._date_updated @date_updated.setter def date_updated(self, date_updated): """Sets the date_updated of this MessageResult. The date that this resource was last updated (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501 :param date_updated: The date_updated of this MessageResult. # noqa: E501 :type: str """ self._date_updated = date_updated @property def revision(self): """Gets the revision of this MessageResult. # noqa: E501 Revision count for the resource. This count is set to 1 on creation and is incremented every time it is updated. # noqa: E501 :return: The revision of this MessageResult. 
# noqa: E501 :rtype: int """ return self._revision @revision.setter def revision(self, revision): """Sets the revision of this MessageResult. Revision count for the resource. This count is set to 1 on creation and is incremented every time it is updated. # noqa: E501 :param revision: The revision of this MessageResult. # noqa: E501 :type: int """ self._revision = revision @property def account_id(self): """Gets the account_id of this MessageResult. # noqa: E501 String that uniquely identifies this account resource. # noqa: E501 :return: The account_id of this MessageResult. # noqa: E501 :rtype: str """ return self._account_id @account_id.setter def account_id(self, account_id): """Sets the account_id of this MessageResult. String that uniquely identifies this account resource. # noqa: E501 :param account_id: The account_id of this MessageResult. # noqa: E501 :type: str """ self._account_id = account_id @property def message_id(self): """Gets the message_id of this MessageResult. # noqa: E501 String that uniquely identifies this message resource # noqa: E501 :return: The message_id of this MessageResult. # noqa: E501 :rtype: str """ return self._message_id @message_id.setter def message_id(self, message_id): """Sets the message_id of this MessageResult. String that uniquely identifies this message resource # noqa: E501 :param message_id: The message_id of this MessageResult. # noqa: E501 :type: str """ self._message_id = message_id @property def status(self): """Gets the status of this MessageResult. # noqa: E501 Indicates the state of the message through the message lifecycle including: new, queued, rejected, sending, sent, failed, received, undelivered, expired, deleted, and unknown # noqa: E501 :return: The status of this MessageResult. # noqa: E501 :rtype: str """ return self._status @status.setter def status(self, status): """Sets the status of this MessageResult. 
Indicates the state of the message through the message lifecycle including: new, queued, rejected, sending, sent, failed, received, undelivered, expired, deleted, and unknown # noqa: E501 :param status: The status of this MessageResult. # noqa: E501 :type: str """ allowed_values = ["new", "queued", "rejected", "sending", "sent", "failed", "received", "undelivered", "expired", "deleted", "unknown"] # noqa: E501 if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `status` ({0}), must be one of {1}" # noqa: E501 .format(status, allowed_values) ) self._status = status @property def _from(self): """Gets the _from of this MessageResult. # noqa: E501 Phone number in E.164 format that sent the message. # noqa: E501 :return: The _from of this MessageResult. # noqa: E501 :rtype: str """ return self.__from @_from.setter def _from(self, _from): """Sets the _from of this MessageResult. Phone number in E.164 format that sent the message. # noqa: E501 :param _from: The _from of this MessageResult. # noqa: E501 :type: str """ self.__from = _from @property def to(self): """Gets the to of this MessageResult. # noqa: E501 Phone number in E.164 format that received the message. # noqa: E501 :return: The to of this MessageResult. # noqa: E501 :rtype: str """ return self._to @to.setter def to(self, to): """Sets the to of this MessageResult. Phone number in E.164 format that received the message. # noqa: E501 :param to: The to of this MessageResult. # noqa: E501 :type: str """ self._to = to @property def text(self): """Gets the text of this MessageResult. # noqa: E501 Message contents # noqa: E501 :return: The text of this MessageResult. # noqa: E501 :rtype: str """ return self._text @text.setter def text(self, text): """Sets the text of this MessageResult. Message contents # noqa: E501 :param text: The text of this MessageResult. 
# noqa: E501 :type: str """ self._text = text @property def direction(self): """Gets the direction of this MessageResult. # noqa: E501 Noting whether the message was inbound or outbound # noqa: E501 :return: The direction of this MessageResult. # noqa: E501 :rtype: str """ return self._direction @direction.setter def direction(self, direction): """Sets the direction of this MessageResult. Noting whether the message was inbound or outbound # noqa: E501 :param direction: The direction of this MessageResult. # noqa: E501 :type: str """ self._direction = direction @property def notification_url(self): """Gets the notification_url of this MessageResult. # noqa: E501 URL invoked when message sent # noqa: E501 :return: The notification_url of this MessageResult. # noqa: E501 :rtype: str """ return self._notification_url @notification_url.setter def notification_url(self, notification_url): """Sets the notification_url of this MessageResult. URL invoked when message sent # noqa: E501 :param notification_url: The notification_url of this MessageResult. 
# noqa: E501 :type: str """ self._notification_url = notification_url def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.to_camel_case(attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) elif value is None: continue else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, MessageResult): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, MessageResult): return True return self.to_dict() != other.to_dict() def to_camel_case(self, snake_str): components = snake_str.split('_') return components[0] + ''.join(x.title() for x in components[1:])
2.578125
3
blog/views.py
artkapl/django-blog-project
0
18066
<filename>blog/views.py from django.shortcuts import render from .models import Post def home(request): context = { 'posts': Post.objects.all() } return render(request=request, template_name='blog/home.html', context=context) def about(request): return render(request=request, template_name='blog/about.html', context={'title': 'About'})
2.078125
2
az/private/common/utils.bzl
jullianoacqio/rules_microsoft_azure
4
18067
def _check_stamping_format(f): if f.startswith("{") and f.endswith("}"): return True return False def _resolve_stamp(ctx, string, output): stamps = [ctx.info_file, ctx.version_file] args = ctx.actions.args() args.add_all(stamps, format_each = "--stamp-info-file=%s") args.add(string, format = "--format=%s") args.add(output, format = "--output=%s") ctx.actions.run( executable = ctx.executable._stamper, arguments = [args], inputs = stamps, tools = [ctx.executable._stamper], outputs = [output], mnemonic = "Stamp", ) utils = struct( resolve_stamp = _resolve_stamp, check_stamping_format = _check_stamping_format, )
2.546875
3
hue.py
desheffer/hue-adapter
0
18068
from config import Config import flask import json import os from ssdp import SSDP from threading import Thread import urllib3 config = None config_file_paths = [ os.path.dirname(os.path.realpath(__file__)) + "/config/default.cfg.local", "/etc/hue-adapter/default.cfg.local", ] for config_file_path in config_file_paths: if os.path.isfile(config_file_path): config = Config(file(config_file_path)) if not config: print "Cannot find configuration file" exit(1) app = flask.Flask(__name__) @app.route("/setup.xml") def get_setup_file(): """Serve the SSDP setup file.""" out = "<?xml version=\"1.0\"?>\n" + \ "<root xmlns=\"urn:schemas-upnp-org:device-1-0\">\n" + \ "<specVersion>\n" + \ "<major>1</major>\n" + \ "<minor>0</minor>\n" + \ "</specVersion>\n" + \ "<URLBase>http://%s:%d/</URLBase>\n" % (config.web.addr, config.web.port) + \ "<device>\n" + \ "<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>\n" + \ "<friendlyName>Philips Hue Emulator</friendlyName>\n" + \ "<manufacturer>Royal Philips Electronics</manufacturer>\n" + \ "<manufacturerURL></manufacturerURL>\n" + \ "<modelDescription>Philips Hue Emulator</modelDescription>\n" + \ "<modelName>Philips hue bridge 2012</modelName>\n" + \ "<modelNumber>929000226503</modelNumber>\n" + \ "<modelURL></modelURL>\n" + \ "<serialNumber>00000000000000000001</serialNumber>\n" + \ "<UDN>uuid:776c1cbc-790a-425f-a890-a761ec57513c</UDN>\n" + \ "</device>\n" + \ "</root>\n" return flask.Response(out, mimetype="text/xml") @app.route("/api/<username>/lights", methods=["GET"]) def get_all_lights(username): """Get all lights""" out = {} for id, light in config.lights.iteritems(): out[id] = { "state": { "on": False, "bri": 0, "hue": 0, "sat": 0, "xy": [0, 0], "ct": 0, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True, }, "type": "Extended color light", "name": light["name"], "modelid": "LCT001", "swversion": "6609461", "pointsymbol": {}, } return flask.jsonify(out) @app.route("/api/<username>/lights/<id>", 
methods=["GET"]) def get_light(username, id): """Get light attributes and state""" if id in config.lights: light = config.lights[id] else: return "", 3 out = { "state": { "on": False, "bri": 0, "hue": 0, "sat": 0, "xy": [0, 0], "ct": 0, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True, }, "type": "Extended color light", "name": light["name"], "modelid": "LCT001", "swversion": "6609461", "pointsymbol": {}, } return flask.jsonify(out) @app.route("/api/<username>/lights/<id>/state", methods=["PUT"]) def set_lights_state(username, id): """Set light state""" if id in config.lights: light = config.lights[id] else: return "", 3 data = flask.request.get_json(force=True) if not data or "on" not in data: return "", 6 if data["on"]: url = light["on_url"] else: url = light["off_url"] try: http = urllib3.PoolManager() r = http.request("GET", url) except: return "", 901 out = [ { "success": { "/lights/" + id + "/state/on": data["on"] } } ] return flask.Response(json.dumps(out), mimetype="text/json") if __name__ == "__main__": ssdp = SSDP(config.web.addr, config.web.port) ssdp_thread = Thread(target=ssdp.run) ssdp_thread.setDaemon(True) ssdp_thread.start() app.run(host=config.web.addr, port=config.web.port)
2.296875
2
safe/geokdbush/kdbushTest.py
s-a-f-e/backend
1
18069
from kdbush import KDBush

# test data
points = [
    [54,1],[97,21],[65,35],[33,54],[95,39],[54,3],[53,54],[84,72],[33,34],[43,15],[52,83],[81,23],[1,61],[38,74],
    [11,91],[24,56],[90,31],[25,57],[46,61],[29,69],[49,60],[4,98],[71,15],[60,25],[38,84],[52,38],[94,51],[13,25],
    [77,73],[88,87],[6,27],[58,22],[53,28],[27,91],[96,98],[93,14],[22,93],[45,94],[18,28],[35,15],[19,81],[20,81],
    [67,53],[43,3],[47,66],[48,34],[46,12],[32,38],[43,12],[39,94],[88,62],[66,14],[84,30],[72,81],[41,92],[26,4],
    [6,76],[47,21],[57,70],[71,82],[50,68],[96,18],[40,31],[78,53],[71,90],[32,14],[55,6],[32,88],[62,32],[21,67],
    [73,81],[44,64],[29,50],[70,5],[6,22],[68,3],[11,23],[20,42],[21,73],[63,86],[9,40],[99,2],[99,76],[56,77],
    [83,6],[21,72],[78,30],[75,53],[41,11],[95,20],[30,38],[96,82],[65,48],[33,18],[87,28],[10,10],[40,34],
    [10,20],[47,29],[46,78]]

# Reference index layout (ids/coords after construction) kept for comparison
# against the upstream kdbush test fixtures; not used by the checks below.
ids = [
    97, 74, 95, 30, 77, 38, 76, 27, 80, 55, 72, 90, 88, 48, 43, 46, 65, 39, 62, 93, 9, 96, 47, 8, 3, 12, 15, 14, 21, 41,
    36, 40, 69, 56, 85, 78, 17, 71, 44, 19, 18, 13, 99, 24, 67, 33, 37, 49, 54, 57, 98, 45, 23, 31, 66, 68, 0, 32, 5,
    51, 75, 73, 84, 35, 81, 22, 61, 89, 1, 11, 86, 52, 94, 16, 2, 6, 25, 92, 42, 20, 60, 58, 83, 79, 64, 10, 59, 53,
    26, 87, 4, 63, 50, 7, 28, 82, 70, 29, 34, 91]

coords = [
    10,20,6,22,10,10,6,27,20,42,18,28,11,23,13,25,9,40,26,4,29,50,30,38,41,11,43,12,43,3,46,12,32,14,35,15,40,31,33,18,
    43,15,40,34,32,38,33,34,33,54,1,61,24,56,11,91,4,98,20,81,22,93,19,81,21,67,6,76,21,72,21,73,25,57,44,64,47,66,29,
    69,46,61,38,74,46,78,38,84,32,88,27,91,45,94,39,94,41,92,47,21,47,29,48,34,60,25,58,22,55,6,62,32,54,1,53,28,54,3,
    66,14,68,3,70,5,83,6,93,14,99,2,71,15,96,18,95,20,97,21,81,23,78,30,84,30,87,28,90,31,65,35,53,54,52,38,65,48,67,
    53,49,60,50,68,57,70,56,77,63,86,71,90,52,83,71,82,72,81,94,51,75,53,95,39,78,53,88,62,84,72,77,73,99,76,73,81,88,
    87,96,98,96,82]

index = KDBush(points)

# --- range (bounding box) query -------------------------------------------
result = index.range(20, 30, 50, 70)
print(result)
# [60, 20, 45, 3, 17, 71, 44, 19, 18, 15, 69, 90, 62, 96, 47, 8, 77, 72]

# Soundness: every returned point lies inside the query box.
for idx in result:
    p = points[idx]
    if p[0] < 20 or p[0] > 50 or p[1] < 30 or p[1] > 70:
        print("FAIL: result point out of range")

# Completeness: every in-box point appears in the result.
# BUG FIX: this loop must scan ALL points; the original iterated `result`,
# making the `not in result` condition always false, so a missing point
# could never be detected.
for idx in range(len(points)):
    p = points[idx]
    if idx not in result and 20 <= p[0] <= 50 and 30 <= p[1] <= 70:
        print("FAIL: in-range point missing from result")


def sqDist2(a, b):
    """Squared Euclidean distance between 2-D points a and b."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx * dx + dy * dy


# --- within (radius) query -------------------------------------------------
qp = [50, 50]
r = 20
r2 = 20 * 20

result = index.within(qp[0], qp[1], r)
print(result)
# [60, 6, 25, 92, 42, 20, 45, 3, 71, 44, 18, 96]

# Soundness: every returned point is within radius r of qp.
for idx in result:
    p = points[idx]
    if sqDist2(p, qp) > r2:
        print("FAIL: result point out of range")

# Completeness: every point within radius r appears in the result
# (same BUG FIX as above: scan all points, not just `result`).
for idx in range(len(points)):
    p = points[idx]
    if idx not in result and sqDist2(p, qp) <= r2:
        print("FAIL: in-range point missing from result")
1.523438
2
buckit/compiler.py
martarozek/buckit
0
18070
#!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

"""Probe the host for C/C++ compilers and Python interpreters and write the
detected toolchain settings into the project's .buckconfig.local."""

import logging
import os
import subprocess
import platform


def get_current_platform_flavor():
    """Map platform.system() onto buckit's platform flavor names.

    Returns one of 'macos', 'linux', 'windows', or 'default' for anything
    else.
    """
    platforms = {
        'Darwin': 'macos',
        'Linux': 'linux',
        'Windows': 'windows',
    }
    return platforms.get(platform.system(), 'default')


def is_exe(fpath):
    """Return True if fpath is an existing, executable regular file."""
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)


def which(program, get_canonical=False):
    """Locate an executable like the shell `which`.

    If `program` contains a path separator it is checked directly; otherwise
    each PATH entry is searched in order. With get_canonical=True the result
    has symlinks resolved via os.path.realpath. Returns None when not found.
    """
    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return os.path.realpath(program) if get_canonical else program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return os.path.realpath(exe_file) if get_canonical else exe_file
    return None


def detect_py2():
    """Return the path to a python2 interpreter, or None."""
    return which('python2')


def detect_py3():
    """Return the canonical (symlink-resolved) path to python3, or None."""
    return which('python3', get_canonical=True)


def detect_python_libs(python):
    """Return the library root for the given interpreter path.

    Strips the version and site-packages components from
    sysconfig.get_python_lib() to get the lib path.
    """
    return subprocess.check_output([
        python, '-c', (
            'from __future__ import print_function; '
            'from distutils import sysconfig; '
            'import os; '
            'print(os.sep.join(sysconfig.get_python_lib().split(os.sep)[:-2]))'
        )]).decode('utf-8').split('\n')[0]


def detect_python_include(python):
    """Return the C-extension include directory for the interpreter path."""
    return subprocess.check_output([
        python, '-c', (
            'from __future__ import print_function; '
            'from distutils import sysconfig; '
            'print(sysconfig.get_python_inc())'
        )]).decode('utf-8').split('\n')[0]


def get_system_lib_paths():
    """Return the conventional system library search paths for this platform.

    Raises KeyError for platforms without an entry (e.g. 'windows').
    """
    libs = {
        'linux': [
            '/usr/local/lib64',
            '/usr/local/lib',
            '/usr/lib64',
            '/usr/lib',
            '/lib64',
            '/lib',
        ],
        'macos': [
            '/usr/local/lib',
            # {name} is substituted per-package for Homebrew keg-only installs.
            '/usr/local/opt/{name}/lib',
            '/usr/lib',
        ],
    }
    return libs[get_current_platform_flavor()]


def detect_cc():
    """Return the C compiler: $CC if set, else clang, else gcc, else None."""
    if 'CC' in os.environ:
        return os.environ['CC']
    clang = which('clang')
    if clang:
        return clang
    gcc = which('gcc')
    if gcc:
        return gcc
    # Explicit return for consistency with detect_cxx (was an implicit None).
    return None


def detect_cxx():
    """Return the C++ compiler: $CXX if set, else clang++, else g++, else None."""
    if 'CXX' in os.environ:
        return os.environ['CXX']
    clang_pp = which('clang++')
    if clang_pp:
        return clang_pp
    g_pp = which('g++')
    if g_pp:
        return g_pp
    return None


def detect_c_standard(compiler_cmd):
    """Return the newest -std= C flag the compiler accepts, or None.

    Tries each candidate by compiling a trivial program fed on stdin.
    """
    versions = [
        '-std=gnu11',
        '-std=c11',
        '-std=gnu99',
        '-std=c99',
    ]
    for version in versions:
        logging.debug("Checking %s support for -std=%s", compiler_cmd, version)
        cmd = [compiler_cmd, version, '-x', 'c', '-']
        proc = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        stdout, stderr = proc.communicate(
            'int main() { return 0; }'.encode('utf-8')
        )
        if proc.returncode != 0:
            logging.debug(
                "Got return code %s, output: %s. trying next",
                proc.returncode, stdout
            )
        else:
            return version
    return None


def detect_cxx_standard(compiler_cmd):
    """Return the newest -std= C++ flag the compiler accepts, or None."""
    versions = [
        # '-std=gnu++1z',
        # '-std=c++1z',
        '-std=gnu++14',
        '-std=c++14',
        '-std=gnu++1y',
        '-std=c++1y',
        '-std=gnu++11',
        '-std=c++11',
    ]
    for version in versions:
        logging.debug("Checking %s support for -std=%s", compiler_cmd, version)
        cmd = [compiler_cmd, version, '-x', 'c++', '-']
        proc = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        stdout, stderr = proc.communicate(
            'int main() { return 0; }'.encode('utf-8')
        )
        if proc.returncode != 0:
            logging.debug(
                "Got return code %s, output: %s. trying next",
                proc.returncode, stdout
            )
        else:
            return version
    return None


def configure_compiler(project_root):
    """
    Sets up .buckconfig.local in the root project with basic c++/c compiler
    settings. More advanced probing will probably be done in the future.

    Returns 0 in all cases (callers treat this as an exit code).
    """
    # Imported here rather than at module top so the pure detection helpers
    # above can be used (and tested) without the rest of the buckit package
    # on sys.path.
    from constants import BUCKCONFIG_LOCAL
    from configure_buck import update_config

    buckconfig_local = os.path.join(project_root, BUCKCONFIG_LOCAL)
    logging.info("{bold}Detecting compiler{clear}")

    current_platform = get_current_platform_flavor()
    cc = detect_cc()
    cxx = detect_cxx()
    if not cc or not cxx:
        # logging.warning replaces the deprecated logging.warn alias.
        logging.warning("Could not find clang or g++ in PATH")
        return 0

    c_standard = detect_c_standard(cc)
    if c_standard:
        cflags = [c_standard]
    else:
        cflags = []

    cxx_standard = detect_cxx_standard(cxx)
    if cxx_standard:
        cxxflags = [cxx_standard]
    else:
        cxxflags = []

    py2 = detect_py2()
    py3 = detect_py3()
    # BUG FIX: only probe include/lib paths for interpreters that were
    # actually found; passing None to subprocess raised TypeError before.
    py2_include = detect_python_include(py2) if py2 else None
    py2_libs = detect_python_libs(py2) if py2 else None
    py3_include = detect_python_include(py3) if py3 else None
    py3_libs = detect_python_libs(py3) if py3 else None

    to_set = {
        'cxx': {
            'cflags': cflags + ['-pthread', '-g'],
            'cxxflags': cxxflags + ['-pthread', '-g'],
            'ldflags': ['-pthread'],
            'cxx': [cxx],
            'cc': [cc],
        },
    }
    to_set['cxx#' + current_platform] = to_set['cxx'].copy()
    to_set['cxx']['default_platform'] = current_platform

    py2_settings = {
        'interpreter': py2,
        'includes': py2_include,
        'libs': py2_libs,
    }
    py3_settings = {
        'interpreter': py3,
        'includes': py3_include,
        'libs': py3_libs,
    }
    if py2:
        to_set['python#py2'] = py2_settings
        to_set['python#py2-%s' % current_platform] = py2_settings
    if py3:
        to_set['python#py3'] = py3_settings
        to_set['python#py3-%s' % current_platform] = py3_settings
    to_set['buckit'] = {'system_lib_paths': ','.join(get_system_lib_paths())}

    update_config(project_root, buckconfig_local, to_set)
    return 0
1.9375
2
test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py
AndreasArne/python-examination
0
18071
#!/usr/bin/env python3
""" Contains testcases for the individual examination. """
import unittest
from io import StringIO
import os
import sys
from unittest.mock import patch
from examiner import ExamTestCase, ExamTestResult, tags
from examiner import import_module, find_path_to_assignment

# Locate the student's assignment directory relative to this test file and
# make it importable.
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_PATH = find_path_to_assignment(FILE_DIR)

if REPO_PATH not in sys.path:
    sys.path.insert(0, REPO_PATH)

# Path to file and basename of the file to import
main = import_module(REPO_PATH, "main")

# NOTE(review): the Swedish docstrings below are left untouched on purpose —
# the examiner framework appears to format them at runtime (note the
# {arguments}/{correct}/{student} placeholders), so their text is behavior,
# not documentation. TODO confirm against ExamTestCase.


class Test1Files(ExamTestCase):
    """
    Each assignment has 1 testcase with multiple asserts.
    The different asserts
    https://docs.python.org/3.6/library/unittest.html#test-cases
    """


class Test2Counters(ExamTestCase):
    """
    Meny options for counting
    """
    @classmethod
    def setUpClass(cls):
        # Otherwise the .txt files will not be found
        os.chdir(REPO_PATH)

    @tags("count", "lines")
    def test_b_lines(self):
        """
        Testar att anropa menyval 'lines' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        # Menu choice, blank line, then quit — fed to input() via the mock.
        self._multi_arguments = ["lines", "", "q"]
        with patch('builtins.input', side_effect=self._multi_arguments):
            with patch('sys.stdout', new=StringIO()) as fake_out:
                main.main()
                str_data = fake_out.getvalue()
                self.assertIn("17", str_data)

    @tags("count", "words")
    def test_c_words(self):
        """
        Testar att anropa menyval 'words' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        self._multi_arguments = ["words", "", "q"]
        with patch('builtins.input', side_effect=self._multi_arguments):
            with patch('sys.stdout', new=StringIO()) as fake_out:
                main.main()
                str_data = fake_out.getvalue()
                self.assertIn("199", str_data)

    @tags("count", "letters")
    def test_d_letters(self):
        """
        Testar att anropa menyval 'letters' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        self._multi_arguments = ["letters", "", "q"]
        # NOTE(review): duplicated norepr assignment kept as-is.
        self.norepr = True
        with patch('builtins.input', side_effect=self._multi_arguments):
            with patch('sys.stdout', new=StringIO()) as fake_out:
                main.main()
                str_data = fake_out.getvalue()
                self.assertIn("907", str_data)


class Test3Frequencies(ExamTestCase):
    """
    Meny options for frequency
    """
    def check_print_contain(self, inp, correct):
        """
        One function for testing print input functions.
        """
        # Feed `inp` to input() and assert every expected string appears in
        # the captured stdout.
        with patch("builtins.input", side_effect=inp):
            with patch("sys.stdout", new=StringIO()) as fake_out:
                main.main()
                for val in correct:
                    str_data = fake_out.getvalue()
                    self.assertIn(val, str_data)

    @tags("freq", "word_frequency")
    def test_a_word_frequency(self):
        """
        Testar att anropa menyval 'word_frequency' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        self._multi_arguments = ["word_frequency", "", "q"]
        self.check_print_contain(self._multi_arguments, [
            "the: 12 | 6.0%",
            "to: 8 | 4.0%",
            "and: 7 | 3.5%",
            "of: 6 | 3.0%",
            "street: 5 | 2.5%",
            "him: 5 | 2.5%",
            "he: 5 | 2.5%",
        ])

    @tags("freq", "letter_frequency")
    def test_b_letter_frequency(self):
        """
        Testar att anropa menyval 'letter_frequency' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        self._multi_arguments = ["letter_frequency", "", "q"]
        self.check_print_contain(self._multi_arguments, [
            "e: 108 | 11.9%",
            "t: 91 | 10.0%",
            "o: 77 | 8.5%",
            "h: 67 | 7.4%",
            "n: 66 | 7.3%",
            "i: 64 | 7.1%",
            "a: 64 | 7.1%",
        ])


# NOTE(review): two classes share the "Test4" number (Test4All/Test4Change),
# and check_print_contain is duplicated from Test3Frequencies — kept as-is.
class Test4All(ExamTestCase):
    """
    Meny options for frequency
    """
    def check_print_contain(self, inp, correct):
        """
        One function for testing print input functions.
        """
        with patch("builtins.input", side_effect=inp):
            with patch("sys.stdout", new=StringIO()) as fake_out:
                main.main()
                for val in correct:
                    str_data = fake_out.getvalue()
                    self.assertIn(val, str_data)

    @tags("all")
    def test_a_all(self):
        """
        Testar att anropa menyval 'all' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        self._multi_arguments = ["all", "", "q"]
        self.check_print_contain(self._multi_arguments, [
            "17",
            "199",
            "907",
            "the: 12 | 6.0%",
            "to: 8 | 4.0%",
            "and: 7 | 3.5%",
            "of: 6 | 3.0%",
            "street: 5 | 2.5%",
            "him: 5 | 2.5%",
            "he: 5 | 2.5%",
            "e: 108 | 11.9%",
            "t: 91 | 10.0%",
            "o: 77 | 8.5%",
            "h: 67 | 7.4%",
            "n: 66 | 7.3%",
            "i: 64 | 7.1%",
            "a: 64 | 7.1%",
        ])


class Test4Change(ExamTestCase):
    """
    Meny options for frequency
    """
    @tags("change")
    def test_a_change(self):
        """
        Testar att anropa menyval 'all' i main.py.
        Använder följande som input:
        {arguments}
        Förväntar att följande finns med i utskrift:
        {correct}
        Fick följande:
        {student}
        """
        self.norepr = True
        # Switch the analyzed file to lorum.txt, then run 'all'.
        self._multi_arguments = ["change", "lorum.txt", "", "all", "", "q"]
        with patch('builtins.input', side_effect=self._multi_arguments):
            with patch('sys.stdout', new=StringIO()) as fake_out:
                main.main()
                str_data = fake_out.getvalue()
                self.assertIn("23", str_data)
                self.assertIn("3", str_data)
                self.assertIn("140", str_data)
                self.assertIn("dolor: 2 | 8.0%", str_data)
                self.assertIn("vivamus: 1 | 4.0%", str_data)
                self.assertIn("vitae: 1 | 4.0%", str_data)
                self.assertIn("varius: 1 | 4.0%", str_data)
                self.assertIn("urna: 1 | 4.0%", str_data)
                self.assertIn("sit: 1 | 4.0%", str_data)
                self.assertIn("pellentesque: 1 | 4.0%", str_data)
                self.assertIn("i: 18 | 12.9%", str_data)
                self.assertIn("e: 16 | 11.4%", str_data)
                self.assertIn("u: 12 | 8.6%", str_data)
                self.assertIn("a: 12 | 8.6%", str_data)
                self.assertIn("t: 10 | 7.1%", str_data)
                self.assertIn("l: 10 | 7.1%", str_data)
                self.assertIn("s: 9 | 6.4%", str_data)


if __name__ == '__main__':
    runner = unittest.TextTestRunner(resultclass=ExamTestResult, verbosity=2)
    unittest.main(testRunner=runner, exit=False)
2.890625
3
olha_boca/infratores/admin.py
Perceu/olha-boca
0
18072
from django.contrib import admin from olha_boca.infratores.models import Infratores # Register your models here. class InfratoresAdmin(admin.ModelAdmin): list_display = ('nome', 'infracoes_a_pagar', 'total_infracoes', 'valor_a_pagar') @admin.display(empty_value='???') def total_infracoes(self, obj): return obj.infracoes.count() @admin.display(empty_value='???') def infracoes_a_pagar(self, obj): return obj.infracoes.filter(paga=False).count() @admin.display(empty_value='???') def valor_a_pagar(self, obj): total = 0 infracoes_a_pagar = obj.infracoes.filter(paga=False).all() for inf in infracoes_a_pagar: total += (inf.tipo.vibs * inf.tipo.multiplicador_vibs) return f'R$ {total:.2f}' admin.site.register(Infratores, InfratoresAdmin)
1.960938
2
model.py
ogugugugugua/Cycle-Gan-Pytorch-Implementation
0
18073
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import functools import torch.nn as nn from torch.nn import init import torch.functional as F from torch.autograd import Variable print('ok') def weights_init_normal(m): classname = m.__class__.__name__ print(classname) if classname.find('Conv') != -1: init.normal(m.weight.data, 0.0, 0.02) elif classname.find('Linear') != -1: init.normal(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm2d') != -1: init.normal(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def init_weight(net,init_type='normal'): print('initialization method [%s]' % init_type) if init_type == 'normal': net.apply(weights_init_normal) else: raise NotImplementedError('initialization method [%s] is not implemented' % init_type) class ResnetBlock(nn.Module): def __init__(self, dim, use_dropout, use_bias): super(ResnetBlock, self).__init__() self.conv_block = self.build_conv_block(dim, use_dropout, use_bias) def build_conv_block(self,dim,use_dropout,use_bias): conv_block = [] conv_block += [nn.ReflectionPad2d(1)] conv_block += [nn.Conv2d(dim,dim,kernel_size=3,padding=0,bias=use_bias), nn.InstanceNorm2d(dim), nn.ReLU(True)] if use_dropout: conv_block += [nn.Dropout(0.5)] conv_block += [nn.ReflectionPad2d(1)] conv_block += [nn.Conv2d(dim,dim,kernel_size=3,padding=0,bias=use_bias), nn.InstanceNorm2d(dim)] return nn.Sequential(*conv_block) def forward(self,x): out = x + self.conv_block(x) return out class G(nn.Module): def __init__(self,dim=64,device_ids=[]): super(G,self).__init__() self.device_ids = device_ids model = [nn.ReflectionPad2d(3), nn.Conv2d(3, dim, kernel_size=7, padding=0,bias=False), nn.InstanceNorm2d(dim), nn.ReLU(True)] for i in range(2): mult = 2 ** i model += [nn.Conv2d(dim * mult, dim * mult * 2, kernel_size=3, stride=2, padding=1, bias=False), nn.InstanceNorm2d(dim * mult * 2), nn.ReLU(True)] for i in range(9): model += 
[ResnetBlock(dim*4,use_dropout=False,use_bias=False)] for i in range(2): mult = 2**(2 - i) model += [nn.ConvTranspose2d(dim * mult, int(dim * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=False), nn.InstanceNorm2d(int(dim * mult / 2)), nn.ReLU(True)] model += [nn.ReflectionPad2d(3)] model += [nn.Conv2d(dim,3,kernel_size=7,padding=0)] model += [nn.Tanh()] self.model = nn.Sequential(*model) def forward(self, input): use_gpu = len(self.device_ids) > 0 if (use_gpu): assert (torch.cuda.is_available()) if len(self.device_ids)and isinstance(input.data, torch.cuda.FloatTensor): print('Train on GPU...') return nn.parallel.data_parallel(self.model, input, self.device_ids) else: print('Train on CPU...') return self.model(input) class D(nn.Module): def __init__(self,dim=64,device_ids=[]): super(D,self).__init__() self.device_ids = device_ids model = [nn.Conv2d(3,dim,kernel_size=4,stride=2,padding=1), nn.LeakyReLU(0.2,True)] model += [nn.Conv2d(dim,dim*2,kernel_size=4,stride=2,padding=1,bias=False), nn.InstanceNorm2d(dim*2), nn.LeakyReLU(0.2,True)] model += [nn.Conv2d(dim*2, dim*4, kernel_size=4, stride=2, padding=1, bias=False), nn.InstanceNorm2d(dim*4), nn.LeakyReLU(0.2,True)] model += [nn.Conv2d(dim*4, dim*8, kernel_size=4, stride=1, padding=1, bias=False), nn.InstanceNorm2d(dim*8), nn.LeakyReLU(0.2,True)] model += [nn.Conv2d(dim*8,1,kernel_size=4,stride=1,padding=1)] self.model = nn.Sequential(*model) def forward(self, input): use_gpu = len(self.device_ids) > 0 if (use_gpu): assert (torch.cuda.is_available()) if len(self.device_ids)and isinstance(input.data, torch.cuda.FloatTensor): print('Train on GPU...') return nn.parallel.data_parallel(self.model, input, self.device_ids) else: print('Train on CPU...') return self.model(input) print ('kkk') # class te(nn.Module): # def __init__(self): # super(te,self).__init__() # norm_layer=nn.InstanceNorm2d # kw = 4 # padw = 1 # input_nc=3 # n_layers=3 # ndf=64 # use_bias = False # sequence = [ # 
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), # nn.LeakyReLU(0.2, True) # ] # # nf_mult = 1 # nf_mult_prev = 1 # for n in range(1, n_layers): # nf_mult_prev = nf_mult # nf_mult = min(2**n, 8) # sequence += [ # nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, # kernel_size=kw, stride=2, padding=padw, bias=use_bias), # norm_layer(ndf * nf_mult), # nn.LeakyReLU(0.2, True) # ] # # nf_mult_prev = nf_mult # nf_mult = min(2**n_layers, 8) # sequence += [ # nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, # kernel_size=kw, stride=1, padding=padw, bias=use_bias), # norm_layer(ndf * nf_mult), # nn.LeakyReLU(0.2, True) # ] # # sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # # self.model1 = nn.Sequential(*sequence) # def forward(self,x): # return self.model1(x)
2.484375
2
hansberger/analysis/migrations/0001_initial.py
097475/hansberger
1
18074
# Generated by Django 2.0.13 on 2019-06-27 17:04
# NOTE: auto-generated migration — do not hand-edit; create a new migration
# for schema changes instead.

import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('research', '0001_initial'),
        ('datasets', '0001_initial'),
    ]

    operations = [
        # Bottleneck distance computation between persistence diagrams.
        migrations.CreateModel(
            name='Bottleneck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('homology', models.PositiveIntegerField()),
                ('kind', models.CharField(choices=[('consecutive', 'consecutive'), ('one_to_all', 'one_to_all'), ('all_to_all', 'all_to_all')], max_length=20)),
            ],
        ),
        # A rendered persistence diagram image plus its bottleneck distance.
        migrations.CreateModel(
            name='Diagram',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.TextField()),
                ('bottleneck_distance', models.FloatField()),
                ('bottleneck', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='analysis.Bottleneck')),
            ],
        ),
        # Persistent-homology filtration analysis configuration and results.
        migrations.CreateModel(
            name='FiltrationAnalysis',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Name this analysis', max_length=100)),
                ('slug', models.SlugField(max_length=110)),
                ('description', models.TextField(blank=True, help_text='Write a brief description of the analysis', max_length=500)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('precomputed_distance_matrix_json', django.contrib.postgres.fields.jsonb.JSONField(default='"[]"')),
                ('window_size', models.PositiveIntegerField(blank=True, default=None, help_text="Leave window size blank to not use windows. Window parameter\n    is ignored when dealing with precomputed distance matrix. Always check\n    the dimensions of the dataset your are operating on and plan your windows\n    accordingly; eventual data that won't fit into the final window will be\n    discarded.", null=True)),
                ('window_overlap', models.PositiveIntegerField(default=0, help_text='How many columns of overlap to have in\n    consequent windows, if windows are being used. It must be at most 1\n    less than window size.')),
                ('filtration_type', models.CharField(choices=[('VRF', 'Vietoris Rips Filtration'), ('CWRF', 'Clique Weighted Rank Filtration')], help_text='Choose the type of analysis.', max_length=50)),
                ('distance_matrix_metric', models.CharField(blank=True, choices=[('braycurtis', 'Braycurtis'), ('canberra', 'Canberra'), ('chebyshev', 'Chebyshev'), ('cityblock', 'City block'), ('correlation', 'Correlation'), ('cosine', 'Cosine'), ('dice', 'Dice'), ('euclidean', 'Euclidean'), ('hamming', 'Hamming'), ('jaccard', 'Jaccard'), ('jensenshannon', '<NAME>'), ('kulsinski', 'Kulsinski'), ('mahalanobis', 'Mahalonobis'), ('matching', 'Matching'), ('minkowski', 'Minkowski'), ('rogerstanimoto', 'Rogers-Tanimoto'), ('russellrao', '<NAME>'), ('seuclidean', 'Seuclidean'), ('sokalmichener', 'Sojal-Michener'), ('sokalsneath', 'Sokal-Sneath'), ('sqeuclidean', 'Sqeuclidean'), ('yule', 'Yule')], help_text='If Vietoris-Rips filtration is selected and not using a precomputed distance matrix, choose the\n    distance metric to use on the selected dataset. This parameter is ignored in all other cases.', max_length=20)),
                ('max_homology_dimension', models.PositiveIntegerField(default=1, help_text='Maximum homology dimension computed. Will compute all dimensions lower than and equal to this value.\n    For 1, H_0 and H_1 will be computed.')),
                ('max_distances_considered', models.FloatField(blank=True, default=None, help_text='Maximum distances considered when constructing filtration.\n    If blank, compute the entire filtration.', null=True)),
                ('coeff', models.PositiveIntegerField(default=2, help_text='Compute homology with coefficients in the prime field Z/pZ for\n    p=coeff.')),
                ('do_cocycles', models.BooleanField(default=False, help_text='Indicator of whether to compute cocycles.')),
                ('n_perm', models.IntegerField(blank=True, default=None, help_text='The number of points to subsample in\n    a “greedy permutation,” or a furthest point sampling of the points. These points will\n    be used in lieu of the full point cloud for a faster computation, at the expense of\n    some accuracy, which can be bounded as a maximum bottleneck distance to all diagrams\n    on the original point set', null=True)),
                ('entropy_normalized_graph', models.TextField(blank=True, null=True)),
                ('entropy_unnormalized_graph', models.TextField(blank=True, null=True)),
                ('dataset', models.ForeignKey(blank=True, default=None, help_text='Select the source dataset from the loaded datasets', null=True, on_delete=django.db.models.deletion.CASCADE, to='datasets.Dataset')),
                ('research', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='research.Research')),
            ],
            options={
                'verbose_name': 'filtration analysis',
                'verbose_name_plural': 'filtration analyses',
                'abstract': False,
            },
        ),
        # Per-window results of a FiltrationAnalysis.
        migrations.CreateModel(
            name='FiltrationWindow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.PositiveIntegerField()),
                ('slug', models.SlugField(max_length=150)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('start', models.PositiveIntegerField(blank=True, null=True)),
                ('end', models.PositiveIntegerField(blank=True, null=True)),
                ('result_matrix', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('diagrams', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('result_entropy_normalized', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('result_entropy_unnormalized', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('analysis', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='analysis.FiltrationAnalysis')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Mapper-algorithm analysis configuration.
        migrations.CreateModel(
            name='MapperAnalysis',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Name this analysis', max_length=100)),
                ('slug', models.SlugField(max_length=110)),
                ('description', models.TextField(blank=True, help_text='Write a brief description of the analysis', max_length=500)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('precomputed_distance_matrix_json', django.contrib.postgres.fields.jsonb.JSONField(default='"[]"')),
                ('window_size', models.PositiveIntegerField(blank=True, default=None, help_text="Leave window size blank to not use windows. Window parameter\n    is ignored when dealing with precomputed distance matrix. Always check\n    the dimensions of the dataset your are operating on and plan your windows\n    accordingly; eventual data that won't fit into the final window will be\n    discarded.", null=True)),
                ('window_overlap', models.PositiveIntegerField(default=0, help_text='How many columns of overlap to have in\n    consequent windows, if windows are being used. It must be at most 1\n    less than window size.')),
                ('distance_matrix_metric', models.CharField(blank=True, choices=[('braycurtis', 'Braycurtis'), ('canberra', 'Canberra'), ('chebyshev', 'Chebyshev'), ('cityblock', 'City block'), ('correlation', 'Correlation'), ('cosine', 'Cosine'), ('dice', 'Dice'), ('euclidean', 'Euclidean'), ('hamming', 'Hamming'), ('jaccard', 'Jaccard'), ('jensenshannon', '<NAME>'), ('kulsinski', 'Kulsinski'), ('mahalanobis', 'Mahalonobis'), ('matching', 'Matching'), ('minkowski', 'Minkowski'), ('rogerstanimoto', 'Rogers-Tanimoto'), ('russellrao', '<NAME>'), ('seuclidean', 'Seuclidean'), ('sokalmichener', 'Sojal-Michener'), ('sokalsneath', 'Sokal-Sneath'), ('sqeuclidean', 'Sqeuclidean'), ('yule', 'Yule')], help_text='If not using a precomputed matrix, choose the distance metric to use on the dataset.', max_length=20)),
                ('projection', models.CharField(choices=[('sum', 'Sum'), ('mean', 'Mean'), ('median', 'Median'), ('max', 'Max'), ('min', 'Min'), ('std', 'Std'), ('dist_mean', 'Dist_mean'), ('l2norm', 'L2norm'), ('knn_distance_n', 'knn_distance_n')], default='sum', help_text='Specify a projection/lens type.', max_length=50)),
                ('knn_n_value', models.PositiveIntegerField(blank=True, help_text='Specify the value of n in knn_distance_n', null=True)),
                ('scaler', models.CharField(choices=[('None', 'None'), ('MinMaxScaler', 'MinMaxScaler'), ('MaxAbsScaler', 'MaxAbsScaler'), ('RobustScaler', 'RobustScaler'), ('StandardScaler', 'StandardScaler')], default='MinMaxScaler', help_text='Scaler of the data applied after mapping. Use None for no scaling.', max_length=50)),
                ('use_original_data', models.BooleanField(default=False, help_text='If ticked, clustering is run on the original data,\n    else it will be run on the lower dimensional projection.')),
                ('clusterer', models.CharField(choices=[('k-means', 'K-Means'), ('affinity_propagation', 'Affinity propagation'), ('mean-shift', 'Mean-shift'), ('spectral_clustering', 'Spectral clustering'), ('agglomerative_clustering', 'StandardScaler'), ('DBSCAN(min_samples=1)', 'DBSCAN(min_samples=1)'), ('DBSCAN', 'DBSCAN'), ('gaussian_mixtures', 'Gaussian mixtures'), ('birch', 'Birch')], default='DBSCAN', help_text='Select the clustering algorithm.', max_length=50)),
                ('cover_n_cubes', models.PositiveIntegerField(default=10, help_text='Number of hypercubes along each dimension.\n    Sometimes referred to as resolution.')),
                ('cover_perc_overlap', models.FloatField(default=0.5, help_text='Amount of overlap between adjacent cubes calculated\n    only along 1 dimension.')),
                ('graph_nerve_min_intersection', models.IntegerField(default=1, help_text='Minimum intersection considered when\n    computing the nerve. An edge will be created only when the\n    intersection between two nodes is greater than or equal to\n    min_intersection')),
                ('precomputed', models.BooleanField(default=False, help_text='Tell Mapper whether the data that you are clustering on\n    is a precomputed distance matrix. If set to True, the assumption is that you are\n    also telling your clusterer that metric=’precomputed’ (which is an argument for\n    DBSCAN among others), which will then cause the clusterer to expect a square\n    distance matrix for each hypercube. precomputed=True will give a square matrix\n    to the clusterer to fit on for each hypercube.')),
                ('remove_duplicate_nodes', models.BooleanField(default=False, help_text='Removes duplicate nodes before edges are\n    determined. A node is considered to be duplicate if it has exactly\n    the same set of points as another node.')),
                ('dataset', models.ForeignKey(blank=True, default=None, help_text='Select the source dataset from the loaded datasets', null=True, on_delete=django.db.models.deletion.CASCADE, to='datasets.Dataset')),
                ('research', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='research.Research')),
            ],
            options={
                'verbose_name': 'mapper algorithm analysis',
                'verbose_name_plural': 'mapper algoritm analyses',
                'abstract': False,
            },
        ),
        # Per-window results of a MapperAnalysis.
        migrations.CreateModel(
            name='MapperWindow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.PositiveIntegerField()),
                ('slug', models.SlugField(max_length=150)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('start', models.PositiveIntegerField(blank=True, null=True)),
                ('end', models.PositiveIntegerField(blank=True, null=True)),
                ('graph', models.TextField(blank=True, null=True)),
                ('analysis', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='windows', related_query_name='window', to='analysis.MapperAnalysis')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Link diagrams to the pair of windows they compare.
        migrations.AddField(
            model_name='diagram',
            name='window1',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='window1', to='analysis.FiltrationWindow'),
        ),
        migrations.AddField(
            model_name='diagram',
            name='window2',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='window2', to='analysis.FiltrationWindow'),
        ),
        migrations.AddField(
            model_name='bottleneck',
            name='analysis',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='analysis.FiltrationAnalysis'),
        ),
        migrations.AddField(
            model_name='bottleneck',
            name='window',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='analysis.FiltrationWindow'),
        ),
        migrations.AlterUniqueTogether(
            name='mapperanalysis',
            unique_together={('slug', 'research')},
        ),
        migrations.AlterUniqueTogether(
            name='filtrationanalysis',
            unique_together={('slug', 'research')},
        ),
    ]
1.859375
2
src/DOMObjects/schema.py
villagertech/DOMObjects
0
18075
__author__ = "<NAME> <<EMAIL>>"
__package__ = "DOMObjects"
__name__ = "DOMObjects.schema"
__license__ = "MIT"


class DOMSchema(object):
    """
    @abstract   Structure object for creating more advanced DOM trees
    @params     children    [dict] Default structure of children
    @params     dictgroups  [dict] Default structure of dictgroups
    @params     props       [dict] Default structure of properties

    @example    Sample object
                _schema = DOMSchema()
                _schema.children = {
                    "sip": {},
                    "schedules": {},
                    "favorites": {"dictgroups": ["sip", "http"]}
                }
    """

    def __init__(self, children: dict = None, dictgroups: dict = None,
                 props: dict = None):
        """
        @abstract   Object initializer and bootstraps first object.
        @params     children    [dict] Default structure of children
        @params     dictgroups  [dict] Default structure of dictgroups
        @params     props       [dict] Default structure of properties
        @returns    [DOMSchema] object

        NOTE: the defaults are None sentinels rather than `{}` literals.
        The original used mutable default arguments, so every DOMSchema
        created without explicit dicts shared (and mutated) the SAME
        dictionaries across all instances.
        """
        self.dictgroups = {} if dictgroups is None else dictgroups
        self.children = {} if children is None else children
        self.props = {} if props is None else props

    @property
    def keys(self) -> list:
        """
        @abstract   Returns all top-level keys in schema
        @returns    [list] of keys (children first, then dictgroups, then props)
        """
        _keys = list()
        _keys.extend(self.children.keys())
        _keys.extend(self.dictgroups.keys())
        _keys.extend(self.props.keys())
        return _keys
2.203125
2
Python/6 - kyu/6 kyu - Detect Pangram.py
danielbom/codewars
0
18076
# https://www.codewars.com/kata/detect-pangram/train/python

# My solution: collect the distinct alphabetic characters (case-folded)
# and check that all 26 letters are present.
import string


def is_pangram(text):
    """Return True if *text* contains every letter of the English alphabet."""
    return len(
        {letter.lower() for letter in text if letter.isalpha()}
    ) == 26


# Alternative: subset comparison against the alphabet.
# BUG FIX: the original used `string.lowercase`, which only exists in
# Python 2; `string.ascii_lowercase` is the Python 3 name.
import string


def is_pangram(s):
    return set(string.ascii_lowercase) <= set(s.lower())


# Alternative: test each letter of the alphabet individually.
import string


def is_pangram(s):
    s = s.lower()
    return all(letter in s for letter in string.ascii_lowercase)
3.84375
4
L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py
pasmuss/cmssw
0
18077
import FWCore.ParameterSet.Config as cms


def reEmulateLayer2(process):
    """Re-run the Stage-2 calorimeter layer-2 emulation on unpacked towers.

    Loads the simCaloStage2Digis emulator with the 2017 v1_7 (excl30)
    calo parameters, points it at the unpacked CaloTower collection, and
    appends the resulting path to the process schedule.
    """
    process.load('L1Trigger/L1TCalorimeter/simCaloStage2Digis_cfi')
    process.load('L1Trigger.L1TCalorimeter.caloStage2Params_2017_v1_7_excl30_cfi')
    # Feed the emulator from the unpacked (hardware) trigger towers.
    process.simCaloStage2Digis.towerToken = cms.InputTag("caloStage2Digis", "CaloTower")
    process.caloLayer2 = cms.Path(process.simCaloStage2Digis)
    process.schedule.append(process.caloLayer2)
    return process


def hwEmulCompHistos(process):
    """Book two histogram analyzers for hardware-vs-emulator comparison.

    l1tStage2CaloAnalyzer reads the re-emulated (simCaloStage2Digis)
    collections; l1tCaloStage2HwHistos is a clone reading the unpacked
    hardware (caloStage2Digis) collections.  Output goes to
    l1tCalo_2016_simHistos.root.
    """
    process.TFileService = cms.Service("TFileService",
        fileName = cms.string("l1tCalo_2016_simHistos.root"),
        closeFileFast = cms.untracked.bool(True)
    )

    # histograms for the emulated collections
    process.load('L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi')
    process.l1tStage2CaloAnalyzer.doEvtDisp = False
    process.l1tStage2CaloAnalyzer.mpBx = 0
    process.l1tStage2CaloAnalyzer.dmxBx = 0
    process.l1tStage2CaloAnalyzer.allBx = False
    process.l1tStage2CaloAnalyzer.towerToken = cms.InputTag("simCaloStage2Digis", "MP")
    process.l1tStage2CaloAnalyzer.clusterToken = cms.InputTag("None")
    process.l1tStage2CaloAnalyzer.mpEGToken = cms.InputTag("simCaloStage2Digis", "MP")
    process.l1tStage2CaloAnalyzer.mpTauToken = cms.InputTag("simCaloStage2Digis", "MP")
    process.l1tStage2CaloAnalyzer.mpJetToken = cms.InputTag("simCaloStage2Digis", "MP")
    process.l1tStage2CaloAnalyzer.mpEtSumToken = cms.InputTag("simCaloStage2Digis", "MP")
    process.l1tStage2CaloAnalyzer.egToken = cms.InputTag("simCaloStage2Digis")
    process.l1tStage2CaloAnalyzer.tauToken = cms.InputTag("simCaloStage2Digis")
    process.l1tStage2CaloAnalyzer.jetToken = cms.InputTag("simCaloStage2Digis")
    process.l1tStage2CaloAnalyzer.etSumToken = cms.InputTag("simCaloStage2Digis")

    # Second analyzer instance for the unpacked hardware collections.
    import L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi
    process.l1tCaloStage2HwHistos = L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi.l1tStage2CaloAnalyzer.clone()
    process.l1tCaloStage2HwHistos.doEvtDisp = False
    process.l1tCaloStage2HwHistos.mpBx = 0
    process.l1tCaloStage2HwHistos.dmxBx = 0
    process.l1tCaloStage2HwHistos.allBx = False
    process.l1tCaloStage2HwHistos.towerToken = cms.InputTag("caloStage2Digis", "CaloTower")
    process.l1tCaloStage2HwHistos.clusterToken = cms.InputTag("None")
    process.l1tCaloStage2HwHistos.mpEGToken = cms.InputTag("caloStage2Digis", "MP")
    process.l1tCaloStage2HwHistos.mpTauToken = cms.InputTag("caloStage2Digis","MP")
    process.l1tCaloStage2HwHistos.mpJetToken = cms.InputTag("caloStage2Digis", "MP")
    process.l1tCaloStage2HwHistos.mpEtSumToken = cms.InputTag("caloStage2Digis", "MP")
    process.l1tCaloStage2HwHistos.egToken = cms.InputTag("caloStage2Digis", "EGamma")
    process.l1tCaloStage2HwHistos.tauToken = cms.InputTag("caloStage2Digis", "Tau")
    process.l1tCaloStage2HwHistos.jetToken = cms.InputTag("caloStage2Digis", "Jet")
    process.l1tCaloStage2HwHistos.etSumToken = cms.InputTag("caloStage2Digis", "EtSum")

    process.hwEmulHistos = cms.Path(
        process.l1tStage2CaloAnalyzer
        +process.l1tCaloStage2HwHistos
    )
    process.schedule.append(process.hwEmulHistos)
    return process


def reEmulateLayer2ValHistos(process):
    """Full validation setup: re-emulate layer 2 and book both analyzers.

    Inserts the caloTowersFilter at the front of both paths so only events
    with unpacked calo towers are processed.
    """
    process.load('EventFilter.L1TRawToDigi.caloTowersFilter_cfi')
    reEmulateLayer2(process)
    hwEmulCompHistos(process)
    #process.l1ntupleraw.insert(0,process.caloTowersFilter)
    #process.l1ntuplesim.insert(0,process.caloTowersFilter)
    process.caloLayer2.insert(0,process.caloTowersFilter)
    process.hwEmulHistos.insert(0,process.caloTowersFilter)
    return process
1.523438
2
lintcode/1375.2.py
jianershi/algorithm
1
18078
""" 1375. Substring With At Least K Distinct Characters """ class Solution: """ @param s: a string @param k: an integer @return: the number of substrings there are that contain at least k distinct characters """ def kDistinctCharacters(self, s, k): # Write your code here n = len(s) left = 0 count = [0] * 256 distinct_count = 0 substring_count = 0 for right in range(n): count[ord(s[right])] += 1 if count[ord(s[right])] == 1: distinct_count += 1 while left <= right and distinct_count >= k: substring_count += n - right count[ord(s[left])] -= 1 if count[ord(s[left])] == 0: distinct_count -= 1 left += 1 return substring_count
3.796875
4
ex29_half.py
youknowone/learn-python3-thw-code-ko
0
18079
# Exercise: branch on simple comparisons and print the matching messages.
people = 20
cats = 30
dogs = 15

# More cats than people, or fewer?
if cats > people:
    print("고양이가 너무 많아요! 세상은 멸망합니다!")
if cats < people:
    print("고양이가 많지 않아요! 세상은 지속됩니다!")

# Same comparison, this time against the dogs.
if dogs > people:
    print("세상은 침에 젖습니다!")
if dogs < people:
    print("세상은 말랐습니다!")

# Five more dogs arrive; people (20) and dogs (20) are now equal,
# so all three of the checks below fire.
dogs = dogs + 5

if people >= dogs:
    print("사람은 개보다 많거나 같습니다")
if people <= dogs:
    print("사람은 개보다 적거나 같습니다.")
if people == dogs:
    print("사람은 개입니다.")
3.828125
4
env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py
Raniac/NEURO-LEARN
8
18080
""" Tests for the import manager. """


from traits.util.api import import_symbol

from traits.testing.unittest_tools import unittest


class TestImportSymbol(unittest.TestCase):
    """ Tests for the import manager. """

    def test_import_dotted_symbol(self):
        """ import dotted symbol """

        import tarfile

        # "package.module.Symbol" form: the name after the last dot is
        # resolved as an attribute of the imported module.
        symbol = import_symbol("tarfile.TarFile")
        self.assertEqual(symbol, tarfile.TarFile)

        return

    def test_import_nested_symbol(self):
        """ import nested symbol """

        import tarfile

        # "module:attr.attr" form: everything after the ':' is a nested
        # attribute path resolved on the imported module.
        symbol = import_symbol("tarfile:TarFile.open")
        self.assertEqual(symbol, tarfile.TarFile.open)

        return

    def test_import_dotted_module(self):
        """ import dotted module """

        # The colon form also works for symbols inside dotted packages.
        symbol = import_symbol("traits.util.import_symbol:import_symbol")
        self.assertEqual(symbol, import_symbol)

        return


if __name__ == "__main__":
    unittest.main()

#### EOF ######################################################################
2.484375
2
cubecode/二阶段算法合集/python版/RubiksCube-TwophaseSolver-master/client_gui.py
YuYuCong/Color-recognition-of-Rubik-s-Cube
11
18081
<reponame>YuYuCong/Color-recognition-of-Rubik-s-Cube<filename>cubecode/二阶段算法合集/python版/RubiksCube-TwophaseSolver-master/client_gui.py # ################ A simple graphical interface which communicates with the server ##################################### from tkinter import * import socket import face import cubie # ################################## some global variables and constants ############################################### DEFAULT_HOST = 'localhost' DEFAULT_PORT = '8080' width = 60 # width of a facelet in pixels facelet_id = [[[0 for col in range(3)] for row in range(3)] for face in range(6)] colorpick_id = [0 for i in range(6)] curcol = None t = ("U", "R", "F", "D", "L", "B") cols = ("yellow", "green", "red", "white", "blue", "orange") ######################################################################################################################## # ################################################ Diverse functions ################################################### def show_text(txt): """Displays messages.""" print(txt) display.insert(INSERT, txt) root.update_idletasks() def create_facelet_rects(a): """Initializes the facelet grid on the canvas.""" offset = ((1, 0), (2, 1), (1, 1), (1, 2), (0, 1), (3, 1)) for f in range(6): for row in range(3): y = 10 + offset[f][1] * 3 * a + row * a for col in range(3): x = 10 + offset[f][0] * 3 * a + col * a facelet_id[f][row][col] = canvas.create_rectangle(x, y, x + a, y + a, fill="grey") if row == 1 and col == 1: canvas.create_text(x + width // 2, y + width // 2, font=("", 14), text=t[f], state=DISABLED) for f in range(6): canvas.itemconfig(facelet_id[f][1][1], fill=cols[f]) def create_colorpick_rects(a): """Initializes the "paintbox" on the canvas""" global curcol global cols for i in range(6): x = (i % 3)*(a+5) + 7*a y = (i // 3)*(a+5) + 7*a colorpick_id[i] = canvas.create_rectangle(x, y, x + a, y + a, fill=cols[i]) canvas.itemconfig(colorpick_id[0], width=4) curcol = cols[0] def get_definition_string(): 
"""Generates the cube definition string from the facelet colors.""" color_to_facelet = {} for i in range(6): color_to_facelet.update({canvas.itemcget(facelet_id[i][1][1], "fill"): t[i]}) s = '' for f in range(6): for row in range(3): for col in range(3): s += color_to_facelet[canvas.itemcget(facelet_id[f][row][col], "fill")] return s ######################################################################################################################## # ############################### Solve the displayed cube with a local or remote server ############################### def solve(): """Connects to the server and returns the solving maneuver.""" display.delete(1.0, END) # clear output window try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.error: show_text('Failed to create socket') return # host = 'f9f0b2jt6zmzyo6b.myfritz.net' # my RaspberryPi, if online host = txt_host.get(1.0, END).rstrip() # default is localhost port = int(txt_port.get(1.0, END)) # default is port 8080 try: remote_ip = socket.gethostbyname(host) except socket.gaierror: show_text('Hostname could not be resolved.') return try: s.connect((remote_ip, port)) except: show_text('Cannot connect to server!') return show_text('Connected with ' + remote_ip + '\n') try: defstr = get_definition_string()+'\n' except: show_text('Invalid facelet configuration.\nWrong or missing colors.') return show_text(defstr) try: s.sendall((defstr+'\n').encode()) except: show_text('Cannot send cube configuration to server.') return show_text(s.recv(2048).decode()) ######################################################################################################################## # ################################# Functions to change the facelet colors ############################################# def clean(): """Restores the cube to a clean cube.""" for f in range(6): for row in range(3): for col in range(3): canvas.itemconfig(facelet_id[f][row][col], fill=canvas.itemcget(facelet_id[f][1][1], 
"fill")) def empty(): """Removes the facelet colors except the center facelets colors.""" for f in range(6): for row in range(3): for col in range(3): if row != 1 or col != 1: canvas.itemconfig(facelet_id[f][row][col], fill="grey") def random(): """Generates a random cube and sets the corresponding facelet colors.""" cc = cubie.CubieCube() cc.randomize() fc = cc.to_facelet_cube() idx = 0 for f in range(6): for row in range(3): for col in range(3): canvas.itemconfig(facelet_id[f][row][col], fill=cols[fc.f[idx]] ) idx += 1 ######################################################################################################################## # ################################### Edit the facelet colors ########################################################## def click(event): """Defines how to react on left mouse clicks""" global curcol idlist = canvas.find_withtag("current") if len(idlist) > 0: if idlist[0] in colorpick_id: curcol = canvas.itemcget("current", "fill") for i in range(6): canvas.itemconfig(colorpick_id[i], width=1) canvas.itemconfig("current", width=5) else: canvas.itemconfig("current", fill=curcol) ######################################################################################################################## # ###################################### Generate and display the TK_widgets ########################################## root = Tk() root.wm_title("Solver Client") canvas = Canvas(root, width=12 * width + 20, height=9 * width + 20) canvas.pack() bsolve = Button(text="Solve", height=2, width=10, relief=RAISED, command=solve) bsolve_window = canvas.create_window(10 + 10.5 * width, 10 + 6.5 * width, anchor=NW, window=bsolve) bclean = Button(text="Clean", height=1, width=10, relief=RAISED, command=clean) bclean_window = canvas.create_window(10 + 10.5 * width, 10 + 7.5 * width, anchor=NW, window=bclean) bempty = Button(text="Empty", height=1, width=10, relief=RAISED, command=empty) bempty_window = canvas.create_window(10 + 10.5 * width, 10 + 
8 * width, anchor=NW, window=bempty) brandom = Button(text="Random", height=1, width=10, relief=RAISED, command=random) brandom_window = canvas.create_window(10 + 10.5 * width, 10 + 8.5 * width, anchor=NW, window=brandom) display = Text(height=7, width=39) text_window = canvas.create_window(10 + 6.5 * width, 10 + .5 * width, anchor=NW, window=display) hp = Label(text=' Hostname and Port') hp_window = canvas.create_window(10 + 0 * width, 10 + 0.6 * width, anchor=NW, window=hp) txt_host = Text(height=1, width=20) txt_host_window = canvas.create_window(10 + 0 * width, 10 + 1 * width, anchor=NW, window=txt_host) txt_host.insert(INSERT, DEFAULT_HOST) txt_port = Text(height=1, width=20) txt_port_window = canvas.create_window(10 + 0 * width, 10 + 1.5 * width, anchor=NW, window=txt_port) txt_port.insert(INSERT, DEFAULT_PORT) canvas.bind("<Button-1>", click) create_facelet_rects(width) create_colorpick_rects(width) root.mainloop() ########################################################################################################################
2.890625
3
bootcamp/wiki/core/compat.py
basiltiger/easy_bootcamp
0
18082
"""Abstraction layer to deal with Django related changes in order to keep
compatibility with several Django versions simultaneously."""

from __future__ import unicode_literals

from django.conf import settings as django_settings

# Path of the (possibly swapped) user model; falls back to stock auth.User
# when the project does not override AUTH_USER_MODEL.
USER_MODEL = getattr(django_settings, 'AUTH_USER_MODEL', 'auth.User')


# Django 1.11 Widget.build_attrs has a different signature, designed for the new
# template based rendering. The previous version was more useful for our needs,
# so we restore that version.
# When support for Django < 1.11 is dropped, we should look at using the
# new template based rendering, at which point this probably won't be needed at all.
class BuildAttrsCompat(object):

    def build_attrs_compat(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # Merge order: widget defaults, then extra_attrs, then per-call
        # keyword arguments (later updates win on key collisions).
        attrs = self.attrs.copy()
        if extra_attrs is not None:
            attrs.update(extra_attrs)
        if kwargs is not None:
            attrs.update(kwargs)
        return attrs


try:
    # Python 3
    from urllib.parse import urljoin  # noqa
except ImportError:
    # Python 2
    from urlparse import urljoin  # noqa @UnusedImport
2.09375
2
ethereum.py/ethereum/clients/ethereum.py
dixonwhitmire/connect-clients
0
18083
""" ethereum.py ethereum.py contains an EthereumClient class that provides functions for interacting with the Coverage.sol solidity contract on an Ethereum blockchain network. """ import asyncio import datetime import json import logging import os from ethereum.clients.nats import get_nats_client from ethereum.config import get_settings, nats_eligibility_subject from ethereum.exceptions import EthereumNetworkConnectionError from hexbytes import HexBytes from typing import Optional, Any, List from web3 import Web3 logger = logging.getLogger(__name__) # client instance eth_client = None class EthereumClient: """ Ethereum client for LFH that utilizes the Web3 library for interacting with an Ethereum blockchain network. """ def __init__(self, **qwargs): logger.debug("Initializing EthereumClient") self.eth_network_uri = qwargs["eth_network_uri"] logger.debug("Initializing Web3") self.client: Optional[Web3] = Web3(Web3.HTTPProvider(self.eth_network_uri)) self.from_acct = {"from": self.client.eth.accounts[0]} if (self.client and self.client.isConnected()): logger.info(f"Connected to the Ethereum network at: {self.eth_network_uri}") self.contract = self.client.eth.contract(address=qwargs["contract_address"], abi=qwargs["contract_abi"]) event_filter = self.contract.events.EligibilityResult.createFilter(fromBlock="latest") self.cancelled = False contract_event_loop = asyncio.get_event_loop() contract_event_loop.create_task(self.event_loop(event_filter, qwargs["event_poll_interval"])) logger.info(f"Connected to the contract at: {qwargs['contract_address']}") else: error_msg = f"Failed to connect to the Ethereum network at: {self.eth_network_uri}" logger.error(error_msg) raise EthereumNetworkConnectionError(error_msg) def add_coverage_resource(self, path: str, fhir_json: Any, payor_ref: str, subscriber_ref: str, coverage_start: int, coverage_end: int): """ Send a Coverage FHIR resource to the Coverage.sol contract. :param path: FHIR path of the resource, e.g. 
/Coverage/001 :param fhir_json: The string representation of the FHIR resource :param payor_ref: coverage.payor[0].reference :param subscriber_ref: coverage.subscriber.reference :param coverage_start: coverage.period.start converted to a timestamp :param coverage_end: coverage.period.end converted to a timestamp :return: The hash of the submitted transaction or None """ if not self.client.isConnected(): error = f"Not connected to the Ethereum network" logger.error(error) return {"error": error} try: tx_hash = self.contract.functions.add_coverage_resource(path, json.dumps(fhir_json), payor_ref, subscriber_ref, coverage_start, coverage_end).transact(self.from_acct) tx_receipt = self.client.eth.waitForTransactionReceipt(tx_hash) receipt_dict = dict(tx_receipt) hash_str = receipt_dict["transactionHash"].hex() logger.info(f"tx hash: {hash_str}") return {"result": hash_str} except Exception as ex: error = f"Transaction error {ex}" logger.error(error) return {"error": error} def check_eligibility(self, path: str, fhir_json: Any, insurer_ref: str, patient_ref: str, coverage_ref: str, coverage_date: int): """ Send a CoverageEligibilityRequest FHIR resource to the Coverage.sol contract. :param path: FHIR path of the resource, e.g. 
/CoverageEligibilityRequest/001 :param fhir_json: The string representation of the FHIR resource :param insurer_ref: coverageeligibilityrequest.insurer.reference :param patient_ref: coverageeligibilityrequest.patient.reference :param coverage_ref: coverageeligibilityrequest.insurance[0].coverage :param coverage_date: coverageeligibilityrequest.created converted to a timestamp :return: The hash of the submitted transaction or None """ if not self.client.isConnected(): error = f"Not connected to the Ethereum network" logger.error(error) return {"error": error} try: tx_hash = self.contract.functions.check_eligibility(path, json.dumps(fhir_json), insurer_ref, patient_ref, coverage_ref, coverage_date).transact(self.from_acct) tx_receipt = self.client.eth.waitForTransactionReceipt(tx_hash) receipt_dict = dict(tx_receipt) hash_str = receipt_dict["transactionHash"].hex() logger.info(f"tx hash: {hash_str}") return {"result": hash_str} except Exception as ex: error = f"Transaction error {ex}" logger.error(error) return {"error": error} def add_fhir_resource(self, fhir_type: str, path: str, fhir_json: Any): """ Send a Patient or Organization FHIR resource to the Coverage.sol contract. :param fhir_type: FHIR type of the resource, e.g. Patient :param path: FHIR path of the resource, e.g. 
/Patient/001 :param fhir_json: The string representation of the FHIR resource :return: The hash of the submitted transaction or None """ if not self.client.isConnected(): error = f"Not connected to the Ethereum network" logger.error(error) return {"error": error} try: tx_hash = self.contract.functions.add_fhir_resource(fhir_type, path, json.dumps(fhir_json)).transact(self.from_acct) tx_receipt = self.client.eth.waitForTransactionReceipt(tx_hash) receipt_dict = dict(tx_receipt) hash_str = receipt_dict["transactionHash"].hex() logger.info(f"tx hash: {hash_str}") return {"result": hash_str} except Exception as ex: error = f"Transaction error {ex}" logger.error(error) return {"error": error} def close(self): self.cancelled = True async def event_loop(self, event_filter, poll_interval: int): while not self.cancelled: for event in event_filter.get_new_entries(): await self.handle_event(json.loads(Web3.toJSON(event))) await asyncio.sleep(poll_interval) async def handle_event(self, event: dict): """ Send a FHIR CoverageEligibilityResponse based on the eligibility decision from the contract. :param event: The JSON contract event containing the eligibility decision and supporting info. """ logger.trace(f"Received contract event: {event}") path: List[str] = event["args"]["path"].split("/") request_id: str = path[1] result: bool = event["args"]["result"] disposition: str = "Policy is currently in effect." if not result: disposition = "Policy is not in effect." 
today: str = datetime.date.today().isoformat() message: Any = { "resourceType": "CoverageEligibilityResponse", "id": request_id, "text": { "status": "generated", "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the CoverageEligibilityResponse.</div>" }, "identifier": [ { "system": "http://localhost:5000/fhir/coverageeligibilityresponse/" + request_id, "value": request_id } ], "status": "active", "purpose": [ "validation" ], "patient": { "reference": event["args"]["patient_ref"] }, "created": today, "request": { "reference": "http://www.BenefitsInc.com/fhir/coverageeligibilityrequest/" + request_id }, "outcome": "complete", "disposition": disposition, "insurer": { "reference": event["args"]["insurer_ref"] }, "insurance": [ { "coverage": { "reference": event["args"]["coverage_ref"] }, "inforce": result } ] }; nats_client = await get_nats_client() msg_str = json.dumps(message) logger.info(f"CoverageEligibilityResponse: {msg_str}") await nats_client.publish(nats_eligibility_subject, bytearray(msg_str, "utf-8")) logger.trace("Sent CoverageEligibilityResponse via NATS") def get_ethereum_client() -> Optional[EthereumClient]: """ :return: a connected EthereumClient instance """ global eth_client if not eth_client: settings = get_settings() # load ABI file abi_file: str = os.path.join(settings.ethereum_config_directory, settings.ethereum_contract_abi) contract_info = json.load(open(abi_file)) eth_client = EthereumClient( eth_network_uri=settings.ethereum_network_uri, contract_address=settings.ethereum_contract_address, contract_abi=contract_info["abi"], event_poll_interval=settings.ethereum_event_poll_seconds ) return eth_client def stop_ethereum_client(): client = get_ethereum_client() client.close() class HexJsonEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, HexBytes): return obj.hex() return super().default(obj)
2.59375
3
cli/actions/mc_combination_action.py
daneshvar-amrollahi/polar
1
18084
from argparse import Namespace

from .action import Action
from symengine.lib.symengine_wrapper import sympify
from termcolor import colored
from program.mc_comb_finder import MCCombFinder
from cli.common import prepare_program


class MCCombinationAction(Action):
    """CLI action that searches a benchmark for a moment-computable (MC)
    polynomial combination of its non-moment-computable variables."""

    cli_args: Namespace

    def __init__(self, cli_args: Namespace):
        self.cli_args = cli_args

    def __call__(self, *args, **kwargs):
        """Run the MC-combination search for the benchmark in args[0] and
        print the result (or a message when nothing applies / is found)."""
        benchmark = args[0]
        combination_deg = self.cli_args.mc_comb_deg
        program = prepare_program(benchmark, self.cli_args)

        if len(program.non_mc_variables) == 0:
            print(f"--mc_comb not applicable to {benchmark} since all variables are already moment computable.")
            return

        # BUG FIX: the original initialized `combination_vars = []` and then
        # branched on `if len(combination_vars) == 0:`, which is always true,
        # so the user-supplied --mc_comb variable list was dead code and
        # silently ignored.  Honor an explicit list when given; otherwise
        # fall back to the program's own non-MC original variables.
        if self.cli_args.mc_comb:
            combination_vars = [sympify(v) for v in self.cli_args.mc_comb]
        else:
            combination_vars = [
                var for var in program.non_mc_variables
                if var in program.original_variables
            ]

        print(colored("-------------------", "cyan"))
        print(colored("- Analysis Result -", "cyan"))
        print(colored("-------------------", "cyan"))
        print()

        combinations = MCCombFinder.find_good_combination(
            combination_vars, combination_deg, program,
            self.cli_args.numeric_roots, self.cli_args.numeric_croots, self.cli_args.numeric_eps
        )

        if combinations is None:
            print(f"No combination found with degree {combination_deg}. Try using other degrees.")
        else:
            for combination in combinations:
                candidate, solution = combination[0], combination[1]
                print(f"E({candidate})[n] = {solution}")
2.328125
2
docker/gunicorn.py
admariner/madewithwagtail
0
18085
import gunicorn

# Gunicorn configuration for the containerized deployment.

# Log access and error streams to stdout/stderr so the container runtime
# collects them.
accesslog = "-"
errorlog = "-"
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" "%({X-Forwarded-For}i)s"'
capture_output = True

# Trust X-Forwarded-* headers from any upstream proxy.
forwarded_allow_ips = "*"
# Treat requests carrying this header as HTTPS (set by the CDN in front).
secure_scheme_headers = {"X-CLOUDFRONT": "yes"}

workers = 2
worker_class = "gthread"
worker_connections = 5

bind = ":8000"
# NOTE(review): 75s keep-alive presumably matches the upstream load
# balancer's idle timeout — confirm against the infrastructure config.
keep_alive = 75
chdir = "/madewithwagtail"

# Obfuscate the Server header (to the md5sum of "Springload")
gunicorn.SERVER_SOFTWARE = "04e96149a2f64d6135c82d199ab62122"
2.109375
2
Lista 2/Questao_1.py
flaviomelo10/Python-para-PLN
0
18086
# -- encoding:utf-8 --
'''
Create a variable with the string " instituto de ciências matemáticas e de computação" and:
a. Concatenate another string: "usp"
b. Concatenate another piece of information: 2021
c. Check the length of the new string (with the data added in a and b),
   counting characters and spaces
d. Convert the string entirely to upper case
e. Convert the string entirely to lower case
f. Remove the space at the beginning of the string and print it
g. Replace every letter 'a' with 'x'
h. Split the string into individual words
i. Check how many words exist in the string
j. Split the string on the word "de"
k. Check how many words/phrases were produced by splitting on "de"
l. Join the words that were split (the result of h or j may be used)
m. Join the words that were split, now separated by a slash instead of
   spaces (the result of h or j may be used)
'''

texto = " instituto de ciências matemáticas e de computação"

# a) concatenate " usp"
texto = texto + " usp"
print(texto)

# b) concatenate " 2021"
texto = texto + " 2021"
print(texto)

# c) length of the extended string (characters including spaces)
tamanho = len(texto)
print(tamanho)

# d) upper case
print(texto.upper())

# e) lower case
print(texto.lower())

# f) drop the leading space — shown two ways (slice and strip)
print(texto[1:])
print(texto.strip())

# g) replace 'a' with 'x'
print(texto.replace('a', 'x'))

# h) split into words
separar = texto.split()
print(separar)

# i) NOTE(review): the exercise asks for the word COUNT, but the code
# prints the list again — presumably intended to be len(separar).
print(separar)

# j) split on the word "de"
separar2 = texto.split('de')
print(separar2)

# k) number of pieces after splitting on "de"
print(len(separar2))

# l) re-join the words with spaces
juntar = " ".join(separar)
print(juntar)

# m) re-join the words with slashes
juntar2 = "/".join(separar)
print(juntar2)
4.1875
4
breadcrumbs/templatetags/breadcrumbs_tags.py
LinuxOSsk/Shakal-NG
10
18087
# -*- coding: utf-8 -*- from django.shortcuts import resolve_url from django.template.loader import render_to_string from django_jinja import library from jinja2 import contextfunction @contextfunction @library.global_function def breadcrumb(context, contents, *args, **kwargs): class_name = kwargs.pop('class', False) url = kwargs.pop('url', False) if url is not False: url = resolve_url(url, *args, **kwargs) breadcrumb_context = { 'contents': contents, 'url': url, 'class': class_name } context['breadcrumbs'].append(breadcrumb_context) return '' @library.global_function def render_breadcrumbs(breadcrumbs): breadcrumbs.reverse() ctx = {'breadcrumbs': breadcrumbs} return render_to_string('breadcrumbs/breadcrumbs.html', ctx)
2.28125
2
contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py
Flared/opencensus-python
650
18088
import platform

import requests


class DDTransport(object):
    """ DDTransport contains all the logic for sending Traces to Datadog

    :type trace_addr: str
    :param trace_addr: trace_addr specifies the host[:port] address of
        the Datadog Trace Agent.
    """
    def __init__(self, trace_addr):
        self._trace_addr = trace_addr
        # Static metadata headers attached to every request to the agent.
        self._headers = {
            "Datadog-Meta-Lang": "python",
            "Datadog-Meta-Lang-Interpreter": platform.platform(),
            # Following the example of the Golang version it is prefixed
            # OC for Opencensus.
            "Datadog-Meta-Tracer-Version": "OC/0.0.1",
            "Content-Type": "application/json",
        }

    @property
    def trace_addr(self):
        """ specifies the host[:port] address of the Datadog Trace Agent.
        """
        return self._trace_addr

    @property
    def headers(self):
        """ specifies the headers that will be attached to HTTP request sent to DD.
        """
        return self._headers

    def send_traces(self, trace):
        """ Sends traces to the Datadog Tracing Agent

        :type trace: dic
        :param trace: Trace dictionary
        """
        # v0.4 traces endpoint of the locally running Datadog agent.
        requests.post("http://" + self.trace_addr + "/v0.4/traces",
                      json=trace,
                      headers=self.headers)
2.734375
3
credsweeper/file_handler/analysis_target.py
ARKAD97/CredSweeper
0
18089
from typing import List


class AnalysisTarget:
    """One line of a scanned file, together with its surrounding context."""

    def __init__(self, line: str, line_num: int, lines: List[str], file_path: str):
        # The full file contents are retained so that scanning rules can
        # look at neighbouring lines of the target.
        self.file_path = file_path
        self.lines = lines
        self.line_num = line_num
        self.line = line
2.546875
3
model/vgg_deeplab.py
ireina7/zero-shot-segmentation
0
18090
import torchvision import torch import torch.nn as nn import torch.nn.functional as F class Vgg_Deeplab(nn.Module): def __init__(self,*args, **kwargs): super(Vgg_Deeplab, self).__init__() vgg16 = torchvision.models.vgg16() layers = [] layers.append(nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.MaxPool2d(3, stride=2, padding=1)) layers.append(nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.MaxPool2d(3, stride=2, padding=1)) layers.append(nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.MaxPool2d(3, stride=2, padding=1)) layers.append(nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.MaxPool2d(3, stride=1, padding=1)) layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=2, dilation=2)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=2, dilation=2)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=2, dilation=2)) layers.append(nn.ReLU(inplace=True)) layers.append(nn.MaxPool2d(3, stride=1, padding=1)) self.features = nn.Sequential(*layers) classifier = [] 
classifier.append(nn.AvgPool2d(3, stride=1, padding=1)) classifier.append(nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=12, dilation=12)) classifier.append(nn.ReLU(inplace=True)) classifier.append(nn.Dropout(p=0.5)) self.classifier = nn.Sequential(*classifier) self.init_weights() def forward(self, x): x = self.features(x) x = self.classifier(x) return x def init_weights(self): vgg = torchvision.models.vgg16(pretrained=True) state_vgg = vgg.features.state_dict() self.features.load_state_dict(state_vgg) for ly in self.classifier.children(): if isinstance(ly, nn.Conv2d): nn.init.kaiming_normal_(ly.weight, a=1) nn.init.constant_(ly.bias, 0) def get_1x_lr_params(self): """ This generator returns all the parameters for the last layer of the net, which does the classification of pixel into classes """ # b = [] # # b.append(self.conv1) # b.append(self.bn1) # b.append(self.layer1) # b.append(self.layer2) # b.append(self.layer3) # b.append(self.layer4) for i in self.features: #for j in self.features[i].modules(): jj = 0 for k in i.parameters(): jj += 1 if k.requires_grad: yield k def optim_parameters_1x(self, args): return [{"params": self.get_1x_lr_params(), "lr": 1 * args.learning_rate}] def get_10x_lr_params(self): """ This generator returns all the parameters for the last layer of the net, which does the classification of pixel into classes """ # b = [] # b.append(self.layer.parameters()) for i in self.classifier: #for j in self.classifier[i].modules(): jj = 0 for k in i.parameters(): jj += 1 if k.requires_grad: yield k def optim_parameters_10x(self, args): return [{"params": self.get_10x_lr_params(), "lr": 10 * args.learning_rate}] if __name__ == "__main__": net = Vgg_Deeplab(3, 10) in_ten = torch.randn(1, 3, 224, 224) out = net(in_ten) print(net) print(out.size()) in_ten = torch.randn(1, 3, 64, 64) mod = nn.Conv2d(3, 512, kernel_size=3, stride=1, padding=2, dilation=2) out = mod(in_ten) print(out.shape)
2.640625
3
web/app.py
erberlin/themepark-times-API
7
18091
# -*- coding: utf-8 -*- """ This module defines a connexion app object and configures the API endpoints based the swagger.yml configuration file. copyright: © 2019 by <NAME>. license: MIT, see LICENSE for more details. """ import connexion app = connexion.App(__name__, specification_dir="./") app.app.url_map.strict_slashes = False app.add_api("swagger.yml") if __name__ == "__main__": # FLASK_ENV=development & FLASK_DEBUG=1 w/ Docker don't seem to enable debug mode. app.run(debug=True)
1.71875
2
CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py
MY-Climate-Observatory/myco-data
0
18092
<gh_stars>0 # -*- coding: utf-8 -*- """ 17 June 2020 Author: <NAME> Visualizing the types of pollutants. """ import pandas as pd from plotly.offline import plot import plotly.graph_objects as go # Get the file from us df = pd.read_csv(https://www.dropbox.com/s/u0ymg0ufne0an60/api-20200713.csv?dl=1", sep = ";") # Make the selection selected_area = "Sandakan" df_select = df.loc[(df.Area == selected_area), ["Area", "Dominant", "Datetime"]] # Data wrangling for this particular visual df_update = df_select.set_index(pd.DatetimeIndex(df_select["Datetime"])) df_update.drop(df_update.columns[2], axis = 1, inplace = True) # Wrangling df_group_time = df_update.groupby(pd.Grouper(freq = "Q")).size().reset_index(name = "Total") df_group = df_update.groupby([pd.Grouper(freq = "Q"), pd.Grouper("Dominant")]).size().reset_index(name = "Count") df_output = df_group.set_index("Datetime").join(df_group_time.set_index("Datetime")) df_output["Frequency"] = df_output["Count"] / df_output["Total"] # Creating df subset for the stacked bars, here we are only dealing with the main dominant pollutants df_pm2_5 = df_output.loc[(df_output.Dominant == "**")] df_pm10 = df_output.loc[(df_output.Dominant == "*")] df_so2 = df_output.loc[(df_output.Dominant == "a")] df_no2 = df_output.loc[(df_output.Dominant == "b")] df_o3 = df_output.loc[(df_output.Dominant == "c")] df_co = df_output.loc[(df_output.Dominant == "d")] # Now comes the bar chart fig = go.Figure() fig.add_trace(go.Bar(x = df_pm2_5.index, y = df_pm2_5["Frequency"], name = "PM 2.5")) fig.add_trace(go.Bar(x = df_pm10.index, y = df_pm10["Frequency"], name = "PM 10")) fig.add_trace(go.Bar(x = df_so2.index, y = df_so2["Frequency"], name = "SO2")) fig.add_trace(go.Bar(x = df_no2.index, y = df_no2["Frequency"], name = "NO2")) fig.add_trace(go.Bar(x = df_o3.index, y = df_o3["Frequency"], name = "O3")) fig.add_trace(go.Bar(x = df_co.index, y = df_co["Frequency"], name = "CO")) fig.update_layout(barmode = "stack", title_text="Frequency of Detected 
Pollutants") plot(fig)
3.28125
3
tests/bs3/test_block_fields.py
rpkilby/django-template-forms
1
18093
from django import forms from django.test import TestCase from template_forms import bs3 def startswith_a(value): if value.startswith('a'): return value raise forms.ValidationError('Value must start with "a".') def not_now(value): if value: raise forms.ValidationError('I cannot let you do that right now.') class StandardFieldTests(TestCase): class Form(bs3.BlockForm, forms.Form): field = forms.CharField(required=False, validators=[startswith_a], help_text='Example text.', ) def get_attrs(self, bf): return { 'name': bf.html_name, 'id': bf.auto_id, 'label': bf.label, } def test_field(self): form = self.Form() field = form['field'] template = """ <div class="form-group"> <label for="{id}" class="control-label">{label}:</label> <input id="{id}" name="{name}" type="text" class="form-control"> <small class="help-block">Example text.</small> </div> """ self.assertHTMLEqual( template.format(**self.get_attrs(field)), form.render_field(field, field.errors) ) def test_field_bound(self): form = self.Form({'field': 'a value'}) field = form['field'] template = """ <div class="form-group"> <label for="{id}" class="control-label">{label}:</label> <input id="{id}" name="{name}" type="text" class="form-control" value="a value"> <small class="help-block">Example text.</small> </div> """ self.assertHTMLEqual( template.format(**self.get_attrs(field)), form.render_field(field, field.errors) ) def test_field_error(self): form = self.Form({'field': 'error'}) field = form['field'] template = """ <div class="form-group has-error"> <label for="{id}" class="control-label">{label}:</label> <input id="{id}" name="{name}" type="text" class="form-control has-error" value="error"> <small class="help-block">Value must start with &quot;a&quot;.</small> <small class="help-block">Example text.</small> </div> """ self.assertHTMLEqual( template.format(**self.get_attrs(field)), form.render_field(field, field.errors) ) class CheckboxFieldTests(TestCase): class Form(bs3.BlockForm, forms.Form): field = 
forms.BooleanField(required=False, validators=[not_now], help_text='Example text.') def get_attrs(self, bf): return { 'name': bf.html_name, 'id': bf.auto_id, 'label': bf.label, } def test_field(self): form = self.Form() field = form['field'] template = """ <div class="form-group"> <div class="checkbox"> <label> <input id="{id}" name="{name}" type="checkbox"> {label} </label> </div> <small class="help-block">Example text.</small> </div> """ self.assertHTMLEqual( template.format(**self.get_attrs(field)), form.render_field(field, field.errors) ) def test_field_error(self): form = self.Form({'field': 'on'}) field = form['field'] template = """ <div class="form-group has-error"> <div class="checkbox"> <label> <input id="{id}" name="{name}" type="checkbox" checked> {label} </label> </div> <small class="help-block">I cannot let you do that right now.</small> <small class="help-block">Example text.</small> </div> """ self.assertHTMLEqual( template.format(**self.get_attrs(field)), form.render_field(field, field.errors) )
2.71875
3
bzt/modules/java.py
3dgiordano/taurus
1
18094
<gh_stars>1-10 """ Copyright 2017 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import os import shutil import subprocess import time from os import listdir from os.path import join from bzt import ToolError, TaurusConfigError from bzt.engine import HavingInstallableTools, Scenario from bzt.modules import SubprocessedExecutor from bzt.utils import get_full_path, shell_exec, TclLibrary, JavaVM, RequiredTool, MirrorsManager SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/3.6/" \ "selenium-server-standalone-3.6.0.jar" SELENIUM_VERSION = "3.6" # FIXME: unused, remove it JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \ "{version}/junit-{version}.jar" JUNIT_VERSION = "4.12" JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \ "junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION) TESTNG_VERSION = "6.8.5" TESTNG_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/testng/testng/" \ "{version}/testng-{version}.jar".format(version=TESTNG_VERSION) HAMCREST_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core" \ "/1.3/hamcrest-core-1.3.jar" JSON_JAR_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/json/json/20160810/json-20160810.jar" class JavaTestRunner(SubprocessedExecutor, HavingInstallableTools): """ Allows to test java and jar files :type script: str """ def 
__init__(self): super(JavaTestRunner, self).__init__() self.working_dir = os.getcwd() self.target_java = "1.8" self.props_file = None self.base_class_path = [] def path_lambda(self, x): return os.path.abspath(self.engine.find_file(x)) def install_required_tools(self): self.hamcrest_path = self.path_lambda(self.settings.get("hamcrest-core", "~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar")) self.json_jar_path = self.path_lambda( self.settings.get("json-jar", "~/.bzt/selenium-taurus/tools/junit/json.jar")) self.selenium_server_jar_path = self.path_lambda(self.settings.get("selenium-server", "~/.bzt/selenium-taurus/selenium-server.jar")) def prepare(self): """ make jar. """ self.script = self.get_scenario().get(Scenario.SCRIPT, TaurusConfigError("Script not passed to runner %s" % self)) self.script = self.engine.find_file(self.script) self.install_required_tools() self.working_dir = self.engine.create_artifact(self.settings.get("working-dir", "classes"), "") self.target_java = str(self.settings.get("compile-target-java", self.target_java)) self.base_class_path.extend(self.settings.get("additional-classpath", [])) self.base_class_path.extend(self.get_scenario().get("additional-classpath", [])) self.base_class_path.extend([self.hamcrest_path, self.json_jar_path, self.selenium_server_jar_path]) self.props_file = self.engine.create_artifact("runner", ".properties") if not os.path.exists(self.working_dir): os.makedirs(self.working_dir) self.reporting_setup(suffix=".ldjson") def resource_files(self): resources = super(JavaTestRunner, self).resource_files() resources.extend(self.get_scenario().get("additional-classpath", [])) global_additional_classpath = self.settings.get("additional-classpath", []) execution_files = self.execution.get('files', []) # later we need to fix path for sending into cloud execution_files.extend(global_additional_classpath) return resources def _collect_script_files(self, extensions): file_list = [] if self.script is not None and 
os.path.isdir(self.script): for root, _, files in os.walk(self.script): for test_file in files: if os.path.splitext(test_file)[1].lower() in extensions: path = get_full_path(join(root, test_file)) file_list.append(path) else: if os.path.splitext(self.script)[1].lower() in extensions: file_list.append(get_full_path(self.script)) return file_list def compile_scripts(self): """ Compile .java files """ self.log.debug("Compiling .java files started") jar_path = join(self.engine.artifacts_dir, self.working_dir, self.settings.get("jar-name", "compiled.jar")) if os.path.exists(jar_path): self.log.debug(".java files are already compiled, skipping") return compile_cl = ["javac", "-source", self.target_java, "-target", self.target_java, "-d", self.working_dir, ] compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)]) compile_cl.extend(self._collect_script_files({".java"})) with open(self.engine.create_artifact("javac", ".out"), 'ab') as javac_out: with open(self.engine.create_artifact("javac", ".err"), 'ab') as javac_err: self.log.debug("running javac: %s", compile_cl) self.process = shell_exec(compile_cl, stdout=javac_out, stderr=javac_err) ret_code = self.process.poll() while ret_code is None: self.log.debug("Compiling .java files...") time.sleep(1) ret_code = self.process.poll() if ret_code != 0: self.log.debug("javac exit code: %s", ret_code) with open(javac_err.name) as err_file: out = err_file.read() raise ToolError("Javac exited with code: %s\n %s" % (ret_code, out.strip())) self.log.info("Compiling .java files completed") self.make_jar() def make_jar(self): """ move all .class files to compiled.jar """ self.log.debug("Making .jar started") with open(join(self.engine.artifacts_dir, "jar.out"), 'ab') as jar_out: with open(join(self.engine.artifacts_dir, "jar.err"), 'ab') as jar_err: class_files = [java_file for java_file in listdir(self.working_dir) if java_file.endswith(".class")] jar_name = self.settings.get("jar-name", "compiled.jar") if class_files: 
compile_jar_cl = ["jar", "-cf", jar_name] compile_jar_cl.extend(class_files) else: compile_jar_cl = ["jar", "-cf", jar_name, "."] self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err) ret_code = self.process.poll() while ret_code is None: self.log.debug("Making jar file...") time.sleep(1) ret_code = self.process.poll() if ret_code != 0: with open(jar_err.name) as err_file: out = err_file.read() raise ToolError("Jar exited with code %s\n%s" % (ret_code, out.strip())) self.log.info("Making .jar file completed") class JUnitTester(JavaTestRunner, HavingInstallableTools): """ Allows to test java and jar files """ def __init__(self): super(JUnitTester, self).__init__() self.junit_path = None self.junit_listener_path = None def prepare(self): super(JUnitTester, self).prepare() self.install_required_tools() self.base_class_path += [self.junit_path, self.junit_listener_path] self.base_class_path = [self.path_lambda(x) for x in self.base_class_path] if any(self._collect_script_files({'.java'})): self.compile_scripts() def install_required_tools(self): super(JUnitTester, self).install_required_tools() self.junit_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar")) self.junit_listener_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-junit-1.0.jar") tools = [] # only check javac if we need to compile. 
if we have JAR as script - we don't need javac if self.script and any(self._collect_script_files({'.java'})): tools.append(JavaC(self.log)) tools.append(TclLibrary(self.log)) tools.append(JavaVM(self.log)) link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION) tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log)) tools.append(JUnitJar(self.junit_path, self.log, JUNIT_VERSION)) tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK)) tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK)) tools.append(JUnitListenerJar(self.junit_listener_path, "")) self._check_tools(tools) def startup(self): # java -cp junit.jar:selenium-test-small.jar: # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar # taurusjunit.CustomRunner runner.properties jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")] jar_list.extend(self._collect_script_files({".jar"})) self.base_class_path.extend(jar_list) with open(self.props_file, 'wt') as props: props.write("report_file=%s\n" % self.report_file) load = self.get_load() if load.iterations: props.write("iterations=%s\n" % load.iterations) if load.hold: props.write("hold_for=%s\n" % load.hold) for index, item in enumerate(jar_list): props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/'))) class_path = os.pathsep.join(self.base_class_path) junit_cmd_line = ["java", "-cp", class_path, "-Djna.nosys=true", "taurusjunit.CustomRunner", self.props_file] self._start_subprocess(junit_cmd_line) class TestNGTester(JavaTestRunner, HavingInstallableTools): """ Allows to test java and jar files with TestNG """ __test__ = False # Hello, nosetests discovery mechanism def __init__(self): super(TestNGTester, self).__init__() self.testng_path = None self.testng_plugin_path = None def prepare(self): super(TestNGTester, self).prepare() self.install_required_tools() self.base_class_path += [self.testng_path, self.testng_plugin_path] if 
any(self._collect_script_files({'.java'})): self.compile_scripts() def detected_testng_xml(self): script_path = self.get_script_path() if script_path and self.settings.get("autodetect-xml", True): script_dir = get_full_path(script_path, step_up=1) testng_xml = os.path.join(script_dir, 'testng.xml') if os.path.exists(testng_xml): return testng_xml return None def resource_files(self): resources = super(TestNGTester, self).resource_files() testng_xml = self.execution.get('testng-xml', None) if not testng_xml: testng_xml = self.detected_testng_xml() if testng_xml: self.log.info("Detected testng.xml file at %s", testng_xml) self.execution['testng-xml'] = testng_xml if testng_xml: resources.append(testng_xml) return resources def install_required_tools(self): super(TestNGTester, self).install_required_tools() self.testng_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/testng/testng.jar")) self.testng_plugin_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-testng-1.0.jar") tools = [] if self.script and any(self._collect_script_files({'.java'})): tools.append(JavaC(self.log)) tools.append(TclLibrary(self.log)) tools.append(JavaVM(self.log)) link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION) tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log)) tools.append(TestNGJar(self.testng_path, TESTNG_DOWNLOAD_LINK)) tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK)) tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK)) tools.append(TestNGPluginJar(self.testng_plugin_path, "")) self._check_tools(tools) def startup(self): # java -classpath # testng.jar:selenium-server.jar:taurus-testng-1.0.jar:json.jar:compiled.jar # taurustestng.TestNGRunner runner.properties jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")] jar_list.extend(self._collect_script_files({".jar"})) self.base_class_path.extend(jar_list) with 
open(self.props_file, 'wt') as props: props.write("report_file=%s\n" % self.report_file) load = self.get_load() if load.iterations: props.write("iterations=%s\n" % load.iterations) if load.hold: props.write("hold_for=%s\n" % load.hold) for index, item in enumerate(jar_list): props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/'))) testng_xml = self.execution.get('testng-xml', None) or self.detected_testng_xml() if testng_xml: props.write('testng_config=%s\n' % testng_xml.replace(os.path.sep, '/')) cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurustestng.TestNGRunner", self.props_file] self._start_subprocess(cmdline) class TestNGJar(RequiredTool): def __init__(self, tool_path, download_link): super(TestNGJar, self).__init__("TestNG", tool_path, download_link) class HamcrestJar(RequiredTool): def __init__(self, tool_path, download_link): super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link) class JsonJar(RequiredTool): def __init__(self, tool_path, download_link): super(JsonJar, self).__init__("JsonJar", tool_path, download_link) class JavaC(RequiredTool): def __init__(self, parent_logger, tool_path='javac', download_link=''): super(JavaC, self).__init__("JavaC", tool_path, download_link) self.log = parent_logger.getChild(self.__class__.__name__) def check_if_installed(self): try: output = subprocess.check_output([self.tool_path, '-version'], stderr=subprocess.STDOUT) self.log.debug("%s output: %s", self.tool_name, output) return True except (subprocess.CalledProcessError, OSError): return False def install(self): raise ToolError("The %s is not operable or not available. 
Consider installing it" % self.tool_name) class SeleniumServerJar(RequiredTool): def __init__(self, tool_path, download_link, parent_logger): super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link) self.log = parent_logger.getChild(self.__class__.__name__) def check_if_installed(self): self.log.debug("%s path: %s", self.tool_name, self.tool_path) selenium_launch_command = ["java", "-jar", self.tool_path, "-help"] selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT) output = selenium_subproc.communicate() self.log.debug("%s output: %s", self.tool_name, output) if selenium_subproc.returncode == 0: self.already_installed = True return True else: return False class JUnitJar(RequiredTool): def __init__(self, tool_path, parent_logger, junit_version): super(JUnitJar, self).__init__("JUnit", tool_path) self.log = parent_logger.getChild(self.__class__.__name__) self.version = junit_version self.mirror_manager = JUnitMirrorsManager(self.log, self.version) def install(self): dest = get_full_path(self.tool_path, step_up=1) self.log.info("Will install %s into %s", self.tool_name, dest) junit_dist = self._download(suffix=".jar") if not os.path.exists(dest): os.makedirs(dest) shutil.move(junit_dist, self.tool_path) self.log.info("Installed JUnit successfully") if not self.check_if_installed(): raise ToolError("Unable to run %s after installation!" 
% self.tool_name) class JUnitListenerJar(RequiredTool): def __init__(self, tool_path, download_link): super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link) def install(self): raise ToolError("Automatic installation of JUnitListener isn't implemented") class TestNGPluginJar(RequiredTool): def __init__(self, tool_path, download_link): super(TestNGPluginJar, self).__init__("TestNGPlugin", tool_path, download_link) def install(self): raise ToolError("TestNG plugin should be bundled with Taurus distribution") class JUnitMirrorsManager(MirrorsManager): def __init__(self, parent_logger, junit_version): self.junit_version = junit_version super(JUnitMirrorsManager, self).__init__(JUNIT_MIRRORS_SOURCE, parent_logger) def _parse_mirrors(self): links = [] if self.page_source is not None: self.log.debug('Parsing mirrors...') try: resp = json.loads(self.page_source) objects = resp.get("response", {}).get("docs", []) if objects: obj = objects[0] group = obj.get("g") artifact = obj.get("a") version = obj.get("v") ext = obj.get("p") link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \ "{version}/{artifact}-{version}.{ext}" link = link_template.format(group=group, artifact=artifact, version=version, ext=ext) links.append(link) except BaseException as exc: self.log.error("Error while parsing mirrors %s", exc) default_link = JUNIT_DOWNLOAD_LINK.format(version=self.junit_version) if default_link not in links: links.append(default_link) self.log.debug('Total mirrors: %d', len(links)) return links
1.59375
2
various_modules/interface_segregation_principle.py
Neykah/design_patterns_python
0
18095
<gh_stars>0 """ Maybe not so relevant in Python due to the possibility to use multiple inheritance... """ from abc import ABC, abstractmethod class CloudHostingProvider(ABC): @abstractmethod def create_server(region): ... @abstractmethod def list_servers(region): ... class CDNProvider(ABC): @abstractmethod def get_cdna_address(): ... class CloudStorageProvider(ABC): @abstractmethod def store_file(name): ... @abstractmethod def get_file(name): ... class Amazon(CloudHostingProvider, CDNProvider, CloudStorageProvider): def store_file(self, name: str): print(f"Storing the file {name} in AWS...") def get_file(self, name: str): print(f"Getting the file {name} from AWS...") def create_server(self, region: str): print(f"Creating a new server in the following region: {region}...") def list_servers(self, region: str): print(f"List all servers available in {region}...") def get_cdna_address(self): print("AWS CDNA address: ...") class Dropbox(CloudStorageProvider): def store_file(self, name: str): print(f"Storing the file {name} in Dropbox...") def get_file(self, name: str): print(f"Getting the file {name} from Dropbox...") if __name__ == "__main__": amazon = Amazon() dropbox = Dropbox() amazon.get_file("Baba") dropbox.store_file("Baba")
3.28125
3
SCSCons/Variables/PackageVariable.py
Relintai/pandemonium_engine
1,403
18096
<reponame>Relintai/pandemonium_engine # MIT License # # Copyright The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Variable type for package Variables. To be used whenever a 'package' may be enabled/disabled and the package path may be specified. Given these options :: x11=no (disables X11 support) x11=yes (will search for the package installation dir) x11=/usr/local/X11 (will check this path for existence) Can be used as a replacement for autoconf's ``--with-xxx=yyy`` :: opts = Variables() opts.Add( PackageVariable( key='x11', help='use X11 installed here (yes = search some places)', default='yes' ) ) ... if env['x11'] == True: dir = ... # search X11 in some standard places ... env['x11'] = dir if env['x11']: ... # build with x11 ... 
""" from typing import Tuple, Callable import SCons.Errors __all__ = ['PackageVariable',] ENABLE_STRINGS = ('1', 'yes', 'true', 'on', 'enable', 'search') DISABLE_STRINGS = ('0', 'no', 'false', 'off', 'disable') def _converter(val): """ """ lval = val.lower() if lval in ENABLE_STRINGS: return True if lval in DISABLE_STRINGS: return False return val def _validator(key, val, env, searchfunc) -> None: """ """ # NB: searchfunc is currently undocumented and unsupported # TODO write validator, check for path import os if env[key] is True: if searchfunc: env[key] = searchfunc(key, val) elif env[key] and not os.path.exists(val): raise SCons.Errors.UserError( 'Path does not exist for option %s: %s' % (key, val)) def PackageVariable(key, help, default, searchfunc=None) -> Tuple[str, str, str, Callable, Callable]: """Return a tuple describing a package list SCons Variable. The input parameters describe a 'package list' option. Returns a tuple including the correct converter and validator appended. The result is usable as input to :meth:`Add` . A 'package list' option may either be 'all', 'none' or a pathname string. This information is appended to *help*. """ # NB: searchfunc is currently undocumented and unsupported help = '\n '.join( (help, '( yes | no | /path/to/%s )' % key)) return (key, help, default, lambda k, v, e: _validator(k, v, e, searchfunc), _converter) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
1.726563
2
tests/test_protocol.py
kwikiel/edgedb
0
18097
<gh_stars>0
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import asyncio

import edgedb

from edb.server import compiler
from edb import protocol
from edb.testbase.protocol.test import ProtocolTestCase


class TestProtocol(ProtocolTestCase):
    """Wire-level protocol tests.

    Each test drives ``self.con`` directly: raw protocol messages are
    sent with ``send()`` and the server's replies are asserted with
    ``recv_match()``, so both message content and ordering are checked.
    """

    async def test_proto_executescript_01(self):
        """A failing script errors immediately and the link recovers."""
        # Test that ExecuteScript returns ErrorResponse immediately.
        await self.con.connect()
        await self.con.send(
            protocol.ExecuteScript(
                headers=[],
                script='SELECT 1/0'
            )
        )
        await self.con.recv_match(
            protocol.ErrorResponse,
            message='division by zero'
        )
        # The error happened outside an explicit transaction, so the
        # connection goes straight back to NOT_IN_TRANSACTION.
        await self.con.recv_match(
            protocol.ReadyForCommand,
            transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
        )

        # Test that the protocol has recovered.
        await self.con.send(
            protocol.ExecuteScript(
                headers=[],
                script='SELECT 1'
            )
        )
        await self.con.recv_match(
            protocol.CommandComplete,
            status='SELECT'
        )
        await self.con.recv_match(
            protocol.ReadyForCommand,
            transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
        )

    async def test_proto_executescript_02(self):
        """transaction_state reflects a failed transaction until ROLLBACK."""
        # Test ReadyForCommand.transaction_state
        await self.con.connect()
        await self.con.send(
            protocol.ExecuteScript(
                headers=[],
                script='START TRANSACTION; SELECT 1/0'
            )
        )
        await self.con.recv_match(
            protocol.ErrorResponse,
            message='division by zero'
        )
        # The failure occurred inside an explicit transaction this time.
        await self.con.recv_match(
            protocol.ReadyForCommand,
            transaction_state=protocol.TransactionState.IN_FAILED_TRANSACTION,
        )

        # Test that the protocol is still in a failed transaction
        await self.con.send(
            protocol.ExecuteScript(
                headers=[],
                script='SELECT 1/0'
            )
        )
        await self.con.recv_match(
            protocol.ErrorResponse,
            message='current transaction is aborted'
        )
        await self.con.recv_match(
            protocol.ReadyForCommand,
            transaction_state=protocol.TransactionState.IN_FAILED_TRANSACTION,
        )

        # Test recovery
        await self.con.send(
            protocol.ExecuteScript(
                headers=[],
                script='ROLLBACK'
            )
        )
        await self.con.recv_match(
            protocol.CommandComplete,
            status='ROLLBACK'
        )
        await self.con.recv_match(
            protocol.ReadyForCommand,
            transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
        )

    async def test_proto_flush_01(self):
        """Errors surface without Flush; Flush completes a valid Prepare."""
        await self.con.connect()

        # Deliberately malformed command ('SEL ECT') to trigger a parse error.
        await self.con.send(
            protocol.Prepare(
                headers=[],
                io_format=protocol.IOFormat.BINARY,
                expected_cardinality=compiler.Cardinality.AT_MOST_ONE,
                statement_name=b'',
                command='SEL ECT 1',
            )
        )

        # Should come through even without an explicit 'flush'
        await self.con.recv_match(
            protocol.ErrorResponse,
            message="Unexpected 'SEL'"
        )

        # Recover the protocol state from the error
        self.assertEqual(
            await self.con.sync(),
            protocol.TransactionState.NOT_IN_TRANSACTION)

        # This Prepare should be handled alright
        await self.con.send(
            protocol.Prepare(
                headers=[],
                io_format=protocol.IOFormat.BINARY,
                expected_cardinality=compiler.Cardinality.AT_MOST_ONE,
                statement_name=b'',
                command='SELECT 1',
            ),
            protocol.Flush()
        )
        await self.con.recv_match(
            protocol.PrepareComplete,
            cardinality=compiler.Cardinality.AT_MOST_ONE,
        )

        # Test that Flush has completed successfully -- the
        # command should be executed and no exception should
        # be received.
        # While at it, rogue ROLLBACK should be allowed.
        await self.con.send(
            protocol.ExecuteScript(
                headers=[],
                script='ROLLBACK'
            )
        )
        await self.con.recv_match(
            protocol.CommandComplete,
            status='ROLLBACK'
        )
        await self.con.recv_match(
            protocol.ReadyForCommand,
            transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
        )

    async def test_proto_connection_lost_cancel_query(self):
        """Timeout wrapper around _test_proto_connection_lost_cancel_query."""
        # This test is occasionally hanging - adding a timeout to find out why
        await asyncio.wait_for(
            self._test_proto_connection_lost_cancel_query(),
            30
        )

    async def _test_proto_connection_lost_cancel_query(self):
        """Dropping a connection mid-query must cancel the server-side query.

        Uses a second connection (``con2``) to set up a row, then holds a
        lock on it from ``self.con`` while sleeping; closing ``self.con``
        should cancel the sleep and release the lock, leaving the row with
        its original value.
        """
        # Prepare the test data
        con2 = await edgedb.async_connect(**self.get_connect_args())
        try:
            await con2.execute(
                'CREATE TYPE tclcq { CREATE PROPERTY p -> str }'
            )
            try:
                await con2.execute("INSERT tclcq { p := 'initial' }")

                # Ready the nested connection
                await self.con.connect()

                # Use an implicit transaction in the nested connection: lock
                # the row with an UPDATE, and then hold the transaction for 10
                # seconds, which is long enough for the upcoming cancellation
                await self.con.send(
                    protocol.ExecuteScript(
                        headers=[],
                        script="""\
                        UPDATE tclcq SET { p := 'inner' };
                        SELECT sys::_sleep(10);
                        """,
                    )
                )

                # Sanity check - we shouldn't get anything here
                with self.assertRaises(asyncio.TimeoutError):
                    await asyncio.wait_for(
                        self.con.recv_match(
                            protocol.CommandComplete,
                            status='UPDATE'
                        ),
                        0.1,
                    )

                # Close the nested connection without waiting for the result;
                # the server is supposed to cancel the pending query.
                await self.con.aclose()

                # In the outer connection, let's wait until the lock is
                # released by either an expected cancellation, or an unexpected
                # commit after 10 seconds.
                tx = con2.raw_transaction()
                await tx.start()
                try:
                    await tx.execute("UPDATE tclcq SET { p := 'lock' }")
                except edgedb.TransactionSerializationError:
                    # In case the nested transaction succeeded, we'll meet an
                    # concurrent update error here, which can be safely ignored
                    pass
                finally:
                    await tx.rollback()

                # Let's check what's in the row - if the cancellation didn't
                # happen, the test will fail with value "inner".
                val = await con2.query_single('SELECT tclcq.p LIMIT 1')
                self.assertEqual(val, 'initial')
            finally:
                # Clean up
                await con2.execute(
                    "DROP TYPE tclcq"
                )
        finally:
            await con2.aclose()
1.890625
2
210125/homework_re_3.py
shadowsmain/pyton-adv
0
18098
import re

# fullmatch-style pattern: digits, a single '.' or ',' separator, digits.
# NOTE: used with re.fullmatch below instead of re.match + '$', because a
# trailing '$' also matches just before a final newline — re.match with
# r'^\d+[.,]\d+$' would wrongly accept '1,32\n'.
RE_NUMBER_VALIDATOR = re.compile(r'\d+[.,]\d+')


def number_is_valid(number):
    """Return True iff *number* is digits, one '.' or ',', then digits.

    Returns a plain bool (previously a Match-or-None truthy value), which
    is backward compatible for every boolean use of the result.
    """
    return RE_NUMBER_VALIDATOR.fullmatch(number) is not None


assert number_is_valid('1.32')
assert number_is_valid('1,32')
assert not number_is_valid('asdasd1234')
assert not number_is_valid('22,a44')
3.234375
3
shp_code/prec_reformat.py
anahm/inferring-population-preferences
4
18099
""" prec_reformat.py Taking state data and having each line be a precinct's voting results and candidate cf-scores (rather than each line be each candidate per precinct. | prec_id | cf_score_0 | num_votes_0 | cf_score_1 | num_votes_1 | """ import math import numpy as np import pandas as pd from prec_cd import prec_cd_main from check_data import check_main def convert_by_prec(old_df, state, year, dirname): precs = [] years = [] cf_score_0 = [] num_votes_0 = [] cf_score_1 = [] num_votes_1 = [] # group by precinct (year assumed) for key, group in old_df.groupby(['geoid']): cf_iter = iter(group['cf_score']) votes_iter = iter(group['num_votes']) nxt_score = cf_iter.next() if math.isnan(nxt_score): nxt_score = 0 cf_0 = nxt_score nv_0 = votes_iter.next() try: nxt_score = cf_iter.next() if math.isnan(nxt_score): nxt_score = 0 cf_1 = nxt_score nv_1 = votes_iter.next() # enforcing the idea that cfscore0 < cfscore1 precs.append(key) if cf_1 < cf_0: cf_score_0.append(cf_1) num_votes_0.append(nv_1) cf_score_1.append(cf_0) num_votes_1.append(nv_0) else: cf_score_0.append(cf_0) num_votes_0.append(nv_0) cf_score_1.append(cf_1) num_votes_1.append(nv_1) except StopIteration: # get rid of pass # use arrays to create dataframe new_df = pd.DataFrame({ 'cf_score_0': cf_score_0, 'num_votes_0': num_votes_0, 'cf_score_1': cf_score_1, 'num_votes_1': num_votes_1, 'geoid': precs}, index=None) new_df['tot_votes'] = new_df['num_votes_0'] + new_df['num_votes_1'] new_df['midpoint'] = (new_df['cf_score_0'] + new_df['cf_score_1']) / 2.0 # write new dataframe out to csv outfile = '%s/precline_%s_house_%s.csv' % (dirname, state, year) new_df.to_csv(outfile) return outfile """ data_clean() Function to parse out certain types of data that are not useful in our results. 
# NOTE: overwrites the old file, since it is unnecessary """ def data_clean(precline_file): df = pd.read_csv(precline_file, index_col = 0) # remove all precincts with tot_votes == 0 df = df[df['tot_votes'] > 0] # remove all uncontested candidates (cf_score_1 == 0) df = df[df['cf_score_1'] != 0] df.to_csv(precline_file, index=False) """ prec_reformat_main() Function that does the bulk of the original main function and can be called by the commandline. @param: state, year @return: location of new precline file """ def prec_reformat_main(state, year): prec_cd_main(state, year) csv_dir = '../data/%s_data/%s_%s' % (state, state, year) infile = '%s/%s_house_%s_final.csv' % (csv_dir, state, year) outfile = '%s/precline_%s_house_%s.csv' % (csv_dir, state, year) # read in file old_df = pd.read_csv(infile) convert_by_prec(old_df, state, year, csv_dir) data_clean(outfile) print 'Precinct data written to: %s' % outfile rep_col = 't_USH_R_%s' % year dem_col = 't_USH_D_%s' % year check_main(outfile, state, year, rep_col, dem_col) def main(): state = raw_input('State: ') year = raw_input('Year: ') prec_reformat_main(state, year) if __name__ == "__main__": main()
2.859375
3