#!/usr/bin/env python
from pkg_resources import get_distribution
from setuptools import setup, find_packages

with open("README.md", "r") as f:
    long_description = f.read()

version = get_distribution("autolabel").version

setup(
    packages=find_packages(),
    install_requires=[
        'click',
        'more-itertools',
        'torchvision',
        'torch',
        'pillow',
        'numpy'
    ],
    entry_points='''
        [console_scripts]
        autolabel=autolabel.cli:main
    ''',
    url='https://github.com/walwe/autolabel',
    version=version,
    author='walwe',
    python_requires='>=3.6',
    description='Autolabel is an image labeling tool using neural networks',
    long_description_content_type="text/markdown",
    long_description=long_description,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
""" Exercício Python 5: Faça um programa que leia um número Inteiro e mostre na tela o seu sucessor e seu antecessor. """ n = int(input('digite um numero inteiro ')) #ant = n-1 #post = n+1 #print('O antecessor de {} é {} e posterior é {}' .format(n, ant, post)) print('{} o antercessor é {} o sucessor é {}'.format(n, (n-1), (n+1)))
version = '2.0.706'
import os.path import numpy from scipy.spatial import Delaunay import meshio from meshplex import MeshTri def simple0(): # # 3___________2 # |\_ 2 _/| # | \_ _/ | # | 3 \4/ 1 | # | _/ \_ | # | _/ \_ | # |/ 0 \| # 0-----------1 # X = numpy.array( [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0], ] ) cells = numpy.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]) return X, cells def simple1(): # # 3___________2 # |\_ 2 _/| # | \_ _/ | # | 3 \4/ 1 | # | _/ \_ | # | _/ \_ | # |/ 0 \| # 0-----------1 # X = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.4, 0.5]]) cells = numpy.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]) return X, cells def simple2(): # # 3___________2 # |\_ 3 _/ \_ # | \_ _/ 2 \_ # | 4 \4/_________\5 # | _/ \_ _/ # | _/ \_ 1 _/ # |/ 0 \ / # 0-----------1 # X = numpy.array( [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.7, 0.5], [1.7, 0.5]] ) cells = numpy.array([[0, 1, 4], [1, 5, 4], [2, 4, 5], [2, 3, 4], [3, 0, 4]]) return X, cells def simple3(): # # 5___________4___________3 # |\_ 6 _/ \_ 4 _/| # | \_ _/ 5 \_ _/ | # | 7 \6/_________\7/ 3 | # | _/ \_ _/ \_ | # | _/ \_ 1 _/ 2 \_ | # |/ 0 \ / \| # 0-----------1-----------2 # X = numpy.array( [ [0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [0.0, 1.0], [0.7, 0.5], [1.7, 0.5], ] ) cells = numpy.array( [ [0, 1, 6], [1, 7, 6], [1, 2, 7], [2, 3, 7], [3, 4, 7], [4, 6, 7], [4, 5, 6], [5, 0, 6], ] ) return X, cells def pacman(): this_dir = os.path.dirname(os.path.realpath(__file__)) mesh = meshio.read(os.path.join(this_dir, "meshes", "pacman.vtk")) return mesh.points[:, :2], mesh.cells["triangle"] def circle_gmsh(): this_dir = os.path.dirname(os.path.realpath(__file__)) mesh = meshio.read(os.path.join(this_dir, "meshes", "circle-gmsh.vtk")) c = mesh.cells["triangle"].astype(numpy.int) return mesh.points[:, :2], c def circle_random(): n = 40 radius = 1.0 k = numpy.arange(n) boundary_pts = radius * numpy.column_stack( [numpy.cos(2 * numpy.pi * k / n), numpy.sin(2 * numpy.pi * k / n)] ) # Compute the number of interior nodes such that all triangles can be somewhat # equilateral. 
edge_length = 2 * numpy.pi * radius / n domain_area = numpy.pi - n * ( radius ** 2 / 2 * (edge_length - numpy.sin(edge_length)) ) cell_area = numpy.sqrt(3) / 4 * edge_length ** 2 target_num_cells = domain_area / cell_area # Euler: # 2 * num_points - num_boundary_edges - 2 = num_cells # <=> # num_interior_points ~= 0.5 * (num_cells + num_boundary_edges) + 1 - num_boundary_points m = int(0.5 * (target_num_cells + n) + 1 - n) # generate random points in circle; <http://mathworld.wolfram.com/DiskPointPicking.html> numpy.random.seed(0) r = numpy.random.rand(m) alpha = 2 * numpy.pi * numpy.random.rand(m) interior_pts = numpy.column_stack( [numpy.sqrt(r) * numpy.cos(alpha), numpy.sqrt(r) * numpy.sin(alpha)] ) pts = numpy.concatenate([boundary_pts, interior_pts]) tri = Delaunay(pts) pts = numpy.column_stack([pts[:, 0], pts[:, 1], numpy.zeros(pts.shape[0])]) # Make sure there are exactly `n` boundary points mesh = MeshTri(pts, tri.simplices) assert numpy.sum(mesh.is_boundary_node) == n return pts, tri.simplices def circle_rotated(): pts, cells = circle_random() # <https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula> theta = numpy.pi / 4 k = numpy.array([1.0, 0.0, 0.0]) pts = ( pts * numpy.cos(theta) + numpy.cross(k, pts) * numpy.sin(theta) + numpy.outer(numpy.einsum("ij,j->i", pts, k), k) * (1.0 - numpy.cos(theta)) ) meshio.write_points_cells("out.vtk", pts, {"triangle": cells}) return pts, cells
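These generators all return a (points, cells) pair. A small usage sketch from within the same module, assuming the same meshplex/meshio versions the code above relies on (the output filename is arbitrary):

import numpy
import meshio
from meshplex import MeshTri

# the square mesh from simple0(): the 4 corner nodes are on the boundary, node 4 is interior
X, cells = simple0()
mesh = MeshTri(X, cells)
assert numpy.sum(mesh.is_boundary_node) == 4

# write it out the same way circle_rotated() does
meshio.write_points_cells("simple0.vtk", X, {"triangle": cells})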
# -*- coding: utf-8 -*- """ Created on Mon Aug 11 16:19:39 2014 """ import os import sys import imp # Put location of sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\modules') # add ODYM module directory to system path #NOTE: Hidden variable __file__ must be know to script for the directory structure to work. # Therefore: When first using the model, run the entire script with F5 so that the __file__ variable can be created. import dynamic_stock_model as dsm # remove and import the class manually if this unit test is run as standalone script imp.reload(dsm) import numpy as np import unittest ############################################################################### """My Input for fixed lifetime""" Time_T_FixedLT = np.arange(0,10) Inflow_T_FixedLT = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5])} lifetime_FixedLT0 = {'Type': 'Fixed', 'Mean': np.array([0])} #lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5,5,5,5,5,5,5,5,5,5])} lifetime_NormLT = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])} lifetime_NormLT0 = {'Type': 'Normal', 'Mean': np.array([0]), 'StdDev': np.array([1.5])} ############################################################################### """My Output for fixed lifetime""" Outflow_T_FixedLT = np.array([0, 0, 0, 0, 0, 1, 2, 3, 4, 5]) Outflow_TC_FixedLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 2, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 3, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 5, 0, 0, 0, 0, 0]]) Stock_T_FixedLT = np.array([1, 3, 6, 10, 15, 20, 25, 30, 35, 40]) StockChange_T_FixedLT = np.array([1, 2, 3, 4, 5, 5, 5, 5, 5, 5]) Stock_TC_FixedLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2, 3, 0, 0, 0, 0, 0, 0, 0], [1, 2, 3, 4, 0, 0, 0, 0, 0, 0], [1, 2, 3, 4, 5, 0, 0, 0, 0, 0], [0, 2, 3, 4, 5, 6, 0, 0, 0, 0], [0, 0, 3, 4, 5, 6, 7, 0, 0, 0], [0, 0, 0, 4, 5, 6, 7, 8, 0, 0], [0, 0, 0, 0, 5, 6, 7, 8, 9, 0], [0, 0, 0, 0, 0, 6, 7, 8, 9, 10]]) Bal = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) """My Output for normally distributed lifetime""" Stock_TC_NormLT = np.array([[ 9.99570940e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 9.96169619e-01, 1.99914188e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 9.77249868e-01, 1.99233924e+00, 2.99871282e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 9.08788780e-01, 1.95449974e+00, 2.98850886e+00, 3.99828376e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 7.47507462e-01, 1.81757756e+00, 2.93174960e+00, 3.98467848e+00, 4.99785470e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 5.00000000e-01, 1.49501492e+00, 2.72636634e+00, 3.90899947e+00, 4.98084810e+00, 5.99742564e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 2.52492538e-01, 1.00000000e+00, 2.24252239e+00, 3.63515512e+00, 4.88624934e+00, 5.97701772e+00, 6.99699658e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 9.12112197e-02, 5.04985075e-01, 1.50000000e+00, 
2.99002985e+00, 4.54394390e+00, 5.86349921e+00, 6.97318734e+00, 7.99656752e+00, 0.00000000e+00, 0.00000000e+00], [ 2.27501319e-02, 1.82422439e-01, 7.57477613e-01, 2.00000000e+00, 3.73753731e+00, 5.45273268e+00, 6.84074908e+00, 7.96935696e+00, 8.99613846e+00, 0.00000000e+00], [ 3.83038057e-03, 4.55002639e-02, 2.73633659e-01, 1.00997015e+00, 2.50000000e+00, 4.48504477e+00, 6.36152146e+00, 7.81799894e+00, 8.96552657e+00, 9.99570940e+00]]) Stock_T_NormLT = np.array([ 0.99957094, 2.9953115 , 5.96830193, 9.85008113, 14.4793678 , 19.60865447, 24.99043368, 30.46342411, 35.95916467, 41.45873561]) Outflow_T_NormLT = np.array([ 4.29060333e-04, 4.25944090e-03, 2.70095728e-02, 1.18220793e-01, 3.70713330e-01, 8.70713330e-01, 1.61822079e+00, 2.52700957e+00, 3.50425944e+00, 4.50042906e+00]) Outflow_TC_NormLT = np.array([[ 4.29060333e-04, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 3.40132023e-03, 8.58120666e-04, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 1.89197514e-02, 6.80264047e-03, 1.28718100e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 6.84610878e-02, 3.78395028e-02, 1.02039607e-02, 1.71624133e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 1.61281318e-01, 1.36922176e-01, 5.67592541e-02, 1.36052809e-02, 2.14530167e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 2.47507462e-01, 3.22562636e-01, 2.05383263e-01, 7.56790055e-02, 1.70066012e-02, 2.57436200e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 2.47507462e-01, 4.95014925e-01, 4.83843953e-01, 2.73844351e-01, 9.45987569e-02, 2.04079214e-02, 3.00342233e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 1.61281318e-01, 4.95014925e-01, 7.42522387e-01, 6.45125271e-01, 3.42305439e-01, 1.13518508e-01, 2.38092416e-02, 3.43248267e-03, -0.00000000e+00, -0.00000000e+00], [ 6.84610878e-02, 3.22562636e-01, 7.42522387e-01, 9.90029850e-01, 8.06406589e-01, 4.10766527e-01, 1.32438260e-01, 2.72105619e-02, 3.86154300e-03, -0.00000000e+00], [ 1.89197514e-02, 1.36922176e-01, 4.83843953e-01, 9.90029850e-01, 1.23753731e+00, 9.67687907e-01, 4.79227614e-01, 1.51358011e-01, 3.06118821e-02, 4.29060333e-03]]) StockChange_T_NormLT = np.array([ 0.99957094, 1.99574056, 2.97299043, 3.88177921, 4.62928667, 5.12928667, 5.38177921, 5.47299043, 5.49574056, 5.49957094]) """My Output for Weibull-distributed lifetime""" Stock_TC_WeibullLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # computed with Excel and taken from there [0.367879441, 2, 0, 0, 0, 0, 0, 0, 0, 0], [0.100520187, 0.735758882, 3, 0, 0, 0, 0, 0, 0, 0], [0.023820879, 0.201040373, 1.103638324, 4, 0, 0, 0, 0, 0, 0], [0.005102464, 0.047641758, 0.30156056, 1.471517765,5, 0, 0, 0, 0, 0], [0.001009149, 0.010204929, 0.071462637, 0.402080746,1.839397206, 6, 0, 0, 0, 0], [0.000186736, 0.002018297, 0.015307393, 0.095283516, 0.502600933, 2.207276647, 7, 0, 0, 0], [3.26256E-05, 0.000373472, 0.003027446, 0.020409858, 0.119104394, 0.60312112, 2.575156088, 8, 0, 0], [5.41828E-06, 6.52513E-05, 0.000560208, 0.004036594, 0.025512322, 0.142925273, 0.703641306, 2.943035529, 9, 0], [8.59762E-07, 1.08366E-05, 9.78769E-05, 0.000746944, 0.005045743, 0.030614786, 0.166746152, 0.804161493, 3.310914971, 10]]) Stock_T_WeibullLT = 
np.array([1,2.367879441,3.836279069,5.328499576,6.825822547,8.324154666,9.822673522,11.321225,12.8197819,14.31833966]) Outflow_T_WeibullLT = np.array([0,0.632120559,1.531600372,2.507779493,3.502677029,4.50166788,5.501481144,6.501448519,7.5014431,8.501442241]) Outflow_TC_WeibullLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0.632120559, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0.267359255, 1.264241118, 0, 0, 0, 0, 0, 0, 0, 0], [0.076699308, 0.534718509, 1.896361676, 0, 0, 0, 0, 0, 0, 0], [0.018718414, 0.153398615, 0.802077764, 2.528482235, 0, 0, 0, 0, 0, 0], [0.004093316, 0.037436829, 0.230097923, 1.069437018, 3.160602794, 0, 0, 0, 0, 0], [0.000822413, 0.008186632, 0.056155243, 0.306797231, 1.336796273, 3.792723353, 0, 0, 0, 0], [0.00015411, 0.001644825, 0.012279947, 0.074873658, 0.383496539, 1.604155527, 4.424843912, 0, 0, 0], [2.72074E-05, 0.000308221, 0.002467238, 0.016373263, 0.093592072, 0.460195846, 1.871514782, 5.056964471, 0, 0], [4.55852E-06, 5.44147E-05 , 0.000462331 , 0.00328965, 0.020466579, 0.112310487, 0.536895154, 2.138874037, 5.689085029, 0]]) StockChange_T_WeibullLT = np.array([1,1.367879441,1.468399628,1.492220507,1.497322971,1.49833212,1.498518856,1.498551481,1.4985569,1.498557759]) lifetime_WeibullLT = {'Type': 'Weibull', 'Shape': np.array([1.2]), 'Scale': np.array([1])} InitialStock_WB = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50]) Inflow_WB = np.array([11631.1250671964, 1845.6048709861, 2452.0593141014, 1071.0305279511, 198.1868742385, 391.9674590243, 83.9599583940, 29.8447516023, 10.8731273138, 7.5000000000]) # We need 10 digits AFTER the . to get a 9 digits after the . overlap with np.testing. # The total number of counting digits is higher, because there are up to 5 digits before the . # For the stock-driven model with initial stock, colculated with Excel Sc_InitialStock_2_Ref = np.array([[ 3.29968072, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [ 3.28845263, 5.1142035 , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [ 3.2259967 , 5.09680099, 2.0068288 , 0. , 0. , 0. , 0. , 0. , 0. ], [ 3. , 5. , 2. , 4. , 0. , 0. , 0. , 0. , 0. ], [ 2.46759471, 4.64972578, 1.962015 , 3.98638888, 4.93427563, 0. , 0. , 0. , 0. ], [ 1.65054855, 3.82454624, 1.82456634, 3.91067739, 4.91748538, 3.8721761 , 0. , 0. , 0. ], [ 0.83350238, 2.55819937, 1.50076342, 3.63671549, 4.82409004, 3.85899993, 2.78772936, 0. , 0. ], [ 0.30109709, 1.2918525 , 1.00384511, 2.9913133 , 4.48613916, 3.78570788, 2.77824333, 3.36180162, 0. ], [ 0.07510039, 0.46667297, 0.5069268 , 2.00085849, 3.68999109, 3.5205007 , 2.72547754, 3.35036215, 3.66410986]]) Sc_InitialStock_2_Ref_Sum = np.array([ 3.29968072, 8.40265614, 10.32962649, 14. , 18. , 20. , 20. , 20. , 20. 
]) Oc_InitialStock_2_Ref = np.array([[ 1.41636982e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.12280883e-02, 2.19524375e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 6.24559363e-02, 1.74025106e-02, 8.61420234e-04, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 2.25996698e-01, 9.68009922e-02, 6.82879736e-03, 1.71697802e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 5.32405289e-01, 3.50274224e-01, 3.79849998e-02, 1.36111209e-02, 2.11801070e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 8.17046165e-01, 8.25179532e-01, 1.37448656e-01, 7.57114903e-02, 1.67902556e-02, 1.66211031e-03, -0.00000000e+00, -0.00000000e+00, -0.00000000e+00], [ 8.17046165e-01, 1.26634687e+00, 3.23802924e-01, 2.73961897e-01, 9.33953405e-02, 1.31761643e-02, 1.19661751e-03, -0.00000000e+00, -0.00000000e+00], [ 5.32405289e-01, 1.26634687e+00, 4.96918311e-01, 6.45402188e-01, 3.37950879e-01, 7.32920558e-02, 9.48603036e-03, 1.44303487e-03, -0.00000000e+00], [ 2.25996698e-01, 8.25179532e-01, 4.96918311e-01, 9.90454815e-01, 7.96148072e-01, 2.65207178e-01, 5.27657861e-02, 1.14394721e-02, 1.57279902e-03]]) I_InitialStock_2_Ref = np.array([ 3.30109709, 5.11639875, 2.00769022, 4.00171698, 4.93639364, 3.87383821, 2.78892598, 3.36324466, 3.66568266]) """ Test case with fixed lifetime for initial stock""" Time_T_FixedLT_X = np.arange(1, 9, 1) lifetime_FixedLT_X = {'Type': 'Fixed', 'Mean': np.array([5])} InitialStock_X = np.array([0, 0, 0, 7, 5, 4, 3, 2]) Inflow_X = np.array([0, 0, 0, 7, 5, 4, 3, 2]) Time_T_FixedLT_XX = np.arange(1, 11, 1) lifetime_NormLT_X = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])} InitialStock_XX = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50]) Inflow_XX = np.array([ 2.61070664, 0.43955789, 0.87708508, 0.79210262, 0.4, 2.67555857, 2.20073139, 3.06983925, 4.01538044, 7.50321933]) """ Test case with normally distributed lifetime for initial stock and stock-driven model""" Time_T_FixedLT_2 = np.arange(1, 10, 1) lifetime_NormLT_2 = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])} InitialStock_2 = np.array([3,5,2,4]) FutureStock_2 = np.array([0,0,0,0,18,20,20,20,20]) ThisSwitchTime = 5 # First year with future stock curve, start counting from 1. 
Inflow_2 = np.array([3.541625588, 5.227890554,2.01531097,4]) ############################################################################### """Create Dynamic Stock Models and hand over the pre-defined values.""" # For zero lifetime: border case myDSM0 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_FixedLT0) # For fixed LT myDSM = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_FixedLT) myDSM2 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_FixedLT, lt=lifetime_FixedLT) myDSMx = dsm.DynamicStockModel(t=Time_T_FixedLT_X, lt=lifetime_FixedLT_X) TestInflow_X = myDSMx.compute_i_from_s(InitialStock=InitialStock_X) myDSMxy = dsm.DynamicStockModel(t=Time_T_FixedLT_X, i=TestInflow_X, lt=lifetime_FixedLT_X) # For zero normally distributed lifetime: border case myDSM0n = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_NormLT0) # For normally distributed Lt myDSM3 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_NormLT) myDSM4 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_NormLT, lt=lifetime_NormLT) myDSMX = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, lt=lifetime_NormLT_X) TestInflow_XX = myDSMX.compute_i_from_s(InitialStock=InitialStock_XX) myDSMXY = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, i=TestInflow_XX, lt=lifetime_NormLT_X) # Test compute_stock_driven_model_initialstock: TestDSM_IntitialStock = dsm.DynamicStockModel(t=Time_T_FixedLT_2, s=FutureStock_2, lt=lifetime_NormLT_2) Sc_InitialStock_2,Oc_InitialStock_2,I_InitialStock_2 = TestDSM_IntitialStock.compute_stock_driven_model_initialstock(InitialStock = InitialStock_2, SwitchTime = ThisSwitchTime) # Compute stock back from inflow TestDSM_IntitialStock_Verify = dsm.DynamicStockModel(t=Time_T_FixedLT_2, i=I_InitialStock_2, lt=lifetime_NormLT_2) Sc_Stock_2 = TestDSM_IntitialStock_Verify.compute_s_c_inflow_driven() Sc_Stock_2_Sum = Sc_Stock_2.sum(axis =1) Sc_Stock_Sum = TestDSM_IntitialStock_Verify.compute_stock_total() Sc_Outflow_t_c = TestDSM_IntitialStock_Verify.compute_o_c_from_s_c() # For Weibull-distributed Lt myDSMWB1 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_WeibullLT) myDSMWB2 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_WeibullLT, lt=lifetime_WeibullLT) myDSMWB3 = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, lt=lifetime_WeibullLT) TestInflow_WB = myDSMWB3.compute_i_from_s(InitialStock=InitialStock_XX) myDSMWB4 = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, i=TestInflow_WB, lt=lifetime_WeibullLT) # Compute full stock model in correct order ############################################################################### """Unit Test Class""" class KnownResultsTestCase(unittest.TestCase): def test_inflow_driven_model_fixedLifetime_0(self): """Test Inflow Driven Model with Fixed product lifetime of 0.""" np.testing.assert_array_equal(myDSM0.compute_s_c_inflow_driven(), np.zeros(Stock_TC_FixedLT.shape)) np.testing.assert_array_equal(myDSM0.compute_stock_total(), np.zeros((Stock_TC_FixedLT.shape[0]))) np.testing.assert_array_equal(myDSM0.compute_stock_change(), np.zeros((Stock_TC_FixedLT.shape[0]))) np.testing.assert_array_equal(myDSM0.compute_outflow_mb(), Inflow_T_FixedLT) np.testing.assert_array_equal(myDSM0.check_stock_balance(), Bal.transpose()) def test_inflow_driven_model_fixedLifetime(self): """Test Inflow Driven Model with Fixed product lifetime.""" np.testing.assert_array_equal(myDSM.compute_s_c_inflow_driven(), Stock_TC_FixedLT) 
np.testing.assert_array_equal(myDSM.compute_stock_total(),Stock_T_FixedLT) np.testing.assert_array_equal(myDSM.compute_o_c_from_s_c(), Outflow_TC_FixedLT) np.testing.assert_array_equal(myDSM.compute_outflow_total(), Outflow_T_FixedLT) np.testing.assert_array_equal(myDSM.compute_stock_change(), StockChange_T_FixedLT) np.testing.assert_array_equal(myDSM.check_stock_balance(), Bal.transpose()) def test_stock_driven_model_fixedLifetime(self): """Test Stock Driven Model with Fixed product lifetime.""" np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[0], Stock_TC_FixedLT) np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[1], Outflow_TC_FixedLT) np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[2], Inflow_T_FixedLT) np.testing.assert_array_equal(myDSM2.compute_outflow_total(), Outflow_T_FixedLT) np.testing.assert_array_equal(myDSM2.compute_stock_change(), StockChange_T_FixedLT) np.testing.assert_array_equal(myDSM2.check_stock_balance(), Bal.transpose()) def test_inflow_driven_model_normallyDistrLifetime_0(self): """Test Inflow Driven Model with Fixed product lifetime of 0.""" np.testing.assert_array_equal(myDSM0n.compute_s_c_inflow_driven(), np.zeros(Stock_TC_FixedLT.shape)) np.testing.assert_array_equal(myDSM0n.compute_stock_total(), np.zeros((Stock_TC_FixedLT.shape[0]))) np.testing.assert_array_equal(myDSM0n.compute_stock_change(), np.zeros((Stock_TC_FixedLT.shape[0]))) np.testing.assert_array_equal(myDSM0n.compute_outflow_mb(), Inflow_T_FixedLT) np.testing.assert_array_equal(myDSM0n.check_stock_balance(), Bal.transpose()) def test_inflow_driven_model_normallyDistLifetime(self): """Test Inflow Driven Model with normally distributed product lifetime.""" np.testing.assert_array_almost_equal(myDSM3.compute_s_c_inflow_driven(), Stock_TC_NormLT, 8) np.testing.assert_array_almost_equal(myDSM3.compute_stock_total(), Stock_T_NormLT, 8) np.testing.assert_array_almost_equal(myDSM3.compute_o_c_from_s_c(), Outflow_TC_NormLT, 8) np.testing.assert_array_almost_equal(myDSM3.compute_outflow_total(), Outflow_T_NormLT, 8) np.testing.assert_array_almost_equal(myDSM3.compute_stock_change(), StockChange_T_NormLT, 8) np.testing.assert_array_almost_equal(myDSM3.check_stock_balance(), Bal.transpose(), 12) def test_stock_driven_model_normallyDistLifetime(self): """Test Stock Driven Model with normally distributed product lifetime.""" np.testing.assert_array_almost_equal( myDSM4.compute_stock_driven_model()[0], Stock_TC_NormLT, 8) np.testing.assert_array_almost_equal( myDSM4.compute_stock_driven_model()[1], Outflow_TC_NormLT, 8) np.testing.assert_array_almost_equal( myDSM4.compute_stock_driven_model()[2], Inflow_T_FixedLT, 8) np.testing.assert_array_almost_equal(myDSM4.compute_outflow_total(), Outflow_T_NormLT, 8) np.testing.assert_array_almost_equal( myDSM4.compute_stock_change(), StockChange_T_NormLT, 8) np.testing.assert_array_almost_equal(myDSM4.check_stock_balance(), Bal.transpose(), 12) def test_inflow_driven_model_WeibullDistLifetime(self): """Test Inflow Driven Model with Weibull-distributed product lifetime.""" np.testing.assert_array_almost_equal( myDSMWB1.compute_s_c_inflow_driven(), Stock_TC_WeibullLT, 9) np.testing.assert_array_almost_equal(myDSMWB1.compute_stock_total(), Stock_T_WeibullLT, 8) np.testing.assert_array_almost_equal(myDSMWB1.compute_o_c_from_s_c(), Outflow_TC_WeibullLT, 9) np.testing.assert_array_almost_equal(myDSMWB1.compute_outflow_total(), Outflow_T_WeibullLT, 9) np.testing.assert_array_almost_equal( myDSMWB1.compute_stock_change(), 
StockChange_T_WeibullLT, 9) np.testing.assert_array_almost_equal(myDSMWB1.check_stock_balance(), Bal.transpose(), 12) def test_stock_driven_model_WeibullDistLifetime(self): """Test Stock Driven Model with Weibull-distributed product lifetime.""" np.testing.assert_array_almost_equal( myDSMWB1.compute_stock_driven_model()[0], Stock_TC_WeibullLT, 8) np.testing.assert_array_almost_equal( myDSMWB1.compute_stock_driven_model()[1], Outflow_TC_WeibullLT, 8) np.testing.assert_array_almost_equal( myDSMWB1.compute_stock_driven_model()[2], Inflow_T_FixedLT, 8) np.testing.assert_array_almost_equal(myDSMWB1.compute_outflow_total(), Outflow_T_WeibullLT, 9) np.testing.assert_array_almost_equal( myDSMWB1.compute_stock_change(), StockChange_T_WeibullLT, 8) np.testing.assert_array_almost_equal(myDSMWB1.check_stock_balance(), Bal.transpose(), 12) def test_inflow_from_stock_fixedLifetime(self): """Test computation of inflow from stock with Fixed product lifetime.""" np.testing.assert_array_equal(TestInflow_X, Inflow_X) np.testing.assert_array_equal(myDSMxy.compute_s_c_inflow_driven()[-1, :], InitialStock_X) def test_inflow_from_stock_normallyDistLifetime(self): """Test computation of inflow from stock with normally distributed product lifetime.""" np.testing.assert_array_almost_equal(TestInflow_XX, Inflow_XX, 8) np.testing.assert_array_almost_equal(myDSMXY.compute_s_c_inflow_driven()[-1, :], InitialStock_XX, 9) def test_inflow_from_stock_WeibullDistLifetime(self): """Test computation of inflow from stock with Weibull-distributed product lifetime.""" np.testing.assert_array_almost_equal(TestInflow_WB, Inflow_WB, 9) np.testing.assert_array_almost_equal(myDSMWB4.compute_s_c_inflow_driven()[-1, :], InitialStock_WB, 9) def test_compute_stock_driven_model_initialstock(self): """Test stock-driven model with initial stock given.""" np.testing.assert_array_almost_equal(I_InitialStock_2, I_InitialStock_2_Ref, 8) np.testing.assert_array_almost_equal(Sc_InitialStock_2, Sc_InitialStock_2_Ref, 8) np.testing.assert_array_almost_equal(Sc_InitialStock_2.sum(axis =1), Sc_InitialStock_2_Ref_Sum, 8) np.testing.assert_array_almost_equal(Oc_InitialStock_2, Oc_InitialStock_2_Ref, 8) if __name__ == '__main__': unittest.main()
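Beyond the unittest.main() entry point above, the known-results suite can also be run selectively; a minimal sketch using only the standard unittest API:

# run only the known-results suite with verbose output
import unittest
suite = unittest.defaultTestLoader.loadTestsFromTestCase(KnownResultsTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)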
import os

from docker.models.containers import Container
import docker
import pytest

from tox_docker.config import runas_name


def find_container(instance_name: str) -> Container:
    # TODO: refactor this as a pytest fixture
    # this is running in a child-process of the tox instance which
    # spawned the container; so we need to pass the parent pid to
    # get the right runas_name()
    running_name = runas_name(instance_name, pid=os.getppid())
    client = docker.from_env(version="auto")
    for container in client.containers.list():
        container.attrs["Config"].get("Labels", {})
        if container.name == running_name:
            return container
    pytest.fail(f"No running container with instance name {running_name!r}")
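A sketch of how this helper might be used inside a test; the instance name "postgres" is only an illustrative placeholder, not something defined above:

def test_postgres_container_is_running():
    # look up the container tox-docker started for this tox run
    container = find_container("postgres")
    # `status` is the standard docker-py Container attribute
    assert container.status == "running"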
from django.core.exceptions import PermissionDenied from rest_framework import viewsets, mixins from rest_framework.decorators import action from rest_framework.response import Response from emstrack.mixins import UpdateModelUpdateByMixin, BasePermissionMixin from equipment.models import EquipmentItem, EquipmentHolder, Equipment from equipment.serializers import EquipmentItemSerializer, EquipmentSerializer from hospital.viewsets import logger from login.permissions import get_permissions class EquipmentItemViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, UpdateModelUpdateByMixin, viewsets.GenericViewSet): """ API endpoint for manipulating hospital equipment. list: Retrieve list of hospital equipment. retrieve: Retrieve an existing hospital equipment instance. update: Update existing hospital equipment instance. partial_update: Partially update existing hospital equipment instance. """ queryset = EquipmentItem.objects.all() serializer_class = EquipmentItemSerializer lookup_field = 'equipment_id' # make sure both fields are looked up def get_queryset(self): # retrieve user user = self.request.user # return nothing if anonymous if user.is_anonymous: raise PermissionDenied() # retrieve id equipmentholder_id = int(self.kwargs['equipmentholder_id']) logger.debug('kwargs = {}'.format(self.kwargs)) try: # retrieve equipmentholder equipmentholder = EquipmentHolder.objects.get(id=equipmentholder_id) # read or write? if self.request.method == 'GET': is_write = False elif (self.request.method == 'PUT' or self.request.method == 'PATCH' or self.request.method == 'DELETE'): is_write = True # is hospital? if equipmentholder.is_hospital(): # check permission (and also existence) if is_write: if not get_permissions(user).check_can_write(hospital=equipmentholder.hospital.id): raise PermissionDenied() else: if not get_permissions(user).check_can_read(hospital=equipmentholder.hospital.id): raise PermissionDenied() # is ambulance? elif equipmentholder.is_ambulance(): # check permission (and also existence) if is_write: if not get_permissions(user).check_can_write(ambulance=equipmentholder.ambulance.id): raise PermissionDenied() else: if not get_permissions(user).check_can_read(ambulance=equipmentholder.ambulance.id): raise PermissionDenied() else: raise PermissionDenied() except EquipmentHolder.DoesNotExist as e: raise PermissionDenied() # build queryset filter = {'equipmentholder_id': equipmentholder_id} return self.queryset.filter(**filter) class EquipmentViewSet(BasePermissionMixin, viewsets.GenericViewSet): """ API endpoint for manipulating equipment. metadata Partially update existing hospital instance. """ profile_field = 'equipments' filter_field = 'id' queryset = EquipmentHolder.objects.all() @action(detail=True) def metadata(self, request, pk=None, **kwargs): """ Retrive hospital equipment metadata. """ equipmentholder = self.get_object() equipment_list = equipmentholder.equipmentitem_set.values('equipment') equipment = Equipment.objects.filter(id__in=equipment_list) serializer = EquipmentSerializer(equipment, many=True) return Response(serializer.data)
import FWCore.ParameterSet.Config as cms

from ..modules.hltBTagPFPuppiDeepCSV0p865DoubleEta2p4_cfi import *
from ..modules.hltDoublePFPuppiJets128Eta2p4MaxDeta1p6_cfi import *
from ..modules.hltDoublePFPuppiJets128MaxEta2p4_cfi import *
from ..modules.l1tDoublePFPuppiJet112offMaxEta2p4_cfi import *
from ..modules.l1tDoublePFPuppiJets112offMaxDeta1p6_cfi import *
from ..sequences.HLTAK4PFPuppiJetsReconstruction_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTBtagDeepCSVSequencePFPuppiModEta2p4_cfi import *
from ..sequences.HLTEndSequence_cfi import *
from ..sequences.HLTParticleFlowSequence_cfi import *

HLT_DoublePFPuppiJets128_DoublePFPuppiBTagDeepCSV_2p4 = cms.Path(
    HLTBeginSequence +
    l1tDoublePFPuppiJet112offMaxEta2p4 +
    l1tDoublePFPuppiJets112offMaxDeta1p6 +
    HLTParticleFlowSequence +
    HLTAK4PFPuppiJetsReconstruction +
    hltDoublePFPuppiJets128MaxEta2p4 +
    hltDoublePFPuppiJets128Eta2p4MaxDeta1p6 +
    HLTBtagDeepCSVSequencePFPuppiModEta2p4 +
    hltBTagPFPuppiDeepCSV0p865DoubleEta2p4 +
    HLTEndSequence
)
def extract_to_m2(filename, annot_triples):
    """
    Extracts error detection annotations in m2 file format

    Args:
        filename: the output m2 file
        annot_triples: the annotations of form (sentence, indexes, selections)
    """
    with open(filename, 'w+') as m2_file:
        for triple in annot_triples:
            s_line = 'S ' + triple[0] + '\n'
            m2_file.write(s_line)
            for i in range(len(triple[1])):
                if triple[2][i] == 1:
                    a_line = 'A '
                    if isinstance(triple[1][i], int):
                        a_line += str(triple[1][i]) + ' ' + str(triple[1][i] + 1)
                    else:
                        a_line += triple[1][i] + ' ' + triple[1][i]
                    a_line += '|||IG|||IG|||REQUIRED|||-NONE-|||1\n'
                    m2_file.write(a_line)
            m2_file.write('\n')
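A short usage sketch, assuming annotations for a single sentence in which two tokens are flagged as errors (the sentence, indexes, and output filename are all placeholders):

# (sentence, token indexes, binary selections) as described in the docstring
annots = [
    ("She go to school yesterday .", [0, 1, 2, 3, 4, 5], [0, 1, 0, 0, 1, 0]),
]
extract_to_m2("detections.m2", annots)
# detections.m2 then contains one 'S ...' line followed by
# 'A 1 2|||IG|||IG|||REQUIRED|||-NONE-|||1' and 'A 4 5|||...' for the flagged tokens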
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from webkitpy.common.system.executive import ScriptError from webkitpy.common.net.layouttestresults import LayoutTestResults class CommitQueueTaskDelegate(object): def run_command(self, command): raise NotImplementedError("subclasses must implement") def command_passed(self, message, patch): raise NotImplementedError("subclasses must implement") def command_failed(self, message, script_error, patch): raise NotImplementedError("subclasses must implement") def refetch_patch(self, patch): raise NotImplementedError("subclasses must implement") def layout_test_results(self): raise NotImplementedError("subclasses must implement") def archive_last_layout_test_results(self, patch): raise NotImplementedError("subclasses must implement") # We could make results_archive optional, but for now it's required. def report_flaky_tests(self, patch, flaky_tests, results_archive): raise NotImplementedError("subclasses must implement") class CommitQueueTask(object): def __init__(self, delegate, patch): self._delegate = delegate self._patch = patch self._script_error = None def _validate(self): # Bugs might get closed, or patches might be obsoleted or r-'d while the # commit-queue is processing. self._patch = self._delegate.refetch_patch(self._patch) if self._patch.is_obsolete(): return False if self._patch.bug().is_closed(): return False if not self._patch.committer(): return False if not self._patch.review() != "-": return False # Reviewer is not required. Missing reviewers will be caught during # the ChangeLog check during landing. 
return True def _run_command(self, command, success_message, failure_message): try: self._delegate.run_command(command) self._delegate.command_passed(success_message, patch=self._patch) return True except ScriptError, e: self._script_error = e self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch) return False def _clean(self): return self._run_command([ "clean", ], "Cleaned working directory", "Unable to clean working directory") def _update(self): # FIXME: Ideally the status server log message should include which revision we updated to. return self._run_command([ "update", ], "Updated working directory", "Unable to update working directory") def _apply(self): return self._run_command([ "apply-attachment", "--no-update", "--non-interactive", self._patch.id(), ], "Applied patch", "Patch does not apply") def _build(self): return self._run_command([ "build", "--no-clean", "--no-update", "--build-style=both", ], "Built patch", "Patch does not build") def _build_without_patch(self): return self._run_command([ "build", "--force-clean", "--no-update", "--build-style=both", ], "Able to build without patch", "Unable to build without patch") def _test(self): return self._run_command([ "build-and-test", "--no-clean", "--no-update", # Notice that we don't pass --build, which means we won't build! "--test", "--non-interactive", ], "Passed tests", "Patch does not pass tests") def _build_and_test_without_patch(self): return self._run_command([ "build-and-test", "--force-clean", "--no-update", "--build", "--test", "--non-interactive", ], "Able to pass tests without patch", "Unable to pass tests without patch (tree is red?)") def _failing_results_from_last_run(self): results = self._delegate.layout_test_results() if not results: return [] # Makes callers slighty cleaner to not have to deal with None return results.failing_test_results() def _land(self): # Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should. return self._run_command([ "land-attachment", "--force-clean", "--ignore-builders", "--non-interactive", "--parent-command=commit-queue", self._patch.id(), ], "Landed patch", "Unable to land patch") def _report_flaky_tests(self, flaky_test_results, results_archive): self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive) def _test_patch(self): if self._test(): return True first_results = self._failing_results_from_last_run() first_failing_tests = [result.filename for result in first_results] first_results_archive = self._delegate.archive_last_layout_test_results(self._patch) if self._test(): # Only report flaky tests if we were successful at archiving results. if first_results_archive: self._report_flaky_tests(first_results, first_results_archive) return True second_results = self._failing_results_from_last_run() second_failing_tests = [result.filename for result in second_results] if first_failing_tests != second_failing_tests: # We could report flaky tests here, but since run-webkit-tests # is run with --exit-after-N-failures=1, we would need to # be careful not to report constant failures as flaky due to earlier # flaky test making them not fail (no results) in one of the runs. # See https://bugs.webkit.org/show_bug.cgi?id=51272 return False if self._build_and_test_without_patch(): return self.report_failure() # The error from the previous ._test() run is real, report it. return False # Tree must be red, just retry later. 
def report_failure(self): if not self._validate(): return False raise self._script_error def run(self): if not self._validate(): return False if not self._clean(): return False if not self._update(): return False if not self._apply(): return self.report_failure() if not self._patch.is_rollout(): if not self._build(): if not self._build_without_patch(): return False return self.report_failure() if not self._test_patch(): return False # Make sure the patch is still valid before landing (e.g., make sure # no one has set commit-queue- since we started working on the patch.) if not self._validate(): return False # FIXME: We should understand why the land failure occured and retry if possible. if not self._land(): return self.report_failure() return True
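For orientation, a minimal, purely illustrative delegate stub that satisfies the interface CommitQueueTask drives; the real commit-queue delegate shells out to webkit-patch and talks to Bugzilla, so the class name and the print-based reporting here are assumptions for demonstration only:

class LoggingDelegate(CommitQueueTaskDelegate):
    # Records each command the task would run instead of executing it.
    def __init__(self):
        self.commands = []

    def run_command(self, command):
        self.commands.append(command)  # a real delegate raises ScriptError on failure

    def command_passed(self, message, patch):
        print("PASS %s: %s" % (patch.id(), message))

    def command_failed(self, message, script_error, patch):
        print("FAIL %s: %s" % (patch.id(), message))
        return None  # the real delegate returns a status-server id

    def refetch_patch(self, patch):
        return patch

    def layout_test_results(self):
        return None

    def archive_last_layout_test_results(self, patch):
        return None

    def report_flaky_tests(self, patch, flaky_tests, results_archive):
        pass

With such a stub, CommitQueueTask(LoggingDelegate(), patch).run() walks the clean / update / apply / build / test / land sequence while only recording the commands it would have issued.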
""" .. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.db import models from assembly.models import Assembly from tark.fields import ChecksumField from sequence.models import Sequence from gene.models import Gene from session.models import Session import logging # Get an instance of a logger logger = logging.getLogger(__name__) class Transcript(models.Model): MANY2ONE_RELATED = {'SEQUENCE': 'sequence', 'SESSION': 'session', 'ASSEMBLY': 'assembly'} ONE2MANY_RELATED = {'RELEASE_SET': 'transcript_release_set', 'GENE': 'genes', 'TRANSLATION': "translations", "EXONTRANSCRIPT": "exons" } # You'll normally want to ensure that you've set an appropriate related_name argument on the relationship, # that you can use as the field name. transcript_id = models.AutoField(primary_key=True) stable_id = models.CharField(max_length=64) stable_id_version = models.PositiveIntegerField() assembly = models.ForeignKey(Assembly, models.DO_NOTHING, blank=True, null=True) loc_start = models.PositiveIntegerField(blank=True, null=True) loc_end = models.PositiveIntegerField(blank=True, null=True) loc_strand = models.IntegerField(blank=True, null=True) loc_region = models.CharField(max_length=42, blank=True, null=True) loc_checksum = ChecksumField(unique=True, max_length=20, blank=True, null=True) exon_set_checksum = ChecksumField(unique=True, max_length=20, blank=True, null=True) transcript_checksum = ChecksumField(unique=True, max_length=20, blank=True, null=True) sequence = models.ForeignKey(Sequence, models.DO_NOTHING, db_column='seq_checksum', blank=True, null=True) session = models.ForeignKey(Session, models.DO_NOTHING, blank=True, null=True) transcript_release_set = models.ManyToManyField('release.ReleaseSet', through='release.TranscriptReleaseTag', related_name='transcript_release_set') biotype = models.CharField(max_length=40, blank=True, null=True) genes = models.ManyToManyField('gene.Gene', through='transcript.TranscriptGene') exons = models.ManyToManyField('exon.Exon', through='exon.ExonTranscript') translations = models.ManyToManyField('translation.Translation', through='translation.TranslationTranscript') class Meta: managed = False db_table = 'transcript' @classmethod def fetch_mane_transcript_and_type(cls, transcript_id=None): transcript = None source = "Ensembl" if transcript_id is not None: transcript = Transcript.objects.get(pk=transcript_id) if transcript is not None: try: source = transcript.transcript_release_set.all()[:1].get().source.shortname except Exception as e: logger.error("Exception from get_mane_transcript " + str(e)) if "Ensembl" in source: raw_sql = "SELECT DISTINCT\ t1.transcript_id, t1.stable_id as ens_stable_id, t1.stable_id_version as ens_stable_id_version,\ relationship_type.shortname as mane_type,\ t2.stable_id as refseq_stable_id, t2.stable_id_version as refseq_stable_id_version \ FROM \ transcript t1 \ JOIN transcript_release_tag trt1 ON t1.transcript_id=trt1.feature_id \ JOIN 
transcript_release_tag_relationship ON \ trt1.transcript_release_id=transcript_release_tag_relationship.transcript_release_object_id \ JOIN transcript_release_tag trt2 ON \ transcript_release_tag_relationship.transcript_release_subject_id=trt2.transcript_release_id \ JOIN transcript t2 ON trt2.feature_id=t2.transcript_id \ JOIN relationship_type ON \ transcript_release_tag_relationship.relationship_type_id=relationship_type.relationship_type_id" else: raw_sql = "SELECT DISTINCT\ t1.transcript_id, t1.stable_id as ens_stable_id, t1.stable_id_version as ens_stable_id_version,\ relationship_type.shortname as mane_type,\ t2.stable_id as refseq_stable_id, t2.stable_id_version as refseq_stable_id_version \ FROM \ transcript t1 \ JOIN transcript_release_tag trt1 ON t1.transcript_id=trt1.feature_id \ JOIN transcript_release_tag_relationship ON \ trt1.transcript_release_id=transcript_release_tag_relationship.transcript_release_subject_id \ JOIN transcript_release_tag trt2 ON \ transcript_release_tag_relationship.transcript_release_object_id=trt2.transcript_release_id \ JOIN transcript t2 ON trt2.feature_id=t2.transcript_id \ JOIN relationship_type ON \ transcript_release_tag_relationship.relationship_type_id=relationship_type.relationship_type_id" if transcript_id is not None: raw_sql = raw_sql + " WHERE t1.transcript_id=%s limit 1" mane_transcripts = Transcript.objects.raw(raw_sql, [transcript_id]) mane_transcript_dict = {} if mane_transcripts is not None and len(list(mane_transcripts)) > 0: mane_transcript = mane_transcripts[0] mane_transcript_dict = {"mane_transcript_stableid": "{}.{}".format(mane_transcript.refseq_stable_id, mane_transcript.refseq_stable_id_version), "mane_transcript_type": mane_transcript.mane_type} return mane_transcript_dict else: raw_sql = "SELECT DISTINCT\ t1.transcript_id, t1.stable_id as ens_stable_id, t1.stable_id_version as ens_stable_id_version,\ relationship_type.shortname as mane_type,\ t2.stable_id as refseq_stable_id, t2.stable_id_version as refseq_stable_id_version,\ gn1.name as ens_gene_name \ FROM \ transcript t1 \ JOIN transcript_release_tag trt1 ON t1.transcript_id=trt1.feature_id \ JOIN transcript_release_tag_relationship ON \ trt1.transcript_release_id=transcript_release_tag_relationship.transcript_release_object_id \ JOIN transcript_release_tag trt2 ON \ transcript_release_tag_relationship.transcript_release_subject_id=trt2.transcript_release_id \ JOIN transcript t2 ON trt2.feature_id=t2.transcript_id \ JOIN relationship_type ON \ transcript_release_tag_relationship.relationship_type_id=relationship_type.relationship_type_id\ JOIN transcript_gene tg1 ON \ t1.transcript_id=tg1.transcript_id \ JOIN gene gene1 ON \ tg1.gene_id=gene1.gene_id \ JOIN gene_names gn1 ON \ gene1.name_id=gn1.external_id \ where gn1.primary_id=1" mane_transcripts = Transcript.objects.raw(raw_sql) return mane_transcripts class TranscriptGene(models.Model): gene_transcript_id = models.AutoField(primary_key=True) gene = models.ForeignKey(Gene, models.DO_NOTHING, blank=True, null=True) transcript = models.ForeignKey(Transcript, models.DO_NOTHING, blank=True, null=True) session = models.ForeignKey(Session, models.DO_NOTHING, blank=True, null=True) class Meta: managed = False db_table = 'transcript_gene' unique_together = (('gene', 'transcript'),)
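A brief usage sketch for the fetch_mane_transcript_and_type classmethod above; the primary key 42 is a placeholder, and the lookup assumes the release/relationship tables referenced in the raw SQL are populated:

# single-transcript lookup: returns {} or a dict with the MANE RefSeq id and type
mane = Transcript.fetch_mane_transcript_and_type(transcript_id=42)
if mane:
    print(mane["mane_transcript_stableid"], mane["mane_transcript_type"])

# with no id, a RawQuerySet over all MANE-tagged transcripts is returned instead
for row in Transcript.fetch_mane_transcript_and_type():
    print(row.ens_stable_id, row.refseq_stable_id, row.mane_type)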
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @author Nastasia Vanderperren
#
# get posts of a fb page, group or account
# returns a json lines file with a line for each post
#
from argparse import ArgumentParser
from datetime import date, datetime
from facebook_scraper import get_posts
from json import dumps, JSONEncoder


class DateTimeEncoder(JSONEncoder):
    def default(self, o):
        if isinstance(o, (date, datetime)):
            return str(o)
        else:
            return super().default(o)


def write_posts(account, posts):
    today = date.today().strftime("%Y%m%d")
    with open("{}_{}_facebook.jsonl".format(today, account), 'w') as output_file:
        for post in posts:
            del post['text']
            output_file.write(dumps(post, cls=DateTimeEncoder, ensure_ascii=False))
            output_file.write("\n")


def get_fb_posts(args):
    account = args.account
    reactions = args.reactions
    comments = args.comments
    pages = args.pages
    cookies = None
    if args.cookies:
        cookies = args.cookies
    if args.group:
        posts = get_posts(group=account, cookies=cookies, pages=pages, extra_info=True,
                          options={"comments": comments, "reactors": reactions})
    else:
        posts = get_posts(account=account, cookies=cookies, pages=pages, extra_info=True,
                          options={"comments": comments, "reactors": reactions})
    write_posts(account, posts)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('account', help="name of the account", type=str)
    parser.add_argument('--cookies', help="cookie file for getting data of a private account", required=False)
    parser.add_argument('--reactions', help="extract likes and so on from posts", action='store_true')
    parser.add_argument('--comments', help="scrape comments too", action='store_true')
    parser.add_argument('--group', help="account is a group", action='store_true')
    parser.add_argument('--pages', help="number of pages to scrape", type=int, default=10)
    args = parser.parse_args()
    get_fb_posts(args)
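A minimal sketch of driving get_fb_posts programmatically rather than from the command line; the page name "examplepage" is a placeholder, and the Namespace simply mirrors the arguments the parser above would produce:

from argparse import Namespace

# equivalent to passing `examplepage --pages 2` on the command line
get_fb_posts(Namespace(
    account="examplepage",
    cookies=None,
    reactions=False,
    comments=False,
    group=False,
    pages=2,
))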
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk


class Ked:
    def __init__(self, app_path, ufile=None):
        glade_layout = f'{app_path}/data/ked_layout.glade'
        self.builder = gtk.Builder()
        self.builder.add_from_file(glade_layout)

        win = self.builder.get_object("KedMain")
        win.connect("delete-event", gtk.main_quit)
        win.show()

    def echo(self, msg):
        print(f'Message: {msg}')


def start_ked(app_path, user_file=None):
    main = Ked(app_path)
    gtk.main()
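A minimal launch sketch, assuming the package's data/ked_layout.glade file lives under the directory passed in (the path used here is illustrative):

import os

# start the GTK main loop with the glade layout found under <app_path>/data/
start_ked(os.path.dirname(os.path.abspath(__file__)))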
from .patchy import PatchySan
from .helper.labeling import labelings,\
    scanline,\
    betweenness_centrality
from .helper.neighborhood_assembly import neighborhood_assemblies,\
    neighborhoods_weights_to_root,\
    neighborhoods_grid_spiral
#!/usr/bin/env python # vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 : # # Compare an image file and its associated uncertainty image. # # Rob Siverd # Created: 2021-06-03 # Last modified: 2021-06-03 #-------------------------------------------------------------------------- #************************************************************************** #-------------------------------------------------------------------------- ## Logging setup: import logging #logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) #logger.setLevel(logging.DEBUG) logger.setLevel(logging.INFO) ## Current version: __version__ = "0.0.1" ## Python version-agnostic module reloading: try: reload # Python 2.7 except NameError: try: from importlib import reload # Python 3.4+ except ImportError: from imp import reload # Python 3.0 - 3.3 ## Modules: #import argparse #import shutil import resource import signal #import glob import gc import os import sys import time #import vaex #import calendar #import ephem import numpy as np #from numpy.lib.recfunctions import append_fields #import datetime as dt #from dateutil import parser as dtp #import scipy.linalg as sla #import scipy.signal as ssig #import scipy.ndimage as ndi #import scipy.optimize as opti #import scipy.interpolate as stp #import scipy.spatial.distance as ssd import matplotlib.pyplot as plt #import matplotlib.cm as cm #import matplotlib.ticker as mt #import matplotlib._pylab_helpers as hlp #from matplotlib.colors import LogNorm #import matplotlib.colors as mplcolors #import matplotlib.collections as mcoll #import matplotlib.gridspec as gridspec #from functools import partial #from collections import OrderedDict #from collections.abc import Iterable #import multiprocessing as mp #np.set_printoptions(suppress=True, linewidth=160) #import pandas as pd #import statsmodels.api as sm #import statsmodels.formula.api as smf #from statsmodels.regression.quantile_regression import QuantReg #import PIL.Image as pli #import seaborn as sns #import cmocean import theil_sen as ts #import window_filter as wf #import itertools as itt _have_np_vers = float('.'.join(np.__version__.split('.')[:2])) ##--------------------------------------------------------------------------## ## Disable buffering on stdout/stderr: class Unbuffered(object): def __init__(self, stream): self.stream = stream def write(self, data): self.stream.write(data) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) sys.stdout = Unbuffered(sys.stdout) sys.stderr = Unbuffered(sys.stderr) ##--------------------------------------------------------------------------## ##--------------------------------------------------------------------------## ## Home-brew robust statistics: try: import robust_stats reload(robust_stats) rs = robust_stats except ImportError: logger.error("module robust_stats not found! Install and retry.") sys.stderr.write("\nError! robust_stats module not found!\n" "Please install and try again ...\n\n") sys.exit(1) ## Home-brew KDE: #try: # import my_kde # reload(my_kde) # mk = my_kde #except ImportError: # logger.error("module my_kde not found! Install and retry.") # sys.stderr.write("\nError! my_kde module not found!\n" # "Please install and try again ...\n\n") # sys.exit(1) ## Fast FITS I/O: #try: # import fitsio #except ImportError: # logger.error("fitsio module not found! 
Install and retry.") # sys.stderr.write("\nError: fitsio module not found!\n") # sys.exit(1) ## Various from astropy: try: # import astropy.io.ascii as aia import astropy.io.fits as pf # import astropy.io.votable as av # import astropy.table as apt # import astropy.time as astt # import astropy.wcs as awcs # from astropy import constants as aconst # from astropy import coordinates as coord # from astropy import units as uu except ImportError: # logger.error("astropy module not found! Install and retry.") sys.stderr.write("\nError: astropy module not found!\n") sys.exit(1) ## Star extraction: #try: # import easy_sep # reload(easy_sep) #except ImportError: # logger.error("easy_sep module not found! Install and retry.") # sys.stderr.write("Error: easy_sep module not found!\n\n") # sys.exit(1) #pse = easy_sep.EasySEP() ##--------------------------------------------------------------------------## ## Colors for fancy terminal output: NRED = '\033[0;31m' ; BRED = '\033[1;31m' NGREEN = '\033[0;32m' ; BGREEN = '\033[1;32m' NYELLOW = '\033[0;33m' ; BYELLOW = '\033[1;33m' NBLUE = '\033[0;34m' ; BBLUE = '\033[1;34m' NMAG = '\033[0;35m' ; BMAG = '\033[1;35m' NCYAN = '\033[0;36m' ; BCYAN = '\033[1;36m' NWHITE = '\033[0;37m' ; BWHITE = '\033[1;37m' ENDC = '\033[0m' ## Suppress colors in cron jobs: if (os.getenv('FUNCDEF') == '--nocolors'): NRED = '' ; BRED = '' NGREEN = '' ; BGREEN = '' NYELLOW = '' ; BYELLOW = '' NBLUE = '' ; BBLUE = '' NMAG = '' ; BMAG = '' NCYAN = '' ; BCYAN = '' NWHITE = '' ; BWHITE = '' ENDC = '' ## Fancy text: degree_sign = u'\N{DEGREE SIGN}' ## Dividers: halfdiv = '-' * 40 fulldiv = '-' * 80 ##--------------------------------------------------------------------------## ## Save FITS image with clobber (astropy / pyfits): #def qsave(iname, idata, header=None, padkeys=1000, **kwargs): # this_func = sys._getframe().f_code.co_name # parent_func = sys._getframe(1).f_code.co_name # sys.stderr.write("Writing to '%s' ... " % iname) # if header: # while (len(header) < padkeys): # header.append() # pad header # if os.path.isfile(iname): # os.remove(iname) # pf.writeto(iname, idata, header=header, **kwargs) # sys.stderr.write("done.\n") ##--------------------------------------------------------------------------## ## Save FITS image with clobber (fitsio): #def qsave(iname, idata, header=None, **kwargs): # this_func = sys._getframe().f_code.co_name # parent_func = sys._getframe(1).f_code.co_name # sys.stderr.write("Writing to '%s' ... 
" % iname) # #if os.path.isfile(iname): # # os.remove(iname) # fitsio.write(iname, idata, clobber=True, header=header, **kwargs) # sys.stderr.write("done.\n") ##--------------------------------------------------------------------------## def ldmap(things): return dict(zip(things, range(len(things)))) def argnear(vec, val): return (np.abs(vec - val)).argmin() ##--------------------------------------------------------------------------## ## New-style string formatting (more at https://pyformat.info/): #oldway = '%s %s' % ('one', 'two') #newway = '{} {}'.format('one', 'two') #oldway = '%d %d' % (1, 2) #newway = '{} {}'.format(1, 2) # With padding: #oldway = '%10s' % ('test',) # right-justified #newway = '{:>10}'.format('test') # right-justified #oldway = '%-10s' % ('test',) # left-justified #newway = '{:10}'.format('test') # left-justified # Ordinally: #newway = '{1} {0}'.format('one', 'two') # prints "two one" # Dictionarily: #newway = '{lastname}, {firstname}'.format(firstname='Rob', lastname='Siverd') # Centered (new-only): #newctr = '{:^10}'.format('test') # prints " test " # Numbers: #oldway = '%06.2f' % (3.141592653589793,) #newway = '{:06.2f}'.format(3.141592653589793) ##--------------------------------------------------------------------------## ## Quick ASCII I/O: #data_file = 'data.txt' #gftkw = {'encoding':None} if (_have_np_vers >= 1.14) else {} #gftkw.update({'names':True, 'autostrip':True}) #gftkw.update({'delimiter':'|', 'comments':'%0%0%0%0'}) #gftkw.update({'loose':True, 'invalid_raise':False}) #all_data = np.genfromtxt(data_file, dtype=None, **gftkw) #all_data = aia.read(data_file) #all_data = pd.read_csv(data_file) #all_data = pd.read_table(data_file, delim_whitespace=True) #all_data = pd.read_table(data_file, skipinitialspace=True) #all_data = pd.read_table(data_file, sep='|') #fields = all_data.dtype.names #if not fields: # x = all_data[:, 0] # y = all_data[:, 1] #else: # x = all_data[fields[0]] # y = all_data[fields[1]] #vot_file = 'neato.xml' #vot_data = av.parse_single_table(vot_file) #vot_data = av.parse_single_table(vot_file).to_table() ##--------------------------------------------------------------------------## ## Quick FITS I/O: ifile = 'SPITZER_I2_44772864_0004_0000_2_cbcd.fits' ufile = 'SPITZER_I2_44772864_0004_0000_2_cbunc.fits' idata, ihdrs = pf.getdata(ifile, header=True) udata, uhdrs = pf.getdata(ufile, header=True) gain = ihdrs['GAIN'] exptime = ihdrs['EXPTIME'] fluxconv = ihdrs['FLUXCONV'] ignore = np.isnan(idata) | np.isnan(udata) isafe = idata[~ignore] usafe = udata[~ignore] ignore = (isafe <= 0.0) iclean = isafe[~ignore] uclean = usafe[~ignore] ui_ratio = uclean / iclean ## Try to reproduce the idata:udata relationship ... icounts = iclean / fluxconv * exptime * gain # in electrons ucounts = uclean / fluxconv * exptime * gain # in electrons #icounts -= np.median(icounts) ##--------------------------------------------------------------------------## ##--------------------------------------------------------------------------## ## Estimate icounts:ucounts relationship from bright pixels: cutoff = 1e3 bright = (icounts >= cutoff) ic_fit = icounts[bright] uc_fit = ucounts[bright] vc_fit = uc_fit**2 sys.stderr.write("Fitting variance(counts) for bright pixels ... 
") model = ts.linefit(ic_fit, vc_fit) sys.stderr.write("done.\n") #model = np.array([375., 1.05]) ## A line for plotting: pcounts = np.linspace(0.1, 3e4, 1000) pcounts = np.logspace(-1.0, 4.5, 1000) pvarian = model[0] + model[1] * pcounts ##--------------------------------------------------------------------------## ## Theil-Sen line-fitting (linear): #model = ts.linefit(xvals, yvals) #icept, slope = ts.linefit(xvals, yvals) ## Theil-Sen line-fitting (loglog): #xvals, yvals = np.log10(original_xvals), np.log10(original_yvals) #xvals, yvals = np.log10(df['x'].values), np.log10(df['y'].values) #llmodel = ts.linefit(np.log10(xvals), np.log10(yvals)) #icept, slope = ts.linefit(xvals, yvals) #fit_exponent = slope #fit_multiplier = 10**icept #bestfit_x = np.arange(5000) #bestfit_y = fit_multiplier * bestfit_x**fit_exponent ## Log-log evaluator: #def loglog_eval(xvals, model): # icept, slope = model # return 10**icept * xvals**slope #def loglog_eval(xvals, icept, slope): # return 10**icept * xvals**slope ##--------------------------------------------------------------------------## ## Plot config: # gridspec examples: # https://matplotlib.org/users/gridspec.html #gs1 = gridspec.GridSpec(4, 4) #gs1.update(wspace=0.025, hspace=0.05) # set axis spacing #ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3) # top-left + center + right #ax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2) # mid-left + mid-center #ax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2) # mid-right + bot-right #ax4 = plt.subplot2grid((3, 3), (2, 0)) # bot-left #ax5 = plt.subplot2grid((3, 3), (2, 1)) # bot-center ##--------------------------------------------------------------------------## #plt.style.use('bmh') # Bayesian Methods for Hackers style fig_dims = (12, 10) fig = plt.figure(1, figsize=fig_dims) plt.gcf().clf() #fig, axs = plt.subplots(2, 2, sharex=True, figsize=fig_dims, num=1) # sharex='col' | sharex='row' #fig.frameon = False # disable figure frame drawing #fig.subplots_adjust(left=0.07, right=0.95) #ax1 = plt.subplot(gs[0, 0]) ax1 = fig.add_subplot(111) #ax1 = fig.add_axes([0, 0, 1, 1]) #ax1.patch.set_facecolor((0.8, 0.8, 0.8)) #ax1.grid(True) #ax1.axis('off') ax1.grid(True) #ax1.scatter(iclean, uclean, lw=0, s=5) ax1.scatter(icounts, ucounts**2, lw=0, s=5) ax1.plot(pcounts, pvarian, c='r') ax1.set_yscale('log') ax1.set_xscale('log') plot_name = 'gain_log.png' fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+ plt.draw() fig.savefig(plot_name, bbox_inches='tight') ax1.set_xscale('linear') ax1.set_yscale('linear') plot_name = 'gain_lin.png' fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+ plt.draw() fig.savefig(plot_name, bbox_inches='tight') ## Disable axis offsets: #ax1.xaxis.get_major_formatter().set_useOffset(False) #ax1.yaxis.get_major_formatter().set_useOffset(False) #ax1.plot(kde_pnts, kde_vals) #blurb = "some text" #ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes) #ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes, # va='top', ha='left', bbox=dict(facecolor='white', pad=10.0)) # fontdict={'family':'monospace'}) # fixed-width #colors = cm.rainbow(np.linspace(0, 1, len(plot_list))) #for camid, c in zip(plot_list, colors): # cam_data = subsets[camid] # xvalue = cam_data['CCDATEMP'] # yvalue = cam_data['PIX_MED'] # yvalue = cam_data['IMEAN'] # ax1.scatter(xvalue, yvalue, color=c, lw=0, label=camid) #mtickpos = [2,5,7] #ndecades = 1.0 # for symlog, set width of linear portion in units of dex #nonposx='mask' | nonposx='clip' | nonposy='mask' | nonposy='clip' #ax1.set_xscale('log', 
basex=10, nonposx='mask', subsx=mtickpos) #ax1.set_xscale('log', nonposx='clip', subsx=[3]) #ax1.set_yscale('symlog', basey=10, linthreshy=0.1, linscaley=ndecades) #ax1.xaxis.set_major_formatter(formatter) # re-format x ticks #ax1.set_ylim(ax1.get_ylim()[::-1]) #ax1.set_xlabel('whatever', labelpad=30) # push X label down #ax1.set_xticks([1.0, 3.0, 10.0, 30.0, 100.0]) #ax1.set_xticks([1, 2, 3], ['Jan', 'Feb', 'Mar']) #for label in ax1.get_xticklabels(): # label.set_rotation(30) # label.set_fontsize(14) #ax1.xaxis.label.set_fontsize(18) #ax1.yaxis.label.set_fontsize(18) #ax1.set_xlim(nice_limits(xvec, pctiles=[1,99], pad=1.2)) #ax1.set_ylim(nice_limits(yvec, pctiles=[1,99], pad=1.2)) #spts = ax1.scatter(x, y, lw=0, s=5) ##cbar = fig.colorbar(spts, orientation='vertical') # old way #cbnorm = mplcolors.Normalize(*spts.get_clim()) #scm = plt.cm.ScalarMappable(norm=cbnorm, cmap=spts.cmap) #scm.set_array([]) #cbar = fig.colorbar(scm, orientation='vertical') #cbar = fig.colorbar(scm, ticks=cs.levels, orientation='vertical') # contours #cbar.formatter.set_useOffset(False) #cbar.update_ticks() fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+ plt.draw() #fig.savefig(plot_name, bbox_inches='tight') ###################################################################### # CHANGELOG (compare_images.py): #--------------------------------------------------------------------- # # 2021-06-03: # -- Increased __version__ to 0.0.1. # -- First created compare_images.py. #
14,862
5,784
import logging import os try: import simplejson as json except ImportError: import json from flask import Flask, request, make_response, Response from cStringIO import StringIO import zipfile def get_mocked_server(binary_directory): mocked_cb_server = Flask('cb') files = os.listdir(binary_directory) @mocked_cb_server.route('/api/v1/binary', methods=['GET', 'POST']) def binary_search_endpoint(): if request.method == 'GET': query_string = request.args.get('q', '') rows = int(request.args.get('rows', 10)) start = int(request.args.get('start', 0)) elif request.method == 'POST': parsed_data = json.loads(request.data) if 'q' in parsed_data: query_string = parsed_data['q'] else: query_string = '' if 'rows' in parsed_data: rows = int(parsed_data['rows']) else: rows = 10 if 'start' in parsed_data: start = int(parsed_data['start']) else: start = 0 else: return make_response('Invalid Request', 500) return Response(response=json.dumps(binary_search(query_string, rows, start)), mimetype='application/json') def binary_search(q, rows, start): return { 'results': [json.load(open(os.path.join(binary_directory, fn), 'r')) for fn in files[start:start+rows]], 'terms': '', 'total_results': len(files), 'start': start, 'elapsed': 0.1, 'highlights': [], 'facets': {} } @mocked_cb_server.route('/api/v1/binary/<md5sum>/summary') def get_binary_summary(md5sum): filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower()) if not os.path.exists(filepath): return Response("File not found", 404) binary_data = open(filepath, 'r').read() return Response(response=binary_data, mimetype='application/json') @mocked_cb_server.route('/api/v1/binary/<md5sum>') def get_binary(md5sum): metadata_filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower()) content_filepath = os.path.join(binary_directory, '%s' % md5sum.lower()) for filepath in [metadata_filepath, content_filepath]: if not os.path.exists(filepath): return Response("File not found", 404) zipfile_contents = StringIO() zf = zipfile.ZipFile(zipfile_contents, 'w', zipfile.ZIP_DEFLATED, False) zf.writestr('filedata', open(content_filepath, 'r').read()) zf.writestr('metadata', open(metadata_filepath, 'r').read()) zf.close() return Response(response=zipfile_contents.getvalue(), mimetype='application/zip') @mocked_cb_server.route('/api/info') def info(): return Response(response=json.dumps({"version": "5.1.0"}), mimetype='application/json') return mocked_cb_server if __name__ == '__main__': mydir = os.path.dirname(os.path.abspath(__file__)) binaries_dir = os.path.join(mydir, '..', 'data', 'binary_data') mock_server = get_mocked_server(binaries_dir) mock_server.run('127.0.0.1', 7982, debug=True)
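# A hedged client-side sketch (not part of the mock server above): once the server is
# running on 127.0.0.1:7982, its endpoints can be exercised with the `requests` package.
# The md5 below is a placeholder; use a hash that actually exists in the binary_data dir.
import requests

base_url = 'http://127.0.0.1:7982'
print(requests.get(base_url + '/api/info').json())                  # {"version": "5.1.0"}

page = requests.get(base_url + '/api/v1/binary',
                    params={'q': '', 'rows': 5, 'start': 0}).json()
print(page['total_results'], len(page['results']))

summary = requests.get(base_url + '/api/v1/binary/d41d8cd98f00b204e9800998ecf8427e/summary')
print(summary.status_code)                                           # 404 unless that sample exists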
3,317
1,047
import numpy as np


def softmax_loss(in_, label):
    '''
    The softmax loss computing process

    inputs:
        in_   : the output of the previous layer, shape: [number of images, number of classes]
        label : the ground truth of these images (1-indexed), shape: [1, number of images]
    outputs:
        loss        : the average loss, a scalar
        accuracy    : the accuracy of the classification
        sensitivity : the gradient with respect to in_, shape: [number of images, number of classes]
    '''
    n, k = in_.shape
    # subtract the row-wise maximum for numerical stability
    in_ = in_ - np.tile(np.max(in_, axis=1, keepdims=True), (1, k))
    h = np.exp(in_)
    total = np.sum(h, axis=1, keepdims=True)
    probs = h / np.tile(total, k)
    idx = (np.arange(n), label.flatten() - 1)
    loss = -np.sum(np.log(probs[idx])) / n
    max_idx = np.argmax(probs, axis=1)
    accuracy = np.mean(max_idx == (label - 1).flatten())
    sensitivity = np.zeros((n, k))
    sensitivity[idx] = -1
    sensitivity = sensitivity + probs
    return loss, accuracy, sensitivity
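# A small usage sketch (toy values, not from the original file): two images, three classes,
# with 1-indexed labels as implied by the `label - 1` indexing above.
if __name__ == '__main__':
    scores = np.array([[2.0, 1.0, 0.1],
                       [0.5, 2.5, 0.3]])
    labels = np.array([[1, 2]])                  # image 0 is class 1, image 1 is class 2
    loss, accuracy, sensitivity = softmax_loss(scores, labels)
    print(loss, accuracy)                        # accuracy should be 1.0 on this toy input
    print(sensitivity.shape)                     # (2, 3): gradient passed back to the previous layer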
1,048
348
import os TELEGRAM_CHAT_ID = os.environ.get("TELEGRAM_CHAT_ID") if not TELEGRAM_CHAT_ID: raise ValueError("No TELEGRAM_CHAT_ID set for application") TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN") if not TELEGRAM_TOKEN: raise ValueError("No TELEGRAM_TOKEN set for application") TEMPLATES_AUTO_RELOAD = True
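# A hedged usage sketch (the file/app names are assumptions, not part of the original
# module): in a separate application module, Flask can load the upper-case settings
# defined above via config.from_object, assuming this file is saved as config.py.
from flask import Flask

app = Flask(__name__)
app.config.from_object('config')
bot_token = app.config['TELEGRAM_TOKEN']
chat_id = app.config['TELEGRAM_CHAT_ID']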
320
137
from common.parse_tex import ( BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor, ) from common.types import MacroDefinition from entities.sentences.extractor import SentenceExtractor def test_extract_plaintext_with_newlines(): extractor = PlaintextExtractor() plaintext_segments = list( extractor.parse( "main.tex", "This sentence is followed by a newline.\nThis is the second sentence.", ) ) # Earlier versions of the plaintext extractor inadvertently removed newlines, which are needed # to accurately perform downstream tasks like sentence boundary detection. This test makes sure # that the newlines are preserved. plaintext = "".join([segment.text for segment in plaintext_segments]) assert ( plaintext == "This sentence is followed by a newline.\nThis is the second sentence." ) def test_extract_sentences(): extractor = SentenceExtractor() sentences = list( extractor.parse( "main.tex", "This is the first \\macro[arg]{sentence}. This is the second sentence.", ) ) assert len(sentences) == 2 sentence1 = sentences[0] assert sentence1.start == 0 assert sentence1.end == 40 assert sentences[0].text == "This is the first argsentence." sentence2 = sentences[1] assert sentence2.start == 41 assert sentence2.end == 69 assert sentences[1].text == "This is the second sentence." def test_ignore_periods_in_equations(): extractor = SentenceExtractor() sentences = list( extractor.parse("main.tex", "This sentence has an $ equation. In $ the middle.") ) assert len(sentences) == 1 assert sentences[0].text == "This sentence has an [[math]] the middle." def test_extract_equation_from_dollar_sign(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "$x + y$")) assert len(equations) == 1 equation = equations[0] assert equation.start == 0 assert equation.content_start == 1 assert equation.end == 7 assert equation.content_tex == "x + y" assert equation.tex == "$x + y$" def test_extract_equation_from_equation_environment(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "\\begin{equation}x\\end{equation}")) assert len(equations) == 1 equation = equations[0] assert equation.start == 0 assert equation.content_start == 16 assert equation.end == 31 assert equation.content_tex == "x" assert equation.tex == "\\begin{equation}x\\end{equation}" def test_extract_equation_from_star_environment(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "\\begin{equation*}x\\end{equation*}")) assert len(equations) == 1 equation = equations[0] assert equation.start == 0 assert equation.end == 33 def test_extract_equation_environment_with_argument(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "\\begin{array}{c}x\\end{array}")) assert len(equations) == 1 equation = equations[0] assert equation.content_start == 16 def test_extract_equation_from_double_dollar_signs(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "$$x$$")) assert len(equations) == 1 equation = equations[0] assert equation.start == 0 assert equation.end == 5 def test_dont_extract_equation_from_command_argument_brackets(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "\\documentclass[11pt]{article}")) assert len(equations) == 0 def test_extract_equation_from_brackets(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "\\[x + y\\]")) assert len(equations) == 1 equation = equations[0] assert equation.start 
== 0 assert equation.content_start == 2 assert equation.end == 9 def test_extract_nested_equations(): extractor = EquationExtractor() equations = list( extractor.parse("main.tex", "$x + \\hbox{\\begin{equation}y\\end{equation}}$") ) assert len(equations) == 2 outer = next(filter(lambda e: e.start == 0, equations)) assert outer.end == 44 inner = next(filter(lambda e: e.start == 11, equations)) assert inner.end == 42 def test_handle_unclosed_environments(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "$x + \\hbox{\\begin{equation}y}$")) assert len(equations) == 1 equation = equations[0] assert equation.start == 0 assert equation.end == 30 def test_ignore_escaped_dollar_sign(): extractor = EquationExtractor() equations = list(extractor.parse("main.tex", "\\$\\$")) assert len(equations) == 0 def test_extract_begindocument(): extractor = BeginDocumentExtractor() tex = "\\RequirePackage[hyperindex]{hyperref}\n\\begin{document}" begindocument = extractor.parse(tex) assert begindocument.start == 38 assert begindocument.end == 54 def test_extract_documentclass_after_comment_ending_with_whitespace(): extractor = DocumentclassExtractor() tex = "\n\n%\\documentclass{IEEEtran} \n\\documentclass{article}" documentclass = extractor.parse(tex) assert documentclass is not None def test_documentclass_after_macro(): # In some TeX files, the documentclass isn't declared until after some initial macros. # We still want to detect the documentclass in these documents. extractor = DocumentclassExtractor() tex = "\\def\year{2020}\n\\documentclass{article}" documentclass = extractor.parse(tex) assert documentclass is not None def test_extract_bibitems(): tex = "\n".join( [ "\\bibitem[label]{key1}", "token1", "\\newblock \\emph{token2}", "\\newblock token3", "\\bibitem[label]{key2}", "token4", "\\newblock \\emph{token5}", ] ) extractor = BibitemExtractor() bibitems = list(extractor.parse(tex)) assert len(bibitems) == 2 assert bibitems[0].key == "key1" assert bibitems[0].text == "token1 token2 token3" assert bibitems[1].key == "key2" assert bibitems[1].text == "token4 token5" def test_extract_bibitem_tokens_from_curly_braces(): tex = "\n".join(["\\bibitem[label]{key1}", "token1 {token2} {token3}",]) extractor = BibitemExtractor() bibitems = list(extractor.parse(tex)) assert len(bibitems) == 1 assert bibitems[0].key == "key1" assert bibitems[0].text == "token1 token2 token3" def test_extract_bibitems_from_environment(): tex = "\n".join( [ "\\begin{thebibliography}", "\\bibitem[label]{key1}", "token1", "\\end{thebibliography}", ] ) extractor = BibitemExtractor() bibitems = list(extractor.parse(tex)) assert len(bibitems) == 1 assert bibitems[0].key == "key1" assert bibitems[0].text == "token1" def test_extract_bibitem_stop_at_newline(): tex = "\n".join( ["\\bibitem[label]{key1}", "token1", "", "text after bibliography (to ignore)"] ) extractor = BibitemExtractor() bibitems = list(extractor.parse(tex)) assert len(bibitems) == 1 assert bibitems[0].key == "key1" assert bibitems[0].text == "token1" def test_extract_macro(): tex = "\\macro" extractor = MacroExtractor() macros = list(extractor.parse(tex, MacroDefinition("macro", ""))) assert len(macros) == 1 assert macros[0].start == 0 assert macros[0].end == 6 def test_extract_macro_with_delimited_parameter(): tex = "\\macro arg." extractor = MacroExtractor() macros = list(extractor.parse(tex, MacroDefinition("macro", "#1."))) assert len(macros) == 1 assert macros[0].start == 0 assert macros[0].end == 11 assert macros[0].tex == "\\macro arg." 
def test_extract_macro_with_undelimited_parameter(): # the scanner for undelimited parameter '#1' should match the first non-blank token 'a'. tex = "\\macro a" extractor = MacroExtractor() macros = list(extractor.parse(tex, MacroDefinition("macro", "#1"))) assert len(macros) == 1 assert macros[0].start == 0 assert macros[0].end == 9 assert macros[0].tex == "\\macro a" def test_extract_macro_balance_nested_braces_for_argument(): tex = "\\macro{{nested}}" extractor = MacroExtractor() macros = list(extractor.parse(tex, MacroDefinition("macro", "#1"))) assert len(macros) == 1 assert macros[0].start == 0 assert macros[0].end == 16 assert macros[0].tex == "\\macro{{nested}}" def test_sentence_splitting_end_points(): extractor = SentenceExtractor() sentences = list( extractor.parse( "main.tex", "This is a sentence. Next we describe two items. 1) The first item. 2) The second item.", ) ) assert len(sentences) == 4 sentence_end_points = [[0, 19], [20, 47], [48, 66], [67, 86]] for i, [start, end] in enumerate(sentence_end_points): assert sentences[i].start == start assert sentences[i].end == end def test_sentence_splitting_end_points_and_more_text(): extractor = SentenceExtractor() sentences = list( extractor.parse( "main.tex", "This sentence. has extra. text. 1. first 2. second 3. third. And some extra. stuff.", ) ) assert len(sentences) == 8 sentence_end_points = [ [0, 14], [15, 25], [26, 31], [32, 40], [41, 50], [51, 60], [61, 76], [77, 83], ] for i, [start, end] in enumerate(sentence_end_points): assert sentences[i].start == start assert sentences[i].end == end
9,935
3,187
import sys from pathlib import Path from appexemple import __version__ print( f""" Hello you are in App Exemple version {__version__}\n sys.argv[-1] : {sys.argv[-1]}\n Path().cwd() : {Path().cwd()}\n Path(__file__) : {Path(__file__)},\n """ ) input("Press [Enter] to quit.")
282
109
import pytest pytestmark = pytest.mark.django_db class TestUserManagers: def test_create_user(self, django_user_model, faker): email = faker.email() password = faker.password() user = django_user_model.objects.create_user(email, password) assert user.email == email assert user.check_password(password) def test_create_user_empty_password(self, django_user_model, faker): email = faker.email() user = django_user_model.objects.create_user(email) assert user.email == email assert not user.has_usable_password() def test_create_user_raises_error_on_empty_email(self, django_user_model): with pytest.raises(ValueError): django_user_model.objects.create_user(email='') def test_create_superuser(self, django_user_model, faker): email = faker.email() password = faker.password() user = django_user_model.objects.create_superuser(email, password) assert user.email == email assert user.check_password(password) assert user.is_superuser assert user.is_staff def test_create_superuser_raises_error_on_false_is_superuser( self, django_user_model, faker ): with pytest.raises(ValueError): django_user_model.objects.create_superuser( email=faker.email(), password=faker.password(), is_superuser=False, ) def test_create_superuser_raises_error_on_false_is_staff( self, django_user_model, faker ): with pytest.raises(ValueError): django_user_model.objects.create_superuser( email=faker.email(), password=faker.password(), is_staff=False, )
1,793
528
from testbase import PersistTest import sqlalchemy.topological as topological import unittest, sys, os from sqlalchemy import util # TODO: need assertion conditions in this suite class DependencySorter(topological.QueueDependencySorter):pass class DependencySortTest(PersistTest): def assert_sort(self, tuples, node, collection=None): print str(node) def assert_tuple(tuple, node): if node.cycles: cycles = [i.item for i in node.cycles] else: cycles = [] if tuple[0] is node.item or tuple[0] in cycles: tuple.pop() if tuple[0] is node.item or tuple[0] in cycles: return elif len(tuple) > 1 and tuple[1] is node.item: assert False, "Tuple not in dependency tree: " + str(tuple) for c in node.children: assert_tuple(tuple, c) for tuple in tuples: assert_tuple(list(tuple), node) if collection is None: collection = [] items = util.Set() def assert_unique(node): for item in [n.item for n in node.cycles or [node,]]: assert item not in items items.add(item) if item in collection: collection.remove(item) for c in node.children: assert_unique(c) assert_unique(node) assert len(collection) == 0 def testsort(self): rootnode = 'root' node2 = 'node2' node3 = 'node3' node4 = 'node4' subnode1 = 'subnode1' subnode2 = 'subnode2' subnode3 = 'subnode3' subnode4 = 'subnode4' subsubnode1 = 'subsubnode1' tuples = [ (subnode3, subsubnode1), (node2, subnode1), (node2, subnode2), (rootnode, node2), (rootnode, node3), (rootnode, node4), (node4, subnode3), (node4, subnode4) ] head = DependencySorter(tuples, []).sort() self.assert_sort(tuples, head) def testsort2(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' node7 = 'node7' tuples = [ (node1, node2), (node3, node4), (node4, node5), (node5, node6), (node6, node2) ] head = DependencySorter(tuples, [node7]).sort() self.assert_sort(tuples, head, [node7]) def testsort3(self): ['Mapper|Keyword|keywords,Mapper|IKAssociation|itemkeywords', 'Mapper|Item|items,Mapper|IKAssociation|itemkeywords'] node1 = 'keywords' node2 = 'itemkeyowrds' node3 = 'items' tuples = [ (node1, node2), (node3, node2), (node1,node3) ] head1 = DependencySorter(tuples, [node1, node2, node3]).sort() head2 = DependencySorter(tuples, [node3, node1, node2]).sort() head3 = DependencySorter(tuples, [node3, node2, node1]).sort() # TODO: figure out a "node == node2" function #self.assert_(str(head1) == str(head2) == str(head3)) print "\n" + str(head1) print "\n" + str(head2) print "\n" + str(head3) def testsort4(self): node1 = 'keywords' node2 = 'itemkeyowrds' node3 = 'items' node4 = 'hoho' tuples = [ (node1, node2), (node4, node1), (node1, node3), (node3, node2) ] head = DependencySorter(tuples, []).sort() self.assert_sort(tuples, head) def testsort5(self): # this one, depenending on the weather, node1 = 'node1' #'00B94190' node2 = 'node2' #'00B94990' node3 = 'node3' #'00B9A9B0' node4 = 'node4' #'00B4F210' tuples = [ (node4, node1), (node1, node2), (node4, node3), (node2, node3), (node4, node2), (node3, node3) ] allitems = [ node1, node2, node3, node4 ] head = DependencySorter(tuples, allitems).sort() self.assert_sort(tuples, head) def testcircular(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' tuples = [ (node4, node5), (node5, node4), (node1, node2), (node2, node3), (node3, node1), (node4, node1) ] head = DependencySorter(tuples, []).sort(allow_all_cycles=True) self.assert_sort(tuples, head) def testcircular2(self): # this condition was arising from ticket:362 # and was not treated properly by topological sort node1 = 
'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' tuples = [ (node1, node2), (node3, node1), (node2, node4), (node3, node2), (node2, node3) ] head = DependencySorter(tuples, []).sort(allow_all_cycles=True) self.assert_sort(tuples, head) def testcircular3(self): nodes = {} tuples = [('Question', 'Issue'), ('ProviderService', 'Issue'), ('Provider', 'Question'), ('Question', 'Provider'), ('ProviderService', 'Question'), ('Provider', 'ProviderService'), ('Question', 'Answer'), ('Issue', 'Question')] head = DependencySorter(tuples, []).sort(allow_all_cycles=True) self.assert_sort(tuples, head) def testbigsort(self): tuples = [] for i in range(0,1500, 2): tuples.append((i, i+1)) head = DependencySorter(tuples, []).sort() if __name__ == "__main__": unittest.main()
6,021
1,918
""" backend magic inherited from tensornetwork """ from typing import Union, Text, Any, Optional, Callable, Sequence from functools import partial from scipy.linalg import expm import numpy as np import warnings from tensornetwork.backends.tensorflow import tensorflow_backend from tensornetwork.backends.numpy import numpy_backend from tensornetwork.backends.jax import jax_backend from tensornetwork.backends.shell import shell_backend from tensornetwork.backends.pytorch import pytorch_backend from tensornetwork.backends import base_backend Tensor = Any libjax: Any jnp: Any jsp: Any torchlib: Any tf: Any class NumpyBackend(numpy_backend.NumPyBackend): # type: ignore def expm(self, a: Tensor) -> Tensor: return expm(a) def abs(self, a: Tensor) -> Tensor: return np.abs(a) def sin(self, a: Tensor) -> Tensor: return np.sin(a) def cos(self, a: Tensor) -> Tensor: return np.cos(a) def i(self, dtype: Any = None) -> Tensor: if not dtype: dtype = npdtype # type: ignore if isinstance(dtype, str): dtype = getattr(np, dtype) return np.array(1j, dtype=dtype) def is_tensor(self, a: Any) -> bool: if isinstance(a, np.ndarray): return True return False def real(self, a: Tensor) -> Tensor: return np.real(a) def cast(self, a: Tensor, dtype: str) -> Tensor: return a.astype(getattr(np, dtype)) def grad(self, f: Callable[..., Any]) -> Callable[..., Any]: raise NotImplementedError("numpy backend doesn't support AD") def jit(self, f: Callable[..., Any]) -> Callable[..., Any]: warnings.warn("numpy backend has no parallel as jit, just do nothing") return f # raise NotImplementedError("numpy backend doesn't support jit compiling") def vmap(self, f: Callable[..., Any]) -> Any: warnings.warn( "numpy backend has no intrinsic vmap like interface" ", use vectorize instead (plain for loop)" ) return np.vectorize(f) class JaxBackend(jax_backend.JaxBackend): # type: ignore # Jax doesn't support 64bit dtype, unless claim # from jax.config import config # config.update("jax_enable_x64", True) # at very beginning, i.e. before import tensorcircuit def __init__(self) -> None: global libjax # Jax module global jnp # jax.numpy module global jsp # jax.scipy module super(JaxBackend, self).__init__() try: import jax except ImportError: raise ImportError( "Jax not installed, please switch to a different " "backend or install Jax." ) libjax = jax jnp = libjax.numpy jsp = libjax.scipy self.name = "jax" # it is already child of numpy backend, and self.np = self.jax.np def convert_to_tensor(self, tensor: Tensor) -> Tensor: result = jnp.asarray(tensor) return result def abs(self, a: Tensor) -> Tensor: return jnp.abs(a) def sin(self, a: Tensor) -> Tensor: return jnp.sin(a) def cos(self, a: Tensor) -> Tensor: return jnp.cos(a) def i(self, dtype: Any = None) -> Tensor: if not dtype: dtype = npdtype # type: ignore if isinstance(dtype, str): dtype = getattr(jnp, dtype) return np.array(1j, dtype=dtype) def real(self, a: Tensor) -> Tensor: return jnp.real(a) def cast(self, a: Tensor, dtype: str) -> Tensor: return a.astype(getattr(jnp, dtype)) def expm(self, a: Tensor) -> Tensor: return jsp.linalg.expm(a) # currently expm in jax doesn't support AD, it will raise an AssertError, see https://github.com/google/jax/issues/2645 def is_tensor(self, a: Any) -> bool: if not isinstance(a, jnp.ndarray): return False # isinstance(np.eye(1), jax.numpy.ndarray) = True! 
if getattr(a, "_value", None) is not None: return True return False def grad( self, f: Callable[..., Any], argnums: Union[int, Sequence[int]] = 0 ) -> Any: # TODO return libjax.grad(f, argnums=argnums) def jit(self, f: Callable[..., Any]) -> Any: return libjax.jit(f) def vmap(self, f: Callable[..., Any]) -> Any: return libjax.vmap(f) # since tf doesn't support in&out axes options, we don't support them in universal backend class TensorFlowBackend(tensorflow_backend.TensorFlowBackend): # type: ignore def __init__(self) -> None: global tf super(TensorFlowBackend, self).__init__() try: import tensorflow except ImportError: raise ImportError( "Tensorflow not installed, please switch to a " "different backend or install Tensorflow." ) tf = tensorflow self.name = "tensorflow" def expm(self, a: Tensor) -> Tensor: return tf.linalg.expm(a) def sin(self, a: Tensor) -> Tensor: return tf.math.sin(a) def cos(self, a: Tensor) -> Tensor: return tf.math.cos(a) def i(self, dtype: Any = None) -> Tensor: if not dtype: dtype = getattr(tf, dtypestr) # type: ignore if isinstance(dtype, str): dtype = getattr(tf, dtype) return tf.constant(1j, dtype=dtype) def is_tensor(self, a: Any) -> bool: if isinstance(a, tf.Tensor) or isinstance(a, tf.Variable): return True return False def abs(self, a: Tensor) -> Tensor: return tf.math.abs(a) def real(self, a: Tensor) -> Tensor: return tf.math.real(a) def cast(self, a: Tensor, dtype: str) -> Tensor: return tf.cast(a, dtype=getattr(tf, dtype)) def grad( self, f: Callable[..., Any], argnums: Union[int, Sequence[int]] = 0 ) -> Callable[..., Any]: # experimental attempt # Note: tensorflow grad is gradient while jax grad is derivative, they are different with a conjugate! def wrapper(*args: Any, **kws: Any) -> Any: with tf.GradientTape() as t: t.watch(args) y = f(*args, **kws) if isinstance(argnums, int): x = args[argnums] else: x = [args[i] for i in argnums] g = t.gradient(y, x) return g return wrapper def jit(self, f: Callable[..., Any]) -> Any: return tf.function(f) def vmap(self, f: Callable[..., Any]) -> Any: def wrapper(f: Callable[..., Any], args: Sequence[Any]) -> Any: return f(*args) wrapper = partial(wrapper, f) def own_vectorized_map(f: Callable[..., Any], *args: Any) -> Any: return tf.vectorized_map(f, args) return partial(own_vectorized_map, wrapper) class PyTorchBackend(pytorch_backend.PyTorchBackend): # type: ignore def __init__(self) -> None: super(PyTorchBackend, self).__init__() global torchlib try: import torch except ImportError: raise ImportError( "PyTorch not installed, please switch to a different " "backend or install PyTorch." ) torchlib = torch self.name = "pytorch" def expm(self, a: Tensor) -> Tensor: raise NotImplementedError("pytorch backend doesn't support expm") # in 2020, torch has no expm, hmmm. but that's ok, it doesn't support complex numbers which is more severe issue. # see https://github.com/pytorch/pytorch/issues/9983 def sin(self, a: Tensor) -> Tensor: return torchlib.sin(a) def cos(self, a: Tensor) -> Tensor: return torchlib.cos(a) def i(self, dtype: Any = None) -> Tensor: raise NotImplementedError( "pytorch backend doesn't support imaginary numbers at all!" ) def real(self, a: Tensor) -> Tensor: return a # hmm, in torch, everyone is real. 
def is_tensor(self, a: Any) -> bool: if isinstance(a, torchlib.Tensor): return True return False def cast(self, a: Tensor, dtype: str) -> Tensor: return a.type(getattr(torchlib, dtype)) def grad( self, f: Callable[..., Any], argnums: Union[int, Sequence[int]] = 0 ) -> Callable[..., Any]: def wrapper(*args: Any, **kws: Any) -> Any: x = [] if isinstance(argnums, int): argnumsl = [argnums] # if you also call lhs as argnums, something weird may happen # the reason is that python then take it as local vars else: argnumsl = argnums # type: ignore for i, arg in enumerate(args): if i in argnumsl: x.append(arg.requires_grad_(True)) else: x.append(arg) y = f(*x, **kws) y.backward() gs = [x[i].grad for i in argnumsl] if len(gs) == 1: gs = gs[0] return gs return wrapper def vmap(self, f: Callable[..., Any]) -> Any: warnings.warn( "pytorch backend has no intrinsic vmap like interface" ", use plain for loop for compatibility" ) # the vmap support is vey limited, f must return one tensor # nested list of tensor as return is not supported def vmapf(*args: Tensor, **kws: Any) -> Tensor: r = [] for i in range(args[0].shape[0]): nargs = [arg[i] for arg in args] r.append(f(*nargs, **kws)) return torchlib.stack(r) return vmapf # raise NotImplementedError("pytorch backend doesn't support vmap") # There seems to be no map like architecture in pytorch for now # see https://discuss.pytorch.org/t/fast-way-to-use-map-in-pytorch/70814 def jit(self, f: Callable[..., Any]) -> Any: return f # do nothing here until I figure out what torch.jit is for and how does it work # see https://github.com/pytorch/pytorch/issues/36910 _BACKENDS = { "tensorflow": TensorFlowBackend, "numpy": NumpyBackend, "jax": JaxBackend, "shell": shell_backend.ShellBackend, # no intention to maintain this one "pytorch": PyTorchBackend, # no intention to fully maintain this one } def get_backend( backend: Union[Text, base_backend.BaseBackend] ) -> base_backend.BaseBackend: if isinstance(backend, base_backend.BaseBackend): return backend if backend not in _BACKENDS: raise ValueError("Backend '{}' does not exist".format(backend)) return _BACKENDS[backend]()
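# A minimal usage sketch (not part of the original module): the registry above hands back a
# uniform backend object, so the same code can run on plain numpy without jax, tensorflow or
# torch installed.  The array values are arbitrary.
if __name__ == "__main__":
    K = get_backend("numpy")
    x = np.array([0.0, np.pi / 2])
    print(K.sin(x), K.cos(x))                   # ~[0, 1] and ~[1, 0]
    print(K.is_tensor(x), K.is_tensor([0.0]))   # True False
    print(K.cast(x, "complex64").dtype)         # complex64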
10,854
3,423
from webdriver_manager.driver import EdgeDriver, IEDriver from webdriver_manager.manager import DriverManager from webdriver_manager import utils class EdgeDriverManager(DriverManager): def __init__(self, version=None, os_type=utils.os_name()): super(EdgeDriverManager, self).__init__() self.driver = EdgeDriver(version=version, os_type=os_type) def install(self, path=None): # type: () -> str return self._file_manager.download_binary(self.driver, path).path class IEDriverManager(DriverManager): def __init__(self, version=None, os_type=utils.os_type()): super(IEDriverManager, self).__init__() self.driver = IEDriver(version=version, os_type=os_type) def install(self, path=None): # type: () -> str return self._file_manager.download_driver(self.driver, path).path
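# A hedged usage sketch (not part of the original module): both managers download the
# driver binary on first use and return its local path, which can then be handed to
# Selenium.  Requires network access and the corresponding browser to be useful.
if __name__ == "__main__":
    edge_driver_path = EdgeDriverManager().install()
    print(edge_driver_path)

    ie_driver_path = IEDriverManager().install()
    print(ie_driver_path)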
904
262
""" Replay and experiment ===================== In a previous example, we have shown how experiments can be resumed. Cardinal also allows for experiments to be replayed, meaning that one can save intermediate data to be able to run analysis on the experiment without having to retrain all the models. Let us now see how the ReplayCache allows it. """ import shutil import os import numpy as np import dataset from sklearn.datasets import load_iris from sklearn.svm import SVC from sklearn.model_selection import train_test_split from cardinal.random import RandomSampler from cardinal.uncertainty import MarginSampler from cardinal.cache import ReplayCache, ShelveStore, SqliteStore from cardinal.utils import SampleSelector ############################################################################## # Since we will be looking at the cache, we need a utility function to display # a tree folder. def print_folder_tree(startpath): for root, dirs, files in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = ' ' * 4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' ' * 4 * (level + 1) for f in files: print('{}{}'.format(subindent, f)) ############################################################################# # We load the data and define the parameters of this experiment: # # * ``batch_size`` is the number of samples that will be annotated and added to # the training set at each iteration, # * ``n_iter`` is the number of iterations in our simulation iris = load_iris() X = iris.data y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1) batch_size = 5 n_iter = 10 model = SVC(probability=True) sampler = MarginSampler(model, batch_size) experiment_config = dict(sampler='margin') CACHE_PATH = './cache' DATABASE_PATH = './cache.db' value_store = ShelveStore(DATABASE_PATH) ############################################################################# # We define our experiment in a dedicated function since we want to run it # several times. We also create a dedicated exception that we will rise to # simulate an interruption in the experiment. # # Note the use of the SampleSelector utils that facilitate the handing of # indices in an active learning experiment. # # In the end, all values for all iterations are kept. The cache structure # is human readable and can be shared for better reproducibility. with ReplayCache(CACHE_PATH, value_store, keys=experiment_config) as cache: # Create a selector with one sample from each class and persist it init_selector = SampleSelector(X_train.shape[0]) init_selector.add_to_selected([np.where(y_train == i)[0][0] for i in np.unique(y)]) selector = cache.persisted_value('selector', init_selector) predictions = cache.persisted_value('prediction', None) for j, prev_selector, prev_predictions in cache.iter(range(n_iter), selector.previous(), predictions.previous()): print('Computing iteration {}'.format(j)) model.fit(X_train[prev_selector.selected], y_train[prev_selector.selected]) sampler.fit(X_train[prev_selector.selected], y_train[prev_selector.selected]) prev_selector.add_to_selected(sampler.select_samples(X_train[prev_selector.non_selected])) selector.set(prev_selector) predictions.set(model.predict(X_test)) # All the values for the experiment are kept print_folder_tree('./cache') # This code could have been added to the script afterward to computer any metric. 
def compute_contradictions(previous_prediction, current_prediction): if previous_prediction is None: return 0 return (previous_prediction != current_prediction).sum() cache.compute_metric('contradictions', compute_contradictions, predictions.previous(), predictions.current()) from matplotlib import pyplot as plt contradictions = value_store.get('contradictions') plt.plot(contradictions['iteration'], contradictions['value']) plt.xlabel('Iteration') plt.ylabel('Contradictions') plt.title('Evolution of Contradictions during active learning experiment on Iris dataset') plt.show() value_store.close() ############################################################################# # We clean all the cache folder. shutil.rmtree(CACHE_PATH) os.remove(DATABASE_PATH)
4,459
1,245
"""Parent class DataN.""" import os import os.path from warnings import warn from typing import Union, NoReturn from pycifstar import Data, to_data from cryspy.A_functions_base.function_1_markdown import md_to_html from cryspy.A_functions_base.function_1_objects import \ get_functions_of_objet, get_table_html_for_variables from cryspy.B_parent_classes.cl_1_item import ItemN from cryspy.B_parent_classes.cl_2_loop import LoopN class DataN(object): """Data container of loops and items.""" def __repr__(self): """ Magic method print() is redefined. Returns ------- TYPE DESCRIPTION. """ ls_out = [f"# Object '{self.get_name():}'"] for item in self.items: if isinstance(item, ItemN): ls_out.append(f"{4*' ':}.{item.get_name():}") else: ls_out.append(f"{4*' ':}.{item.get_name():} (loop)") method = self.methods_html() if method != "": ls_out.append(f"\n# Methods:\n{method:}\n") return "\n".join(ls_out) def _repr_html_(self): """Representation in HTML format.""" ls_html = [f"<h2>Object '{self.get_name():}'</h2>"] ls_html.append(self.attributes_to_html()) ls_html.append(get_table_html_for_variables(self)) report = self.report_html() if report != "": ls_html.append(f"<h2>Description </h2> {report:}") ls_html.append(f"<h2>Classes and methods</h2>") try: names = sorted([obj.__name__ for obj in self.CLASSES_MANDATORY]) if len(names) != 0: ls_html.append("<b>Mandatory classes: </b>") ls_html.append(f"{', '.join(names):}.<br>") except AttributeError: pass try: names = sorted([obj.__name__ for obj in self.CLASSES_OPTIONAL]) if len(names) != 0: ls_html.append("<b>Optional classes: </b>") ls_html.append(f"{', '.join(names):}.<br>") except AttributeError: pass method = self.methods_html() if method != "": ls_html.append(f"<b>Methods: </b> {method:}") return " ".join(ls_html) def methods_html(self): ls_html = [f".{func_name}" for func_name in get_functions_of_objet(self)] return ", ".join(ls_html)+"." def attributes_to_html(self) -> str: """Representation of defined parameters in HTML format. """ ls_html = ["<table>"] ls_html.append("<tr><th>Attribute</th><th> Note </th></tr>") items_sorted = sorted(self.items, key=lambda item: item.get_name()) for item in items_sorted: item_type = item.__doc__.strip().split("\n")[0] ls_html.append(f"<tr><td>.{item.get_name():}</td>\ <td>{item_type:}</td></tr>") ls_html.append("</table>") return " ".join(ls_html) def __str__(self): """ Magic method str() is redefined. Returns ------- TYPE DESCRIPTION. """ return self.to_cif() def __getattr__(self, name): """ Magic method __getattr__ is slightly changed for special attributes. Parameters ---------- name : TYPE DESCRIPTION. Raises ------ AttributeError DESCRIPTION. Returns ------- res : TYPE DESCRIPTION. """ for item in self.items: if name.lower() == item.get_name(): return item raise AttributeError(f"Attribute '{name:}' is not defined") def is_attribute(self, name): """Temporary construction. Better to use: try: obj = self.attribute_name except AttributeError as e: obj = ... """ for item in self.items: if name.lower() == item.get_name(): return True return False def __setattr__(self, name, value) -> NoReturn: """ Rules to set attribute. Parameters ---------- name : TYPE DESCRIPTION. value : TYPE DESCRIPTION. Returns ------- NoReturn DESCRIPTION. 
""" flag_items, flag_direct = False, True if name == "data_name": flag_direct = False val_new = str(value).strip() elif name == "items": flag_items = True self.add_items(value) else: cls_value = type(value) if cls_value in self.CLASSES: l_name = [item.get_name() for item in self.items] name_new = value.get_name() if name_new in l_name: self.items.pop(l_name.index(name)) self.items.append(value) flag_items, flag_direct = True, False if name_new != name: warn(f"Access to variable by '{name_new:}'.", UserWarning) if flag_items: pass elif flag_direct: self.__dict__[name] = value else: self.__dict__[name] = val_new def add_items(self, items: list): """Add items.""" l_name = [item.get_name() for item in items] s_name = set(l_name) if len(s_name) != len(l_name): warn("Double items were given.", UserWarning) items_unique = [items[l_name.index(name)] for name in s_name] else: items_unique = items l_ind_del = [] for ind_item, item in enumerate(self.items): if item.get_name() in s_name: l_ind_del.append(ind_item) l_ind_del.reverse() for ind in l_ind_del: self.items.pop(ind) for item in items_unique: if isinstance(item, self.CLASSES): self.items.append(item) @classmethod def make_container(cls, cls_mandatory, cls_optional, prefix): """Create DataN object as a container for items.""" if cls is not DataN: warn("The method 'make_container' is used only for DataN class.") return obj = cls() obj.__dict__["CLASSES_MANDATORY"] = cls_mandatory obj.__dict__["CLASSES_OPTIONAL"] = cls_optional obj.__dict__["CLASSES"] = cls_mandatory+cls_optional obj.__dict__["PREFIX"] = prefix obj.__dict__["D_DEFAULT"] = {} obj.__dict__["items"] = [] obj.__dict__["data_name"] = "" return obj @classmethod def get_mandatory_attributes(cls, separator: str = "_"): """Get a list of mandatory attributes from mandatory classes.""" l_res = [] for cls_obj in cls.CLASSES_MANDATORY: if issubclass(cls_obj, ItemN): cls_item = cls_obj else: #LoopN cls_item = cls_obj.ITEM_CLASS l_res.extend([f"{cls_item.PREFIX:}{separator:}{name_cif:}" for name_cif in cls_item.ATTR_MANDATORY_CIF]) return l_res def __getitem__(self, name: Union[int, str]): """ Get item by index or predefined index. Parameters ---------- name : TYPE DESCRIPTION. Returns ------- TYPE DESCRIPTION. """ if isinstance(name, int): return self.items[name] elif isinstance(name, str): for item in self.items: if name.lower() == item.get_name(): return item return None def get_name(self) -> str: """Name of object.""" name = self.PREFIX data_name = self.data_name if data_name is not None: name = f"{name:}_{data_name:}" return name.lower() def get_variable_names(self) -> list: """ Get names of variable as a list. (((#prefix, #NAME), (#prefix, #NAME), (#attribute, #index)) Returns ------- list List of names of variable. """ prefix = self.PREFIX data_name = self.data_name l_var = [] for item in self.items: l_var.extend(item.get_variable_names()) l_var_out = [((prefix, data_name), ) + var for var in l_var] return l_var_out def is_variables(self) -> bool: """Define is there variables or not.""" flag = False for item in self.items: if item.is_variables(): flag = True break return flag def get_variable_by_name(self, name: tuple) -> Union[float, int, str]: """ Get variable given by name. Parameters ---------- name : tuple (((#prefix, #data_name), (#prefix, #loop_name), (#attribute, #index_item)) Returns ------- Union[float, int, str] DESCRIPTION. 
""" prefix = self.PREFIX data_name = self.data_name prefix_d, prefix_n = name[0], name[1] if prefix_d != (prefix, data_name): return None name_sh = tuple(name[1:]) for item in self.items: if isinstance(item, ItemN): prefix = item.PREFIX elif isinstance(item, LoopN): item_cls = item.ITEM_CLASS if item_cls is ItemN: prefix = item[0].PREFIX else: prefix = item_cls.PREFIX else: raise AttributeError( f"Unknown type object '{type(item).__name__:}'") if prefix == prefix_n[0]: res = item.get_variable_by_name(name_sh) if res is not None: return res return None def set_variable_by_name(self, name: tuple, value) -> NoReturn: """ Set value to variable given by name. Parameters ---------- name : tuple DESCRIPTION. value : TYPE DESCRIPTION. Returns ------- NoReturn DESCRIPTION. """ prefix = self.PREFIX data_name = self.data_name prefix_d, prefix_n = name[0], name[1] if prefix_d != (prefix, data_name): return name_sh = tuple(name[1:]) for item in self.items: if isinstance(item, ItemN): prefix = item.PREFIX elif isinstance(item, LoopN): item_cls = item.ITEM_CLASS if item_cls is ItemN: prefix = item[0].PREFIX else: prefix = item_cls.PREFIX else: raise AttributeError( f"Unknown type object '{type(item).__name__:}'") if prefix == prefix_n[0]: item.set_variable_by_name(name_sh, value) def is_defined(self) -> bool: """ If all mandatory attributes is defined. Returns ------- bool DESCRIPTION. """ flag = True for item in self.items: if not(item.is_defined()): flag = False if isinstance(item, ItemN): warn(f"{item.PREFIX:} is not fully described.", UserWarning) break elif isinstance(item, LoopN): warn(f"{item.ITEM_CLASS.PREFIX:} is not fully described.", UserWarning) break if flag: cls_items = [type(item) for item in self.items] for cls_mand in self.CLASSES_MANDATORY: if not(cls_mand in cls_items): flag = False warn(f"The object of {cls_mand.__name__:} is not defined.", UserWarning) break return flag def form_object(self): """Form object.""" pass def to_cif(self, separator="_") -> str: """Print information about object in string in STAR format. Arguments --------- prefix: prefix in front of label of attribute separator: separator between prefix and attribute ("_" or ".") flag: for undefined attribute "." 
will be printed flag_minimal if it's True the minimal set of object will be printed Returns ------- A string in STAR/CIF format """ ls_out = [] if self.data_name is None: ls_out.append("data_\n") else: ls_out.append(f"data_{self.data_name:}\n") l_item = self.items l_s_itemn = [item.to_cif(separator=separator)+"\n" for item in l_item if isinstance(item, ItemN)] l_s_loopn = [item.to_cif(separator=separator)+"\n" for item in l_item if isinstance(item, LoopN)] if l_s_loopn != []: n_max_loop = max([len(_) for _ in l_s_loopn]) if n_max_loop < 1000: n_max_loop = 1000 else: n_max_loop = 10000 l_n_max_item = [len(_) for _ in l_s_itemn] ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item) if _2 <= n_max_loop]) ls_out.extend([_ for _ in l_s_loopn]) ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item) if _2 > n_max_loop]) return "\n".join(ls_out) @classmethod def from_cif(cls, string: str): """Generate object from string of CIF format.""" cif_data = Data() flag = cif_data.take_from_string(string) cif_items = cif_data.items cif_loops = cif_data.loops items = [] flag = True n_mandatory = len(cls.CLASSES_MANDATORY) for i_cls, cls_ in enumerate(cls.CLASSES): flag = i_cls >= n_mandatory if issubclass(cls_, ItemN): prefix_cls = cls_.PREFIX if cif_items.is_prefix(prefix_cls): cif_items_prefix = cif_items[prefix_cls] cif_string = str(cif_items_prefix) obj_prefix = cls_.from_cif(cif_string) if obj_prefix is not None: items.append(obj_prefix) flag = True elif issubclass(cls_, LoopN): prefix_cls = cls_.ITEM_CLASS.PREFIX for cif_loop in cif_loops: if cif_loop.is_prefix("_"+prefix_cls): cif_string = str(cif_loop) obj_prefix = cls_.from_cif(cif_string) if obj_prefix is not None: items.append(obj_prefix) flag = True if (not(flag)): warn(f"Mandatory class: '{cls_.__name__:}' is not given.", UserWarning) break if not(flag): return None data_name = cif_data.name obj = cls(data_name=data_name, items=items) obj.form_object() return obj @classmethod def from_cif_file(cls, f_name: str): """Read from cif file.""" if not(os.path.isfile(f_name)): raise UserWarning(f"File {f_name:} is not found.") return None str_from_cif = str(to_data(f_name)) obj = cls.from_cif(str_from_cif) obj.file_input = f_name return obj def copy(self, data_name: str = ""): """Deep copy of object with new data name.""" s_cif = self.to_cif() obj_new = type(self).from_cif(s_cif) obj_new.data_name = data_name return obj_new def report(self): return "" def report_html(self): return md_to_html(self.report()) def plots(self): l_res = [] for item in self.items: for plot in item.plots(): if plot is not None: l_res.append(plot) return l_res def fix_variables(self): """Fix variables.""" for item in self.items: item.fix_variables() def set_variable(self, name: str, index=None): """Set refinement for variable given by name. Index parameters is used only for objects given as a matrix. """ name_sh = name.strip(".").lower() l_name = name_sh.split(".") name_1 = l_name[0] for item in self.items: if name_1 == item.get_name(): if len(l_name) == 1: attr_refs = [] if isinstance(item, ItemN): attr_refs = item.ATTR_REF elif isinstance(item, LoopN): item_class = item.ITEM_CLASS if item_class is ItemN: if len(self.items) != 0: attr_refs = item.items[0].ATTR_REF else: attr_refs = item_class.ATTR_REF for attr_ref in attr_refs: item.set_variable(attr_ref, index=index) else: item.set_variable(".".join(l_name[1:]), index=index)
17,585
5,237
import os
import unittest


def require(f):
    """Skip the decorated test unless the VIVBINS environment variable is set."""

    def skipit(*args, **kwargs):
        raise unittest.SkipTest('VIVBINS env var...')

    if os.getenv('VIVBINS') is None:
        return skipit
    return f
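# A minimal usage sketch (the TestCase below is hypothetical, not from the original
# module): the decorator either returns the test unchanged or replaces it with a stub
# that raises SkipTest, so it can wrap individual unittest methods.
class HypotheticalVivTest(unittest.TestCase):

    @require
    def test_needs_binaries(self):
        # Runs only when the VIVBINS environment variable points at the test binaries.
        self.assertTrue(True)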
204
73
""" # Sample code to perform I/O: name = input() # Reading input from STDIN print('Hi, %s.' % name) # Writing output to STDOUT # Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail """ # Write your code here n = int(input()) start = list(map(int, input().strip().split())) finish = list(map(int, input().strip().split())) vertexes = [0] * n for i, v in enumerate(start): vertexes[v] = i parents = [-1] * n parent = vertexes[0] for i in range(1, n): cur = vertexes[i] if finish[cur] - i > 1: parents[cur] = parent parent = cur else: parents[cur] = parent while finish[cur] == finish[parents[cur]]: cur = parents[cur] parent = parents[cur] if parent == vertexes[0]: break for i in range(n): parents[i] += 1 print(*parents)
894
290
__author__ = 'przemyslaw.pioro'
32
16
import time


def swap(arr, i, j):
    arr[i], arr[j] = arr[j], arr[i]


def selection_sort(arr):
    for i in range(len(arr)):
        for j in range(i + 1, len(arr)):
            if arr[i] > arr[j]:
                swap(arr, i, j)
    return arr


def bubble_sort(arr):
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                swap(arr, i, i + 1)
                swapped = True
    return arr


def insertion_sort(arr):
    for i in range(1, len(arr)):
        if arr[i] < arr[i - 1]:
            temp = i - 1
            # walk left while the element to insert is smaller; check the index first
            # so arr[-1] is never read
            while temp >= 0 and arr[i] < arr[temp]:
                temp -= 1
            arr.insert(temp + 1, arr[i])
            del arr[i + 1]
    return arr


def merge_sort(arr):
    arr_len = len(arr)
    if arr_len <= 1:
        return arr
    a = merge_sort(arr[:arr_len // 2])
    b = merge_sort(arr[arr_len // 2:])
    c = []
    i_a = i_b = 0
    a_len = len(a)
    b_len = len(b)
    while i_a < a_len and i_b < b_len:
        if a[i_a] < b[i_b]:
            c.append(a[i_a])
            i_a += 1
        else:
            c.append(b[i_b])
            i_b += 1
    c.extend(a[i_a:])
    c.extend(b[i_b:])
    return c


def quick_sort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)


def direct_addressing_sort(arr):
    # counting sort for integers: count each value relative to the minimum,
    # then rebuild the array in ascending order
    maximum = max(arr)
    minimum = min(arr)
    counts = [0] * (maximum - minimum + 1)
    for value in arr:
        counts[value - minimum] += 1
    result = []
    for offset, count in enumerate(counts):
        result.extend([minimum + offset] * count)
    return result


print(time.time())
print(quick_sort([-2.2, -2.2, -2.2] * 5 + [1.1] * 5))
print(time.time())
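# A small self-check (not in the original file): all of the sorts above should agree with
# Python's built-in sorted() on integer input; direct_addressing_sort is integer-only.
if __name__ == '__main__':
    import random
    sample = [random.randint(-50, 50) for _ in range(100)]
    expected = sorted(sample)
    for sort_fn in (selection_sort, bubble_sort, insertion_sort,
                    merge_sort, quick_sort, direct_addressing_sort):
        assert sort_fn(list(sample)) == expected, sort_fn.__name__
    print('all sorts agree with sorted()')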
1,674
847
#!/usr/bin/env python3 """ Source Code of Pdiskuploaderbot """
65
27
from setuptools import setup from setuptools.command.install import install as _install class Install(_install): def run(self): _install.do_egg_install(self) import nltk nltk.download("popular") setup( cmdclass={'install': Install}, install_requires=['nltk'], setup_requires=['nltk'])
329
109
'''
author : bcgg
Unfortunately this solution exceeded the time limit.
It is actually written quite well; many parts in the middle could still be improved.
'''
ans = 0


def merge(arr, l, m, r):
    global ans
    n1 = m - l + 1
    n2 = r - m
    L = [0] * n1
    R = [0] * n2
    for i in range(0, n1):
        L[i] = arr[l + i]
    for j in range(0, n2):
        R[j] = arr[m + 1 + j]
    i = 0
    j = 0
    k = l
    while i < n1 and j < n2:
        if L[i] <= R[j]:
            arr[k] = L[i]
            i += 1
        else:
            arr[k] = R[j]
            j += 1
            # every element still waiting in L forms an inversion with the element
            # just taken from R
            ans += n1 - i
        k += 1
    while i < n1:
        arr[k] = L[i]
        i += 1
        k += 1
    while j < n2:
        arr[k] = R[j]
        j += 1
        k += 1


def mergeSort(arr, l, r):
    if l < r:
        m = (l + r - 1) // 2
        mergeSort(arr, l, m)
        mergeSort(arr, m + 1, r)
        merge(arr, l, m, r)


if __name__ == '__main__':
    arr = list(map(int, input().split(',')))
    n = len(arr)
    mergeSort(arr, 0, n - 1)
    print(ans)
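# A tiny worked example (not part of the original submission): for the input 2,4,1,3,5
# the inversions are (2,1), (4,1) and (4,3), so the expected answer is 3.
def _demo_inversions():
    global ans
    ans = 0                                  # reset the module-level counter before reuse
    sample = [2, 4, 1, 3, 5]
    mergeSort(sample, 0, len(sample) - 1)
    assert sample == [1, 2, 3, 4, 5]
    assert ans == 3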
947
448
# cost function for the SIR model for python 2.7 # Marisa Eisenberg (marisae@umich.edu) # Yu-Han Kao (kaoyh@umich.edu) -7-9-17 import numpy as np import sir_ode from scipy.stats import poisson from scipy.stats import norm from scipy.integrate import odeint as ode def NLL(params, data, times): #negative log likelihood params = np.abs(params) data = np.array(data) res = ode(sir_ode.model, sir_ode.x0fcn(params,data), times, args=(params,)) y = sir_ode.yfcn(res, params) nll = sum(y) - sum(data*np.log(y)) # note this is a slightly shortened version--there's an additive constant term missing but it # makes calculation faster and won't alter the threshold. Alternatively, can do: # nll = -sum(np.log(poisson.pmf(np.round(data),np.round(y)))) # the round is b/c Poisson is for (integer) count data # this can also barf if data and y are too far apart because the dpois will be ~0, which makes the log angry # ML using normally distributed measurement error (least squares) # nll = -sum(np.log(norm.pdf(data,y,0.1*np.mean(data)))) # example WLS assuming sigma = 0.1*mean(data) # nll = sum((y - data)**2) # alternatively can do OLS but note this will mess with the thresholds # for the profile! This version of OLS is off by a scaling factor from # actual LL units. return nll
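# A hedged sketch of how this cost function is typically used (placeholder values; sir_ode
# is assumed to expose model/x0fcn/yfcn exactly as called above): minimize the NLL over the
# parameters with a derivative-free optimizer.
#
#   from scipy.optimize import minimize
#   times = np.arange(0, 25)
#   data = ...                                  # observed counts, same length as times
#   params0 = np.array([0.4, 0.25, 1.0e4])      # initial guess, problem specific
#   res = minimize(NLL, params0, args=(data, times), method='Nelder-Mead')
#   fitted = np.abs(res.x)                      # NLL folds params through abs(), mirror it here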
1,354
460
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """ Runs functional tests, which invoke the RESTler engine and check the RESTler output logs for correctness. When new baseline logs are necessary due to known breaking changes in the logic, a run that matches the test should be run manually and the appropriate logs should be replaced in the unit_tests/log_baseline_test_files directory. Each log is named <test-type_log-type.txt> """ import unittest import os import glob import sys import shutil import subprocess import utils.logger as logger from collections import namedtuple from test_servers.log_parser import * Test_File_Directory = os.path.join( os.path.dirname(__file__), 'log_baseline_test_files' ) Restler_Path = os.path.join(os.path.dirname(__file__), '..', 'restler.py') Common_Settings = [ "python", "-B", Restler_Path, "--use_test_socket", '--custom_mutations', f'{os.path.join(Test_File_Directory, "test_dict.json")}', "--garbage_collection_interval", "30", "--host", "unittest" ] class FunctionalityTests(unittest.TestCase): def get_experiments_dir(self): """ Returns the most recent experiments directory that contains the restler logs @return: The experiments dir @rtype : Str """ results_dir = os.path.join(os.getcwd(), 'RestlerResults') # Return the newest experiments directory in RestlerResults return max(glob.glob(os.path.join(results_dir, 'experiment*/')), key=os.path.getmtime) def get_network_log_path(self, dir, log_type): """ Returns the path to the network log of the specified type @param dir: The directory that contains the log @type dir: Str @param log_type: The type of network log to get @type log_type: Str @return: The path to the network log @rtype : Str """ return glob.glob(os.path.join(dir, 'logs', f'network.{log_type}.*.1.txt'))[0] def tearDown(self): try: shutil.rmtree(self.get_experiments_dir()) except Exception as err: print(f"tearDown function failed: {err!s}.\n" "Experiments directory was not deleted.") def test_smoke_test(self): """ This checks that the directed smoke test executes all of the expected requests in the correct order with correct arguments from the dictionary. """ args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "smoke_test_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Smoke test failed: Fuzzing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "smoke_test_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Smoke test failed: Garbage Collector") def test_create_once(self): """ This checks that a directed smoke test, using create once endpoints, executes all of the expected requests in the correct order with correct arguments from the dictionary. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--settings', f'{os.path.join(Test_File_Directory, "test_settings_createonce.json")}' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "create_once_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Create-once failed: Fuzzing") try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "create_once_pre_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_PREPROCESSING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Create-once failed: Preprocessing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "create_once_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Create-once failed: Garbage Collector") def test_checkers(self): """ This checks that a directed smoke test, with checkers enabled (sans namespacerule, payloadbody, examples), bugs planted for each checker, and a main driver bug, will produce the appropriate bug buckets and the requests will be sent in the correct order. """ args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar_bugs.py")}', '--enable_checkers', '*' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "checkers_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Checkers failed: Fuzzing") try: default_parser = BugLogParser(os.path.join(Test_File_Directory, "checkers_bug_buckets.txt")) test_parser = BugLogParser(os.path.join(experiments_dir, 'bug_buckets', 'bug_buckets.txt')) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Checkers failed: Bug Buckets") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "checkers_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Checkers failed: Garbage Collector") def test_multi_dict(self): """ This checks that the directed smoke test executes all of the expected requests in the correct order when a second dictionary is specified in the settings file to be used for one of the endpoints. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--settings', f'{os.path.join(Test_File_Directory, "test_settings_multidict.json")}' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "multidict_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Multi-dict failed: Fuzzing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "multidict_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Multi-dict failed: Garbage Collector") def test_fuzz(self): """ This checks that a bfs-cheap fuzzing run executes all of the expected requests in the correct order with correct arguments from the dictionary. The test runs for 3 minutes and checks 100 sequences """ Fuzz_Time = 0.1 # 6 minutes Num_Sequences = 300 args = Common_Settings + [ '--fuzzing_mode', 'bfs-cheap', '--restler_grammar',f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--time_budget', f'{Fuzz_Time}', '--enable_checkers', '*', '--disable_checkers', 'namespacerule' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "fuzz_testing_log.txt"), max_seq=Num_Sequences) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING), max_seq=Num_Sequences) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Fuzz failed: Fuzzing") def test_payload_body_checker(self): """ This checks that the payload body checker sends all of the correct requests in the correct order and an expected 500 bug is logged. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--enable_checkers', 'payloadbody' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "payloadbody_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) except TestFailedException: self.fail("Payload body failed: Fuzzing") try: default_parser = BugLogParser(os.path.join(Test_File_Directory, "payloadbody_bug_buckets.txt")) test_parser = BugLogParser(os.path.join(experiments_dir, 'bug_buckets', 'bug_buckets.txt')) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Payload body failed: Bug Buckets") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "payloadbody_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Payload body failed: Garbage Collector") def test_examples_checker(self): """ This checks that the examples checker sends the correct requests in the correct order when query or body examples are present """ args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--enable_checkers', 'examples' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "examples_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) except TestFailedException: self.fail("Payload body failed: Fuzzing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "examples_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Payload body failed: Garbage Collector")
14,512
4,268
from django.urls import reverse from rest_framework import status from conf_site.api.tests import ConferenceSiteAPITestCase class ConferenceSiteAPIConferenceTestCase(ConferenceSiteAPITestCase): def test_conference_api_anonymous_user(self): response = self.client.get(reverse("conference-detail")) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, { "title": self.conference.title, "start_date": self.conference.start_date.strftime("%Y-%m-%d"), "end_date": self.conference.end_date.strftime("%Y-%m-%d"), })
623
188
from .crud_user import crud_user # noqa: F401 from .crud_note import crud_note # noqa: F401
94
43
# Audit Event Outcomes AUDIT_SUCCESS = "0" AUDIT_MINOR_FAILURE = "4" AUDIT_SERIOUS_FAILURE = "8" AUDIT_MAJOR_FAILURE = "12"
125
69
# Python3.7+ import socket import json HOST, PORT = '', 1600 def parse_request(text): request_line = text.splitlines()[0] request_line = request_line.rstrip(b'\r\n') requests = request_line.split() params_dict = {} if requests[0] == b'POST': request_body = text.splitlines()[-1] request_body = request_body.rstrip(b'\r\n') params_list = request_body.split(b'&') for pair in params_list: print(pair) (key, value)=pair.split(b'=') params_dict[key]=value # Break down the request line into components requests = requests + [params_dict] return requests def handle_login(params_dict): with open("userdb.json", 'r') as result_f: creds=json.load(result_f) username=params_dict[b'Uname'] userpasswd=params_dict[b'Pass'] if creds[username.decode("utf-8")] == userpasswd.decode("utf-8"): return True else: return False listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listen_socket.bind((HOST, PORT)) listen_socket.listen(1) print(f'Serving HTTP on port {PORT} ...') while True: client_connection, client_address = listen_socket.accept() request_data = client_connection.recv(1024) # Print formatted request data a la 'curl -v' print(''.join( f'< {line}\n' for line in request_data.splitlines() )) requests = parse_request(request_data) print(requests) if requests[0] == b'GET': http_response = b"""\ HTTP/1.1 200 OK <!DOCTYPE html> <html> <head> <title>Login Form</title> <link rel="icon" href="data:,"> </head> <body> <h2>Login Page</h2><br> <div class="login"> <form id="login" method="post" action="?"> <label><b>User Name </b> </label> <input type="text" name="Uname" id="Uname" placeholder="Username"> <br><br> <label><b>Password </b> </label> <input type="Password" name="Pass" id="Pass" placeholder="Password"> <br><br> <button type="submit" name="log" id="log" value="submitted"> Submit </button> <br><br> </form> </div> </body> </html> """ else: if handle_login(requests[3]): http_response = b"""\ HTTP/1.1 200 OK SUCCESS """ else: http_response = b"""\ HTTP/1.1 200 OK FAILED TO LOG IN """ client_connection.sendall(http_response) client_connection.close()
2,653
903
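The manual '&'/'=' splitting in parse_request() above does no percent-decoding and breaks if a value contains '='. A hedged alternative sketch using the standard library form parser (credentials below are made up):

from urllib.parse import parse_qs

request_body = b'Uname=alice&Pass=s3cret%21'   # hypothetical form body from the login page
params = parse_qs(request_body.decode())

print(params)              # {'Uname': ['alice'], 'Pass': ['s3cret!']}
print(params['Uname'][0])  # alice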
from django.core.management.base import BaseCommand

from Harvest.utils import get_logger
from task_queue.scheduler import QueueScheduler

logger = get_logger(__name__)


class Command(BaseCommand):
    help = "Run the queue scheduler"

    def handle(self, *args, **options):
        QueueScheduler().run()
323
96
# Generated by Django 3.0.10 on 2020-09-10 13:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_user_mobile_phone'), ] operations = [ migrations.AlterField( model_name='user', name='mobile_phone', field=models.CharField(blank=True, default='', max_length=255, verbose_name='Mobile phone number'), preserve_default=False, ), ]
485
158
import psi4 import resp # Initialize two different conformations of ethanol geometry = """C 0.00000000 0.00000000 0.00000000 C 1.48805540 -0.00728176 0.39653260 O 2.04971655 1.37648153 0.25604810 H 3.06429978 1.37151670 0.52641124 H 1.58679428 -0.33618761 1.43102358 H 2.03441010 -0.68906454 -0.25521028 H -0.40814044 -1.00553466 0.10208540 H -0.54635470 0.68178278 0.65174288 H -0.09873888 0.32890585 -1.03449097 """ mol1 = psi4.geometry(geometry) mol1.update_geometry() mol1.set_name('conformer1') geometry = """C 0.00000000 0.00000000 0.00000000 C 1.48013500 -0.00724300 0.39442200 O 2.00696300 1.29224100 0.26232800 H 2.91547900 1.25572900 0.50972300 H 1.61500700 -0.32678000 1.45587700 H 2.07197500 -0.68695100 -0.26493400 H -0.32500012 1.02293415 -0.30034094 H -0.18892141 -0.68463906 -0.85893815 H -0.64257065 -0.32709111 0.84987482 """ mol2 = psi4.geometry(geometry) mol2.update_geometry() mol2.set_name('conformer2') molecules = [mol1, mol2] # Specify options options = {'VDW_SCALE_FACTORS' : [1.4, 1.6, 1.8, 2.0], 'VDW_POINT_DENSITY' : 1.0, 'RESP_A' : 0.0005, 'RESP_B' : 0.1, 'RESTRAINT' : True, 'IHFREE' : False, 'WEIGHT' : [1, 1], } # Call for first stage fit charges1 = resp.resp(molecules, options) print("Restrained Electrostatic Potential Charges") print(charges1[1]) options['RESP_A'] = 0.001 resp.set_stage2_constraint(molecules[0], charges1[1], options) # Add constraint for atoms fixed in second stage fit options['grid'] = [] options['esp'] = [] for mol in range(len(molecules)): options['grid'].append('%i_%s_grid.dat' %(mol+1, molecules[mol].name())) options['esp'].append('%i_%s_grid_esp.dat' %(mol+1, molecules[mol].name())) # Call for second stage fit charges2 = resp.resp(molecules, options) print("\nStage Two\n") print("RESP Charges") print(charges2[1])
2,005
1,135
# -*- coding: utf-8 -*-
# @Time    : 5/31/2018 9:20 PM
# @Author  : sunyonghai
# @File    : test.py
# @Software: ZJ_AI
from multiprocessing import Pool, Lock, Value
import os

tests_count = 80
lock = Lock()
counter = Value('i', 0)  # int type, comparable to an atomic variable in Java


def run(fn):
    global tests_count, lock, counter
    with lock:
        counter.value += 1
        print(
            'NO. (%d/%d) test start. PID: %d ' % (counter.value, tests_count, os.getpid()))
    # do something below ...


if __name__ == "__main__":
    pool = Pool(4)
    # 80 tasks: run() is called 80 times, each call receiving one element of range(80)
    pool.map(run, range(80))
    pool.close()
    pool.join()
627
275
import torch.nn as nn import torch import torch.nn.functional as F from .hed import HED class FusionHED(nn.Module): def __init__(self, in_channels=3, out_channels=1, dilation=1, conv_type_key='default', block_type_key='default', output_type_key='default', upsampling_type_key='default'): super(FusionHED, self).__init__() self.out_channels = out_channels self.hed1 = HED(in_channels=in_channels, out_channels=out_channels, dilation=dilation, conv_type_key=conv_type_key, block_type_key=block_type_key, output_type_key=output_type_key, upsampling_type_key=upsampling_type_key) self.hed2 = HED(in_channels=in_channels, out_channels=out_channels, dilation=dilation, conv_type_key=conv_type_key, block_type_key=block_type_key, output_type_key=output_type_key, upsampling_type_key=upsampling_type_key) self.hed3 = HED(in_channels=in_channels, out_channels=out_channels, dilation=dilation, conv_type_key=conv_type_key, block_type_key=block_type_key, output_type_key=output_type_key, upsampling_type_key=upsampling_type_key) self.upscale = nn.UpsamplingBilinear2d(scale_factor=2) self.downscale = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.fusion = nn.Conv2d(3*out_channels, out_channels, 1) def forward(self, x): ds = self.downscale us = self.upscale # upscaled branch d11, d12, d13, d14, d15, d16 = self.hed1(self.upscale(x)) d11, d12, d13, d14, d15, d16 = ds(d11), ds(d12), ds(d13), ds(d14), ds(d15), ds(d16) # normal branch d21, d22, d23, d24, d25, d26 = self.hed2(x) # downscaled branch d31, d32, d33, d34, d35, d36 = self.hed3(ds(x)) d31, d32, d33, d34, d35, d36 = us(d31), us(d32), us(d33), us(d34), us(d35), us(d36) d_final = self.fusion(torch.cat((d16, d26, d36), 1)) self.output = F.sigmoid(d_final) return (d11, d12, d13, d14, d15, d16, d21, d22, d23, d24, d25, d26, d31, d32, d33, d34, d35, d36, self.output)
2,280
929
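A small shape check (toy tensor, not from the entry above) of the upscale/downscale pair FusionHED uses to re-align its three branches before the 1x1 fusion convolution. Even spatial sizes round-trip exactly; odd sizes gain one pixel in the downscaled branch because of ceil_mode, so matching sizes are assumed at the torch.cat step.

import torch
import torch.nn as nn

up = nn.UpsamplingBilinear2d(scale_factor=2)
down = nn.MaxPool2d(2, stride=2, ceil_mode=True)

x = torch.randn(1, 3, 64, 64)   # toy input with even spatial size
print(down(up(x)).shape)        # torch.Size([1, 3, 64, 64]) -- upscaled branch, re-aligned
print(up(down(x)).shape)        # torch.Size([1, 3, 64, 64]) -- downscaled branch, re-aligned
# With a 65x65 input the downscaled branch would come back as 66x66 (ceil_mode pooling),
# so the concatenation in forward() relies on the branch outputs having equal spatial size.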
import sys sys.path.append("../") import duet from duet import pandas as pd epsilon = 1.0 alpha = 10 df = pd.read_csv("test.csv") with duet.RenyiFilter(9,1.0): noisy_count = duet.renyi_gauss(df.shape[0], α = alpha, ε = epsilon) print(f'NoisyCount : {noisy_count}') duet.print_privacy_cost()
303
131
import turtle tortuguinha = turtle.Turtle() tortuguinha.shape('turtle') tortuguinha.color('red') tortugo = turtle.Turtle() tortugo.shape('turtle') tortugo.color('blue') def faz_quadradin(the_turtle): for i in range(0,4): the_turtle.forward(100) the_turtle.right(90) def faz_espiral(the_turtle): for i in range(0,36): faz_quadradin(the_turtle) the_turtle.right(10) faz_espiral(tortuguinha) tortugo.right(5) faz_espiral(tortugo)
473
221
import torch import common.trainloop.context as ctx import common.trainloop.factory as factory import common.model.management as mgt import common.utils.torchhelper as th class MultiModelTorchTrainContext(ctx.TorchTrainContext): def __init__(self, device_str) -> None: super().__init__(device_str) self.additional_models = {} self.additional_optimizers = {} def load_from_new(self): super().load_from_new() # retrieval of the first/standard model if not hasattr(self.config.others, 'model_names'): raise ValueError('model_names entry missing in others section of configuration') if not hasattr(self.config.others, 'additional_models'): raise ValueError('additional_models entry missing in others section of configuration') if not hasattr(self.config.others, 'additional_optimizers'): raise ValueError('additional_optimizers entry missing in others section of configuration') for i, name in enumerate(self.config.others.model_names): model = factory.get_model(self.config.others.additional_models[i]) model = self._multi_gpu_if_available(model) model.to(self.device) self.additional_models[name] = model optimizer = factory.get_optimizer(model.parameters(), self.config.others.additional_optimizers[i]) self.additional_optimizers[name] = optimizer mgt.model_service.backup_model_parameters(self.model_files.model_path(postfix=name), self.config.model.to_dictable_parameter(), self.config.optimizer.to_dictable_parameter()) def save_to_checkpoint(self, epoch: int, is_best=False): super().save_to_checkpoint(epoch, is_best) for name in self.additional_models: checkpoint_path = self.model_files.build_checkpoint_path(epoch, is_best=is_best, postfix=name) mgt.model_service.save_checkpoint(checkpoint_path, epoch, self.additional_models[name], self.additional_optimizers[name]) def load_from_checkpoint(self, epoch): super().load_from_checkpoint(epoch) for name in self.additional_models: # build, since we know it is a int epoch checkpoint_path = self.model_files.build_checkpoint_path(epoch, postfix=name) model, optimizer = mgt.model_service.load_model_from_parameters(self.model_files.model_path(postfix=name), with_optimizer=True) mgt.model_service.load_checkpoint(checkpoint_path, model, optimizer) model = self._multi_gpu_if_available(model) self.additional_models[name] = model.to(self.device) self.additional_optimizers[name] = th.optimizer_to_device(optimizer, self.device) def set_mode(self, is_train: bool) -> None: self.is_train = is_train if self.is_train: self.model.train() for model in self.additional_models.values(): model.train() else: self.model.eval() for model in self.additional_models.values(): model.eval() torch.set_grad_enabled(self.is_train)
3,375
946
# The input to each gate can only be 0 or 1

''' Single Layer Perceptrons '''
def AND_perceptron(x1, x2):
    w1, w2, t = 1, 1, 2
    return w1*x1 + w2*x2 >= t

def OR_perceptron(x1, x2):
    w1, w2, t = 1, 1, 1
    return w1*x1 + w2*x2 >= t

def NOT_perceptron(x1):
    w1, t = -1, 0
    return w1*x1 >= t

''' Multi Layer Perceptrons '''
def XOR_perceptron(x1, x2):
    w1, w2, t = 1, 1, 0.5
    h_1_1 = (w1*x1 + w2*x2 >= t)     # layer:1 node:1 (OR)
    w1, w2, t = -1, -1, -1.5
    h_1_2 = (w1*x1 + w2*x2 >= t)     # layer:1 node:2 (NAND)
    w1, w2, t = 1, 1, 1.5
    return w1*h_1_1 + w2*h_1_2 >= t  # layer:2 or output layer (AND)

print(XOR_perceptron(0, 0))
print(XOR_perceptron(0, 1))
print(XOR_perceptron(1, 0))
print(XOR_perceptron(1, 1))
704
425
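As a follow-up sketch (assuming the gate functions from the entry above, including the single-input NOT gate, are in scope): the hidden layer of XOR_perceptron is just an OR and a NAND feeding an AND, so XOR can equivalently be composed from the single-layer gates.

def xor_from_gates(x1, x2):
    # XOR(x1, x2) == AND(OR(x1, x2), NOT(AND(x1, x2)))
    return AND_perceptron(OR_perceptron(x1, x2), NOT_perceptron(AND_perceptron(x1, x2)))

for x1 in (0, 1):
    for x2 in (0, 1):
        assert xor_from_gates(x1, x2) == XOR_perceptron(x1, x2)
        print(x1, x2, xor_from_gates(x1, x2))   # True only for (0,1) and (1,0)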
import pandas as pd import numpy as np import random, os, json from collections import defaultdict from matplotlib import pyplot as plt from sklearn.cluster import KMeans, SpectralClustering from scipy.spatial.distance import pdist, squareform from scipy.sparse import csgraph from numpy import linalg as LA from sklearn.metrics import silhouette_score PROXIMITY_FNAME = 'selected_ripe_ris__monitors_from_pathlens_100k.json' def get_argmax_total_similarity(similarity_matrix, from_items=None, rank_normalization=False): ''' Finds the item of a matrix (similarity_matrix) that has the maximum aggregate similarity to all other items. If the "from_items" is not None, then only the rows/columns of the matrix in the from_items list are taken into account. :param similarity_matrix: (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to represent the similarity between item of row i and column j :param from_items: (list/set) a subset of the items (rows/columns) from which the item with the max similiarity will be selected :param rank_normalization: (boolean) whether to modify the similarity matrix giving more emphasis to most similar values per row by dividing each element with the rank it appears in the sorted list of values of the row e.g., a_row = [0.5, 0.3, 0.4] --> modified_row = [0.5/1, 0.3/3, 0.4/2] = [0.5, 0.1, 0.2] e.g., a_row = [0.1, 0.1, 0.4] --> modified_row = [0.1/2, 0.1/3, 0.4/1] = [0.05, 0.033, 0.4] :return: (scalar, e.g., str or int) the index of the item in the dataframe that has the max total similarity ''' if from_items is None: df = similarity_matrix.copy() else: df = similarity_matrix.loc[from_items, from_items].copy() np.fill_diagonal(df.values, np.nan) # set self-similarity to nan so that it is not taken into account if rank_normalization: for p1 in df.index: sorted_indexes = list(df.loc[p1, :].sort_values(ascending=False).index) df.loc[p1, sorted_indexes] = df.loc[p1, sorted_indexes] * [1.0 / i for i in range(1, 1 + df.shape[0])] sum_similarities = np.nansum(df, axis=1) if np.max(sum_similarities) == 0: # all similarities are nan or zero next_item = random.sample(from_items, 1)[0] else: next_item = df.index[np.argmax(sum_similarities)] return next_item def greedy_most_similar_elimination(similarity_matrix, rank_normalization=False): ''' Selects iteratively the item in the given similarity_matrix that has the maximum aggregate similarity to all other items. At each iteration, only the similarities among the non-selected items are taken into account. At each iteration, the selected item is placed in the beginning of a list. At the end, this list is returned. 
Example: returned_list = [item_selected_last, ..., item_selected_first] :param similarity_matrix: (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to represent the similarity between item of row i and column j :param rank_normalization: (boolean) whether to modify the similarity matrix giving more emphasis to most similar values per row :return: (list) a list of ordered items (from the input's index); the first item is the least similar ''' selected_items = [] for i in range(similarity_matrix.shape[0]): from_items = list(set(similarity_matrix.index) - set(selected_items)) next_item = get_argmax_total_similarity(similarity_matrix, from_items=from_items, rank_normalization=rank_normalization) selected_items.insert(0, next_item) return selected_items def get_argmin_total_similarity(similarity_matrix, from_items=None): ''' Finds the item of a matrix (similarity_matrix) that has the minimum aggregate similarity to all other items. If the "from_items" is not None, then only the (a) rows of the matrix in the from_items list and (b) the columns of the matrix NOT in the from_items list are taken into account. :param similarity_matrix: (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to represent the similarity between item of row i and column j :param from_items: (list/set) a subset of the items (rows/columns) from which the item with the min similiarity will be selected :return: (scalar, e.g., str or int) the index of the item in the dataframe that has the min total similarity ''' df = similarity_matrix.copy() np.fill_diagonal(df.values, np.nan) # set self-similarity to nan so that it is not taken into account if from_items is not None: other_items = list(set(df.index) - set(from_items)) df = df.loc[from_items, other_items] sum_similarities = np.nansum(df, axis=1) if np.max(sum_similarities) == 0: # all similarities are nan or zero next_item = random.sample(from_items, 1)[0] else: next_item = df.index[np.argmin(sum_similarities)] return next_item def greedy_least_similar_selection(similarity_matrix, nb_items=None): ''' Selects iteratively the item in the given similarity_matrix that has the minimum aggregate similarity to all other items. At each iteration, only the similarities among the non-selected items and the already selected items are taken into account. At each iteration, the selected item is placed in the end of a list. At the end, this list is returned. 
Example: returned_list = [item_selected_first, ..., item_selected_last] :param similarity_matrix: (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to represent the similarity between item of row i and column j :param nb_items: (int) number of items to be selected; if None all items are selected in the returned list :return: (list) a list of ordered items (from the input's index); the first item is the least similar ''' selected_items = [] nb_total_items = similarity_matrix.shape[0] if (nb_items is None) or (nb_items > nb_total_items): nb_items = nb_total_items for i in range(nb_items): if len(selected_items) == 0: from_items = None else: from_items = list(set(similarity_matrix.index) - set(selected_items)) next_item = get_argmin_total_similarity(similarity_matrix, from_items=from_items) selected_items.append(next_item) return selected_items def sample_from_clusters(cluster_members_dict, nb_items=None): ''' Samples items from the clusters, starting from a random item in the largest cluster, then a random item in the second largest cluster, and so on. When elements of all clusters are selected, then starts again from the largest cluster, until all items (or up to nb_items) are selected. :param cluster_members_dict: (dict of lists) dict of the form {cluster label: list of members of the cluster} :param nb_items: (int) number of items to be selected; if None all items are selected in the returned list :return: (list) a list of ordered items that are the samples from clusters ''' nb_clusters = len(cluster_members_dict.keys()) nb_all_items = sum([len(v) for v in cluster_members_dict.values()]) if (nb_items is None) or (nb_items > nb_all_items): nb_items = nb_all_items sorted_clusters = sorted(cluster_members_dict, key=lambda k: len(cluster_members_dict.get(k)), reverse=True) selected_items = [] for i in range(nb_items): ind = i % nb_clusters # iterate over the sorted_clusters by getting the index of the current cluster current_cluster = sorted_clusters[ind] len_current_cluster = len(cluster_members_dict[current_cluster]) if len_current_cluster > 0: next_item_ind = random.sample(range(len_current_cluster), 1)[0] next_item = cluster_members_dict[current_cluster].pop(next_item_ind) selected_items.append(next_item) i += 1 return selected_items def random_selection(similarity_matrix, nb_items=None): """ Selects randomly an item from the given similarity_matrix :param similarity_matrix: (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to represent the similarity between item of row i and column j :param nb_items: (int) number of items to be selected; if None all items are selected in the returned list :return: (list) a list of random items """ selected_items = [] nb_total_items = similarity_matrix.shape[0] if (nb_items is None) or (nb_items > nb_total_items): nb_items = nb_total_items for i in range(nb_items): temp = random.sample(list(similarity_matrix), 1)[0] selected_items.append(temp) return selected_items def getAffinityMatrix(coordinates, k=7): """ The Affinity matrix determines how close or similar are 2 points in our space. Calculate affinity matrix based on input coordinates matrix and the number of nearest neighbours. 
Apply local scaling based on the k nearest neighbour References: https://papers.nips.cc/paper/2619-self-tuning-spectral-clustering.pdf """ # calculate euclidian distance matrix dists = squareform(pdist(coordinates)) # for each row, sort the distances ascendingly and take the index of the # k-th position (nearest neighbour) knn_distances = np.sort(dists, axis=0)[k] knn_distances = knn_distances[np.newaxis].T # calculate sigma_i * sigma_j local_scale = knn_distances.dot(knn_distances.T) affinity_matrix = dists * dists affinity_matrix = -affinity_matrix / local_scale # divide square distance matrix by local scale affinity_matrix[np.where(np.isnan(affinity_matrix))] = 0.0 # apply exponential affinity_matrix = np.exp(affinity_matrix) np.fill_diagonal(affinity_matrix, 0) return affinity_matrix def eigenDecomposition(A, topK=5): """ :param A: Affinity matrix :param topK: Top k :return A tuple containing: - the optimal number of clusters by eigengap heuristic - all eigen values - all eigen vectors This method performs the eigen decomposition on a given affinity matrix, following the steps recommended in the paper: 1. Construct the normalized affinity matrix: L = D−1/2ADˆ −1/2. 2. Find the eigenvalues and their associated eigen vectors 3. Identify the maximum gap which corresponds to the number of clusters by eigengap heuristic References: https://papers.nips.cc/paper/2619-self-tuning-spectral-clustering.pdf http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf """ L = csgraph.laplacian(A, normed=True) n_components = A.shape[0] # LM parameter : Eigenvalues with largest magnitude (eigs, eigsh), that is, largest eigenvalues in # the euclidean norm of complex numbers. # eigenvalues, eigenvectors = eigsh(L, k=n_components, which="LM", sigma=1.0, maxiter=5000) eigenvalues, eigenvectors = LA.eig(L) plt.title('Largest eigen values of input matrix') plt.scatter(np.arange(len(eigenvalues)), eigenvalues) plt.grid() plt.show() # Identify the optimal number of clusters as the index corresponding # to the larger gap between eigen values index_largest_gap = np.argsort(np.diff(eigenvalues))[::-1][:topK] nb_clusters = index_largest_gap + 1 return nb_clusters, eigenvalues, eigenvectors def get_optimal_number_of_clusters(similarity): ''' A function that help us identify which is the optimal number of cluster for Kmeans :param similarity: The similarity matrix from graph embeddings ''' distortions = [] for i in range(1, 20): clustering = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit( similarity) distortions.append(clustering.inertia_) plt.plot(range(1, 20), distortions, marker='o') plt.xlabel('Number of clusters (k)') plt.ylabel('Sum of squared distance') plt.title("Elbow Method for Optimal k") plt.show() def get_plot_for_different_k_values(similarity, model_name): """ This function plots points after applying a cluster method for k=3,4,5,6. 
Furthermore prints silhouette score for each k :param similarity: Contains our dataset (The similarity of RIPE monitors) :return: A list containing silhouette score """ silhouette_scores = [] f = plt.figure() f.add_subplot(2, 2, 1) for i in range(3, 7): if model_name == 'Spectral': sc = SpectralClustering(n_clusters=i, affinity='precomputed').fit(similarity) else: sc = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(similarity) silhouette_scores.append(silhouette_score(similarity, sc.labels_)) f.add_subplot(2, 2, i - 2) plt.scatter(similarity[:, 0], similarity[:, 1], s=5, c=sc.labels_, label="n_cluster-" + str(i)) plt.legend() plt.show() return silhouette_scores def plot_silhouette_score_for_various_k(similarity, model_name): """ In this function we plot the silhouette score for various number of K (number of clusters) :param similarity: Contains our dataset (The similarity of RIPE monitors) :param model_name: The clustering algorithm we use (K-means or SpectralClustering) """ sil = [] for i in range(2, 21): if model_name == 'Spectral': sc = SpectralClustering(n_clusters=i, affinity='precomputed').fit(similarity) else: sc = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(similarity) sil.append(silhouette_score(similarity, sc.labels_)) plt.plot(range(2, 21), sil[:], '--bo') plt.title('Silhouette score for different cluster sizes for ' + str(model_name)) plt.xlabel('Silhouette Score') plt.ylabel('Number of clusters (K)') plt.show() def clustering_based_selection(similarity_matrix, clustering_method, nb_clusters, nb_items=None, **kwargs): ''' Applies a clustering algorithm to the similarity matrix to cluster items, and then selects samples from the classes. :param similarity_matrix: (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to represent the similarity between item of row i and column j :param clustering_method: (str) 'SpectralClustering' or 'Kmeans' :param nb_clusters: (int) number of clusters :param nb_items: (int) number of items to be selected; if None all items are selected in the returned list :param **kwargs: (dict) optional kwargs for the clustering algorithms :return: (list) a list of ordered items that are the samples from clusters ''' sim = similarity_matrix.to_numpy() sim = np.nan_to_num(sim, nan=0) if clustering_method == 'SpectralClustering': clustering = getAffinityMatrix(sim, k=7) k, eigenvalues, eigenvectors = eigenDecomposition(sim) clustering = SpectralClustering(n_clusters=nb_clusters, affinity='precomputed', **kwargs).fit(sim) labels = clustering.labels_ plt.scatter(sim[:, 0], sim[:, 1], c=labels) plt.title('Number of Clusters: ' + str(nb_clusters)) plt.show() model = 'Spectral' silhouette_scores = get_plot_for_different_k_values(sim, model) # print(silhouette_scores) # print(f'Optimal number of clusters {k}') plot_silhouette_score_for_various_k(sim, model) elif clustering_method == 'Kmeans': get_optimal_number_of_clusters(sim) clustering = KMeans(n_clusters=nb_clusters, **kwargs).fit(sim) labels = clustering.labels_ plt.scatter(sim[:, 0], sim[:, 1], c=labels) plt.title('Number of Clusters: ' + str(nb_clusters)) plt.show() model = 'Kmeans' silhouette_scores = get_plot_for_different_k_values(sim, model) # print(silhouette_scores) plot_silhouette_score_for_various_k(sim, model) else: raise ValueError cluster_members_dict = defaultdict(list) for i, label in enumerate(clustering.labels_): cluster_members_dict[label].append(similarity_matrix.index[i]) 
return sample_from_clusters(cluster_members_dict, nb_items=nb_items) def select_from_similarity_matrix(similarity_matrix, method, **kwargs): if method == 'Greedy min': selected_items = greedy_most_similar_elimination(similarity_matrix, **kwargs) elif method == 'Greedy max': selected_items = greedy_least_similar_selection(similarity_matrix, **kwargs) elif method == 'Clustering': selected_items = clustering_based_selection(similarity_matrix, **kwargs) elif method == 'Random': selected_items = random_selection(similarity_matrix, **kwargs) else: raise ValueError return selected_items def return_the_selected_monitors_from_methods(): similarity_matrix = pd.read_csv('ALL_RIPE_RIS_withASns_similarity_embeddings_BGP2VEC_20210107.csv', header=0, index_col=0) similarity_matrix.columns = similarity_matrix.columns.astype(float) selected_items_greedy_random = select_from_similarity_matrix(similarity_matrix, 'Random') selected_items_greedy_min = select_from_similarity_matrix(similarity_matrix, 'Greedy min') selected_items_greedy_max = select_from_similarity_matrix(similarity_matrix, 'Greedy max') kwargs = {'clustering_method': 'Kmeans', 'nb_clusters': 10} selected_items_Kmeans = select_from_similarity_matrix(similarity_matrix, 'Clustering', **kwargs) kwargs = {'clustering_method': 'SpectralClustering', 'nb_clusters': 10} selected_items_Spectral = select_from_similarity_matrix(similarity_matrix, 'Clustering', **kwargs) return selected_items_greedy_random, selected_items_greedy_min, selected_items_greedy_max, selected_items_Kmeans, selected_items_Spectral # method_param_dict = { # 'Greedy min': {'method': 'Greedy min', 'sim_matrix': similarity_matrix, 'args': {}}, # 'Greedy max': {'method': 'Greedy max', 'sim_matrix': similarity_matrix, 'args': {}}, # 'Clustering kmeans k10 full': {'method': 'Clustering', 'sim_matrix': similarity_matrix, 'args': {'clustering_method': 'Kmeans', 'nb_clusters': 7}}, # 'Clustering spectral k10': {'method': 'Clustering', 'sim_matrix': similarity_matrix, 'args': {'clustering_method': 'SpectralClustering', 'nb_clusters': 7}}} # # for m, params in method_param_dict.items(): # selected_items = select_from_similarity_matrix(params['sim_matrix'], params['method'], **params['args']) # print('\t{} [DONE]'.format(m)) # with open('dataset_selected_monitors_ripe_ris_pathlens_100k_{}.json'.format('_'.join(m.lower().translate('()').split(' '))), 'w') as f: # json.dump(selected_items, f) # # asns_per_method = dict() # for m, params in method_param_dict.items(): # with open('dataset_selected_monitors_ripe_ris_pathlens_100k_{}.json'.format('_'.join(m.lower().split(' '))), 'r') as f: # selected_items = json.load(f) # asns_per_method[m] = selected_items # print('\t{} [DONE]'.format(m)) # with open(PROXIMITY_FNAME, 'w') as f: # json.dump(asns_per_method, f)
19,916
6,246
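A self-contained toy check (data made up, not from the entry above) of the eigengap heuristic that eigenDecomposition() relies on: for an affinity matrix with two well-separated blocks, the largest gap between consecutive eigenvalues of the normalized Laplacian sits after the second eigenvalue, suggesting two clusters.

import numpy as np
from scipy.sparse import csgraph

# Toy affinity matrix: items 0-2 form one tight group, items 3-5 another.
A = np.zeros((6, 6))
A[:3, :3] = 1.0
A[3:, 3:] = 1.0
np.fill_diagonal(A, 0.0)

L = csgraph.laplacian(A, normed=True)
eigenvalues = np.sort(np.linalg.eigvalsh(L))

# Largest gap between consecutive eigenvalues -> estimated number of clusters.
nb_clusters = int(np.argmax(np.diff(eigenvalues))) + 1
print(nb_clusters)   # 2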
# Copyright (c) 2015 Ericsson AB. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from kingbird.objects import base as obj_base from kingbird.tests import base from oslo_versionedobjects import fields as obj_fields class TestBaseObject(base.KingbirdTestCase): def test_base_class(self): obj = obj_base.KingbirdObject() self.assertEqual(obj_base.KingbirdObject.OBJ_PROJECT_NAMESPACE, obj.OBJ_PROJECT_NAMESPACE) self.assertEqual(obj_base.KingbirdObject.VERSION, obj.VERSION) @mock.patch.object(obj_base.KingbirdObject, "obj_reset_changes") def test_from_db_object(self, mock_obj_reset_ch): class TestKingbirdObject(obj_base.KingbirdObject, obj_base.VersionedObjectDictCompat): fields = { "key1": obj_fields.StringField(), "key2": obj_fields.StringField(), } obj = TestKingbirdObject() context = mock.Mock() db_obj = { "key1": "value1", "key2": "value2", } res = obj_base.KingbirdObject._from_db_object(context, obj, db_obj) self.assertIsNotNone(res) self.assertEqual("value1", obj["key1"]) self.assertEqual("value2", obj["key2"]) self.assertEqual(obj._context, context) mock_obj_reset_ch.assert_called_once_with() def test_from_db_object_none(self): obj = obj_base.KingbirdObject() db_obj = None context = mock.Mock() res = obj_base.KingbirdObject._from_db_object(context, obj, db_obj) self.assertIsNone(res)
2,184
667
import sys import os # Add this directory to python path (contains nosetest_config) sys.path.append(os.path.dirname(os.path.realpath(__file__))) from biothings.tests import BiothingTest from biothings.tests.settings import NosetestSettings ns = NosetestSettings() class {% nosetest_settings_class %}(BiothingTest): __test__ = True # Add extra nosetests here pass
380
121
import tempfile from abc import ABC, abstractmethod from time import sleep, time from hardware.camera import Photo, Resolution class CameraDriver(ABC): @abstractmethod def capture(self) -> Photo: pass class PiCameraDriver(CameraDriver): def __init__(self, resolution=Resolution(1024, 768), iso=300): from picamera import PiCamera self.resolution = resolution self.iso = iso self._camera = PiCamera(resolution=resolution) self._camera.iso = iso sleep(2) print('Camera ready!') def capture(self) -> Photo: origin = tempfile.NamedTemporaryFile(mode="w+t", suffix='.jpg') self._camera.capture(origin.name) photo = Photo(origin.name, resolution=self.resolution, iso=self.iso) return photo.resize(ratio=1)
824
257
""" Django settings for sms_verifier project. Generated by 'django-admin startproject' using Django 2.2.6. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os import socket ENVIRONMENT = os.environ.get('environment', 'dev') DOMAIN_NAME = os.environ.get('domain_name', 'http://127.0.0.1:8000') try: HOSTNAME = socket.gethostname() except ImportError as e: HOSTNAME = 'localhost' PROJECT_NAME = 'SMS Verifier' VERSION = os.environ.get('version', 'null') EXTRA_ALLOWED_HOSTS = os.environ.get('allowed_hosts', '').split(',') # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('django_secret_key', 'djangoSecretKey') # SECURITY WARNING: don't run with debug turned on in production! if ENVIRONMENT == 'dev' or ENVIRONMENT == 'ci': DEBUG = True EXTRA_ALLOWED_HOSTS.append('*') else: DEBUG = False INTERNAL_IPS = [ '127.0.0.1', ] ALLOWED_HOSTS = [ 'alpha.sms-verifier.nalkins.cloud', 'sms-verifier.nalkins.cloud', '127.0.0.1', '10.0.2.2', # Android AVD IP for localhost ] + EXTRA_ALLOWED_HOSTS # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'oauth2_provider', 'django_user_email_extension', 'sms_verifier_app' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'sms_verifier.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'sms_verifier.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases ###################### # DATABASE SETTINGS ###################### if ENVIRONMENT == 'dev': DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': os.environ.get('db_name', 'sms_verifier'), 'USER': os.environ.get('db_user', 'sms_verifier'), 'PASSWORD': os.environ.get('db_pass', 'django'), 'HOST': os.environ.get('db_host', 'localhost'), 'PORT': '3306', } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # 
https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = os.environ.get('static_url', "/static/") MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') ###################### # Custom User Model ###################### AUTH_USER_MODEL = 'django_user_email_extension.User' ################## # REST Framework ################## REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'oauth2_provider.contrib.rest_framework.OAuth2Authentication', # 'rest_framework.authentication.BasicAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), } ###################### # Social Auth ###################### SOCIAL_AUTH_USER_MODEL = 'django_user_email_extension.User' SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'index' SOCIAL_AUTH_LOGOUT_REDIRECT_URL = '/' SOCIAL_AUTH_LOGIN_ERROR_URL = '/' SOCIAL_AUTH_LOGIN_URL = 'index' SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True SOCIAL_AUTH_PIPELINE = ( 'social_core.pipeline.social_auth.social_details', 'social_core.pipeline.social_auth.social_uid', 'social_core.pipeline.social_auth.auth_allowed', 'social_core.pipeline.social_auth.social_user', 'social_core.pipeline.user.get_username', 'social_core.pipeline.social_auth.associate_by_email', 'social_core.pipeline.user.create_user', 'social_core.pipeline.social_auth.associate_user', 'social_core.pipeline.social_auth.load_extra_data', 'social_core.pipeline.user.user_details', ) SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('google_oauth_client_id', 'None') SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('google_oauth_client_secret', 'None') SOCIAL_AUTH_GITHUB_KEY = os.environ.get('github_oauth_client_id', 'None') SOCIAL_AUTH_GITHUB_SECRET = os.environ.get('github_oauth_client_secret', 'None') SOCIAL_AUTH_GITHUB_SCOPE = [ 'read:user', 'user:email', 'read:org', ] AUTHENTICATION_BACKENDS = ( 'social_core.backends.github.GithubOAuth2', 'social_core.backends.google.GoogleOAuth2', 'django.contrib.auth.backends.ModelBackend', ) ###################### # LOGGING SETTINGS ###################### LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO') HANDLERS = ['console'] LOGGING = { 'version': 1, 'handlers': { 'console': { 'level': LOG_LEVEL, 'class': 'logging.StreamHandler', }, }, 'loggers': { PROJECT_NAME: { 'handlers': HANDLERS, 'level': LOG_LEVEL, }, 'django.request': { 'handlers': HANDLERS, 'level': 'WARNING', 'propagate': False, }, }, }
6,897
2,439
from termCombinationLib import initializeTermSummary, applyRule, recurringTermsUnified, supertermRepresentsLessSignificantSubterm, subtermRepresentsLessSignificantSimilarSuperterm, subtermRepresentsSupertermWithLessSignificanceAndLessRepresentativePower, commonSupertermInListRepresentsSubtermsWithLessRepresentativePower, supertermRepresentsSubtermLargerThanMaxRep def test_initializeTermSummary_singleInput(): tbsGsIDsList=[['term1', 'term2', 'term3']] termSummary=initializeTermSummary(tbsGsIDsList) assert termSummary==[ ['term1', ['term1'], 1], ['term2', ['term2'], 2], ['term3', ['term3'], 3] ] def test_initializeTermSummary_multiInput(): tbsGsIDsList=[['term11', 'termCommon', 'term13', 'term14'], ['term21', 'term22', 'term23', 'termCommon']] termSummary=initializeTermSummary(tbsGsIDsList) assert termSummary==[ ['term11', ['term11'], 1], ['term21', ['term21'], 1], ['termCommon', ['termCommon'], 2], ['term22', ['term22'], 2], ['term13', ['term13'], 3], ['term23', ['term23'], 3], ['term14', ['term14'], 4], ['termCommon', ['termCommon'], 4] ] def test_rule_recurringTermsUnified(): tbsGsIDsList=[['term11', 'termCommon', 'term13', 'term14'], ['term21', 'term22', 'term23', 'termCommon']] termSummary=initializeTermSummary(tbsGsIDsList) geneSetsDict=dict()#Not important, not used in this rule termSummary=applyRule(termSummary, geneSetsDict, 2000, recurringTermsUnified) assert termSummary==[ ['term11', ['term11'], 1], ['term21', ['term21'], 1], ['termCommon', ['termCommon'], 2], ['term22', ['term22'], 2], ['term13', ['term13'], 3], ['term23', ['term23'], 3], ['term14', ['term14'], 4], ] def test_rule_supertermRepresentsLessSignificantSubterm(): tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5', 'term6']] termSummary=initializeTermSummary(tbsGsIDsList) geneSetsDict={ 'term1':{'A','B','C'}, 'term2':{'A','B','C','D','E','F'}, 'term3':{'A','B','C','D'}, 'term4':{'A','B','G','H'}, 'term5':{'A','B'}, 'term6':{'G','H'} } termSummary=applyRule(termSummary, geneSetsDict, 2000, supertermRepresentsLessSignificantSubterm) assert termSummary==[ ['term1', ['term1', 'term5'], 1], ['term2', ['term2', 'term3'], 2], ['term4', ['term4', 'term6'], 4], ] def test_rule_subtermRepresentsLessSignificantSimilarSuperterm(): tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5', 'term6']] termSummary=initializeTermSummary(tbsGsIDsList) geneSetsDict={ 'term1':{'A','B','C'}, 'term2':{'A','B','C','D','E','F'}, 'term3':{'A','B','C','D'}, 'term4':{'A','B','C','D','E','F','G'}, 'term5':{'A','B'}, 'term6':{'G','H'} } termSummary=applyRule(termSummary, geneSetsDict, 2000, subtermRepresentsLessSignificantSimilarSuperterm) assert termSummary==[ ['term1', ['term1', 'term3'], 1], ['term2', ['term2', 'term4'], 2], ['term5', ['term5'], 5], ['term6', ['term6'], 6] ] def test_rule_subtermRepresentsSupertermWithLessSignificanceAndLessRepresentativePower(): tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5']] termSummary=initializeTermSummary(tbsGsIDsList) geneSetsDict={ 'term1':{'A','B','C'}, 'term2':{'G','H'}, 'term3':{'A','B','C','D'}, 'term4':{'A','B'}, 'term5':{'A','B','C','D','E','F'} } termSummary=applyRule(termSummary, geneSetsDict, 2000, subtermRepresentsLessSignificantSimilarSuperterm) assert termSummary==[ ['term1', ['term1', 'term3'], 1], ['term2', ['term2'], 2], ['term4', ['term4'], 4], ['term5', ['term5'], 5] ] termSummary=applyRule(termSummary, geneSetsDict, 2000, subtermRepresentsSupertermWithLessSignificanceAndLessRepresentativePower) assert termSummary==[ ['term1', ['term1', 'term3', 'term5'], 1], 
['term2', ['term2'], 2], ['term4', ['term4'], 4] ] def test_rule_commonSupertermInListRepresentsSubtermsWithLessRepresentativePower(): tbsGsIDsList=[['term1', 'term2', 'term3', 'term4']] termSummary=initializeTermSummary(tbsGsIDsList) geneSetsDict={ 'term1':{'A','B','C'}, 'term2':{'G','H'}, 'term3':{'A','B','C','D','G','H'}, 'term4':{'A','B','D'} } termSummary=applyRule(termSummary, geneSetsDict, 2000, supertermRepresentsLessSignificantSubterm) assert termSummary==[ ['term1', ['term1'], 1], ['term2', ['term2'], 2], ['term3', ['term3','term4'], 3], ] termSummary=applyRule(termSummary, geneSetsDict, 2000, commonSupertermInListRepresentsSubtermsWithLessRepresentativePower) assert termSummary==[ ['term3', ['term3','term4', 'term1', 'term2'], 1] ] def test_rule_supertermRepresentsSubtermLargerThanMaxRep(): tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5', 'term6']] termSummary=initializeTermSummary(tbsGsIDsList) geneSetsDict={ 'term1':{'A','B','C'}, 'term2':{'A','B','C','D','E','F','G','H','I','J'}, 'term3':{'A','B','C','D'}, 'term4':{'A','B','C','D','G','H','I','J'}, 'term5':{'A','B'}, 'term6':{'G','H'} } termSummary=applyRule(termSummary, geneSetsDict, 6, supertermRepresentsSubtermLargerThanMaxRep) assert termSummary==[ ['term1', ['term1'], 1], ['term2', ['term2', 'term4'], 2], ['term3', ['term3'], 3], ['term5', ['term5'], 5], ['term6', ['term6'], 6], ]
5,149
2,239
from .exceptions import PydeckException # noqa
48
14
import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline, make_union from tpot.builtins import StackingEstimator from xgboost import XGBClassifier from sklearn.preprocessing import FunctionTransformer from copy import copy # NOTE: Make sure that the outcome column is labeled 'target' in the data file tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64) features = tpot_data.drop('target', axis=1) training_features, testing_features, training_target, testing_target = \ train_test_split(features, tpot_data['target'], random_state=None) # Average CV score on the training set was: 0.8477898176814586 exported_pipeline = make_pipeline( make_union( FunctionTransformer(copy), FunctionTransformer(copy) ), XGBClassifier(learning_rate=0.5, max_depth=9, min_child_weight=9, n_estimators=100, n_jobs=1, subsample=0.8, verbosity=0) ) exported_pipeline.fit(training_features, training_target) results = exported_pipeline.predict(testing_features)
1,096
360
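The exported pipeline above ends after predicting on the held-out split; a hedged follow-up (assuming the variables from the script are still in scope) that scores those predictions against the reported CV score:

from sklearn.metrics import accuracy_score

# Holdout accuracy of the exported pipeline; compare with the ~0.848 average CV score noted above.
print(accuracy_score(testing_target, results))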
import os import numpy as np import pandas as pd import pytorch_lightning as pl import torch from sklearn.model_selection import train_test_split from torch.utils.data import DataLoader, Dataset from transformers import BertTokenizerFast os.environ["TOKENIZERS_PARALLELISM"] = "true" FILE_PATH = 'data/own.txt' def read_conll_from_txt_to_df(file_path): df = pd.DataFrame(columns=['SENTENCE', 'TOKEN', 'LABEL']) sent = 1 with open(file_path, 'r') as f: for i, line in enumerate(f): line = line.replace('\n', '') splitted = line.split() if not splitted: sent += 1 else: df.loc[i] = [sent, splitted[0], splitted[1]] return df data = read_conll_from_txt_to_df(FILE_PATH) class SentenceGetter(object): def __init__(self, data): self.n_sent = 1 self.data = data self.empty = False agg_func = lambda s: [(token, tag) for token, tag in zip(s["TOKEN"].values.tolist(), s["LABEL"].values.tolist())] self.grouped = self.data.groupby("SENTENCE").apply(agg_func) self.sentences = [s for s in self.grouped] def get_next(self): try: s = self.grouped["SENTENCE: {}".format(self.n_sent)] self.n_sent += 1 return s except: return None getter = SentenceGetter(data) tags_vals = list(set(data["LABEL"].values)) tag2index = {t: i for i, t in enumerate(tags_vals)} index2tag = {i: t for i, t in enumerate(tags_vals)} sentences = [' '.join([s[0] for s in sent]) for sent in getter.sentences] labels = [[s[1] for s in sent] for sent in getter.sentences] #labels = [[tag2idx.get(l) for l in lab] for lab in labels] ##### only overview ##### tags = ["[PAD]"] tags.extend(list(set(data["LABEL"].values))) tag2idx = {t: i for i, t in enumerate(tags)} print('Length of Labels : ' + str(len(tags))) words = ["[PAD]", "[UNK]"] words.extend(list(set(data["TOKEN"].values))) word2idx = {t: i for i, t in enumerate(words)} print('Length of unique words : ' + str(len(words))) # check dataset def dataset_checker(sent_list, label_list): sent_check = list() for el in sent_list: sent_check.append(len(el.split())) label_check = list() for el in label_list: label_check.append(len(el)) for index, (first, second) in enumerate(zip(sent_check, label_check)): if first != second: print(index, second) dataset_checker(sentences, labels) ##### only overview ##### unique_tags = list(set(tag for text in labels for tag in text)) train_sent, test_sent, train_label, test_label = train_test_split(sentences, labels, test_size=0.05) train_sent, val_sent, train_label, val_label = train_test_split(train_sent, train_label, test_size=0.15) print('FULL DATASET SENT: ' + str(len(sentences))) print('FULL DATASET LABELS: ' + str(len(labels))) print('Train sent size : ' + str(len(train_sent))) print('Train label size : ' + str(len(train_label))) print('Test sent size : ' + str(len(test_sent))) print('Test label size : ' + str(len(test_label))) print('Val sent size : ' + str(len(val_sent))) print('Val label size : ' + str(len(val_label))) print('Check Training dataset') dataset_checker(train_sent, train_label) print('Check Test dataset') dataset_checker(test_sent, test_label) print('Check Validation dataset') dataset_checker(val_sent, val_label) class ConllDataset(Dataset): def __init__(self, tokenizer, sentences, labels, max_len): self.len = len(sentences) self.sentences = sentences self.labels = labels self.tokenizer = tokenizer self.max_len = max_len def __len__(self): return self.len def __getitem__(self, index): sentence = self.sentences[index].strip().split() labels = self.labels[index] inputs = self.tokenizer.encode_plus( sentence, None, is_split_into_words=True, 
add_special_tokens=True, max_length=self.max_len, truncation=True, padding='max_length', return_offsets_mapping=True, return_token_type_ids=False, return_attention_mask=True ) ids = inputs['input_ids'] mask = inputs['attention_mask'] labels = [tag2index[label] for label in labels] # code based on https://huggingface.co/transformers/custom_datasets.html#tok-ner # create an empty array of -100 of length max_length encoded_labels = np.ones(len(inputs["offset_mapping"]), dtype=int) * -100 # set only labels whose first offset position is 0 and the second is not 0 i = 0 for idx, mapping in enumerate(inputs["offset_mapping"]): if mapping[0] == 0 and mapping[1] != 0: # overwrite label encoded_labels[idx] = labels[i] i += 1 inputs.pop('offset_mapping') return { 'ids': torch.tensor(ids).flatten(), 'mask': torch.tensor(mask).flatten(), 'tags': torch.tensor(encoded_labels) } class NERConllDataset(pl.LightningDataModule): def __init__(self, tokenizer, train_sent, train_label, val_sent, val_label, test_sent, test_label, max_len, batch_size): super().__init__() self.tokenizer = tokenizer self.train_sent = train_sent self.train_label = train_label self.val_sent = val_sent self.val_label = val_label self.test_sent = test_sent self.test_label = test_label self.max_len = max_len self.batch_size = batch_size def setup(self, stage=None): self.train_dataset = ConllDataset(self.tokenizer, self.train_sent, self.train_label, self.max_len) self.val_dataset = ConllDataset(self.tokenizer, self.val_sent, self.val_label, self.max_len) self.test_dataset = ConllDataset(self.tokenizer, self.test_sent, self.test_label, self.max_len) def eval_print(self, pos: int): for token, label in zip(self.tokenizer.convert_ids_to_tokens(self.train_dataset[pos]["ids"].numpy()), self.train_dataset[pos]["tags"].numpy()): print('{0:10} {1}'.format(token, label)) def train_dataloader(self): return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=False, num_workers=8) def val_dataloader(self): return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=8) def test_dataloader(self): return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=8) MAX_LEN = 256 BATCH_SIZE = 8 tokenizer = BertTokenizerFast.from_pretrained('bert-base-german-cased', do_lower_case=False) # save tokenizer tokenizer.save_pretrained("model/tokenizer/") data_module = NERConllDataset(tokenizer, train_sent, train_label, val_sent, val_label, test_sent, test_label, max_len=MAX_LEN, batch_size=BATCH_SIZE) data_module.setup() data_module.eval_print(pos=5) train = data_module.train_dataloader() val = data_module.val_dataloader() test = data_module.test_dataloader()
7,296
2,415
from .routes_test_fixture import app  # noqa


def test_stats(client):
    assert client.get("/stats/")._status_code == 200
124
45
import argparse
import logging
import pathlib
import re
import warnings

import numpy as np

import dirichlet
from sklearn.linear_model import LogisticRegression

from optimize import load_config, load_data, get_loglikelihood, get_mse

logger = logging.getLogger()


def main(data, src_dir):
    files = []
    for src_path in src_dir.iterdir():
        m = re.search(r"^(\d+?)\.npz$", str(src_path.name))
        if not m:
            continue
        idx = int(m.group(1))
        files.append((idx, src_path))
    files.sort()

    q_prev = None
    for idx, src_path in files:
        logger.info("load {}".format(src_path))
        d = np.load(str(src_path), allow_pickle=True)
        q = d["q"].flat[0]
        alpha = d["alpha"].flat[0]
        beta = d["beta"].flat[0]
        if q_prev:
            mse = get_mse(q_prev, q)
        else:
            mse = float("nan")
        q_prev = q
        ll = get_loglikelihood(q, alpha, beta, data, config)
        logger.info("{}-th iteration, mse={}, loglikelihood={}"
                    .format(idx + 1, mse, ll))


def parse_args(argv=None):
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--config-path", dest="config_path", default="config.toml",
        help="configuration file in the TOML format")
    parser.add_argument(
        "-v", "--verbose", help="verbose mode",
        action="store_true", default=False)
    args = parser.parse_args(argv)
    logger.info("arguments: {}".format(args))
    return args


if __name__ == "__main__":
    warnings.filterwarnings(
        "ignore", category=RuntimeWarning,
        message="divide by zero encountered in log",
        module="sklearn.linear_model")
    logger.info("start")
    args = parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    config_path = pathlib.Path(args.config_path)
    config = load_config(config_path)
    data = load_data(config)
    main(data, config["data"]["exp_dir"])
    logger.info("end")
2,017
662
from backend.common.decorators import cached_public
from backend.web.profiled_render import render_template


@cached_public(timeout=int(60 * 60 * 24 * 7))
def apidocs_trusted_v1() -> str:
    template_values = {
        "title": "Trusted APIv1",
        "swagger_url": "/swagger/api_trusted_v1.json",
    }
    return render_template("apidocs_swagger.html", template_values)


@cached_public(timeout=int(60 * 60 * 24 * 7))
def apidocs_v3() -> str:
    template_values = {
        "title": "APIv3",
        "swagger_url": "/swagger/api_v3.json",
    }
    return render_template("apidocs_swagger.html", template_values)
620
231
from tests.common.devices.base import AnsibleHostBase


class VMHost(AnsibleHostBase):
    """
    @summary: Class for VM server

    For running ansible module on VM server
    """

    def __init__(self, ansible_adhoc, hostname):
        AnsibleHostBase.__init__(self, ansible_adhoc, hostname)

    @property
    def external_port(self):
        if not hasattr(self, "_external_port"):
            vm = self.host.options["variable_manager"]
            im = self.host.options["inventory_manager"]
            hostvars = vm.get_vars(host=im.get_host(self.hostname), include_delegate_to=False)
            setattr(self, "_external_port", hostvars["external_port"])
        return getattr(self, "_external_port")
712
215
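As a rough illustration of how the VMHost wrapper above is meant to be used from a test, here is a minimal sketch; the `ansible_adhoc` fixture and the "vm_host_1" inventory hostname are assumptions for illustration, not part of the original file.

def test_external_port(ansible_adhoc):
    # Construct the wrapper for an inventory host and read the cached
    # `external_port` hostvar resolved by the property above.
    vm_host = VMHost(ansible_adhoc, "vm_host_1")  # hostname is hypothetical
    assert vm_host.external_port is not None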
"""All plugging called to check norm for a C file.""" __all__ = [ "columns", "comma", "function_line", "indent", "libc_func", "nested_branches", "number_function", "parenthesis", "preprocessor", "snake_case", "solo_space", "statements", "trailing_newline", "two_space", "operators", "newline_at_end_of_file", "subscriptor", "header", ] PREVIEW = [ "nb_params" ]
438
170
"""Control the sc2monitor.""" import asyncio import logging import math import time from datetime import datetime, timedelta from operator import itemgetter import aiohttp import sc2monitor.model as model from sc2monitor.handlers import SQLAlchemyHandler from sc2monitor.sc2api import SC2API logger = logging.getLogger(__name__) sql_logger = logging.getLogger() class Controller: """Control the sc2monitor.""" def __init__(self, **kwargs): """Init the sc2monitor.""" self.kwargs = kwargs self.sc2api = None self.db_session = None self.current_season = {} async def __aenter__(self): """Create a aiohttp and db session that will later be closed.""" headers = {'Accept-Encoding': 'gzip, deflate'} self.http_session = aiohttp.ClientSession(headers=headers) self.create_db_session() return self def create_db_session(self): """Create sqlalchemy database session.""" self.db_session = model.create_db_session( db=self.kwargs.pop('db', ''), encoding=self.kwargs.pop('encoding', '')) self.handler = SQLAlchemyHandler(self.db_session) self.handler.setLevel(logging.INFO) sql_logger.setLevel(logging.INFO) sql_logger.addHandler(self.handler) if len(self.kwargs) > 0: self.setup(**self.kwargs) self.sc2api = SC2API(self) self.cache_matches = self.get_config( 'cache_matches', default_value=1000) self.cache_logs = self.get_config( 'cache_logs', default_value=500) self.cache_runs = self.get_config( 'cache_runs', default_value=500) self.analyze_matches = self.get_config( 'analyze_matches', default_value=100) async def __aexit__(self, exc_type, exc, tb): """Close all aiohtto and database session.""" await self.http_session.close() self.db_session.commit() self.db_session.close() self.db_session = None def get_config(self, key, default_value=None, raise_key_error=True, return_object=False): """Read a config value from database.""" if default_value is not None: raise_key_error = False entry = self.db_session.query( model.Config).filter(model.Config.key == key).scalar() if not entry: if raise_key_error: raise ValueError(f'Unknown config key "{key}"') else: if return_object: return None else: return '' if default_value is None else default_value else: if return_object: return entry else: return entry.value def set_config(self, key, value, commit=True): """Save a config value to the database.""" entry = self.db_session.query( model.Config).filter(model.Config.key == key).scalar() if not entry: self.db_session.add(model.Config(key=key, value=value)) else: entry.value = value if commit: self.db_session.commit() def setup(self, **kwargs): """Set up the sc2monitor with api-key and api-secret.""" valid_keys = ['api_key', 'api_secret', 'cache_matches', 'analyze_matches'] for key, value in kwargs.items(): if key not in valid_keys: raise ValueError( f"Invalid configuration key '{key}'" f" (valid keys: {', '.join(valid_keys)})") self.set_config(key, value, commit=False) self.db_session.commit() if self.sc2api: self.sc2api.read_config() def add_player(self, url, race=model.Race['Random']): """Add a player by url to the sc2monitor.""" close_db = False if self.db_session is None: self.create_db_session() close_db = True server, realm, player_id = self.sc2api.parse_profile_url(url) count = self.db_session.query(model.Player).filter( model.Player.realm == realm, model.Player.player_id == player_id, model.Player.server == server).count() if count == 0: new_player = model.Player( realm=realm, player_id=player_id, server=server, race=race) self.db_session.add(new_player) self.db_session.commit() if close_db: self.db_session.close() 
self.db_session = None def remove_player(self, url): """Remove a player by url to the sc2monitor.""" close_db = False if self.db_session is None: self.create_db_session() close_db = True server, realm, player_id = self.sc2api.parse_profile_url(url) for player in self.db_session.query(model.Player).filter( model.Player.realm == realm, model.Player.player_id == player_id, model.Player.server == server).all(): self.db_session.delete(player) self.db_session.commit() if close_db: self.db_session.close() self.db_session = None async def update_season(self, server: model.Server): """Update info about the current season in the database.""" current_season = await self.sc2api.get_season(server) season = self.db_session.query(model.Season).\ filter(model.Season.server == server).\ order_by(model.Season.season_id.desc()).\ limit(1).scalar() if not season or current_season.season_id != season.season_id: self.db_session.add(current_season) self.db_session.commit() self.db_session.refresh(current_season) logger.info(f'Found a new ladder season: {current_season}') return current_season else: season.start = current_season.start season.end = current_season.end season.year = current_season.year season.number = current_season.number self.db_session.commit() return season async def update_seasons(self): """Update seasons info for all servers.""" servers = [server[0] for server in self.db_session.query( model.Player.server).distinct()] tasks = [] for server in servers: tasks.append(asyncio.create_task(self.update_season(server))) for season in await asyncio.gather(*tasks, return_exceptions=True): try: if isinstance(season, model.Season): self.current_season[season.server.id()] = season else: raise season except Exception: logger.exception( ('The following exception was' ' raised while updating seasons:')) async def query_player(self, player: model.Player): """Collect api data of a player.""" complete_data = [] for ladder in await self.sc2api.get_ladders(player): async for data in self.sc2api.get_ladder_data(player, ladder): current_player = await self.get_player_with_race(player, data) missing_games, new = self.count_missing_games( current_player, data) if missing_games['Total'] > 0: complete_data.append({'player': current_player, 'new_data': data, 'missing': missing_games, 'Win': 0, 'Loss': 0}) if len(complete_data) > 0: await self.process_player(complete_data, new) elif (not player.name or not isinstance(player.refreshed, datetime) or player.refreshed <= datetime.now() - timedelta(days=1)): await self.update_player_name(player) async def update_player_name(self, player: model.Player, name=''): """Update the name of a player from api data.""" if not name: metadata = await self.sc2api.get_metadata(player) name = metadata['name'] for tmp_player in self.db_session.query(model.Player).filter( model.Player.player_id == player.player_id, model.Player.realm == player.realm, model.Player.server == player.server, model.Player.name != name).all(): logger.info(f"{tmp_player.id}: Updating name to '{name}'") tmp_player.name = name self.db_session.commit() async def check_match_history(self, complete_data): """Check matches in match history and assign them to races.""" match_history = await self.sc2api.get_match_history( complete_data[0]['player']) for match in match_history: positive = [] for data_key, data in enumerate(complete_data): needed = data['missing'].get(match['result'].describe(), 0) > 0 try: datetime_check = (match['datetime'] - data['player'].last_played > timedelta(seconds=0)) except TypeError: datetime_check 
= True if (needed and datetime_check): positive.append(data_key) if len(positive) == 0: continue elif len(positive) >= 1: # Choose the race with most missing results. max_missing = 0 for key in positive: tmp_missing = complete_data[key][ 'missing'][match['result'].describe()] if tmp_missing > max_missing: data_key = key max_missing = tmp_missing complete_data[data_key][ 'missing'][match['result'].describe()] -= 1 complete_data[data_key][match['result'].describe()] += 1 try: complete_data[data_key]['games'].insert(0, match) except KeyError: complete_data[data_key]['games'] = [match] try: last_played = match['datetime'] except Exception: last_played = datetime.now() return last_played, len(match_history) async def process_player(self, complete_data, new=False): """Process the api data of a player.""" last_played, len_history \ = await self.check_match_history(complete_data) for race_player in complete_data: race_player['missing']['Total'] = race_player['missing']['Win'] + \ race_player['missing']['Loss'] if race_player['missing']['Total'] > 0: if new: logger.info( f"{race_player['player'].id}: Ignoring " f"{race_player['missing']['Total']} games missing in" f" match history ({len_history}) " "of new player.") else: self.guess_games(race_player, last_played) self.guess_mmr_changes(race_player) await self.update_player(race_player) self.calc_statistics(race_player['player']) async def update_player(self, complete_data): """Update database with new data of a player.""" player = complete_data['player'] new_data = complete_data['new_data'] player.mmr = new_data['mmr'] player.ladder_id = new_data['ladder_id'] player.league = new_data['league'] player.ladder_joined = new_data['joined'] player.wins = new_data['wins'] player.losses = new_data['losses'] player.last_active_season = self.get_season_id(player.server) if player.name != new_data['name']: await self.update_player_name( player, new_data['name']) if (not player.last_played or player.ladder_joined > player.last_played): player.last_played = player.ladder_joined self.db_session.commit() def calc_statistics(self, player: model.Player): """Recalculate player statistics.""" self.db_session.refresh(player) if not player.statistics: stats = model.Statistics(player=player) self.db_session.add(stats) self.db_session.commit() self.db_session.refresh(stats) else: stats = player.statistics matches = self.db_session.query(model.Match).filter( model.Match.player_id == player.id).order_by( model.Match.datetime.desc()).limit(self.analyze_matches).all() stats.games_available = len(matches) wma_mmr_denominator = stats.games_available * \ (stats.games_available + 1.0) / 2.0 stats.max_mmr = player.mmr stats.min_mmr = player.mmr stats.current_mmr = player.mmr wma_mmr = 0.0 expected_mmr_value = 0.0 expected_mmr_value2 = 0.0 current_wining_streak = 0 current_losing_streak = 0 for idx, match in enumerate(matches): if match.result == model.Result.Win: stats.wins += 1 current_wining_streak += 1 current_losing_streak = 0 if current_wining_streak > stats.longest_wining_streak: stats.longest_wining_streak = current_wining_streak elif match.result == model.Result.Loss: stats.losses += 1 current_losing_streak += 1 current_wining_streak = 0 if current_losing_streak > stats.longest_losing_streak: stats.longest_losing_streak = current_losing_streak if match.max_length <= 120: stats.instant_left_games += 1 if match.guess: stats.guessed_games += 1 mmr = match.mmr wma_mmr += mmr * \ (stats.games_available - idx) / wma_mmr_denominator if stats.max_mmr < mmr: stats.max_mmr = mmr 
if stats.min_mmr > mmr: stats.min_mmr = mmr expected_mmr_value += mmr / stats.games_available expected_mmr_value2 += mmr * (mmr / stats.games_available) if stats.games_available <= 1: stats.lr_mmr_slope = 0.0 stats.lr_mmr_intercept = expected_mmr_value else: ybar = expected_mmr_value xbar = -0.5 * (stats.games_available - 1) numerator = 0 denominator = 0 for x, match in enumerate(matches): x = -x y = match.mmr numerator += (x - xbar) * (y - ybar) denominator += (x - xbar) * (x - xbar) stats.lr_mmr_slope = numerator / denominator stats.lr_mmr_intercept = ybar - stats.lr_mmr_slope * xbar stats.sd_mmr = round( math.sqrt(expected_mmr_value2 - expected_mmr_value * expected_mmr_value)) # critical_idx = min(self.controller.config['no_critical_games'], # stats.games_available) - 1 # stats.critical_game_played = matches[critical_idx]["played"] stats.avg_mmr = expected_mmr_value stats.wma_mmr = wma_mmr self.db_session.commit() @classmethod def guess_games(cls, complete_data, last_played): """Guess games of a player if missing in match history.""" # If a player isn't new in the database and has played more # than 25 games since the last refresh or the match # history is not available for this player, there are # missing games in the match history. These are guessed to be very # close to the last game of the match history and in alternating # order. player = complete_data['player'] if 'games' not in complete_data: complete_data['games'] = [] logger.info(( "{}: {} missing games in match " + "history - more guessing!").format( player.id, complete_data['missing']['Total'])) try: delta = (last_played - player.last_played) / \ complete_data['missing']['Total'] except Exception: delta = timedelta(minutes=3) if delta > timedelta(minutes=3): delta = timedelta(minutes=3) if delta.total_seconds() <= 0: last_played = datetime.now() delta = timedelta(minutes=3) while (complete_data['missing']['Win'] > 0 or complete_data['missing']['Loss'] > 0): if complete_data['missing']['Win'] > 0: last_played = last_played - delta complete_data['games'].append( {'datetime': last_played, 'result': model.Result.Win}) complete_data['missing']['Win'] -= 1 complete_data['Win'] += 1 if (complete_data['missing']['Win'] > 0 and complete_data['missing']['Win'] > complete_data['missing']['Loss']): # If there are more wins than losses add # a second win before the next loss. last_played = last_played - delta complete_data['games'].append( {'datetime': last_played, 'result': model.Result.Win}) complete_data['missing']['Win'] -= 1 complete_data['Win'] += 1 if complete_data['missing']['Loss'] > 0: last_played = last_played - delta complete_data['games'].append( {'datetime': last_played, 'result': model.Result.Loss}) complete_data['missing']['Loss'] -= 1 complete_data['Loss'] += 1 if (complete_data['missing']['Loss'] > 0 and complete_data['missing']['Win'] < complete_data['missing']['Loss']): # If there are more losses than wins add second loss before # the next win. 
last_played = last_played - delta complete_data['games'].append( {'datetime': last_played, 'result': model.Result.Loss}) complete_data['missing']['Loss'] -= 1 complete_data['Loss'] += 1 def guess_mmr_changes(self, complete_data): """Guess MMR change of matches.""" MMR = complete_data['player'].mmr if MMR is None: MMR = 0 totalMMRchange = complete_data['new_data']['mmr'] - MMR wins = complete_data['Win'] losses = complete_data['Loss'] complete_data['games'] = sorted( complete_data.get('games', []), key=itemgetter('datetime')) logger.info('{}: Adding {} wins and {} losses!'.format( complete_data['player'].id, wins, losses)) if wins + losses <= 0: # No games to guess return # Estimate MMR change to be +/-21 for a win and losse, each adjusted # by the average deviation to achive the most recent MMR value. # Is 21 accurate? Yes, as the empirical avrage MMR change is 20.9016 # according to data gathered by this tool. if wins + losses == 1 and MMR != 0: MMRchange = abs(totalMMRchange) else: MMRchange = 21 if MMR == 0: totalMMRchange = MMRchange * (wins - losses) MMR = complete_data['new_data']['mmr'] - totalMMRchange while True: avgMMRadjustment = (totalMMRchange - MMRchange * (wins - losses)) / (wins + losses) # Make sure that sign of MMR change is correct if abs(avgMMRadjustment) >= MMRchange and MMRchange <= 50: MMRchange += 1 logger.info(f"{complete_data['player'].id}:" f" Adjusting avg. MMR change to {MMRchange}") else: break last_played = complete_data['player'].last_played previous_match = self.db_session.query(model.Match).\ filter(model.Match.player_id == complete_data['player'].id).\ order_by(model.Match.datetime.desc()).limit(1).scalar() # Warning breaks Travis CI # if not previous_match: # logger.warning('{}: No previous match found.'.format( # complete_data['player'].id)) for idx, match in enumerate(complete_data['games']): estMMRchange = round( MMRchange * match['result'].change() + avgMMRadjustment) MMR = MMR + estMMRchange try: delta = match['datetime'] - last_played except Exception: delta = timedelta(minutes=3) last_played = match['datetime'] max_length = delta.total_seconds() # Don't mark the most recent game as guess, as time and mmr value # should be accurate (but not mmr change). 
guess = not (idx + 1 == len(complete_data['games'])) alpha = 2.0 / (100.0 + 1.0) if previous_match and previous_match.ema_mmr > 0.0: delta = MMR - previous_match.ema_mmr ema_mmr = previous_match.ema_mmr + alpha * delta emvar_mmr = (1.0 - alpha) * \ (previous_match.emvar_mmr + alpha * delta * delta) else: ema_mmr = MMR emvar_mmr = 0.0 new_match = model.Match( player=complete_data['player'], result=match['result'], datetime=match['datetime'], mmr=MMR, mmr_change=estMMRchange, guess=guess, ema_mmr=ema_mmr, emvar_mmr=emvar_mmr, max_length=max_length) complete_data['player'].last_played = match['datetime'] self.db_session.add(new_match) previous_match = new_match self.db_session.commit() # Delete old matches: deletions = 0 for match in self.db_session.query(model.Match).\ filter(model.Match.player_id == complete_data['player'].id).\ order_by(model.Match.datetime.desc()).\ offset(self.cache_matches).all(): self.db_session.delete(match) deletions += 1 if deletions > 0: self.db_session.commit() logger.info(f"{complete_data['player'].id}: " f"{deletions} matches deleted!") def update_ema_mmr(self, player: model.Player): """Update the exponential moving avarage MMR of a player.""" matches = self.db_session.query(model.Match).\ filter(model.Match.player == player).\ order_by(model.Match.datetime.asc()).all() previous_match = None for match in matches: alpha = 2.0 / (100.0 + 1.0) if previous_match and previous_match.ema_mmr > 0.0: delta = match.mmr - previous_match.ema_mmr ema_mmr = previous_match.ema_mmr + alpha * delta emvar_mmr = (1.0 - alpha) * \ (previous_match.emvar_mmr + alpha * delta * delta) else: ema_mmr = match.mmr emvar_mmr = 0.0 match.ema_mmr = ema_mmr match.emvar_mmr = emvar_mmr previous_match = match self.db_session.commit() def get_season_id(self, server: model.Server): """Get the current season id on a server.""" return self.current_season[server.id()].season_id def count_missing_games(self, player: model.Player, data): """Count games of the api data that are not yet in the database.""" missing = {} missing['Win'] = data['wins'] missing['Loss'] = data['losses'] if player.last_active_season == 0 or player.mmr == 0: new = True elif (player.last_active_season < self.get_season_id(player.server)): # New Season! # TODO: Check if last season endpoint can be requested! # Only the legacy endpoints give the option to query the # previous season's data (given that the ladder ID is # known), e.g.: # https://eu.api.blizzard.com/sc2/legacy/ladder/2/209966 new = False elif (player.ladder_id != data['ladder_id'] or not player.ladder_joined or player.ladder_joined < data['joined'] or data['wins'] < player.wins or data['losses'] < player.losses): # Old season, but new ladder or same ladder, but rejoined if (data['wins'] < player.wins or data['losses'] < player.losses): # Forced ladder reset! logger.info('{}: Manual ladder reset to {}!'.format( player.id, data['ladder_id'])) new = True else: # Promotion?! missing['Win'] -= player.wins missing['Loss'] -= player.losses new = player.mmr == 0 if missing['Win'] + missing['Loss'] == 0: # Player was promoted/demoted to/from GM! 
promotion = data['league'] == model.League.Grandmaster demotion = player.league == model.League.Grandmaster if promotion == demotion: logger.warning( 'Logical error in GM promotion/' 'demotion detection.') player.ladder_joined = data['joined'] player.ladder_id = data['ladder_id'] player.league = data['league'] self.db_session.commit() logger.info(f"{player.id}: GM promotion/demotion.") else: if data['league'] < player.league: logger.warning('Logical error in promtion detection.') else: logger.info(f"{player.id}: Promotion " f"to ladder {data['ladder_id']}!") else: missing['Win'] -= player.wins missing['Loss'] -= player.losses new = player.mmr == 0 missing['Total'] = missing['Win'] + missing['Loss'] if (missing['Total']) > 0: logger.info( '{player}: {Total} new matches found!'.format( player=player.id, **missing)) return missing, new async def get_player_with_race(self, player, ladder_data): """Get the player with the race present in the ladder data.""" if player.ladder_id == 0: player.race = ladder_data['race'] correct_player = player elif player.race != ladder_data['race']: correct_player = self.db_session.query(model.Player).filter( model.Player.player_id == player.player_id, model.Player.realm == player.realm, model.Player.server == player.server, model.Player.race == ladder_data['race']).scalar() if not correct_player: correct_player = model.Player( player_id=player.player_id, realm=player.realm, server=player.server, race=ladder_data['race'], ladder_id=0) self.db_session.add(correct_player) self.db_session.commit() self.db_session.refresh(correct_player) else: correct_player = player return correct_player def delete_old_logs_and_runs(self): """ Delete old logs and runs from database.""" deletions = 0 for log_entry in self.db_session.query(model.Log).\ order_by(model.Log.datetime.desc()).\ offset(self.cache_logs).all(): self.db_session.delete(log_entry) deletions += 1 if deletions > 0: self.db_session.commit() logger.info(f"{deletions} old log entries were deleted!") deletions = 0 for run in self.db_session.query(model.Run).\ order_by(model.Run.datetime.desc()).\ offset(self.cache_runs).all(): self.db_session.delete(run) deletions += 1 if deletions > 0: self.db_session.commit() logger.info(f"{deletions} old run logs were deleted!") async def run(self): """Run the sc2monitor.""" start_time = time.time() logger.debug("Starting job...") await self.update_seasons() unique_group = (model.Player.player_id, model.Player.realm, model.Player.server) tasks = [] players = self.db_session.query(model.Player).distinct( *unique_group).group_by(*unique_group).all() for player in players: tasks.append(asyncio.create_task(self.query_player(player))) results = await asyncio.gather(*tasks, return_exceptions=True) for key, result in enumerate(results): try: if result is not None: raise result except Exception: logger.exception( 'The following exception was' f' raised while quering player {players[key].id}:') self.delete_old_logs_and_runs() duration = time.time() - start_time self.db_session.add( model.Run(duration=duration, api_requests=self.sc2api.request_count, api_retries=self.sc2api.retry_count, warnings=self.handler.warnings, errors=self.handler.errors)) self.db_session.commit() logger.debug(f"Finished job performing {self.sc2api.request_count}" f" api requests ({self.sc2api.retry_count} retries)" f" in {duration:.2f} seconds.")
30,862
8,614
from contextlib import contextmanager import errno import os import shutil import subprocess import tempfile class GitExeException(Exception): """ Thrown when the external git exe doesn't return a 0. """ pass class Repo(object): """ Interface to a git repo. Generally you should create one with `fleeting_repo`, which manages cleanup. """ def __init__(self, repo_root, git_exe=None): """ - `repo_root`: the (presumably temporary) dir in which to init and manage a git repo, e.g. '/tmp/Fkjwpa' - `git_exe`: the git exe to use. Should be a list, suitable for passing to subprocess.call. If None, defaults to ["/usr/bin/env", "git"], which should work fine on most systems. Calls "git init" to create a new repo, and therefore may raise a GitExeException. """ self.repo_root = _real_abs(repo_root) self.git_exe = git_exe if self.git_exe is None: self.git_exe = ["/usr/bin/env", "git"] self.do_git(["init"]) def commit(self, fnames_with_contents, commit_msg=None, author=None): """ Apply the sequential changes described in fnames_with_contents in the repo directory, and then commit the results. Like all methods in this class that invoke git, this can raise a GitExeException. - `fnames_with_contents`: a list of tuples of the form [(str(fname), str(contents)|None), ...] E.g. [('test.txt', 'this is a test\\n'), ('testdir_one/to_be_removed.txt', None)] Each fname should be relative to the git repo root. If fname contains directory paths, they will be created under the repo root. If the contents is not None, the string will be written to the file indicated by fname. If contents is None, the file will be git rm'ed. - `commmit_msg`: a utf-8 str() being the commit message to pass to the commit. If None, the commit message will be: "Test commit." - `author`: a utf-8 str() being the author for the commit, formatted according to git requirements as: "Author Name <author@email>" If author is None, the author will be: "Test Author <test@example.com>" """ self._write_add_rm(fnames_with_contents) commit_cmd = ["commit", "--author"] if author is None: author = "Test Author <test@example.com>" commit_cmd.append(author) if commit_msg is None: commit_msg = "Test commit." # Use a temp file to hold the commit message, in case it's # long or weird and wouldn't do well on the command line. with _temp_fname() as commit_fname: with open(commit_fname, 'wb') as fil: fil.write(commit_msg.encode('utf-8')) commit_cmd.append("-F") commit_cmd.append(commit_fname) self.do_git(commit_cmd) def get_path(self, fname): """ Return a full path to fname under the repository root. - `fname`: a file or directory name, relative to the repo root. >>> repo = Repo('/some/dir') >>> repo.get_path('testing/test.txt') '/some/dir/testing/test.txt' """ return os.path.join(self.repo_root, fname) def _write_add_rm(self, fnames_with_contents): """ Write contents for each fname in fname with contents, then git add it. For each fname with None contents, git rm it. """ for (fname, contents) in fnames_with_contents: fpath = os.path.join(self.repo_root, fname) _ensure_dir_for_fpath(fpath) if contents is None: self.do_git(["rm", fname]) else: with open(fpath, 'wb') as fil: fil.write(contents.encode('utf-8')) self.do_git(["add", fname]) def do_git(self, cmd): """ Run a git cmd in the repo root and don't worry about what it writes to stdout / stderr. If you need access to stdout / stderr, take a look at `yield_git`. Raises a GitExeException if git returns non-0. - `cmd`: a list of strings, suitable for passing to subprocess.call, e.g. 
["add", "some_file.txt"] """ with self.yield_git(cmd): pass def yield_git(self, cmd): """ Run git_exe in the repo root, passing cmd to it. Raises a GitExeException if git returns non-0. - `cmd`: a list of strings, suitable for passing to subprocess.call, e.g. ["add", "some_file.txt"] Yields a tuple of open (file(), file()), being the stdout and stderr written to by the git process, to support use like: >>> repo = Repo('/some/dir') >>> with repo.yield_git(["log", "--oneline"]) as git_output: ... git_out, git_err = git_output ... output = git_out.read() Note that means this is *not* what you want to do: >>> repo.yield_git(["log", "--oneline"]) as this will just yield the generator and run nothing. Use do_git for that. """ # this is ugly, creating a local function and calling it, but # otherwise @contextmanager ruins the args in interactive help @contextmanager def _yield_git(): whole_cmd = self.git_exe + cmd with _temp_dir() as tmp_dir: # don't bother using temp files for these, just a tmp dir, # as it will be recursively cleaned up. stdout_fname = os.path.join(tmp_dir, 'git_stdout') stderr_fname = os.path.join(tmp_dir, 'git_stderr') with open(stdout_fname, 'wb') as o_f: with open(stderr_fname, 'wb') as e_f: returncode = subprocess.call(whole_cmd, stdout=o_f, stderr=e_f, cwd=self.repo_root) if returncode != 0: raise GitExeException(_fmt_err(whole_cmd, returncode, stderr_fname, stdout_fname)) # if we get here, the git command returned 0. We re-open # and yield the stdout and stderr in case the caller wants # them. After the yield returns, the entire temp # directory will be cleaned up. with open(stdout_fname, 'rb') as git_out_f: with open(stderr_fname, 'rb') as git_err_f: yield (git_out_f, git_err_f) return _yield_git() def fleeting_repo(git_exe=None): """ Create a temp directory and yield a Repo built from it. The temp dir will be cleaned up afterwards. >>> with fleeting_repo() as repo: ... repo.do_git(["some", "git", "cmd"]) - `git_exe`: the git exe to use. Should be a list, suitable for passing to subprocess.call. If None, defaults to ["/usr/bin/env", "git"], which should work fine on most systems. """ # this is ugly, creating a local function and returning it, but # otherwise @contextmanager screws up the param names in the # interactive help. @contextmanager def _fleeting_repo(): with _temp_dir() as temp_dir: yield Repo(temp_dir, git_exe=git_exe) return _fleeting_repo() @contextmanager def _temp_fname(): """ Yield a temp file name, delete the file afterwards. """ ntf = tempfile.NamedTemporaryFile(delete=False) fname = ntf.name ntf.close() try: yield fname finally: try: os.unlink(fname) except OSError: # this means it was already unlinked pass @contextmanager def _temp_dir(): """ Make a temp dir and yield it, removing it afterwards. """ temp_dir = tempfile.mkdtemp() try: yield temp_dir finally: shutil.rmtree(temp_dir) def _ensure_dir_for_fpath(fpath): """ Ensure that the directory in which fpath will reside exists, recursively creating it otherwise. Adapted from: http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python """ dir_name = os.path.dirname(fpath) try: os.makedirs(dir_name) except OSError as exc: if exc.errno != errno.EEXIST or not os.path.isdir(dir_name): raise def _fmt_cmd_for_err(cmd): """ Join a git cmd, quoting individual segments first so that it's relatively easy to see if there were whitespace issues or not. 
""" return ' '.join(['"%s"' % seg for seg in cmd]) def _fmt_err(git_cmd, returncode, stderr_fname, stdout_fname): """ Format an error string for a failed git command, which includes the harvested stdout and stderr of the process. """ git_cmd_s = _fmt_cmd_for_err(git_cmd) err_msg = '\n'.join([_read_fname(stderr_fname), _read_fname(stdout_fname)]) return ("Git command %s returned %d with err log %s" % (git_cmd_s, returncode, err_msg)) def _read_fname(fname): """ Open fname, read the contents, return them. """ with open(fname, 'rb') as fil: return str(fil.read()) def _real_abs(path): """ Return a real, absolute path. """ return os.path.abspath(os.path.realpath(path))
9,867
2,914
r""" Copulas are a type dependency structure imposed on independent variables to achieve to more complex problems without adding too much complexity. To construct a copula one needs a copula transformation and the Copula wrapper:: >>> dist = chaospy.Iid(chaospy.Uniform(), 2) >>> copula = chaospy.Gumbel(dist, theta=1.5) The resulting copula is then ready for use:: >>> print(numpy.around(copula.sample(5), 4)) [[0.6536 0.115 0.9503 0.4822 0.8725] [0.6286 0.0654 0.96 0.5073 0.9705]] """ from .baseclass import Copula from .archimedean import Archimedean from .gumbel import Gumbel from .clayton import Clayton from .joe import Joe from .nataf import Nataf from .t_copula import TCopula
717
278
import pandas as pd from tabulate import tabulate from cadCAD.configuration import append_configs from cadCAD.configuration.utils import config_sim from cadCAD.engine import ExecutionMode, ExecutionContext, Executor from cadCAD import configs # Policies per Mechanism def p1m1(_g, step, sH, s): return {'policy1': 1} def p2m1(_g, step, sH, s): return {'policy2': 2} def p1m2(_g, step, sH, s): return {'policy1': 2, 'policy2': 2} def p2m2(_g, step, sH, s): return {'policy1': 2, 'policy2': 2} def p1m3(_g, step, sH, s): return {'policy1': 1, 'policy2': 2, 'policy3': 3} def p2m3(_g, step, sH, s): return {'policy1': 1, 'policy2': 2, 'policy3': 3} # Internal States per Mechanism def add(y, x): return lambda _g, step, sH, s, _input: (y, s[y] + x) def policies(_g, step, sH, s, _input): y = 'policies' x = _input return (y, x) # Genesis States genesis_states = { 'policies': {}, 's1': 0 } variables = { 's1': add('s1', 1), "policies": policies } psubs = { "m1": { "policies": { "p1": p1m1, "p2": p2m1 }, "variables": variables }, "m2": { "policies": { "p1": p1m2, "p2": p2m2 }, "variables": variables }, "m3": { "policies": { "p1": p1m3, "p2": p2m3 }, "variables": variables } } sim_config = config_sim( { "N": 1, "T": range(3), } ) append_configs( sim_configs=sim_config, initial_state=genesis_states, partial_state_update_blocks=psubs, policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b ) exec_mode = ExecutionMode() local_proc_ctx = ExecutionContext(context=exec_mode.local_mode) run = Executor(exec_context=local_proc_ctx, configs=configs) raw_result, tensor_field, sessions = run.execute() result = pd.DataFrame(raw_result) print() print("Tensor Field:") print(tabulate(tensor_field, headers='keys', tablefmt='psql')) print("Output:") print(tabulate(result, headers='keys', tablefmt='psql')) print()
2,112
826
from lib.imports import *
from lib.Constants import *
from lib.Environment import *
from lib.ML_Modules import ML_Nnet
from lib.parameter_server import Server as ParameterServer

'''
Single Agent
'''


class Agent:
    def __init__(self, name, parameter_server):
        # Use the imported name ML_Nnet (the original referenced ML_NNet,
        # which does not match the import above).
        self.brain = ML_Nnet(name, parameter_server)
        self.memory = []  # Memory of s,a,r,s_
        self.R = 0.  # Time discounted total reward.

    def act(self, s, available_action_list, eps_steps):
        # Decide action using epsilon greedy.
        if frames >= eps_steps:
            eps = EPS_END
        else:
            # Linearly interpolate
            eps = EPS_START + frames * (EPS_END - EPS_START) / eps_steps

        if random.random() < eps:
            # Randomly select action.
            if len(available_action_list) != 0:
                return available_action_list[random.randint(0, len(available_action_list) - 1)], None, None
            else:
                return 'no payload', None, None
        else:
            # Select action according to probability p[0] (greedy).
            s = np.array([s])
            p = self.brain.predict_p(s)
            if len(available_action_list) != 0:
                prob = []
                for action in available_action_list:
                    prob.append([action, p[0][action]])
                prob.sort(key=lambda s: -s[1])
                return prob[0][0], prob[0][1], prob
            else:
                return 'no payload', p[0][len(p[0]) - 1], None

    # Push s,a,r,s considering advantage to LocalBrain.
    def advantage_push_local_brain(self, s, a, r, s_):
        def get_sample(memory, n):
            s, a, _, _ = memory[0]
            _, _, _, s_ = memory[n - 1]
            return s, a, self.R, s_

        # Create a_cats (one-hot encoding)
        a_cats = np.zeros(NUM_ACTIONS)
        a_cats[a] = 1
        self.memory.append((s, a_cats, r, s_))

        # Calculate R using previous time discounted total reward.
        self.R = (self.R + r * GAMMA_N) / GAMMA

        # Input experience considering advantage to LocalBrain.
        if s_ is None:
            while len(self.memory) > 0:
                n = len(self.memory)
                s, a, r, s_ = get_sample(self.memory, n)
                self.brain.train_push(s, a, r, s_)
                self.R = (self.R - self.memory[0][2]) / GAMMA
                self.memory.pop(0)
            self.R = 0

        if len(self.memory) >= N_STEP_RETURN:
            s, a, r, s_ = get_sample(self.memory, N_STEP_RETURN)
            self.brain.train_push(s, a, r, s_)
            self.R = self.R - self.memory[0][2]
            self.memory.pop(0)
2,667
870
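To make the epsilon-greedy schedule in `Agent.act()` above easier to follow, the same linear annealing is shown below as a standalone function. `EPS_START`, `EPS_END` and the global `frames` counter come from the wildcard imports in the original code; the default values here are assumptions for illustration only.

def current_epsilon(frames, eps_steps, eps_start=1.0, eps_end=0.1):
    """Linearly anneal epsilon from eps_start to eps_end over eps_steps frames."""
    if frames >= eps_steps:
        return eps_end
    return eps_start + frames * (eps_end - eps_start) / eps_steps

# With the assumed defaults, halfway through the schedule the agent still
# explores 55% of the time: current_epsilon(5000, 10000) == 0.55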
import sys, os, os.path, time, shutil import commands from xml.etree.ElementTree import ElementTree from xml.etree.ElementTree import Element from xml.etree.ElementTree import SubElement as SE import metacomm.combinatorics.all_pairs2 all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2 totalNum = 0 failNum = 0 passNum = 0 Flag = "positive" ConstPath = os.getcwd() Start = time.strftime("%Y-%m-%d %H:%M:%S") ResultList = [] Direc = "./" def lineCount(fp): fileTmp = open(fp) count = len(fileTmp.readlines()) fileTmp.close() return count def genSelfcom(combIn, combOut): try: fp = open(combIn) comb = open(combOut, 'a+') comb.write(fp.read()) fp.close() comb.close() print "Update selfcomb.txt ---------------->O.k" return except Exception,e: print Exception,":",e print "Update selfcomb.txt ---------------->Error" sys.exit(1) def processMain(seedIn): try: print "Input Seed :" + os.path.basename(seedIn) print "Excute " + Flag + " cases ------------------------->Start" row = 0 sectionList = [] fp = open(seedIn) for line in fp: items = line.strip('\n\r').split(":") sectionName = items[0].split("--")[0] if sectionName not in sectionList: sectionList.append(sectionName) inputTxt = open(ConstPath + "/self/" + sectionName + "_input.txt", "a+") inputTxt.write(line) inputTxt.close() fp.close() for section in sectionList: caseline = "" counters = lineCount(ConstPath + "/self/" + section + "_input.txt") if counters >= 2: lists = [[] for m in range(counters)] inputTxt = open(ConstPath + "/self/" + section + "_input.txt") for line in inputTxt: items = line.strip('\n\r').split(":") values = items[1].split(",") lists[row].extend(values) row = row + 1 inputTxt.close() pairs = all_pairs(lists) outTxt = open(ConstPath + "/self/" + section + "_output.txt", 'w+') for e, v in enumerate(pairs): for c in range(len(v)): caseline = caseline + v[c] + "," outTxt.write(section + ":" + caseline[:-1] + "\n") outTxt.close() else: shutil.copy(ConstPath + "/self/" + section + "_input.txt", ConstPath + "/self/" + section + "_output.txt") #1*********XX_output.txt -> selfcomb.txt genSelfcom(ConstPath + "/self/" + section + "_output.txt", ConstPath + "/allpairs/selfcomb.txt") #2*********selfcomb.txt -> caseXX.txt genCases(ConstPath + "/allpairs/selfcomb.txt") #3*********output -> manifest.json caseExecute(ConstPath + "/allpairs/case_" + Flag + ".txt") print "Excute " + Flag + " cases ------------------------->O.K" except Exception,e: print Exception,":",e print "Excute " + Flag + " cases ------------------------->Error" sys.exit(1) def genCases(selfcomb): try: print "Genarate " + Flag + " case.txt file ---------------->Start" caseFile = open(ConstPath + "/allpairs/case_" + Flag + ".txt", 'w+') names = "" row = 0 counters = lineCount(selfcomb) lists = [[] for m in range(counters)] fobj = open(selfcomb) for line in fobj: items = line.strip('\n\r').split(":") names = names + items[0] + "\t" caseFile.write(names.rstrip("\t") + "\n") fobj.seek(0) for line in fobj: items = line.strip('\n\r').split(":") values = items[1:] lists[row].extend(":".join(values).split(",")) row = row + 1 fobj.close() pairs = all_pairs(lists) for e, v in enumerate(pairs): case = "" for c in range(0,len(v)): case = case + v[c] +"\t" caseFile.write(case.rstrip("\t") + "\n") caseFile.close() print "Genarate " + Flag + " case.txt file ---------------->O.k" except Exception,e: print "Generate " + Flag + " case.txt file ---------------->Error" print Exception,":",e sys.exit(1) def caseExecute(caseInput): try: global totalNum global failNum global passNum global 
ResultList global Flag global Direc print "Excute cases ------------------------->Start" caseIn = open(caseInput) line = caseIn.readline().strip('\n\r') sectionList = line.split("\t") os.chdir(ConstPath + "/tools/crosswalk") toolstatus = commands.getstatusoutput("python make_apk.py") if toolstatus[0] != 0: print "Crosswalk Binary is not ready, Please attention" sys.exit(1) for line in caseIn: totalNum = totalNum + 1 items = line.strip("\t\n").split("\t") command = "python make_apk.py " data = {"id":"","result":"","entry":"","start":"","end":"","set":""} data["start"] = time.strftime("%Y-%m-%d %H:%M:%S") for i in range(len(sectionList)): items[i] = items[i].replace("000", " ") command = command + "--" + sectionList[i] + "=" + '"' + items[i] + '" ' command = command.strip() if "target-dir" in sectionList: dirIndex = sectionList.index("target-dir") Direc = items[dirIndex] else: Direc = "./" nameIndex = sectionList.index("name") packIndex = sectionList.index("package") name = items[nameIndex] package = items[packIndex] print "##########" print "Case" + str(totalNum) + " :" print "Packer Tool Command:" print command print "Genarate APK ---------------->Start" packstatus = commands.getstatusoutput(command) if Flag == "negative": if packstatus[0] == 0: print "Genarate APK ---------------->O.K" result = "FAIL" failNum = failNum + 1 else: print "Genarate APK ---------------->Error" result = "PASS" passNum = passNum + 1 else: if packstatus[0] != 0: print "Genarate APK ---------------->Error" result = "FAIL" failNum = failNum + 1 else: print "Genarate APK ---------------->O.K" result = tryRunApp(name, package) data["end"] = time.strftime("%Y-%m-%d %H:%M:%S") data["id"] = "Case" + str(totalNum) data["result"] = result data["entry"] = command data["set"] = Flag ResultList.append(data) os.system("rm -rf " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk") print "Case Result :",result print "##########" caseIn.close() print "Excute cases ------------------------->O.K" except Exception,e: print Exception,":",e print "Execute case ---------------->Error" sys.exit(1) def tryRunApp(name, package): try: global failNum global passNum result = "PASS" message = "" print "Install APK ---------------->Start" instatus = commands.getstatusoutput("adb install " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk") if instatus[0] == 0: print "Install APK ---------------->O.K" print "Find Package in device ---------------->Start" pmstatus = commands.getstatusoutput("adb shell pm list packages |grep " + package) if pmstatus[0] == 0: print "Find Package in device ---------------->O.K" print "Launch APK ---------------->Start" launchstatus = commands.getstatusoutput("adb shell am start -n " + package + "/." 
+ name + "Acivity") if launchstatus[0] != 0: print "Launch APK ---------------->Error" os.system("adb uninstall " + package) failNum = failNum + 1 result = "FAIL" else: print "Launch APK ---------------->O.K" print "Stop APK ---------------->Start" stopstatus = commands.getstatusoutput("adb shell am force-stop " + package) if stopstatus[0] == 0: print "Stop APK ---------------->O.K" print "Uninstall APK ---------------->Start" unistatus = commands.getstatusoutput("adb uninstall " + package) if unistatus[0] == 0: print "Uninstall APK ---------------->O.K" passNum = passNum + 1 else: print "Uninstall APK ---------------->Error" failNum = failNum + 1 result = "FAIL" else: print "Stop APK ---------------->Error" failNum = failNum + 1 result = "FAIL" os.system("adb uninstall " + package) else: print "Find Package in device ---------------->Error" os.system("adb uninstall " + package) failNum = failNum + 1 result = "FAIL" else: print "Install APK ---------------->Error" result = "FAIL" failNum = failNum + 1 os.system("rm -rf " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk" + "&>/dev/null") return result except Exception,e: print Exception,":",e print "Try run webapp ---------------->Error" sys.exit(1) def updateXmlTitle(fp,title): fobj = open(fp, "r+") lines = fobj.readlines() fobj.seek(0) fobj.truncate() lines.insert(0,title) fobj.writelines(lines) fobj.close() def genResultXml(): try: tree = ElementTree() root = Element("test_definition") tree._setroot(root) env = Element("environment", {"build_id":"","device_id":"","device_name":"","host":"",\ "lite_version":"","manufacturer":"","resolution":"","screen_size":""}) root.append(env) #summary element summary = Element("summary", {"test_plan_name":""}) root.append(summary) tStart = SE(summary, "start_at") tEnd = SE(summary, "end_at") tStart.text = Start tEnd.text = End #suite element suite = SE(root, "suite", {"category":"Crosswalk_Packer_Tool","launcher":"xwalk",\ "name":"wrt-packertool-android-tests"}) setPositive = SE(suite, "set", {"name":"positive","set_debug_msg":""}) setNegitive = SE(suite, "set", {"name":"negitive","set_debug_msg":""}) #testcase element for case in ResultList: setElement = setPositive if case["set"] == "negative": setElement = setNegitive pur = "Check if packer tool work properly" testcase = SE(setElement, "testcase", {"component":"Crosswalk Packer Tool",\ "execution_type":"auto","id":case["id"],"purpose":pur,"result":case["result"]},) desc = SE(testcase, "description") entry = Element("test_script_entry") entry.text = "pack command: " + case["entry"].decode("utf-8") desc.append(entry) resultInfo = SE(testcase, "result_info") actualResult = SE(resultInfo, "actual_result") actualResult.text = case["result"] caseStart = SE(resultInfo, "start") caseStart.text = case["start"] caseEnd = SE(resultInfo, "end") caseEnd.text = case["end"] SE(resultInfo, "stdout") SE(resultInfo, "stderr") tree.write(ConstPath + "/report/wrt-packertool-android-tests.xml") updateXmlTitle(ConstPath + "/report/wrt-packertool-android-tests.xml",'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-stylesheet type="text/xsl" href="./style/testresult.xsl"?>\n<?xml-stylesheet type="text/xsl" href="testresult.xsl"?>\n') print "Generate test.result.xml file ------------------------->O.K" except Exception,e: print Exception,"Generate test.result.xml error:",e def genSummaryXml(): try: tree = ElementTree() root = Element("result_summary", {"plan_name":""}) tree._setroot(root) env = 
SE(root,"environment",{"build_id":"","cts_version":"","device_id":"","device_model":"","device_name":"","host":"","resolution":"","screen_size":"","manufacturer":""}) summary = SE(root, "summary") startTime = SE(summary, "start_at") endTime = SE(summary, "end_at") startTime.text = Start endTime.text = End suite = SE(root, "suite", {"name":"wrt-packertool-android-tests"}) total_case = SE(suite, "total_case") total_case.text = str(totalNum) pass_case = SE(suite, "pass_case") pass_case.text = str(passNum) pass_rate = SE(suite, "pass_rate") pass_rate.text = str(float(passNum) / totalNum * 100) fail_case = SE(suite, "fail_case") fail_case.text = str(failNum) fail_rate = SE(suite, "fail_rate") fail_rate.text = str(float(failNum) / totalNum * 100) SE(suite, "block_case") SE(suite, "block_rate") SE(suite, "na_case") SE(suite, "na_rate") tree.write(ConstPath + "/report/summary.xml") updateXmlTitle(ConstPath + "/report/summary.xml",'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-stylesheet type="text/xsl" href="./style/summary.xsl"?>\n') print "Generate summary.xml file ------------------------->O.K" except Exception,e: print Exception,"Generate summary.xml error:",e def devicesConform(): try: deviceList = os.popen("adb devices").readlines() if len(deviceList) == 2: print "No test devices connected, Please attention" sys.exit(1) except Exception,e: print Exception,"Device Connect error:",e sys.exit(1) def main(): try: global End global Flag os.system("rm -rf " + ConstPath + "/allpairs/negative/*~ &>/dev/null") os.system("rm -rf " + ConstPath + "/allpairs/positive/*~ &>/dev/null") os.system("rm -rf " + ConstPath + "/allpairs/positive/case*txt &>/dev/null") os.system("rm -rf " + ConstPath + "/tools/crosswalk/*apk &>/dev/null") os.system("rm -rf " + ConstPath + "/self &>/dev/null") os.system("mkdir -p " + ConstPath + "/self") devicesConform() #positive test for seed in os.listdir(ConstPath + "/allpairs/positive/"): os.system("rm -rf " + ConstPath + "/allpairs/selfcomb.txt &>/dev/null") os.system("rm -rf " + ConstPath + "/self &>/dev/null") os.system("mkdir -p " + ConstPath + "/self") processMain(ConstPath + "/allpairs/positive/" + seed) #negative case Flag = "negative" for seed in os.listdir(ConstPath + "/allpairs/negative/"): os.system("rm -rf " + ConstPath + "/allpairs/selfcomb.txt &>/dev/null") os.system("rm -rf " + ConstPath + "/self &>/dev/null") os.system("mkdir -p " + ConstPath + "/self") processMain(ConstPath + "/allpairs/negative/" + seed) End = time.strftime("%Y-%m-%d %H:%M:%S") genResultXml() genSummaryXml() except Exception,e: print Exception,":",e sys.exit(1) finally: os.system("rm -rf " + ConstPath + "/self &>/dev/null") if __name__=="__main__": main()
16,267
4,806
from .base import *


class Options(ListableApiResource, CreateableApiResource,
              UpdateableApiResource, DeleteableApiResource):
    resource_name = 'options'

    def values(self, id=None):
        if id:
            return OptionValues.get(self.id, id, connection=self._connection)
        else:
            return OptionValues.all(self.id, connection=self._connection)


class OptionValues(ListableApiSubResource, CreateableApiSubResource,
                   UpdateableApiSubResource, DeleteableApiSubResource):
    resource_name = 'values'
    parent_resource = 'options'
    parent_key = 'option_id'
584
161
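For orientation, the `values()` helper in the Options resource above dispatches to the OptionValues sub-resource depending on whether an id is passed. A rough usage sketch, assuming `option` is an Options instance already fetched through the client connection (how it is fetched is outside the file above):

# Hypothetical usage; only the calls defined in the classes above are used.
all_values = option.values()    # -> OptionValues.all(option.id, connection=...)
one_value = option.values(42)   # -> OptionValues.get(option.id, 42, connection=...)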
from .admision_carrer import *
from .admision_event import *
61
23
from datetime import datetime from typing import Optional from tea_client.models import TeaClientModel class Result(TeaClientModel): """Evaluation table row object. Attributes: id (str): Result id. best_rank (int, optional): Best rank of the row. metrics (dict): Dictionary of metrics and metric values. methodology (str): Methodology used for this implementation. uses_additional_data (bool): Does this evaluation uses additional data not provided in the dataset used for other evaluations. paper (str, optional): Paper describing the evaluation. best_metric (str, optional): Name of the best metric. evaluated_on (str, optional): Date of the result evaluation in YYYY-MM-DD format. external_source_url (str, option): The URL to the external source (eg competition) """ id: str best_rank: Optional[int] metrics: dict methodology: str uses_additional_data: bool paper: Optional[str] best_metric: Optional[str] evaluated_on: Optional[str] external_source_url: Optional[str] class _ResultRequest(TeaClientModel): def dict( self, *, include=None, exclude=None, by_alias: bool = False, skip_defaults: bool = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, ): d = super().dict( include=include, exclude=exclude, by_alias=by_alias, skip_defaults=skip_defaults, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, ) evaluated_on = d.get("evaluated_on") if isinstance(evaluated_on, datetime): d["evaluated_on"] = evaluated_on.strftime("%Y-%m-%d") return d class ResultCreateRequest(_ResultRequest): """Evaluation table row object. Attributes: metrics (dict): Dictionary of metrics and metric values. methodology (str): Methodology used for this implementation. uses_additional_data (bool, optional): Does this evaluation uses additional data not provided in the dataset used for other evaluations. paper (str, optional): Paper describing the evaluation. evaluated_on (str, optional): Date of the result evaluation: YYYY-MM-DD format external_source_url (str, option): The URL to the external source (eg competition) """ metrics: dict methodology: str uses_additional_data: Optional[bool] = False paper: Optional[str] = None evaluated_on: Optional[str] = None external_source_url: Optional[str] = None class ResultUpdateRequest(_ResultRequest): """Evaluation table row object. Attributes: metrics (dict, optional): Dictionary of metrics and metric values. methodology (str, optional): Methodology used for this implementation. uses_additional_data (bool, optional): Does this evaluation uses additional data not provided in the dataset used for other evaluations. paper (str, optional): Paper describing the evaluation. evaluated_on (datetime, optional): Date of the result evaluation: YYYY-MM-DD format external_source_url (str, option): The URL to the external source (eg competition) """ metrics: Optional[dict] = None methodology: Optional[str] = None uses_additional_data: Optional[bool] = None paper: Optional[str] = None evaluated_on: Optional[str] = None external_source_url: Optional[str] = None
3,649
963
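The `_ResultRequest.dict()` override above exists so that an `evaluated_on` value held as a datetime is serialized to the YYYY-MM-DD string form the rest of the module documents. A minimal sketch of building a request payload, assuming `TeaClientModel` accepts keyword construction like a pydantic model (the metric and methodology values are invented for illustration):

req = ResultCreateRequest(
    metrics={"Accuracy": 0.91},   # example values, not from the source
    methodology="baseline",       # hypothetical
    evaluated_on="2021-06-01",
)
payload = req.dict(exclude_none=True)
# If evaluated_on is ever stored as a datetime instead of a string, the
# dict() override above converts it to "YYYY-MM-DD" before serialization.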
from __future__ import absolute_import, division, print_function import pytest import telnyx TEST_RESOURCE_ID = "f1486bae-f067-460c-ad43-73a92848f902" class TestPortingOrder(object): def test_is_listable(self, request_mock): resources = telnyx.PortingOrder.list() request_mock.assert_requested("get", "/v2/porting_orders") assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.PortingOrder) def test_is_retrievable(self, request_mock): resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID) request_mock.assert_requested("get", "/v2/porting_orders/%s" % TEST_RESOURCE_ID) assert isinstance(resource, telnyx.PortingOrder) def test_is_creatable(self, request_mock): resource = telnyx.PortingOrder.create( phone_numbers=["13035550000", "13035550001", "13035550002"], ) request_mock.assert_requested("post", "/v2/porting_orders") assert isinstance(resource.data[0], telnyx.PortingOrder) def test_is_saveable(self, request_mock): porting_order = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID) porting_order.webhook_event = "https://update.com" porting_order.customer_reference = "updated name" resource = porting_order.save() request_mock.assert_requested( "patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.PortingOrder) assert resource is porting_order def test_is_modifiable(self, request_mock): resource = telnyx.PortingOrder.modify( TEST_RESOURCE_ID, webhook_event="https://update.com", customer_reference="updated name", ) request_mock.assert_requested( "patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.PortingOrder) def test_is_deletable(self, request_mock): resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID) resource.delete() request_mock.assert_requested( "delete", "/v2/porting_orders/%s" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.PortingOrder) def test_can_confirm_porting_order_action(self, request_mock): resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID) resource.confirm() request_mock.assert_requested( "post", "/v2/porting_orders/%s/actions/confirm" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.PortingOrder) @pytest.mark.skip(reason="PDF endpoint not supported by mock currently") def test_can_get_loa_template(self, request_mock): resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID) resource.loaTemplate() request_mock.assert_requested( "get", "/v2/porting_orders/%s/loa_template" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.PortingOrder) def test_can_list_porting_phone_numbers(self, request_mock): resource = telnyx.PortingPhoneNumber.list() request_mock.assert_requested("get", "/v2/porting_phone_numbers") assert isinstance(resource.data, list) assert isinstance(resource.data[0], telnyx.PortingPhoneNumber)
3,300
1,102
import numpy as np from garage.replay_buffer import SimpleReplayBuffer from tests.fixtures.envs.dummy import DummyDiscreteEnv class TestReplayBuffer: def test_add_transition_dtype(self): env = DummyDiscreteEnv() obs = env.reset() replay_buffer = SimpleReplayBuffer( env_spec=env, size_in_transitions=3, time_horizon=1) replay_buffer.add_transition( observation=obs, action=env.action_space.sample()) sample = replay_buffer.sample(1) sample_obs = sample['observation'] sample_action = sample['action'] assert sample_obs.dtype == env.observation_space.dtype assert sample_action.dtype == env.action_space.dtype def test_add_transitions_dtype(self): env = DummyDiscreteEnv() obs = env.reset() replay_buffer = SimpleReplayBuffer( env_spec=env, size_in_transitions=3, time_horizon=1) replay_buffer.add_transitions( observation=[obs], action=[env.action_space.sample()]) sample = replay_buffer.sample(1) sample_obs = sample['observation'] sample_action = sample['action'] assert sample_obs.dtype == env.observation_space.dtype assert sample_action.dtype == env.action_space.dtype def test_eviction_policy(self): env = DummyDiscreteEnv() obs = env.reset() replay_buffer = SimpleReplayBuffer( env_spec=env, size_in_transitions=3, time_horizon=1) replay_buffer.add_transitions(observation=[obs, obs], action=[1, 2]) assert not replay_buffer.full replay_buffer.add_transitions(observation=[obs, obs], action=[3, 4]) assert replay_buffer.full replay_buffer.add_transitions(observation=[obs, obs], action=[5, 6]) replay_buffer.add_transitions(observation=[obs, obs], action=[7, 8]) assert np.array_equal(replay_buffer._buffer['action'], [[7], [8], [6]]) assert replay_buffer.n_transitions_stored == 3
2,002
624
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import unittest

import paddle
import paddle.nn as nn


class SimpleReturnLayer(nn.Layer):
    def forward(self, x):
        return x


class AddAttrLayer(nn.Layer):
    def __init__(self):
        super(AddAttrLayer, self).__init__()
        self.attr = None

    def forward(self, x):
        out = x + self.attr
        return out


class IsInstanceLayer(nn.Layer):
    def __init__(self, layer):
        super(IsInstanceLayer, self).__init__()
        self.layer = layer

    @paddle.jit.to_static
    def forward(self, x):
        if isinstance(self.layer, (AddAttrLayer, )):
            self.layer.attr = x
        res = self.layer(x)
        return res


class SequentialLayer(nn.Layer):
    def __init__(self, layers):
        super(SequentialLayer, self).__init__()
        self.layers = nn.LayerList(layers)

    @paddle.jit.to_static
    def forward(self, x):
        res = x
        for layer in self.layers:
            if isinstance(layer, AddAttrLayer):
                layer.attr = x
            res = layer(res)
        return res


def train(model, to_static):
    prog_trans = paddle.jit.ProgramTranslator.get_instance()
    prog_trans.enable(to_static)

    x = paddle.ones(shape=[2, 3], dtype='int32')
    out = model(x)

    return out.numpy()


class TestIsinstance(unittest.TestCase):
    def test_isinstance_simple_return_layer(self):
        model = IsInstanceLayer(SimpleReturnLayer())
        self._test_model(model)

    def test_isinstance_add_attr_layer(self):
        model = IsInstanceLayer(AddAttrLayer())
        self._test_model(model)

    def test_sequential_layer(self):
        layers = []
        for i in range(5):
            layers.append(SimpleReturnLayer())
            layers.append(AddAttrLayer())
        model = SequentialLayer(layers)
        self._test_model(model)

    def _test_model(self, model):
        st_out = train(model, to_static=True)
        dy_out = train(model, to_static=False)
        self.assertTrue(
            np.allclose(dy_out, st_out),
            msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))


if __name__ == "__main__":
    unittest.main()
3,238
1,009
""" The task of the registry is to register complex objects by an keyword/alias that you easily can build and instanciate these objects with a single keyword. This allows it in a easy manner to parse a yaml configuration file and use these values to instanciate the available objects. """ import tensorflow as tf from importlib import import_module from dlf.core.preprocessing import PreprocessingMethod from dlf.core.callback import Callback from dlf.core.evaluator import Evaluator FRAMEWORK_CALLBACKS = {} FRAMEWORK_DATA_GENERATORS = {} FRAMEWORK_LOSSES = {} FRAMEWORK_METRICS = {} FRAMEWORK_MODELS = {} FRAMEWORK_PREPROCESSING_METHODS = {} FRAMEWORK_EVALUATORS = {} FRAMEWORK_ACTIVE_EXPERIMENT = None def import_framework_modules(module_folder, package): """ Auto import of all files in module folder # Note This is necessary for the register_* decorator to work properly. # Args module_folder: str.path to folder where files to import are located package: str. module path e.g. dlf.metrics """ # auto import all files and register metrics # Path(__file__).parent for module in module_folder.iterdir(): if module.name == '__init__.py' or module.suffix != '.py': continue module = f'{package}.{module.stem}' import_module(module) def register_preprocessing_method(*names): """Decorator to register a preprocessing object to the framework # Args *names: Tuple(str). List of aliases for this preprocessing object # Raises ValueError: If the parent of this method is not of type [PreprocessingMethod](/dlf/core/preprocessing) """ def decorator(cls): if not issubclass(cls, PreprocessingMethod): raise ValueError("invalid base class for class {}".format(cls)) for name in names: FRAMEWORK_PREPROCESSING_METHODS[name] = cls return cls return decorator def register_metric(*names): """Decorator to register a custom metric to the framework # Args *names: Tuple(str). List of aliases for this metric # Raises ValueError: If the parent of this method is not of type `tf.keras.metrics.Metrics` ValueError: If a given alias is not valid """ def decorator(cls): if not issubclass(cls, tf.keras.metrics.Metric): raise ValueError("invalid base class for class {}".format(cls)) FRAMEWORK_METRICS[cls.__name__] = cls # alias for name in names: if not isinstance(name, str): raise ValueError( "Invalid type of name '{}' for register_metric decorator".format(name)) FRAMEWORK_METRICS[name] = cls return cls return decorator def register_loss(*names): """Decorator to register a custom loss to the framework # Args *names: Tuple(str) List of aliases for this loss # Raises Exception: If object is not subclass of `tf.keras.losses.Loss` ValueError: If a given alias is not valid """ def decorator(cls): if not issubclass(cls, tf.keras.losses.Loss): raise Exception("invalid base class for class {}".format(cls)) FRAMEWORK_LOSSES[cls.__name__] = cls # alias for name in names: if not isinstance(name, str): raise ValueError( "Invalid type of name '{}' for register_loss decorator".format(name)) FRAMEWORK_LOSSES[name] = cls return cls return decorator def register_data_generator(*names): """Decorator to register a data reader to the framework # Args *names: Tuple(str). 
List of aliases for this data reader # Raises ValueError: If a given alias is not valid """ def decorator(cls): for name in names: if not isinstance(name, str): raise ValueError( "Invalid type of name '{}' for register_data_generator decorator".format(name)) FRAMEWORK_DATA_GENERATORS[name] = cls return cls return decorator def register_model(*names): """Decorator to register a custom model to the framework # Args *names: Tuple(str). List of aliases for this model # Raises ValueError: If a given alias is not valid """ def decorator(cls): for name in names: if not isinstance(name, str): raise ValueError( "Invalid type of name '{}' for register_model decorator".format(name)) FRAMEWORK_MODELS[name] = cls return cls return decorator def register_callback(*names): """Decorator to register a callback to the framework # Args *names: Tuple(str). List of aliases for this callback # Raises ValueError: If a given alias is not valid """ def decorator(cls): for name in names: if not issubclass(cls, Callback): raise ValueError( "Invalid type of name '{}' for register_callback decorator".format(name)) FRAMEWORK_CALLBACKS[name] = cls return cls return decorator def register_evaluator(*names): """Decorator to register an evaluator to the framework # Args *names: Tuple(str). List of aliases for this evaluator # Raises ValueError: If a given alias is not valid """ def decorator(cls): for name in names: if not issubclass(cls, Evaluator): raise ValueError( "Invalid type of name '{}' for register_evaluator decorator".format(name)) FRAMEWORK_EVALUATORS[name] = cls return cls return decorator def set_active_experiment(exp): """Sets active experiment to global state and allows all modules to access it # Arguments exp: dlf.core.Experiment. Active experiment """ global FRAMEWORK_ACTIVE_EXPERIMENT FRAMEWORK_ACTIVE_EXPERIMENT = exp def get_active_experiment(): """Gets the current, active, experiment # Returns dlf.core.Experiment. Active experiment """ global FRAMEWORK_ACTIVE_EXPERIMENT return FRAMEWORK_ACTIVE_EXPERIMENT
6,244
1,852
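Since the registries above are populated through decorators, a new component only needs to subclass the expected base class and annotate itself. A minimal sketch, assuming the registry module is importable as dlf.core.registry (the module path, alias, and metric are invented for illustration):

import tensorflow as tf

from dlf.core.registry import FRAMEWORK_METRICS, register_metric  # path assumed


@register_metric("dummy_mean")  # hypothetical alias
class DummyMean(tf.keras.metrics.Metric):
    """Toy metric that averages the predictions it sees."""

    def __init__(self, name="dummy_mean", **kwargs):
        super().__init__(name=name, **kwargs)
        self.total = self.add_weight(name="total", initializer="zeros")
        self.count = self.add_weight(name="count", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        self.total.assign_add(tf.reduce_sum(tf.cast(y_pred, tf.float32)))
        self.count.assign_add(tf.cast(tf.size(y_pred), tf.float32))

    def result(self):
        return self.total / tf.maximum(self.count, 1.0)


# The class is now reachable by its class name and by the alias,
# e.g. when instantiating components from a parsed YAML config.
metric = FRAMEWORK_METRICS["dummy_mean"]()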
import os import json import rmse TUNING_FILE = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/predict/tuning.json" CORATED_LIMIT = [3, 5, 7, 10] LONELY_THRESHOLD = [2, 3, 5, 7] N_NEIGHBORS_ITEMBASED = [5, 7, 10, 12] WEIGHT = [0.2, 0.4, 0.6, 0.8] def writeRes(c, l, n, w, res): with open(TUNING_FILE, 'a', encoding='utf-8') as fp: x = { 'c': c, 'l': l, 'n': n, 'w': w, 'rmse': res } x_j = json.dumps(x) fp.write(x_j) fp.write('\n') if os.path.exists(TUNING_FILE): os.remove(TUNING_FILE) for c in CORATED_LIMIT: for l in LONELY_THRESHOLD: train_comm = "spark-submit train.py %d %d %d" % (c, l, l) os.system(train_comm) for n in N_NEIGHBORS_ITEMBASED: for w in WEIGHT: test_comm = "spark-submit predict.py %d %f" % (n, w) os.system(test_comm) res = rmse.getRmse() writeRes(c, l, n, w, res)
1,017
453
from __future__ import with_statement import inspect from random import choice, randint import sys from whoosh import fields, query, scoring from whoosh.compat import u, xrange, permutations from whoosh.filedb.filestore import RamStorage def _weighting_classes(ignore): # Get all the subclasses of Weighting in whoosh.scoring return [c for _, c in inspect.getmembers(scoring, inspect.isclass) if scoring.Weighting in c.__bases__ and c not in ignore] def test_all(): domain = [u("alfa"), u("bravo"), u("charlie"), u("delta"), u("echo"), u("foxtrot")] schema = fields.Schema(text=fields.TEXT) storage = RamStorage() ix = storage.create_index(schema) w = ix.writer() for _ in xrange(100): w.add_document(text=u(" ").join(choice(domain) for _ in xrange(randint(10, 20)))) w.commit() # List ABCs that should not be tested abcs = () # provide initializer arguments for any weighting classes that require them init_args = {"MultiWeighting": ([scoring.BM25F()], {"text": scoring.Frequency()}), "ReverseWeighting": ([scoring.BM25F()], {})} for wclass in _weighting_classes(abcs): try: if wclass.__name__ in init_args: args, kwargs = init_args[wclass.__name__] weighting = wclass(*args, **kwargs) else: weighting = wclass() except TypeError: e = sys.exc_info()[1] raise TypeError("Error instantiating %r: %s" % (wclass, e)) with ix.searcher(weighting=weighting) as s: try: for word in domain: s.search(query.Term("text", word)) except Exception: e = sys.exc_info()[1] e.msg = "Error searching with %r: %s" % (wclass, e) raise def test_compatibility(): from whoosh.scoring import Weighting # This is the old way of doing a custom weighting model, check that # it's still supported... class LegacyWeighting(Weighting): use_final = True def score(self, searcher, fieldname, text, docnum, weight): return weight + 0.5 def final(self, searcher, docnum, score): return score * 1.5 schema = fields.Schema(text=fields.TEXT) ix = RamStorage().create_index(schema) w = ix.writer() domain = "alfa bravo charlie delta".split() for ls in permutations(domain, 3): w.add_document(text=u(" ").join(ls)) w.commit() s = ix.searcher(weighting=LegacyWeighting()) r = s.search(query.Term("text", u("bravo"))) assert r.score(0) == 2.25
2,736
840
# cubeProperties.py
# A program to calculate the volume and surface area of a cube.

"""Same as Chapter 10 Programming Exercise 9, but for a cube. The constructor
should accept the length of a side as a parameter."""

from cubeClass import Cube


def main():
    edge = 0
    while edge <= 0:
        try:
            edge = float(input("Please enter the edge length of the cube: "))
            if edge <= 0:
                print("You have to enter a number greater than zero.")
        except (SyntaxError, NameError, TypeError, ValueError):
            print("You have to enter a number greater than zero.")
            continue

    cube = Cube(edge)
    volume = cube.volume()
    surfaceArea = cube.surfaceArea()

    if volume == 1:
        print("\nThe volume of the cube is {0:.2f} unit.".format(volume))
    else:
        print("\nThe volume of the cube is: {0:.2f} units.".format(volume))

    if surfaceArea == 1:
        print("\nThe surface area of the cube is: {0:.2f} unit."
              .format(surfaceArea))
    else:
        print("\nThe surface area of the cube is: {0:.2f} units."
              .format(surfaceArea))


main()
1,133
340
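cubeClass.Cube itself is not included in this snippet; a minimal sketch of the interface the program above appears to expect (the side stored by the constructor, plus volume() and surfaceArea() accessors) could look like this:

# cubeClass.py (sketch of the interface used by cubeProperties.py)


class Cube:
    def __init__(self, side):
        self.side = side

    def volume(self):
        # V = s^3
        return self.side ** 3

    def surfaceArea(self):
        # A = 6 * s^2
        return 6 * self.side ** 2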
from __future__ import unicode_literals from datetime import datetime, date from django.db import models from django.contrib.auth.models import User from django.db.models.aggregates import Sum AGE_LIMIT = 7 # 7 days age limit class PriceTemplate(models.Model): name = models.CharField(max_length=200) def __unicode__(self): return self.name class Product(models.Model): name = models.CharField(max_length=200) rate = models.IntegerField('Default price of product') template = models.ManyToManyField( PriceTemplate, through='ProductPriceTemplate', through_fields=('product', 'template')) def __unicode__(self): return self.name @property def quantity(self): return self.product_sales.aggregate( Sum('quantity'))['quantity__sum'] or 0 @property def amount(self): return self.product_sales.aggregate( Sum('amount'))['amount__sum'] or 0 class ProductPriceTemplate(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) template = models.ForeignKey(PriceTemplate, on_delete=models.CASCADE) price = models.IntegerField() def __unicode__(self): return "{0} - {1}".format(self.product, self.template) class BatchSize(models.Model): name = models.CharField(max_length=100) quantity = models.PositiveIntegerField() def __unicode__(self): return self.name class Rep(models.Model): name = models.CharField(max_length=200) user = models.OneToOneField(User, on_delete=models.CASCADE) supervisor = models.ForeignKey('Rep', null=True, blank=True) last_activity = models.DateTimeField(blank=True, null=True) def __unicode__(self): return self.name @property def is_old(self): if not self.last_activity: return True if (date.today() - self.last_activity.date()) <= AGE_LIMIT: return False return True class Customer(models.Model): HOSPITAL = 0 INSTITUTION = 1 PHARMACY = 2 WHOLESELLER = 3 HEALTH_PERSONNEL = 4 CUSTOMER_TYPE = ( (0, 'Hospital'), (1, 'Institution'), (2, 'Pharmacy'), (3, 'Wholeseller'), (4, 'Health Personnel')) name = models.CharField(max_length=200) address = models.TextField(blank=True) contact_person = models.CharField(max_length=200, blank=True) phone1 = models.CharField(max_length=20, blank=True) email = models.EmailField(blank=True, null=True) customer_type = models.PositiveIntegerField(choices=CUSTOMER_TYPE) price_template = models.ForeignKey(PriceTemplate, null=True, blank=True) def __unicode__(self): return self.name @property def balance(self): sales = Sale.objects.filter(invoice__customer=self).aggregate( Sum('amount'))['amount__sum'] or 0 #sales = self.customer_sales.aggregate( # Sum('amount'))['amount__sum'] or 0 paymt = self.customer_payments.aggregate( Sum('amount'))['amount__sum'] or 0 return sales - paymt class Invoice(models.Model): ACTUAL_SALES = 0 SOR = 1 SAMPLES = 2 INVOICE_TYPES = ((0, 'Actual Sales'), (1, 'SOR'), (2, 'Samples')) rep = models.ForeignKey(Rep, related_name='rep_invoices') customer = models.ForeignKey(Customer, related_name='customer_invoices') invoice_no = models.CharField(max_length=200, blank=True) invoice_date = models.DateField(blank=True, null=True) sales_type = models.PositiveIntegerField(choices=INVOICE_TYPES) recorded_date = models.DateTimeField(default=datetime.now) def __unicode__(self): return unicode(self.invoice_no) @property def amount(self): return sum([sale.amount for sale in self.invoice_sales.all()]) class Sale(models.Model): invoice = models.ForeignKey( Invoice, related_name='invoice_sales', null=True) product = models.ForeignKey(Product, related_name='product_sales') batch_size = models.ForeignKey(BatchSize, null=True) quantity = 
models.PositiveIntegerField() amount = models.IntegerField() recorded_date = models.DateTimeField(default=datetime.now) def __unicode__(self): return unicode(self.invoice) @property def rate(self): templ = self.invoice.customer.price_template if not templ: price = self.product.rate else: try: prod_price_templ = ProductPriceTemplate.objects.get( product=self.product, template=templ) except ProductPriceTemplate.DoesNotExist: price = self.product.rate else: price = prod_price_templ.price return price class Payment(models.Model): EPAYMENT = 0 CHEQUE = 1 TELLER = 2 MODE_OF_PAYMENT = ((0, 'E-Payment'), (1, 'Cheque'), (2, 'Teller')) rep = models.ForeignKey(Rep, related_name='rep_payments') customer = models.ForeignKey(Customer, related_name='customer_payments') amount = models.PositiveIntegerField() receipt_no = models.CharField(max_length=50, blank=True) payment_date = models.DateField() receipt_date = models.DateField() recorded_date = models.DateTimeField(default=datetime.now) balance = models.IntegerField() bank_of_payment = models.CharField(max_length=200, blank=True) mode_of_payment = models.PositiveIntegerField(choices=MODE_OF_PAYMENT) teller_number = models.CharField(max_length=50, blank=True) teller_date = models.DateField(blank=True, null=True) cheque_date = models.DateField(blank=True, null=True) remarks = models.TextField(blank=True) def __unicode__(self): return unicode(self.customer)
5,768
1,831
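The Sale.rate property above resolves a unit price by first looking for a ProductPriceTemplate row matching the customer's price template and falling back to Product.rate. A minimal sketch of exercising that logic in a Django shell, assuming the models above are installed in a configured project; all names and values are invented:

from django.contrib.auth.models import User

template = PriceTemplate.objects.create(name="Hospital pricing")
product = Product.objects.create(name="Paracetamol 500mg", rate=100)
ProductPriceTemplate.objects.create(product=product, template=template, price=90)

user = User.objects.create_user("jdoe")
rep = Rep.objects.create(name="Jane Doe", user=user)
customer = Customer.objects.create(
    name="General Hospital",
    customer_type=Customer.HOSPITAL,
    price_template=template,
)
invoice = Invoice.objects.create(
    rep=rep, customer=customer, sales_type=Invoice.ACTUAL_SALES,
)
sale = Sale.objects.create(invoice=invoice, product=product, quantity=2, amount=180)

assert sale.rate == 90  # the template price wins over the default rate of 100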
from __future__ import annotations import json import logging from contextlib import AsyncExitStack, closing from datetime import datetime, timedelta, timezone from json import JSONDecodeError from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union from uuid import UUID import sniffio from anyio import TASK_STATUS_IGNORED, create_task_group, sleep from attr import asdict from sqlalchemy import ( Column, DateTime, Integer, LargeBinary, MetaData, Table, Unicode, and_, bindparam, func, or_, select) from sqlalchemy.engine import URL from sqlalchemy.exc import CompileError, IntegrityError from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine from sqlalchemy.ext.asyncio.engine import AsyncConnectable from sqlalchemy.sql.ddl import DropTable from ... import events as events_module from ...abc import AsyncDataStore, Job, Schedule, Serializer from ...events import ( AsyncEventHub, DataStoreEvent, Event, JobAdded, JobDeserializationFailed, ScheduleAdded, ScheduleDeserializationFailed, ScheduleRemoved, ScheduleUpdated, SubscriptionToken) from ...exceptions import ConflictingIdError, SerializationError from ...policies import ConflictPolicy from ...serializers.pickle import PickleSerializer from ...util import reentrant logger = logging.getLogger(__name__) def default_json_handler(obj: Any) -> Any: if isinstance(obj, datetime): return obj.timestamp() elif isinstance(obj, UUID): return obj.hex elif isinstance(obj, frozenset): return list(obj) raise TypeError(f'Cannot JSON encode type {type(obj)}') def json_object_hook(obj: Dict[str, Any]) -> Any: for key, value in obj.items(): if key == 'timestamp': obj[key] = datetime.fromtimestamp(value, timezone.utc) elif key == 'job_id': obj[key] = UUID(value) elif key == 'tags': obj[key] = frozenset(value) return obj @reentrant class SQLAlchemyDataStore(AsyncDataStore): _metadata = MetaData() t_metadata = Table( 'metadata', _metadata, Column('schema_version', Integer, nullable=False) ) t_schedules = Table( 'schedules', _metadata, Column('id', Unicode, primary_key=True), Column('task_id', Unicode, nullable=False), Column('serialized_data', LargeBinary, nullable=False), Column('next_fire_time', DateTime(timezone=True), index=True), Column('acquired_by', Unicode), Column('acquired_until', DateTime(timezone=True)) ) t_jobs = Table( 'jobs', _metadata, Column('id', Unicode(32), primary_key=True), Column('task_id', Unicode, nullable=False, index=True), Column('serialized_data', LargeBinary, nullable=False), Column('created_at', DateTime(timezone=True), nullable=False), Column('acquired_by', Unicode), Column('acquired_until', DateTime(timezone=True)) ) def __init__(self, bind: AsyncConnectable, *, schema: Optional[str] = None, serializer: Optional[Serializer] = None, lock_expiration_delay: float = 30, max_poll_time: Optional[float] = 1, max_idle_time: float = 60, start_from_scratch: bool = False, notify_channel: Optional[str] = 'apscheduler'): self.bind = bind self.schema = schema self.serializer = serializer or PickleSerializer() self.lock_expiration_delay = lock_expiration_delay self.max_poll_time = max_poll_time self.max_idle_time = max_idle_time self.start_from_scratch = start_from_scratch self._logger = logging.getLogger(__name__) self._exit_stack = AsyncExitStack() self._events = AsyncEventHub() # Find out if the dialect supports RETURNING statement = self.t_jobs.update().returning(self.t_schedules.c.id) try: statement.compile(bind=self.bind) except CompileError: self._supports_update_returning = False else: 
self._supports_update_returning = True self.notify_channel = notify_channel if notify_channel: if self.bind.dialect.name != 'postgresql' or self.bind.dialect.driver != 'asyncpg': self.notify_channel = None @classmethod def from_url(cls, url: Union[str, URL], **options) -> 'SQLAlchemyDataStore': engine = create_async_engine(url, future=True) return cls(engine, **options) async def __aenter__(self): asynclib = sniffio.current_async_library() or '(unknown)' if asynclib != 'asyncio': raise RuntimeError(f'This data store requires asyncio; currently running: {asynclib}') # Verify that the schema is in place async with self.bind.begin() as conn: if self.start_from_scratch: for table in self._metadata.sorted_tables: await conn.execute(DropTable(table, if_exists=True)) await conn.run_sync(self._metadata.create_all) query = select(self.t_metadata.c.schema_version) result = await conn.execute(query) version = result.scalar() if version is None: await conn.execute(self.t_metadata.insert(values={'schema_version': 1})) elif version > 1: raise RuntimeError(f'Unexpected schema version ({version}); ' f'only version 1 is supported by this version of APScheduler') await self._exit_stack.enter_async_context(self._events) if self.notify_channel: task_group = create_task_group() await self._exit_stack.enter_async_context(task_group) await task_group.start(self._listen_notifications) self._exit_stack.callback(task_group.cancel_scope.cancel) return self async def __aexit__(self, exc_type, exc_val, exc_tb): await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb) async def _publish(self, conn: AsyncConnection, event: DataStoreEvent) -> None: if self.notify_channel: event_type = event.__class__.__name__ event_data = json.dumps(asdict(event), ensure_ascii=False, default=default_json_handler) notification = event_type + ' ' + event_data if len(notification) < 8000: await conn.execute(func.pg_notify(self.notify_channel, notification)) return self._logger.warning( 'Could not send %s notification because it is too long (%d >= 8000)', event_type, len(notification)) self._events.publish(event) async def _listen_notifications(self, *, task_status=TASK_STATUS_IGNORED) -> None: def callback(connection, pid, channel: str, payload: str) -> None: self._logger.debug('Received notification on channel %s: %s', channel, payload) event_type, _, json_data = payload.partition(' ') try: event_data = json.loads(json_data, object_hook=json_object_hook) except JSONDecodeError: self._logger.exception('Failed decoding JSON payload of notification: %s', payload) return event_class = getattr(events_module, event_type) event = event_class(**event_data) self._events.publish(event) task_started_sent = False while True: with closing(await self.bind.raw_connection()) as conn: asyncpg_conn = conn.connection._connection await asyncpg_conn.add_listener(self.notify_channel, callback) if not task_started_sent: task_status.started() task_started_sent = True try: while True: await sleep(self.max_idle_time) await asyncpg_conn.execute('SELECT 1') finally: await asyncpg_conn.remove_listener(self.notify_channel, callback) def _deserialize_jobs(self, serialized_jobs: Iterable[Tuple[UUID, bytes]]) -> List[Job]: jobs: List[Job] = [] for job_id, serialized_data in serialized_jobs: try: jobs.append(self.serializer.deserialize(serialized_data)) except SerializationError as exc: self._events.publish(JobDeserializationFailed(job_id=job_id, exception=exc)) return jobs def _deserialize_schedules( self, serialized_schedules: Iterable[Tuple[str, bytes]]) -> 
List[Schedule]: jobs: List[Schedule] = [] for schedule_id, serialized_data in serialized_schedules: try: jobs.append(self.serializer.deserialize(serialized_data)) except SerializationError as exc: self._events.publish( ScheduleDeserializationFailed(schedule_id=schedule_id, exception=exc)) return jobs def subscribe(self, callback: Callable[[Event], Any], event_types: Optional[Iterable[Type[Event]]] = None) -> SubscriptionToken: return self._events.subscribe(callback, event_types) def unsubscribe(self, token: SubscriptionToken) -> None: self._events.unsubscribe(token) async def clear(self) -> None: async with self.bind.begin() as conn: await conn.execute(self.t_schedules.delete()) await conn.execute(self.t_jobs.delete()) async def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None: serialized_data = self.serializer.serialize(schedule) statement = self.t_schedules.insert().\ values(id=schedule.id, task_id=schedule.task_id, serialized_data=serialized_data, next_fire_time=schedule.next_fire_time) try: async with self.bind.begin() as conn: await conn.execute(statement) event = ScheduleAdded(schedule_id=schedule.id, next_fire_time=schedule.next_fire_time) await self._publish(conn, event) except IntegrityError: if conflict_policy is ConflictPolicy.exception: raise ConflictingIdError(schedule.id) from None elif conflict_policy is ConflictPolicy.replace: statement = self.t_schedules.update().\ where(self.t_schedules.c.id == schedule.id).\ values(serialized_data=serialized_data, next_fire_time=schedule.next_fire_time) async with self.bind.begin() as conn: await conn.execute(statement) event = ScheduleUpdated(schedule_id=schedule.id, next_fire_time=schedule.next_fire_time) await self._publish(conn, event) async def remove_schedules(self, ids: Iterable[str]) -> None: async with self.bind.begin() as conn: now = datetime.now(timezone.utc) conditions = and_(self.t_schedules.c.id.in_(ids), or_(self.t_schedules.c.acquired_until.is_(None), self.t_schedules.c.acquired_until < now)) statement = self.t_schedules.delete(conditions) if self._supports_update_returning: statement = statement.returning(self.t_schedules.c.id) removed_ids = [row[0] for row in await conn.execute(statement)] else: await conn.execute(statement) for schedule_id in removed_ids: await self._publish(conn, ScheduleRemoved(schedule_id=schedule_id)) async def get_schedules(self, ids: Optional[Set[str]] = None) -> List[Schedule]: query = select([self.t_schedules.c.id, self.t_schedules.c.serialized_data]).\ order_by(self.t_schedules.c.id) if ids: query = query.where(self.t_schedules.c.id.in_(ids)) async with self.bind.begin() as conn: result = await conn.execute(query) return self._deserialize_schedules(result) async def acquire_schedules(self, scheduler_id: str, limit: int) -> List[Schedule]: async with self.bind.begin() as conn: now = datetime.now(timezone.utc) acquired_until = datetime.fromtimestamp( now.timestamp() + self.lock_expiration_delay, timezone.utc) schedules_cte = select(self.t_schedules.c.id).\ where(and_(self.t_schedules.c.next_fire_time.isnot(None), self.t_schedules.c.next_fire_time <= now, or_(self.t_schedules.c.acquired_until.is_(None), self.t_schedules.c.acquired_until < now))).\ limit(limit).cte() subselect = select([schedules_cte.c.id]) statement = self.t_schedules.update().where(self.t_schedules.c.id.in_(subselect)).\ values(acquired_by=scheduler_id, acquired_until=acquired_until) if self._supports_update_returning: statement = statement.returning(self.t_schedules.c.id, 
self.t_schedules.c.serialized_data) result = await conn.execute(statement) else: await conn.execute(statement) statement = select([self.t_schedules.c.id, self.t_schedules.c.serialized_data]).\ where(and_(self.t_schedules.c.acquired_by == scheduler_id)) result = await conn.execute(statement) return self._deserialize_schedules(result) async def release_schedules(self, scheduler_id: str, schedules: List[Schedule]) -> None: update_events: List[ScheduleUpdated] = [] finished_schedule_ids: List[str] = [] async with self.bind.begin() as conn: update_args: List[Dict[str, Any]] = [] for schedule in schedules: if schedule.next_fire_time is not None: try: serialized_data = self.serializer.serialize(schedule) except SerializationError: self._logger.exception('Error serializing schedule %r – ' 'removing from data store', schedule.id) finished_schedule_ids.append(schedule.id) continue update_args.append({ 'p_id': schedule.id, 'p_serialized_data': serialized_data, 'p_next_fire_time': schedule.next_fire_time }) else: finished_schedule_ids.append(schedule.id) # Update schedules that have a next fire time if update_args: p_id = bindparam('p_id') p_serialized = bindparam('p_serialized_data') p_next_fire_time = bindparam('p_next_fire_time') statement = self.t_schedules.update().\ where(and_(self.t_schedules.c.id == p_id, self.t_schedules.c.acquired_by == scheduler_id)).\ values(serialized_data=p_serialized, next_fire_time=p_next_fire_time) next_fire_times = {arg['p_id']: arg['p_next_fire_time'] for arg in update_args} if self._supports_update_returning: statement = statement.returning(self.t_schedules.c.id) updated_ids = [row[0] for row in await conn.execute(statement, update_args)] for schedule_id in updated_ids: event = ScheduleUpdated(schedule_id=schedule_id, next_fire_time=next_fire_times[schedule_id]) update_events.append(event) # Remove schedules that have no next fire time or failed to serialize if finished_schedule_ids: statement = self.t_schedules.delete().\ where(and_(self.t_schedules.c.id.in_(finished_schedule_ids), self.t_schedules.c.acquired_by == scheduler_id)) await conn.execute(statement) for event in update_events: await self._publish(conn, event) for schedule_id in finished_schedule_ids: await self._publish(conn, ScheduleRemoved(schedule_id=schedule_id)) async def add_job(self, job: Job) -> None: now = datetime.now(timezone.utc) serialized_data = self.serializer.serialize(job) statement = self.t_jobs.insert().values(id=job.id.hex, task_id=job.task_id, created_at=now, serialized_data=serialized_data) async with self.bind.begin() as conn: await conn.execute(statement) event = JobAdded(job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id, tags=job.tags) await self._publish(conn, event) async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> List[Job]: query = select([self.t_jobs.c.id, self.t_jobs.c.serialized_data]).\ order_by(self.t_jobs.c.id) if ids: job_ids = [job_id.hex for job_id in ids] query = query.where(self.t_jobs.c.id.in_(job_ids)) async with self.bind.begin() as conn: result = await conn.execute(query) return self._deserialize_jobs(result) async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> List[Job]: async with self.bind.begin() as conn: now = datetime.now(timezone.utc) acquired_until = now + timedelta(seconds=self.lock_expiration_delay) query = select([self.t_jobs.c.id, self.t_jobs.c.serialized_data]).\ where(or_(self.t_jobs.c.acquired_until.is_(None), self.t_jobs.c.acquired_until < now)).\ order_by(self.t_jobs.c.created_at).\ 
limit(limit) serialized_jobs: Dict[str, bytes] = {row[0]: row[1] for row in await conn.execute(query)} if serialized_jobs: query = self.t_jobs.update().\ values(acquired_by=worker_id, acquired_until=acquired_until).\ where(self.t_jobs.c.id.in_(serialized_jobs)) await conn.execute(query) return self._deserialize_jobs(serialized_jobs.items()) async def release_jobs(self, worker_id: str, jobs: List[Job]) -> None: job_ids = [job.id.hex for job in jobs] statement = self.t_jobs.delete().\ where(and_(self.t_jobs.c.acquired_by == worker_id, self.t_jobs.c.id.in_(job_ids))) async with self.bind.begin() as conn: await conn.execute(statement)
19,189
5,495
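A minimal usage sketch for the data store above, assuming a PostgreSQL database reachable through the asyncpg driver (the DSN is a placeholder). The store is an async context manager, so schema setup and the notification listener are handled on entry:

import asyncio


async def main():
    # from_url() builds the async engine internally
    store = SQLAlchemyDataStore.from_url(
        "postgresql+asyncpg://user:pass@localhost/scheduler")  # placeholder DSN

    async with store:
        # with no ids given, this returns every stored schedule
        schedules = await store.get_schedules()
        print(len(schedules))


asyncio.run(main())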
from django.shortcuts import render
from django.views.generic.base import TemplateView


class IndexFinanceiroView(TemplateView):
    template_name = 'financeiro/index-financeiro.html'
185
57
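A TemplateView only becomes reachable once it is wired into a URLconf. A minimal sketch, with the URL prefix, route name, and views module path all invented for illustration:

from django.urls import path

from .views import IndexFinanceiroView  # module path assumed

urlpatterns = [
    path('financeiro/', IndexFinanceiroView.as_view(), name='index-financeiro'),
]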
from .fancyindex import *
26
9
""" Structural Variants Report """ import os import os.path as op import logging import sys import json import itertools import collections import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.ticker as ticker from matplotlib import rcParams from matplotlib.transforms import offset_copy from pbcommand.models.report import Report, Table, Column, PlotGroup, Plot from pbcommand.models import FileTypes, get_pbparser from pbcommand.cli import pbparser_runner from pbcommand.utils import setup_log from pbreports.io.specs import * from pbreports.plot.helper import (get_fig_axes_lpr, save_figure_with_thumbnail, DEFAULT_DPI) __version__ = '0.1.0' class Constants(object): TOOL_ID = "pbreports.tasks.structural_variants_report" DRIVER_EXE = ("python -m pbreports.report.structural_variants " "--resolved-tool-contract ") R_ID = "structural_variants" SAMPLE_KEY = "CountBySample" T_SAMPLE = "sample_table" C_SAMPLE = "sample" C_INS = "insertions" C_DEL = "deletions" C_HOM = "homozygous" C_HET = "heterozygous" C_TOTAL = "total" ANNO_KEY = "CountByAnnotation" T_ANNO = "anno_table" C_ANNO = "annotation" R_TANDEM = "Tandem Repeat" R_ALU = "Alu" R_L1 = "L1" R_SVA = "SVA" R_UNANNOTATED = "Unannotated" R_TOTAL = "Total" PG_SV = "sv_plot_group" P_SV = "sv_plot" C_SHORT = 'Variants <1 kb' C_LONG = 'Variants ' + r'$\geq$' + '1 kb' SV_LEN_CUTOFF_S = 1000 BIN_WIDTH_S = 50 X_TICKS_S = range(0, SV_LEN_CUTOFF_S + 100, 50)[:-1] X_LIMS_S = [X_TICKS_S[0], X_TICKS_S[-1]] X_LABELS_S = list(itertools.chain( *[[str(x), ""] for x in xrange(0, 1000, 100)])) + ["1,000"] X_LABEL_S = "variant length (bp)" N_BINS_S = X_LIMS_S[1] / BIN_WIDTH_S OVERFLOW_BIN_X = 11250 SV_LEN_CUTOFF_L = 10000 BIN_WIDTH_L = 500 X_TICKS_L = range(0, SV_LEN_CUTOFF_L + 500, 500) + [OVERFLOW_BIN_X] X_LIMS_L = [0, 12000] X_LABELS_L = list(itertools.chain( *[[str(x), ""] for x in xrange(0, 11)]))[:-1] + [">10"] X_LABEL_L = "variant length (kb)" N_BINS_L = X_LIMS_L[1] / BIN_WIDTH_L log = logging.getLogger(__name__) spec = load_spec(Constants.R_ID) def _comma_formatter(x, pos=0): return ("{0:,d}".format(int(x))) def _my_combine(n, t): """ Takes two integers, n and t, and returns "n (t)" """ c = _comma_formatter(str(n)) + " (" + _comma_formatter(str(t)) + ")" return c def to_sample_table(table_json): col_ids = [Constants.C_SAMPLE, Constants.C_INS, Constants.C_DEL, Constants.C_HOM, Constants.C_HET, Constants.C_TOTAL] sample_table = table_json[Constants.SAMPLE_KEY] t = [] if len(sample_table) == 0: table = [[], [], [], [], [], []] else: for row in sample_table: r = [row[0]] r.append(_my_combine(row[1], row[2])) r.append(_my_combine(row[3], row[4])) r.append(row[5]) r.append(row[6]) r.append(_my_combine(row[7], row[8])) t.append(r) table = zip(*t) columns = [] for i, col_id in enumerate(col_ids): columns.append(Column(col_id, values=table[i])) sample_table = Table(Constants.T_SAMPLE, columns=columns) return sample_table def to_anno_table(table_json): col_ids = [Constants.C_ANNO, Constants.C_INS, Constants.C_DEL, Constants.C_TOTAL] row_ids = [Constants.R_TANDEM, Constants.R_ALU, Constants.R_L1, Constants.R_SVA, Constants.R_UNANNOTATED, Constants.R_TOTAL] anno_table = table_json[Constants.ANNO_KEY] t = [] for _id in row_ids: for row in anno_table: if _id == row[0]: r = [row[0]] for i in xrange(1, 6, 2): r.append(_my_combine(row[i], row[i + 1])) t.append(r) table = zip(*t) columns = [] for i, col_id in enumerate(col_ids): columns.append(Column(col_id, values=table[i])) anno_table = 
Table(Constants.T_ANNO, columns=columns) return anno_table def process_short_data(data): short_ins = [x for x in data.get( "Insertion", []) if x < Constants.SV_LEN_CUTOFF_S] short_del = [x for x in data.get( "Deletion", []) if x < Constants.SV_LEN_CUTOFF_S] return short_ins, short_del def process_long_data(data): long_ins_raw = [x for x in data.get( "Insertion", []) if x >= Constants.SV_LEN_CUTOFF_S] long_del_raw = [x for x in data.get( "Deletion", []) if x >= Constants.SV_LEN_CUTOFF_S] # mapping all lengths above 10k to a constant long_ins = [Constants.OVERFLOW_BIN_X if x > Constants.SV_LEN_CUTOFF_L else x for x in long_ins_raw] long_del = [Constants.OVERFLOW_BIN_X if x > Constants.SV_LEN_CUTOFF_L else x for x in long_del_raw] return long_ins, long_del def add_subplot(fig, ax, sample, data, counter, y_max, position): insertions = data[0] deletions = data[1] y_label = get_plot_ylabel(spec, Constants.PG_SV, Constants.P_SV) if position == 0: x_ticks = Constants.X_TICKS_S x_lims = Constants.X_LIMS_S x_labels = Constants.X_LABELS_S n_bins = Constants.N_BINS_S x_label = Constants.X_LABEL_S if position == 1: x_ticks = Constants.X_TICKS_L x_lims = Constants.X_LIMS_L x_labels = Constants.X_LABELS_L n_bins = Constants.N_BINS_L x_label = Constants.X_LABEL_L ax = ax[counter, position] if insertions or deletions: ax.hist([deletions, insertions], label=["Deletions", "Insertions"], histtype='barstacked', color=["#FF7E79", "#A9D18E"], edgecolor="none", bins=n_bins, width=0.85 * (x_lims[1] - x_lims[0]) / n_bins, range=[x_lims[0], x_lims[1]]) ax.set_xlabel(x_label, size=20) ax.set_ylabel(y_label, size=20) ax.set_ylim(bottom=0) ax.set_xlim(left=x_lims[0], right=x_lims[1]) ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True)) ax.yaxis.set_major_formatter(ticker.FuncFormatter(_comma_formatter)) ax.grid(color='#e0e0e0', linewidth=0.9, linestyle='-') ax.xaxis.grid(False) ax.set_axisbelow(True) ax.set_xticks(x_ticks) ax.set_xticklabels(x_labels, size=15) ax.tick_params(axis='y', labelsize=15) rcParams['xtick.direction'] = 'out' rcParams['ytick.direction'] = 'out' ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') y_top = ax.get_ylim()[1] if y_top > y_max[position]: y_max[position] = y_top def add_subplots(fig, ax, sample, data, counter, y_max): short_ins, short_del = process_short_data(data) add_subplot(fig, ax, sample, [short_ins, short_del], counter, y_max, 0) long_ins, long_del = process_long_data(data) add_subplot(fig, ax, sample, [long_ins, long_del], counter, y_max, 1) def label_rows(fig, axes, rows): pad = 5 # in points for ax, row in zip(axes[:, 0], rows): ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0), xycoords=ax.yaxis.label, textcoords='offset points', size=25, ha='right', va='center') fig.tight_layout() fig.subplots_adjust(left=0.15, top=0.95) def label_columns(fig, axes): pad = 5 # in points columns = [Constants.C_SHORT, Constants.C_LONG] for ax, col in zip(axes[0], columns): ax.annotate(col, xy=(0.5, 1), xytext=(0, pad), xycoords='axes fraction', textcoords='offset points', size=25, ha='center', va='baseline') def to_plotgroup(plot_json, output_dir): n_samples = len(plot_json) if n_samples > 0: fig, ax = plt.subplots(n_samples, 2, figsize=( 15, n_samples * 5), squeeze=False) od = collections.OrderedDict(sorted(plot_json.items())) counter = 0 y_max = [0, 0] for sample, data in od.iteritems(): add_subplots(fig, ax, sample, data, counter, y_max) counter += 1 label_rows(fig, ax, od.keys()) label_columns(fig, ax) for row in xrange(0, n_samples): ax[row, 
0].set_ylim(top=y_max[0] * 1.1) ax[row, 1].set_ylim(top=y_max[1] * 1.1) p1 = mpatches.Patch(color='#FF7E79', linewidth=0) p2 = mpatches.Patch(color='#A9D18E', linewidth=0) fig.legend((p1, p2), ("Deletions", "Insertions"), "upper left", fontsize=15) else: fig = plt.figure() plot_name = get_plot_title(spec, Constants.PG_SV, Constants.P_SV) png_fn = os.path.join(output_dir, "{p}.png".format(p=Constants.P_SV)) png_base, thumbnail_base = save_figure_with_thumbnail( fig, png_fn, dpi=DEFAULT_DPI, bbox_inches='tight') plot = Plot(Constants.P_SV, os.path.relpath(png_base, output_dir), title=plot_name, caption=plot_name, thumbnail=os.path.relpath(thumbnail_base, output_dir)) plot_group = PlotGroup(Constants.PG_SV, plots=[plot]) return plot_group def to_report(table_json_file, plot_json_file, output_dir): log.info("Starting {f} v{v}".format(f=os.path.basename(__file__), v=__version__)) with open(table_json_file) as f: table_json = json.load(f) with open(plot_json_file) as f: plot_json = json.load(f) tables = [to_sample_table(table_json), to_anno_table(table_json)] plotgroups = [to_plotgroup(plot_json, output_dir)] report = Report(Constants.R_ID, tables=tables, plotgroups=plotgroups) return spec.apply_view(report) def _args_runner(args): output_dir = os.path.dirname(args.report) report = to_report(args.table_json, args.plot_json, output_dir) report.write_json(args.report) return 0 def _resolved_tool_contract_runner(rtc): output_dir = os.path.dirname(rtc.task.output_files[0]) report = to_report(rtc.task.input_files[0], rtc.task.input_files[1], output_dir) report.write_json(rtc.task.output_files[0]) return 0 def _add_options_to_parser(p): p.add_input_file_type( FileTypes.JSON, file_id="json_table", name="JSON Table Data", description="JSON of table data") p.add_input_file_type( FileTypes.JSON, file_id="json_plot", name="JSON Plot Data", description="JSON of plot data") p.add_output_file_type(FileTypes.REPORT, "report", spec.title, description=("Filename of JSON output report. Should be name only, " "and will be written to output dir"), default_name="report") return p def _get_parser(): p = get_pbparser( Constants.TOOL_ID, __version__, "Report", __doc__, Constants.DRIVER_EXE, is_distributed=False) return _add_options_to_parser(p) def main(argv=sys.argv): return pbparser_runner(argv[1:], _get_parser(), _args_runner, _resolved_tool_contract_runner, log, setup_log) if __name__ == "__main__": sys.exit(main())
11,444
4,400
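For reference, the _my_combine helper above is what produces the paired "n (t)" cells in both report tables; it comma-formats the two counts independently. Illustrative values, evaluated in the module's own namespace:

assert _comma_formatter(1234) == "1,234"
assert _my_combine(1234, 56) == "1,234 (56)"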
import logging
import os
from datetime import datetime


class Logger:
    logger = None

    def myLogger(self):
        if self.logger is None:
            self.logger = logging.getLogger('nrdf')
            self.logger.setLevel(logging.DEBUG)

            log_folder = r"logs/"
            os.makedirs(os.path.dirname(log_folder), exist_ok=True)
            output_file = os.path.join(
                log_folder, datetime.now().strftime("%Y_%m_%d-%H_%M_%S"))

            file_handler = logging.FileHandler(
                output_file + '.log', mode="w", encoding=None, delay=False)
            formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(formatter)
            self.logger.addHandler(stream_handler)

            self.logger.propagate = False
        return self.logger
976
280
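A minimal usage sketch for the Logger helper above; records go both to a timestamped file under logs/ and to the console, and repeated calls on the same Logger instance reuse the already-configured logger:

logger = Logger().myLogger()
logger.info("run started")
logger.error("something went wrong: %s", "example detail")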
# Just to keep things like ./manage.py test happy from django.contrib.auth.models import AbstractUser # class Group(models.Model): # """ # Groups are a generic way of categorizing users to apply permissions, or # some other label, to those users. A user can belong to any number of # groups. # A user in a group automatically has all the permissions granted to that # group. For example, if the group Site editors has the permission # can_edit_home_page, any user in that group will have that permission. # Beyond permissions, groups are a convenient way to categorize users to # apply some label, or extended functionality, to them. For example, you # could create a group 'Special users', and you could write code that would # do special things to those users -- such as giving them access to a # members-only portion of your site, or sending them members-only email # messages. # """ # name = models.CharField(_('name'), max_length=80, unique=True) # permissions = models.ManyToManyField( # Permission, # verbose_name=_('permissions'), # blank=True, # ) # # objects = GroupManager() # # class Meta: # verbose_name = _('group') # verbose_name_plural = _('groups') # # def __str__(self): # return self.name # # def natural_key(self): # return (self.name,) # class User(AbstractUser): # """ # Users within the Django authentication system are represented by this # model. # Username, password and email are required. Other fields are optional. # """ # class Meta(AbstractUser.Meta): # swappable = 'AUTH_USER_MODEL'
1,689
460
import os import argparse import cv2 import torch import pandas as pd from tqdm import tqdm from pathlib import Path import segmentation_models_pytorch as smp from tools.datasets import InferenceDataset from tools.models import CovidScoringNet, SegmentationModel from tools.utils import extract_model_opts, get_list_of_files def inference( model: CovidScoringNet, dataset: InferenceDataset, output_dir: str, csv_name: str, ) -> None: model.eval() output_lungs_dir = os.path.join(output_dir, 'lungs') output_covid_dir = os.path.join(output_dir, 'covid') os.makedirs(output_lungs_dir) if not os.path.exists(output_lungs_dir) else False os.makedirs(output_covid_dir) if not os.path.exists(output_covid_dir) else False data = { 'dataset': [], 'filename': [], 'lungs_mask': [], 'covid_mask': [], 'score': [], } keys = ['lung_segment_{:d}'.format(idx + 1) for idx in range(6)] lung_segment_probs = {key: [] for key in keys} data.update(lung_segment_probs) for source_img, img_path in tqdm(dataset, desc='Prediction', unit=' images'): image_path = os.path.normpath(img_path) filename = os.path.split(image_path)[-1] dataset_name = image_path.split(os.sep)[-3] predicted_score, mask_lungs, mask_covid, raw_pred = model.predict(source_img) cv2.imwrite(os.path.join(output_lungs_dir, filename), mask_lungs * 255) cv2.imwrite(os.path.join(output_covid_dir, filename), mask_covid * 255) data['dataset'].append(dataset_name) data['filename'].append(filename) data['lungs_mask'].append(os.path.join(output_lungs_dir, filename)) data['covid_mask'].append(os.path.join(output_covid_dir, filename)) data['score'].append(predicted_score) for idx in range(len(raw_pred)): raw_pred_col = 'lung_segment_{:d}'.format(idx + 1) data[raw_pred_col].append(raw_pred[idx]) csv_save_path = os.path.join(output_dir, csv_name) df = pd.DataFrame(data) df.to_csv(csv_save_path, index=False) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Inference pipeline') # Dataset settings parser.add_argument('--data_dir', type=str) parser.add_argument('--output_dir', default='dataset/inference_output', type=str) parser.add_argument('--csv_name', default='model_outputs.csv', type=str) # COVID model settings parser.add_argument('--covid_model_path', type=str) parser.add_argument('--covid_model_name', default='Unet', type=str) parser.add_argument('--covid_encoder_name', default='se_resnet101', type=str) parser.add_argument('--covid_encoder_weights', default='imagenet', type=str) parser.add_argument('--covid_in_channels', default=3, type=int) parser.add_argument('--covid_num_classes', default=1, type=int) parser.add_argument('--covid_activation', default='sigmoid', type=str) parser.add_argument('--covid_dropout', default=0.2, type=float) parser.add_argument('--covid_aux_params', default=True, type=bool) parser.add_argument('--covid_input_size', nargs='+', default=(480, 480), type=int) # Lungs model settings parser.add_argument('--lungs_model_path', type=str) parser.add_argument('--lungs_model_name', default='Unet', type=str) parser.add_argument('--lungs_encoder_name', default='se_resnext101_32x4d', type=str) parser.add_argument('--lungs_encoder_weights', default='imagenet', type=str) parser.add_argument('--lungs_in_channels', default=3, type=int) parser.add_argument('--lungs_num_classes', default=1, type=int) parser.add_argument('--lungs_activation', default='sigmoid', type=str) parser.add_argument('--lungs_dropout', default=0.2, type=float) parser.add_argument('--lungs_aux_params', default=False, type=bool) 
parser.add_argument('--lungs_input_size', nargs='+', default=(384, 384), type=int) # Additional settings parser.add_argument('--automatic_parser', action='store_true') parser.add_argument('--threshold', default=0.5, type=float) args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' args.covid_input_size = tuple(args.covid_input_size) args.lungs_input_size = tuple(args.lungs_input_size) if args.automatic_parser: covid_model_opts = extract_model_opts(args.covid_model_path) lungs_model_opts = extract_model_opts(args.lungs_model_path) args.covid_model_name = covid_model_opts['model_name'] args.covid_encoder_name = covid_model_opts['encoder_name'] args.covid_encoder_weights = covid_model_opts['encoder_weights'] args.lungs_model_name = lungs_model_opts['model_name'] args.lungs_encoder_name = lungs_model_opts['encoder_name'] args.lungs_encoder_weights = lungs_model_opts['encoder_weights'] args.output_dir = os.path.join(args.output_dir, args.covid_model_name) args.csv_name = '{:s}_{:s}{:s}'.format( Path(args.csv_name).stem, args.covid_model_name, Path(args.csv_name).suffix ) covid_aux_params = None if args.covid_aux_params: covid_aux_params = dict( pooling='avg', dropout=args.covid_dropout, activation=args.covid_activation, classes=args.covid_num_classes, ) lungs_aux_params = None if args.lungs_aux_params: lungs_aux_params = dict( pooling='avg', dropout=args.lungs_dropout, activation=args.covid_activation, classes=args.covid_num_classes, ) covid_model = SegmentationModel( model_name=args.covid_model_name, encoder_name=args.covid_encoder_name, aux_params=covid_aux_params, encoder_weights=args.covid_encoder_weights, in_channels=args.covid_in_channels, num_classes=args.covid_num_classes, activation=args.covid_activation, wandb_api_key=None, ) lungs_model = SegmentationModel( model_name=args.lungs_model_name, encoder_name=args.lungs_encoder_name, aux_params=lungs_aux_params, encoder_weights=args.lungs_encoder_weights, in_channels=args.lungs_in_channels, num_classes=args.lungs_num_classes, activation=args.lungs_activation, wandb_api_key=None, ) covid_model = covid_model.build_model() lungs_model = lungs_model.build_model() covid_model.load_state_dict(torch.load(args.covid_model_path, map_location=device)) lungs_model.load_state_dict(torch.load(args.lungs_model_path, map_location=device)) covid_preprocessing_params = smp.encoders.get_preprocessing_params( encoder_name=args.covid_encoder_name, pretrained=args.covid_encoder_weights ) lung_preprocessing_params = smp.encoders.get_preprocessing_params( encoder_name=args.lungs_encoder_name, pretrained=args.lungs_encoder_weights ) img_paths = get_list_of_files(args.data_dir, ['mask']) dataset = InferenceDataset(img_paths, input_size=args.lungs_input_size) model = CovidScoringNet( lungs_model, covid_model, device, args.threshold, args.lungs_input_size, args.covid_input_size, covid_preprocessing_params, lung_preprocessing_params, crop_type='single_crop', ) inference(model, dataset, args.output_dir, args.csv_name)
7,494
2,624
#!/usr/bin/env python # Copyright (c) 2017, DIANA-HEP # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ROOT constants used in deserialization.""" import numpy # used in unmarshaling kByteCountMask = numpy.int64(0x40000000) kByteCountVMask = numpy.int64(0x4000) kClassMask = numpy.int64(0x80000000) kNewClassTag = numpy.int64(0xFFFFFFFF) kIsOnHeap = numpy.uint32(0x01000000) kIsReferenced = numpy.uint32(1 << 4) kMapOffset = 2 # not used? kNullTag = 0 kNotDeleted = 0x02000000 kZombie = 0x04000000 kBitMask = 0x00ffffff kDisplacementMask = 0xFF000000 ################################################################ core/zip/inc/Compression.h kZLIB = 1 kLZMA = 2 kOldCompressionAlgo = 3 kLZ4 = 4 kUndefinedCompressionAlgorithm = 5 ################################################################ constants for streamers kBase = 0 kChar = 1 kShort = 2 kInt = 3 kLong = 4 kFloat = 5 kCounter = 6 kCharStar = 7 kDouble = 8 kDouble32 = 9 kLegacyChar = 10 kUChar = 11 kUShort = 12 kUInt = 13 kULong = 14 kBits = 15 kLong64 = 16 kULong64 = 17 kBool = 18 kFloat16 = 19 kOffsetL = 20 kOffsetP = 40 kObject = 61 kAny = 62 kObjectp = 63 kObjectP = 64 kTString = 65 kTObject = 66 kTNamed = 67 kAnyp = 68 kAnyP = 69 kAnyPnoVT = 70 kSTLp = 71 kSkip = 100 kSkipL = 120 kSkipP = 140 kConv = 200 kConvL = 220 kConvP = 240 kSTL = 300 kSTLstring = 365 kStreamer = 500 kStreamLoop = 501 ################################################################ constants from core/foundation/inc/ESTLType.h kNotSTL = 0 kSTLvector = 1 kSTLlist = 2 kSTLdeque = 3 kSTLmap = 4 kSTLmultimap = 5 kSTLset = 6 kSTLmultiset = 7 kSTLbitset = 8 kSTLforwardlist = 9 kSTLunorderedset = 10 kSTLunorderedmultiset = 11 kSTLunorderedmap = 12 kSTLunorderedmultimap = 13 kSTLend = 14 kSTLany = 300 ################################################################ IOFeatures kGenerateOffsetMap = 1
4,267
1,479
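As an illustration of how the unmarshaling masks above are typically applied (this is a sketch, not code taken from the package's callers): ROOT byte counts are written with the kByteCountMask bit OR'd in, so a reader checks the flag and masks it off to recover the length.

import numpy


def read_bytecount(raw):
    """Illustrative only: split a raw 32-bit word into (is_bytecount, count)."""
    raw = numpy.int64(numpy.uint32(raw))
    flagged = bool(raw & kByteCountMask)
    count = int(raw & ~kByteCountMask & 0xFFFFFFFF)
    return flagged, count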
# 771. Jewels and Stones
class Solution:
    def numJewelsInStones(self, jewels: str, stones: str) -> int:
        # count = 0
        # jewl = {}
        # for i in jewels:
        #     if i not in jewl:
        #         jewl[i] = 0
        # for j in stones:
        #     if j in jewl:
        #         count += 1
        # return count

        # return sum(s in jewels for s in stones)

        count = 0
        jewl = set(jewels)
        for s in stones:
            if s in jewl:
                count += 1
        return count
554
188
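A quick check of the solution above, using the examples from the problem statement; building the set once makes each lookup O(1), so the whole pass is O(len(jewels) + len(stones)):

assert Solution().numJewelsInStones("aA", "aAAbbbb") == 3
assert Solution().numJewelsInStones("z", "ZZ") == 0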
from django.contrib import admin

from .models import Comment

# Register your models here.


class CommentsAdmin(admin.ModelAdmin):
    list_display = ['id', "user", "content", "timestamp"]

    class Meta:
        model = Comment


admin.site.register(Comment, CommentsAdmin)
277
79
##incompleted yt tuitorial ##import discord ##import json ##import asyncio ##import youtube_dl ##import shell ##import os ##from discord.utils import get ##from discord.ext import commands ## ##@client.command(pass_context=True) ##async def join(ctx): ## global voice ## channel=ctx.message.author.voice.channel ## voice=get(client.voice_clients,guild=ctx.guild) ## ## if voice and voice.is_connected(): ## await voice.move_to(channel) ## else: ## voice=await chqannel.connect() ## await ctx.send(f"Joined {channel}") ## ##@client.command(pass_context=True) ##async def leave(ctx): ## channel=ctx.message.author.voice.channel ## voice=get(client.voice_clients,guild=ctx.guild) ## ## if voice and voice.is_connected(): ## await voice.disconnect() ## await ctx.send(f"Left {channel}") ## ##@client.command(pass_context=True,aliases=["p"]) ##async def play(ctx,url:str): ## def check_queue(): ## Queue_infile=os.path.indir("./Queue") ## if Queue_infile is True: ## DIR =os.path.abspath(os.path.realpath("Queue")) ## length=len(os. ##
1,180
420
import itertools import os import random import numpy as np import pandas as pd from tqdm import tqdm def _get_steps(): hdf_subdir = "augmentation/" steps = {"step_name": ["prototypical", "single_sources", "mixtures"]} steps_df = pd.DataFrame(steps) steps_df["hdf_path"] = hdf_subdir + steps_df["step_name"] # Impose order on the augmentation steps: steps_df["step_name"] = pd.Categorical( steps_df["step_name"], ["prototypical", "single_sources", "mixtures"] ) steps_df.sort_values("step_name", inplace=True, ignore_index=True) return steps_df def prototypical_spectrum(dataset, source_df): """Weighted average of calibration spectra with randomly assigned weights between 0 and 1. Args: dataset (pyeem.datasets.Dataset): Your PyEEM dataset. source_df (pandas.DataFrame): Calibration information for a single source. Returns: pandas.DataFrame: A prototypical Excitation Emission Matrix for a single source. """ aug_steps_df = _get_steps() source_name = source_df.index.get_level_values("source").unique().item() source_units = source_df.index.get_level_values("source_units").unique().item() intensity_units = ( source_df.index.get_level_values("intensity_units").unique().item() ) proto_eems = [] for index, row in source_df[source_df["prototypical_sample"]].iterrows(): eem_path = row["hdf_path"] eem = pd.read_hdf(dataset.hdf, key=eem_path) proto_eems.append(eem) # TODO - IMPORTANT: This can't just be the mean of the prototypical samples... # Need to use the same weighted average as the intensity values! proto_concentration = source_df[source_df["prototypical_sample"]][ "concentration" ].mean() """ weights = [] for i in range(len(proto_eems)): weights.append(random.uniform(0, 1)) proto_eem = np.average([eem.values for eem in proto_eems], axis=0, weights=weights) """ proto_eem = np.average([eem.values for eem in proto_eems], axis=0) proto_eem = pd.DataFrame( data=proto_eem, index=proto_eems[0].index, columns=proto_eems[0].columns ) proto_eem.index.name = "emission_wavelength" hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][ "hdf_path" ].item() hdf_path = os.path.join(hdf_path, source_name) new_indices = np.array( ["source", "proto_conc", "source_units", "intensity_units", "hdf_path"] ) proto_eem = proto_eem.assign( **{ "source": source_name, "proto_conc": proto_concentration, "source_units": source_units, "intensity_units": intensity_units, "hdf_path": hdf_path, } ) proto_eem.set_index(new_indices.tolist(), append=True, inplace=True) new_indices = np.append(new_indices, ("emission_wavelength")) proto_eem = proto_eem.reorder_levels(new_indices) proto_eem.to_hdf(dataset.hdf, key=hdf_path) return proto_eem def create_prototypical_spectra(dataset, cal_df): """Creates a protoypical spectrum for each calibration source in the PyEEM dataset. Args: dataset (pyeem.datasets.Dataset): Your PyEEM dataset. cal_df (pandas.DataFrame): Calibration information for your dataset returned from :meth:`pyeem.preprocessing.calibration()` Returns: pandas.DataFrame: A table describing the prototypical spectra and their paths within the HDF5 store. 
""" results_rows = [] for source_name, group in cal_df.groupby(level="source", as_index=False): proto_eem_df = prototypical_spectrum(dataset, group) new_indices = proto_eem_df.index.droplevel("emission_wavelength").unique() result = dict(zip(list(new_indices.names), list(new_indices.item()))) results_rows.append(result) results_df = pd.DataFrame(results_rows) results_index = "source" results_df.set_index(results_index, inplace=True) return results_df def single_source(dataset, source_df, conc_range, num_spectra): """Creates augmented single source spectra for a single calibration source. Args: dataset (pyeem.datasets.Dataset): Your PyEEM dataset. source_df (pandas.DataFrame): Calibration information for a single source. conc_range (tuple of (int, float)): The concentration range which the augmented single source spectra will occupy. num_spectra (int): The number of augmented single source spectra to create. Returns: pandas.DataFrame: A table describing the source's augmented spectra and their paths within the HDF5 store. """ aug_steps_df = _get_steps() # Get the source's name source_name = source_df.index.get_level_values("source").unique().item() # Get the HDF5 path to the source's prototypical EEM proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][ "hdf_path" ].item() proto_hdf_path = os.path.join(proto_hdf_path, source_name) # Read in the prototypical EEM proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path) # Get the source's prototypical concentration proto_concentration = proto_eem.index.get_level_values("proto_conc").unique().item() # Remove the concentration index from the dataframe proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True) # Get the slope and intercept of the source's calibration function slope = source_df.index.get_level_values("slope").unique().item() y_intercept = source_df.index.get_level_values("intercept").unique().item() """ slope = ( cal_df.xs(source_name, level="source") .index.get_level_values("slope") .unique() .item() ) y_intercept = ( cal_df.xs(source_name, level="source") .index.get_level_values("intercept") .unique() .item() ) """ # Generate the 1D polynomial cal_func = np.poly1d([slope, y_intercept]) # Generate the concentration range based on the argument's concentration_range = np.linspace(conc_range[0], conc_range[1], num=num_spectra) # Create a new HDF5 path for the single source spectra hdf_path = aug_steps_df[aug_steps_df["step_name"] == "single_sources"][ "hdf_path" ].item() hdf_path = os.path.join(hdf_path, source_name) # aug_ss_dfs: A list which we will iteratively append single source spectra to. For each # concentration in the concentration range. 
Then we will turn the list of DFs # into a single DF by using concat() aug_ss_dfs = [] sources = list(dataset.calibration_sources) for new_concentration in concentration_range: scalar = cal_func(new_concentration) / cal_func(proto_concentration) ss_eem = proto_eem * scalar # Make sure there are no negative values ss_eem.clip(lower=0, inplace=True) label = np.zeros(len(sources)) source_index = sources.index(source_name) label[source_index] = new_concentration ss_eem.index.name = "emission_wavelength" ss_eem = ss_eem.assign(**dict(zip(sources, label))) new_indices = sources ss_eem.set_index(new_indices, append=True, inplace=True) new_indices = [ "source", "source_units", "intensity_units", "hdf_path", ] + new_indices new_indices.append("emission_wavelength") ss_eem = ss_eem.reorder_levels(new_indices) ss_eem.rename(index={proto_hdf_path: hdf_path}, inplace=True) aug_ss_dfs.append(ss_eem) aug_ss_df = pd.concat(aug_ss_dfs) aug_ss_df.to_hdf(dataset.hdf, key=hdf_path) return aug_ss_df def create_single_source_spectra(dataset, cal_df, conc_range, num_spectra): """Creates augmented single source spectra for each calibration source in the PyEEM dataset. Args: dataset (pyeem.datasets.Dataset): Your PyEEM dataset. cal_df (pandas.DataFrame): Calibration information for your dataset returned from :meth:`pyeem.preprocessing.calibration()` conc_range (tuple of (int, float)): The concentration range which the augmented single source spectra will occupy. num_spectra (int): The number of augmented single source spectra for each calibration source. Returns: pandas.DataFrame: A table describing the augmented single source spectra and their paths within the HDF5 store. """ aug_ss_dfs = [] for source_name, group in tqdm(cal_df.groupby(level="source", as_index=False)): ss_df = single_source( dataset, group, conc_range=conc_range, num_spectra=num_spectra ) ss_df = ( ss_df.index.droplevel(["emission_wavelength"]) .unique() .to_frame() .reset_index(drop=True) ) ss_df.set_index( ["source", "source_units", "intensity_units", "hdf_path"], inplace=True ) aug_ss_dfs.append(ss_df) aug_ss_df = pd.concat(aug_ss_dfs) return aug_ss_df """ def mixture(): return """ def create_mixture_spectra(dataset, cal_df, conc_range, num_steps, scale="logarithmic"): """Creates augmented mixture spectra by summing together augmented single source spectra. The number of augmented mixtures created is equal to the Cartesian product composed of... Args: dataset (pyeem.datasets.Dataset): Your PyEEM dataset. cal_df (pandas.DataFrame): Calibration information for your dataset returned from :meth:`pyeem.preprocessing.calibration()` conc_range (tuple of (int, float)): The concentration range which the augmented spectra mixtures will occupy. num_steps (int): The number of intervals within the concentration range. scale (str, optional): Determines how the concentrations will be spaced along the given concentration range. Options are "linear" and "logarithmic". Defaults to "logarithmic". Raises: Exception: Raised if calibration sources are reported in different units. ValueError: Raised if the scale argument is a value other than linear" or "logarithmic". Returns: pandas.DataFrame: A table describing the augmented mixture spectra and their paths within the HDF5 store. """ if cal_df.index.get_level_values("source_units").nunique() != 1: raise Exception( "Sources must be reported in the same units in order create augmented mixtures." 
) sources = cal_df.index.get_level_values(level="source").unique().to_list() source_units = cal_df.index.get_level_values("source_units").unique().item() intensity_units = ( cal_df.index.get_level_values(level="intensity_units").unique().item() ) aug_steps_df = _get_steps() hdf_path = aug_steps_df[aug_steps_df["step_name"] == "mixtures"]["hdf_path"].item() proto_spectra = [] for source_name, group in cal_df.groupby(level="source", as_index=False): # Get the HDF5 path to the source's prototypical EEM proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][ "hdf_path" ].item() proto_hdf_path = os.path.join(proto_hdf_path, source_name) # Read in the prototypical EEM proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path) proto_spectra.append(proto_eem) proto_eem_df = pd.concat(proto_spectra) if scale == "logarithmic": number_range = np.geomspace(conc_range[0], conc_range[1], num=num_steps) elif scale == "linear": number_range = np.linspace(conc_range[0], conc_range[1], num=num_steps) else: raise ValueError("scale must be 'logarithmic' or 'linear'") cartesian_product = [ p for p in itertools.product(number_range.tolist(), repeat=len(sources)) ] aug = [] for conc_set in tqdm(cartesian_product, desc="Creating Augmented Mixtures"): mix = [] # TODO - it'd be a good idea to break this out into another function. # Call it mixture() -- returns a single mixture EEM for index, label in enumerate(zip(sources, conc_set)): source_name = label[0] new_concentration = label[1] slope = ( cal_df.xs(source_name, level="source") .index.get_level_values("slope") .unique() .item() ) y_intercept = ( cal_df.xs(source_name, level="source") .index.get_level_values("intercept") .unique() .item() ) cal_func = np.poly1d([slope, y_intercept]) proto_eem = proto_eem_df.xs(source_name, level="source", drop_level=False) proto_concentration = ( proto_eem.index.get_level_values("proto_conc").unique().item() ) proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True) scalar = cal_func(new_concentration) / cal_func(proto_concentration) new_eem = proto_eem * scalar # Make sure there are no negative values new_eem.clip(lower=0, inplace=True) mix.append(new_eem) mix_eem = pd.concat(mix).sum(level="emission_wavelength") mix_eem = mix_eem.assign(**dict(zip(sources, conc_set))) mix_eem["hdf_path"] = hdf_path mix_eem["source"] = "mixture" mix_eem["source_units"] = source_units mix_eem["intensity_units"] = intensity_units new_indices = [ "source", "source_units", "intensity_units", "hdf_path", ] + sources mix_eem.set_index(new_indices, append=True, inplace=True) new_indices = np.append(new_indices, ("emission_wavelength")) mix_eem = mix_eem.reorder_levels(new_indices) aug.append(mix_eem) aug_mix_df = pd.concat(aug) aug_mix_df.to_hdf(dataset.hdf, key=hdf_path) aug_mix_df = ( aug_mix_df.index.droplevel(["emission_wavelength"]) .unique() .to_frame() .reset_index(drop=True) ) aug_mix_df.set_index( ["source", "source_units", "intensity_units", "hdf_path"], inplace=True ) return aug_mix_df
14,426
4,703
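# Illustrative usage sketch (not part of the module above): the three public
# helpers are intended to run in sequence -- prototypical spectra first, then
# scaled single-source spectra, then mixtures. The import path, the `dataset`
# and `cal_df` objects, and the concentration values are assumptions for this
# example, not taken from the file itself.
from pyeem.augmentation import (
    create_prototypical_spectra,
    create_single_source_spectra,
    create_mixture_spectra,
)

# dataset = pyeem.datasets.Dataset(...)            # hypothetical setup
# cal_df = pyeem.preprocessing.calibration(...)    # as referenced in the docstrings

proto_df = create_prototypical_spectra(dataset, cal_df)
ss_df = create_single_source_spectra(dataset, cal_df, conc_range=(0.01, 5.0), num_spectra=10)
mix_df = create_mixture_spectra(dataset, cal_df, conc_range=(0.01, 5.0), num_steps=5, scale="logarithmic")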
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Networking/Responses/SetAvatarResponse.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from POGOProtos.Data import PlayerData_pb2 as POGOProtos_dot_Data_dot_PlayerData__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Networking/Responses/SetAvatarResponse.proto', package='POGOProtos.Networking.Responses', syntax='proto3', serialized_pb=_b('\n7POGOProtos/Networking/Responses/SetAvatarResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Data/PlayerData.proto\"\xd7\x01\n\x11SetAvatarResponse\x12I\n\x06status\x18\x01 \x01(\x0e\x32\x39.POGOProtos.Networking.Responses.SetAvatarResponse.Status\x12\x30\n\x0bplayer_data\x18\x02 \x01(\x0b\x32\x1b.POGOProtos.Data.PlayerData\"E\n\x06Status\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x16\n\x12\x41VATAR_ALREADY_SET\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\x62\x06proto3') , dependencies=[POGOProtos_dot_Data_dot_PlayerData__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _SETAVATARRESPONSE_STATUS = _descriptor.EnumDescriptor( name='Status', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.Status', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNSET', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SUCCESS', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='AVATAR_ALREADY_SET', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='FAILURE', index=3, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=273, serialized_end=342, ) _sym_db.RegisterEnumDescriptor(_SETAVATARRESPONSE_STATUS) _SETAVATARRESPONSE = _descriptor.Descriptor( name='SetAvatarResponse', full_name='POGOProtos.Networking.Responses.SetAvatarResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='status', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.status', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='player_data', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.player_data', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _SETAVATARRESPONSE_STATUS, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=127, serialized_end=342, ) _SETAVATARRESPONSE.fields_by_name['status'].enum_type = _SETAVATARRESPONSE_STATUS _SETAVATARRESPONSE.fields_by_name['player_data'].message_type = POGOProtos_dot_Data_dot_PlayerData__pb2._PLAYERDATA _SETAVATARRESPONSE_STATUS.containing_type = _SETAVATARRESPONSE DESCRIPTOR.message_types_by_name['SetAvatarResponse'] = _SETAVATARRESPONSE 
SetAvatarResponse = _reflection.GeneratedProtocolMessageType('SetAvatarResponse', (_message.Message,), dict( DESCRIPTOR = _SETAVATARRESPONSE, __module__ = 'POGOProtos.Networking.Responses.SetAvatarResponse_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.SetAvatarResponse) )) _sym_db.RegisterMessage(SetAvatarResponse) # @@protoc_insertion_point(module_scope)
4,306
1,743
from abc import ABC, abstractmethod


class Dynamics(ABC):
    """
    An abstract class which outlines the basic functionalities that an
    object simulating dynamics should implement
    """

    @abstractmethod
    def step(self, dt):
        pass

    @abstractmethod
    def get_state(self):
        pass

    @abstractmethod
    def get_measurements(self):
        pass
376
100
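# Illustrative concrete implementation of the interface above. The 1-D
# point-mass model and its attribute names are made up for this example;
# they are not part of the original file.
import numpy as np


class PointMassDynamics(Dynamics):
    """Hypothetical 1-D point mass with constant velocity and noisy position reads."""

    def __init__(self, x0=0.0, v=1.0, noise_std=0.01):
        self.x = x0
        self.v = v
        self.noise_std = noise_std

    def step(self, dt):
        # Advance the state by one time step.
        self.x += self.v * dt

    def get_state(self):
        return np.array([self.x, self.v])

    def get_measurements(self):
        # Return the position corrupted by Gaussian noise.
        return self.x + np.random.normal(0.0, self.noise_std)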
class MaxPQ:
    """Array-based max-heap priority queue (0-indexed)."""

    def __init__(self):
        self.pq = []

    def insert(self, v):
        self.pq.append(v)
        self.swim(len(self.pq) - 1)

    def max(self):
        return self.pq[0]

    def del_max(self):
        # Swap the root with the last element, drop it, then restore heap order.
        m = self.pq[0]
        self.pq[0], self.pq[-1] = self.pq[-1], self.pq[0]
        self.pq.pop()
        self.sink(0)
        return m

    def is_empty(self):
        return not self.pq

    def size(self):
        return len(self.pq)

    def swim(self, k):
        # Move pq[k] up while it is larger than its parent at (k - 1) // 2.
        while k > 0 and self.pq[(k - 1) // 2] < self.pq[k]:
            self.pq[k], self.pq[(k - 1) // 2] = self.pq[(k - 1) // 2], self.pq[k]
            k = (k - 1) // 2

    def sink(self, k):
        # Move pq[k] down, always swapping with the larger child.
        N = len(self.pq)
        while 2 * k + 1 <= N - 1:
            j = 2 * k + 1
            if j < N - 1 and self.pq[j] < self.pq[j + 1]:
                j += 1
            if self.pq[k] > self.pq[j]:
                break
            self.pq[k], self.pq[j] = self.pq[j], self.pq[k]
            k = j
1,012
441
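# Quick sanity check of the queue above: values inserted in arbitrary order
# should come back out largest-first (the numbers are illustrative only).
pq = MaxPQ()
for v in [3, 10, 5, 1, 8]:
    pq.insert(v)

out = []
while not pq.is_empty():
    out.append(pq.del_max())

print(out)  # expected: [10, 8, 5, 3, 1]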
""" four different ways to run an action in a thread; all print 4294967296, but prints should be synchronized with a mutex here to avoid overlap """ import threading, _thread def action(i): print(i ** 32) # subclass with state class Mythread(threading.Thread): def __init__(self, i): self.i = i threading.Thread.__init__(self) def run(self): # redefine run for action print(self.i ** 32) Mythread(2).start() # start invokes run() # pass action in thread = threading.Thread(target=(lambda: action(2))) # run invokes target thread.start() # same but no lambda wrapper for state threading.Thread(target=action, args=(2,)).start() # callable plus its args # basic thread module _thread.start_new_thread(action, (2,)) # all-function interface
887
259
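# As the docstring above notes, bare print calls from several threads can
# interleave. A small sketch of the mutex variant it alludes to; the lock
# name and thread count are ours, not part of the original file.
import threading

print_lock = threading.Lock()


def safe_action(i):
    # Serialize access to stdout so concurrent prints do not overlap.
    with print_lock:
        print(i ** 32)


threads = [threading.Thread(target=safe_action, args=(2,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()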
import logging import socket import numpy import time from cPickle import loads from scipy import linalg from matplotlib import pyplot from multiprocessing import Array from src.logic import helpers from src.logic.parallel_process import ProcessParallel from scipy import * from numpy import * class Server: def __init__(self, server_address, server_port, true_positions, estimated_positions, sensor_positions, microphone_amount, trials, coordinates, cores_amount): self.__x, self.__y, self.__z = coordinates self.__server_address = server_address self.__microphone_amount = microphone_amount self.__server_port = server_port self.__true_positions = true_positions self.__estimated_positions = estimated_positions self.__trials = trials self.__sensor_positions = sensor_positions self.__distances = [] self.__time_delays = [] self.__padding = [] self.__cores_amount = cores_amount self.__microphone_data = None self.__raw_microphone_data = [] def generate_data(self): self.generate_source_positions() self.generate_distances() self.prepare() def run(self, received_data): # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Bind the socket to the port server_address = (self.__server_address, self.__server_port) logging.info('Starting up on %s port %s', self.__server_address, self.__server_port) sock.bind(server_address) microphones_data = {} received_data_count = 0 while received_data_count < self.__microphone_amount: logging.info('Waiting to receive message...') data, address = sock.recvfrom(65535 - 28) logging.info("Received %s", len(data)) if len(data) == 36: received_data[received_data_count] = microphones_data[data] received_data_count += 1 logging.info("Received data from %s microphones", received_data_count) else: microphone_id = data[0:36] if not microphone_id in microphones_data: microphones_data[microphone_id] = data[36:] else: microphones_data[microphone_id] += data[36:] logging.info("Received data from all microphones") def generate_source_positions(self): logging.info('Generating sources positions.') for i in range(self.__trials): #r = numpy.random.rand(1) * 50 #t = numpy.random.rand(1) * 2 * math.pi r = 0.1 * 50 t = 0.2 * 50 z = 0.3 * 20 x = r * math.cos(t) y = r * math.sin(t) #z = numpy.random.rand(1) * 20 self.__true_positions[i, 0] = x self.__true_positions[i, 1] = y self.__true_positions[i, 2] = z logging.info('Generated sources positions.') def generate_distances(self): logging.info('Generating distances.') self.__distances = numpy.zeros((self.__trials, self.__microphone_amount)) for i in range(self.__trials): for j in range(self.__microphone_amount): x1 = self.__true_positions[i, 0] y1 = self.__true_positions[i, 1] z1 = self.__true_positions[i, 2] x2 = self.__sensor_positions[j, 0] y2 = self.__sensor_positions[j, 1] z2 = self.__sensor_positions[j, 2] self.__distances[i, j] = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2) logging.info('Generated distances.') def log_results(self): for trial_number in range(self.__trials): logging.info('Trial number: %d', trial_number + 1) logging.info('Estimated X = %.15f, Estimated Y = %.15f, Estimated Z = %.15f', float(self.__estimated_positions[trial_number][0]), float(self.__estimated_positions[trial_number][1]), float(self.__estimated_positions[trial_number][2])) logging.info('True X = %.15f, True Y = %.15f, True Z = %.15f', float(self.__true_positions[trial_number][0]), float(self.__true_positions[trial_number][1]), float(self.__true_positions[trial_number][2])) def draw_plot(self): 
pyplot.plot(self.__true_positions[:, 0], self.__true_positions[:, 1], 'bd', label='True position') pyplot.plot(self.__estimated_positions[:, 0], self.__estimated_positions[:, 1], 'r+', label='Estimated position') pyplot.legend(loc='upper right', numpoints=1) pyplot.xlabel('X coordinate of target') pyplot.ylabel('Y coordinate of target') pyplot.title('TDOA Hyperbolic Localization') pyplot.axis([-50, 50, -50, 50]) pyplot.show() def prepare(self): logging.info('Preparing stage started.') self.__time_delays = numpy.divide(self.__distances, 340.29) self.__padding = numpy.multiply(self.__time_delays, 44100) logging.info('Preparing stage ended.') def handle_retrieved_data(self, received_data): for i in range(self.__trials): x = self.__true_positions[i, 0] y = self.__true_positions[i, 1] z = self.__true_positions[i, 2] data = [] for j in range(self.__microphone_amount): data.append(received_data[j]) multi_track = numpy.array([loads(raw) for raw in data]) logging.info('Prepared all data.') logging.info('Started source localization.') x, y, z = self.locate(self.__sensor_positions, multi_track) logging.info('Localized source.') self.__estimated_positions[i, 0] = x self.__estimated_positions[i, 1] = y self.__estimated_positions[i, 2] = z def locate(self, sensor_positions, multi_track): s = sensor_positions.shape len = s[0] time_delays = numpy.zeros((len, 1)) starts = time.time() if self.__cores_amount == 1: for p in range(len): time_delays[p] = helpers.time_delay_function(multi_track[0,], multi_track[p,]) else: pp = ProcessParallel() outs = Array('d', range(len)) ranges = [] for result in helpers.per_delta(0, len, len / self.__cores_amount): ranges.append(result) for start, end in ranges: pp.add_task(helpers.time_delay_function_optimized, (start, end, outs, multi_track)) pp.start_all() pp.join_all() for idx, res in enumerate(outs): time_delays[idx] = res ends = time.time() logging.info('%.15f passed for localization computation trial.', ends - starts) Amat = numpy.zeros((len, 1)) Bmat = numpy.zeros((len, 1)) Cmat = numpy.zeros((len, 1)) Dmat = numpy.zeros((len, 1)) for i in range(2, len): x1 = sensor_positions[0, 0] y1 = sensor_positions[0, 1] z1 = sensor_positions[0, 2] x2 = sensor_positions[1, 0] y2 = sensor_positions[1, 1] z2 = sensor_positions[1, 2] xi = sensor_positions[i, 0] yi = sensor_positions[i, 1] zi = sensor_positions[i, 2] if time_delays[i] == 0 and time_delays[1] == 0: Amat[i] = 0 Bmat[i] = 0 Cmat[i] = 0 Dmat[i] = 0 continue if time_delays[i] == 0: ti_value = 0 else: ti_value = 1 / (340.29 * time_delays[i]) if time_delays[1] == 0: t1_value = 0 else: t1_value = 1 / (340.29 * time_delays[1]) Amat[i] = ti_value * (-2 * x1 + 2 * xi) - t1_value * ( -2 * x1 + 2 * x2) Bmat[i] = ti_value * (-2 * y1 + 2 * yi) - t1_value * ( -2 * y1 + 2 * y2) Cmat[i] = ti_value * (-2 * z1 + 2 * zi) - t1_value * ( -2 * z1 + 2 * z2) Sum1 = (x1 ** 2) + (y1 ** 2) + (z1 ** 2) - (xi ** 2) - (yi ** 2) - (zi ** 2) Sum2 = (x1 ** 2) + (y1 ** 2) + (z1 ** 2) - (x2 ** 2) - (y2 ** 2) - (z2 ** 2) Dmat[i] = 340.29 * (time_delays[i] - time_delays[1]) + ti_value * Sum1 - t1_value * Sum2 M = numpy.zeros((len + 1, 3)) D = numpy.zeros((len + 1, 1)) for i in range(len): M[i, 0] = Amat[i] M[i, 1] = Bmat[i] M[i, 2] = Cmat[i] D[i] = Dmat[i] M = numpy.array(M[2:len, :]) D = numpy.array(D[2:len]) D = numpy.multiply(-1, D) Minv = linalg.pinv(M) T = numpy.dot(Minv, D) x = T[0] y = T[1] z = T[2] return x, y, z @property def padding(self): return self.__padding @property def distances(self): return self.__distances
9,399
3,121
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal

from analysis.comparisons import gt, gte, lt, lte


def test_comparisons():
    # a | b | gt | gte | lt | lte
    # ---+---+----+-----+----+-----
    # 1 | 1 | F  | T   | F  | T
    # 1 | 2 | F  | F   | T  | T
    # 2 | 1 | T  | T   | F  | F
    # 1 | - | T  | T   | F  | F
    # - | 1 | F  | F   | T  | T
    # - | - | F  | F   | F  | F

    # This makes things line up nicely
    T = True
    F = False

    df = pd.DataFrame.from_records(
        [
            (1, 1, F, T, F, T),
            (1, 2, F, F, T, T),
            (2, 1, T, T, F, F),
            (1, 0, T, T, F, F),
            (0, 1, F, F, T, T),
            (0, 0, F, F, F, F),
        ],
        columns=["a", "b", "gt", "gte", "lt", "lte"],
    ).replace(0, np.nan)

    assert_series_equal(gt(df["a"], df["b"]), df["gt"], check_names=False)
    assert_series_equal(gte(df["a"], df["b"]), df["gte"], check_names=False)
    assert_series_equal(lt(df["a"], df["b"]), df["lt"], check_names=False)
    assert_series_equal(lte(df["a"], df["b"]), df["lte"], check_names=False)
1,139
498
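# The analysis.comparisons module under test is not shown here. One way the
# gt/gte/lt/lte helpers could be written to satisfy the truth table above
# (a sketch under that assumption, not the project's actual implementation):
# a missing value compares as smaller than any present value, and when both
# sides are missing every comparison is False.
import pandas as pd


def gt(a: pd.Series, b: pd.Series) -> pd.Series:
    both_missing = a.isna() & b.isna()
    return ((a > b) | (a.notna() & b.isna())) & ~both_missing


def gte(a: pd.Series, b: pd.Series) -> pd.Series:
    both_missing = a.isna() & b.isna()
    return ((a >= b) | (a.notna() & b.isna())) & ~both_missing


def lt(a: pd.Series, b: pd.Series) -> pd.Series:
    return gt(b, a)


def lte(a: pd.Series, b: pd.Series) -> pd.Series:
    return gte(b, a)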
import pytest
import numpy as np
import os
import pyarrow as pa
import pyarrow.feather as feather
import pandas as pd
from app.services.preprocessor import PreProcessor
from typing import Iterator, List, Tuple


@pytest.fixture
def preprocessor() -> PreProcessor:
    return PreProcessor("datasets/csvs/train.csv", "datasets/csvs/building1.csv")


@pytest.fixture
def generic_csv() -> Iterator[str]:
    arr = np.random.rand(20, 20)
    path = "datasets/csvs/dummy.csv"
    np.savetxt(path, arr)
    yield path
    os.remove(path)


@pytest.fixture
def generic_feathers() -> Iterator[Tuple[List[str], int, int]]:
    base_path = "datasets/gen"
    files = []
    n_files = 30
    col_rows = 20
    rows = [f"row{x}" for x in range(0, col_rows)]
    columns = [f"column{x}" for x in range(0, col_rows)]
    for number in range(0, n_files):
        arr = np.random.rand(col_rows, col_rows)
        df = pd.DataFrame(arr, index=rows, columns=columns)
        file_path = f"{base_path}/gen_{number}.feather"
        files.append(file_path)
        feather.write_feather(df, file_path)
    yield (files, n_files, col_rows)
    for file in files:
        os.remove(file)
1,127
407
# -*- coding: utf-8 -*-
#
# @package color_histogram.results.hist_2d
#
# Compute 2D color histogram result.
# @author tody
# @date 2015/08/28

import os
import numpy as np
import matplotlib.pyplot as plt

from color_histogram.io_util.image import loadRGB
from color_histogram.cv.image import rgb, to32F
from color_histogram.datasets.datasets import dataFile
from color_histogram.results.results import resultFile, batchResults
from color_histogram.plot.window import showMaximize
from color_histogram.core.hist_2d import Hist2D
from color_histogram.util.timer import timing_func


#
# Plot 2D color histograms for the target image, color space, channels.
@timing_func
def plotHistogram2D(image, num_bins, color_space, channels, ax):
    font_size = 15
    plt.title("%s (%s, %s): %s bins" % (color_space,
                                        color_space[channels[0]],
                                        color_space[channels[1]],
                                        num_bins), fontsize=font_size)

    hist2D = Hist2D(image, num_bins=num_bins, color_space=color_space, channels=channels)
    hist2D.plot(ax)


#
# Create histogram 2D result function.
def histogram2DResultFunc(num_bins=32):
    def func(image_file):
        histogram2DResult(image_file, num_bins)
    return func


#
# Compute histogram 2D result for the image file.
def histogram2DResult(image_file, num_bins=32, image=None, tile=None):
    image_name = os.path.basename(image_file)

    if image is None:
        image_name = os.path.basename(image_file)
        image_name = os.path.splitext(image_name)[0]
        image = loadRGB(image_file)

    if tile is None:
        tile = image

    fig_w = 10
    fig_h = 6
    fig = plt.figure(figsize=(fig_w, fig_h))
    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95, wspace=0.3, hspace=0.2)

    font_size = 15
    fig.suptitle("Histogram 2D", fontsize=font_size)

    h, w = image.shape[:2]
    fig.add_subplot(231)
    plt.title("Original Image: %s x %s" % (w, h), fontsize=font_size)
    plt.imshow(tile)
    plt.axis('off')

    color_space = "hsv"
    channels_list = [[0, 1], [0, 2], [1, 2]]

    plot_id = 234
    for channels in channels_list:
        ax = fig.add_subplot(plot_id)
        plotHistogram2D(image, num_bins, color_space, channels, ax)
        plot_id += 1

    result_name = image_name + "_hist2D"
    result_file = resultFile(result_name)
    plt.savefig(result_file, transparent=True)


#
# Compute histogram 2D results for the given data names, ids.
def histogram2DResults(data_names, data_ids, num_bins=32):
    batchResults(data_names, data_ids, histogram2DResultFunc(num_bins), "Histogram 2D")


if __name__ == '__main__':
    data_names = ["flower"]
    data_ids = [0, 1, 2]
    histogram2DResults(data_names, data_ids)
2,811
1,048
import os
import unittest
import tempfile

from gi.repository import Smf


class Test(unittest.TestCase):
    def setUp(self):
        self.path = os.path.dirname(__file__)

    def compare_smf_files(self, a, b):
        self.assertEqual(a.format, b.format)
        self.assertEqual(a.ppqn, b.ppqn)
        self.assertEqual(a.frames_per_second, b.frames_per_second)
        self.assertEqual(a.resolution, b.resolution)
        self.assertEqual(a.number_of_tracks, b.number_of_tracks)
        self.assertEqual(len(a.tracks_array), len(b.tracks_array))
        for i in range(a.number_of_tracks):
            tracka = a.tracks_array[i]
            trackb = b.tracks_array[i]
            self.assertEqual(tracka.smf, a)
            self.assertEqual(trackb.smf, b)
            self.assertEqual(tracka.track_number, trackb.track_number)
            self.assertEqual(tracka.number_of_events, trackb.number_of_events)
            self.assertEqual(tracka.file_buffer_length, trackb.file_buffer_length)
            self.assertEqual(tracka.last_status, trackb.last_status)
            self.assertEqual(tracka.next_event_offset, trackb.next_event_offset)
            #self.assertEqual(tracka.next_event_number, trackb.next_event_number)
            #self.assertEqual(tracka.time_of_next_event, trackb.time_of_next_event)
            tracka_events = tracka.events_array
            trackb_events = trackb.events_array
            for j in range(tracka.number_of_events):
                eventa = tracka_events[j]
                eventb = trackb_events[j]
                self.assertEqual(tracka, eventa.track)
                self.assertEqual(trackb, eventb.track)
                self.assertEqual(eventa.event_number, eventb.event_number)
                self.assertEqual(eventa.delta_time_pulses, eventb.delta_time_pulses)
                self.assertEqual(eventa.time_pulses, eventb.time_pulses)
                self.assertEqual(eventa.time_seconds, eventb.time_seconds)
                self.assertEqual(eventa.track_number, eventb.track_number)
                self.assertEqual(eventa.midi_buffer_length, eventb.midi_buffer_length)
                self.assertEqual(eventa.get_buffer(), eventb.get_buffer())

    @unittest.expectedFailure
    def test_tempo_ref_counts(self):
        bach = Smf.File.load(os.path.join(self.path, 'chpn_op53.mid'))
        tempo = bach.get_last_tempo()
        #self.assertEqual(tempo.ref_count, 2)
        bach.remove_tempo(tempo)
        self.assertEqual(tempo.ref_count, 1)

    def test_file_ref_count(self):
        pass

    def test_bach_read_write_read_compare(self):
        orig = Smf.File.load(os.path.join(self.path, 'chpn_op53.mid'))
        handle, temp_filename = tempfile.mkstemp('mid')
        os.close(handle)
        orig.save(temp_filename)
        new = Smf.File.load(temp_filename)
        self.compare_smf_files(orig, new)


if __name__ == '__main__':
    unittest.main()
2,926
983