text
stringlengths
957
885k
<reponame>Troublor/smSymer<filename>smsymerd/wsserver-compile.py<gh_stars>1-10 import asyncio import json import os import subprocess import sys import time from typing import List, Tuple import websockets from smsymer import utils, Printer from smsymer.analyzer import Analyzer from smsymer.cfg import CFG from smsymer.evm import ByteCode, Instruction from smsymerd.debugger import Debugger from smsymerd.model import instruction from smsymerd.wprinter import WPrinter async def serve(websocket, path): w_printer = WPrinter(websocket) debugger = Debugger() while True: data = await websocket.recv() data = json.loads(data) if "identifier" not in data: data["identifier"] = "" if data["operation"] == "solc": bytecode = solc(w_printer, data["data"], bool(data["options"]["optimization"]), bool(data["options"]["runtime"])) resp = { "operation": "return-solc", "data": { "bytecode": bytecode, }, "identifier": data["identifier"] } await websocket.send(json.dumps(resp)) elif data["operation"] == "disasm": bytecode = data["data"]["bytecode"] instructions = disasm(w_printer, bytecode) resp = { "operation": "return-disasm", "data": instructions, "identifier": data["identifier"] } await websocket.send(json.dumps(resp)) elif data["operation"] == "debug": if data["type"] == "reset": debugger.reset() elif data["type"] == "execute": instructions = data["data"] debugger.execute( map(lambda ins: Instruction(addr=ins["address"], opcode=ins["opcode"], bytecode=ins["bytecode"], params=ins["params"]), instructions)) resp = { "operation": "return-execute", "data": debugger.get_status(), "identifier": data["identifier"] } await websocket.send(json.dumps(resp)) elif data["operation"] == "analyze": instructions = list(map( lambda ins: Instruction(addr=ins["address"], opcode=ins["opcode"], bytecode=ins["bytecode"], params=ins["params"]), data["data"])) vuls = analyze(WPrinter(websocket, identifier="analyze-log"), instructions) resp = { "operation": "return-analyze", "data": vuls, "identifier": 
data["identifier"], } await websocket.send(json.dumps(resp)) def analyze(printer: WPrinter, instructions: List[Instruction]) -> List[dict]: analyzer = Analyzer(instructions, printer, True) return analyze_cfg(analyzer.construct_cfg) + analyze_cfg(analyzer.body_cfg) def analyze_cfg(cfg: CFG) -> List[dict]: resp = [] timestamp_dependency_report = cfg.check_timestamp_dependency() if timestamp_dependency_report["vulnerable"]: for report in timestamp_dependency_report["spots"]: vul = { "type": "timestamp_dependency", "timestamp_address": str(cfg.get_instruction(report["timestamp_address"])), "dependency_address": str(cfg.get_instruction(report["dependency_address"])), } resp.append(vul) uncheck_call_report = cfg.check_unchecked_call() if uncheck_call_report["vulnerable"]: for report in uncheck_call_report["spots"]: vul = { "type": "unchecked_call", "call_address": str(cfg.get_instruction(report["call_address"])), } resp.append(vul) reentrancy_report = cfg.check_reentrancy() if reentrancy_report["vulnerable"]: for report in reentrancy_report["spots"]: vul = { "type": "reentrancy", "call_address": str(cfg.get_instruction(report["call_address"])), "guard_storage_variables": list( map(lambda a: str(cfg.get_instruction(a)), report["storage_addresses"])), } resp.append(vul) return resp def disasm(printer: WPrinter, bytecode: str) -> List[dict]: instructions = ByteCode.disasm(bytecode, printer) return list(map(lambda ins: instruction.dict_encode(ins), instructions)) def solc(printer: WPrinter, data: dict, optimization, runtime): tmp_file = os.path.join(sys.path[0], "tmp", utils.uuid()) with open(tmp_file, "w+") as tmp_f: tmp_f.write(data["sourceCode"]) solc_path = os.path.join(sys.path[0], "..", 'tools', "solc.exe") cmd = solc_path if optimization: cmd += " --optimize" cmd += " --bin" if runtime: cmd += "-runtime" cmd += " " + tmp_file full_bytecode = None p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) is_bin = False lines = 
p.stdout.readlines() for line in lines: if not runtime: printer.print(str(line, encoding='ansi')) if is_bin: full_bytecode = str(line, encoding='ansi').strip() break if "Binary" in str(line): is_bin = True else: printer.error("Compile error") return full_bytecode start_server = websockets.serve(serve, 'localhost', 1110) asyncio.get_event_loop().run_until_complete(start_server) asyncio.get_event_loop().run_forever()
import pandas as pd import numpy as np import matplotlib.pyplot as plt import scipy.odr import itertools def computeModelDetails(frame): """ Takes a dataframe and computes columns related to the dynamical frb model """ tauwerror_expr = lambda r: 1e3*r['time_res']*np.sqrt(r['max_sigma']**6*r['min_sigma_error']**2*np.cos(r['angle']-np.pi/2)**4 + r['angle_error']**2*r['max_sigma']**2*r['min_sigma']**2*(-r['max_sigma']**2 + r['min_sigma']**2)**2*np.cos(r['angle']-np.pi/2)**2*np.sin(r['angle']-np.pi/2)**2 + r['max_sigma_error']**2*r['min_sigma']**6*np.sin(r['angle']-np.pi/2)**4)/(r['max_sigma']**2*np.cos(r['angle']-np.pi/2)**2 + r['min_sigma']**2*np.sin(r['angle']-np.pi/2)**2)**1.5 frame['drift_abs'] = -1*(frame['drift (mhz/ms)']) frame['drift_over_nuobs'] = frame[['drift_abs','center_f']].apply(lambda row: row['drift_abs'] / row['center_f'], axis=1) frame['recip_drift_over_nuobs'] = 1/frame['drift_over_nuobs'] frame['drift_abs_nuobssq'] = frame['drift_abs']/frame['center_f']**2/1000 # unitless frame['min_sigma'] = frame[['sigmax','sigmay']].apply(lambda row: min(abs(row['sigmax']), abs(row['sigmay'])), axis=1) frame['max_sigma'] = frame[['sigmax','sigmay']].apply(lambda row: max(abs(row['sigmax']), abs(row['sigmay'])), axis=1) # the following two lines assume that if sigmax > sigmay, then sigmax_error > sigmay_error, which is true (so far) for this dataset frame['min_sigma_error'] = frame[['sigmax_error','sigmay_error']].apply(lambda row: min(row['sigmax_error'], row['sigmay_error']), axis=1) frame['max_sigma_error'] = frame[['sigmax_error','sigmay_error']].apply(lambda row: max(row['sigmax_error'], row['sigmay_error']), axis=1) frame['sigma_t'] = frame[['min_sigma','time_res']].apply(lambda row: row['min_sigma']*row['time_res'], axis=1) frame['tau_w'] = frame[['time_res', 'min_sigma', 'max_sigma', 'angle']].apply( lambda r: r['time_res']*r['min_sigma']*r['max_sigma'] / np.sqrt( np.abs((np.sin(r['angle']-np.pi/2)*r['min_sigma'])**2 + 
(np.cos(r['angle']-np.pi/2)*r['max_sigma'])**2 )), axis=1 ) # this error is in ms frame['tau_w_error'] = frame[['tau_w', 'time_res', 'min_sigma', 'max_sigma', 'min_sigma_error', 'max_sigma_error', 'angle', 'angle_error']].apply( tauwerror_expr, axis=1 ) frame['sigma_t_ms'] = frame['sigma_t']*1e3 frame['tau_w_ms'] = frame['tau_w']*1e3 ## Redshift corrections if 'z' in frame.index: frame['drift_z'] = frame[['drift_over_nuobs', 'z']].apply(lambda row: row['drift_over_nuobs']*(1+row['z']), axis=1) frame['tau_w_ms_z'] = frame[['tau_w_ms', 'z']].apply(lambda row: row['tau_w_ms']/(1+row['z']), axis=1) return frame def cleanAngle(row): angle = row['angle'] if angle < 0 or angle > np.pi: if angle > np.pi: return angle % (np.pi) elif angle < 0 and angle > -np.pi: return angle + np.pi elif angle < 0 and angle < -np.pi: angle = angle % (2*np.pi) if angle > np.pi: return angle - np.pi else: return angle else: return angle def atanmodel(B, x): return np.arctan(x/B[0]) def offset_atanmodel(B, x, zero_ddm_fit=6.554): return np.arctan(x/zero_ddm_fit) + B[0] def reciprocal(x, a): return a/x def reciprocal_log(x, b): return -x+b def log_log(x, k, b): return k*x+b def reciprocal_odr(B, x): return B[0]/x def reciprocal_odr_log(B, x): return -x+B[0] def fitreciprocal(x, data, sigma=1): guess = [522] abs_sigma = True if (type(sigma) == int) and (sigma == 1): abs_sigma = False sigma = np.zeros(len(data.ravel())) + sigma popt, pcov = scipy.optimize.curve_fit(reciprocal, x, data, p0=guess, sigma=sigma, absolute_sigma=abs_sigma) return popt, pcov def fitreciprocal_log(x, data, sigma=1, loglog=False): guess = [522] abs_sigma = True if (type(sigma) == int) and (sigma == 1): abs_sigma = False sigma = np.zeros(len(data.ravel())) + sigma if loglog: guess = [1,1] popt, pcov = scipy.optimize.curve_fit(log_log, x, data, p0=guess, sigma=sigma, absolute_sigma=abs_sigma) else: popt, pcov = scipy.optimize.curve_fit(reciprocal_log, x, data, p0=guess, sigma=sigma, absolute_sigma=abs_sigma) return popt, 
pcov def modelerror(frame): ex = np.sqrt(frame['red_chisq'])*frame['tau_w_error'] ey = np.sqrt(frame['red_chisq'])*frame['drift error (mhz/ms)']/frame['center_f'] return ex, ey def rangeerror(frame): """ These ranges are not errors in the statistical sense. they are the min/max values, which should be larger than the real errors. So this is extremely conservative while also being easier to compute. The strange shape of the returned value is due to a quirk in the way pandas handles asymmetric errors. """ ex = [np.array([frame['tau_w_ms'] - frame['tw_min'], frame['tw_max'] - frame['tau_w_ms']])] ey = [np.array([frame['drift_over_nuobs'] - frame['drift_nu_min'], frame['drift_nu_max'] - frame['drift_over_nuobs']])] return ex, ey def log_error(frame): """ see modelerror() """ sx = np.log((frame['tau_w_ms'] + np.sqrt(frame['red_chisq'])*frame['tau_w_error']) / frame['tau_w_ms']) sy = np.log((frame['drift_over_nuobs'] + np.sqrt(frame['red_chisq'])*(frame['drift error (mhz/ms)'])) / frame['drift_over_nuobs']) return sx, sy def rangelog_error(frame): """ The range errors are asymmetric. Average the error """ ex, ey = rangeerror(frame) ex = np.log((frame['tau_w_ms'] + (ex[0][0]+ex[0][1])/2 ) / frame['tau_w_ms']) ey = np.log((frame['drift_over_nuobs'] + (ey[0][0]+ey[0][1])/2) / frame['drift_over_nuobs']) return ey, ey # return np.log(np.maximum(ex[0][0], ex[0][1])), np.log(np.maximum(ey[0][0], ey[0][1])) def rangeerror_odr(frame): """ The range errors are asymmetric. 
Take the largest error """ ex, ey = rangeerror(frame) return np.maximum(ex[0][0], ex[0][1]), np.maximum(ey[0][0], ey[0][1]) def fitodr(frame, beta0=[1000], errorfunc=log_error, log=True): fit_model = scipy.odr.Model(reciprocal_odr) fit_model_log = scipy.odr.Model(reciprocal_odr_log) fitdata = scipy.odr.RealData(frame['tau_w_ms'], frame['drift_over_nuobs'], sx=rangeerror_odr(frame)[0], sy=rangeerror_odr(frame)[1]) fitdata_log = scipy.odr.RealData(np.log(frame['tau_w_ms']), np.log(frame['drift_over_nuobs']), sx=errorfunc(frame)[0], sy=errorfunc(frame)[1]) odrfitter_log = scipy.odr.ODR(fitdata_log, fit_model_log, beta0=beta0) odrfitter_log.set_job(fit_type=0) odrfitter = scipy.odr.ODR(fitdata, fit_model, beta0=beta0) odrfitter.set_job(fit_type=0) if log: # print('log odr') return odrfitter_log.run() else: # print('linear odr') return odrfitter.run() def driftranges(source): """ Given all burst and model data at different trial DMs, computes the range of drifts durations across the range of trial DMs """ yaxis = 'drift_over_nuobs' xaxis ='tau_w_ms' for burst in source.index.unique(): burstdf = source.loc[burst] eduration = np.sqrt(burstdf['red_chisq'])*burstdf['tau_w_error'] edriftnuobs = np.sqrt(burstdf['red_chisq'])*burstdf['drift error (mhz/ms)']/burstdf['center_f'] dmax, dmin = np.max(burstdf[yaxis] + edriftnuobs), np.min(burstdf[yaxis] - edriftnuobs) tmax, tmin = np.max(burstdf[xaxis] + eduration) , np.min(burstdf[xaxis] - eduration) source.loc[burst, 'drift_nu_max'] = dmax source.loc[burst, 'drift_nu_min'] = dmin source.loc[burst, 'drift_max'] = dmax*burstdf['center_f'] source.loc[burst, 'drift_min'] = dmin*burstdf['center_f'] source.loc[burst, 'tw_max'] = tmax source.loc[burst, 'tw_min'] = tmin # print(f'burst: {burst},\t\tdriftrange = ({dmin}, {dmax}),\t\ttwrange = ({tmin}, {tmax})') return source def plotDriftVsDuration(frames=[], labels=[], title=None, logscale=True, annotatei=0, markers=['o', 'p', 'X', 'd', 's'], hidefit=[], hidefitlabel=False, 
fitlines=['r-', 'b--', 'g-.'], fitextents=None, errorfunc=modelerror, fiterrorfunc=rangelog_error, dmtrace=False): """ wip """ plt.rcParams["errorbar.capsize"] = 4 plt.rcParams["font.family"] = "serif" markersize = 125#100 fontsize = 25 #18 annotsize = 14 filename = 'log_drift_over_nu_obsvsduration' if logscale else 'drift_over_nu_obsvsduration' figsize = (17, 8) figsize = (17, 9) # figsize = (14, 10) yaxis = 'drift_over_nuobs' yaxis_lbl = 'Sub-burst Slope $\\,\\left|\\frac{d\\nu_\\mathrm{obs}}{dt_\\mathrm{D}}\\right|(1/\\nu_{\\mathrm{obs}})$ (ms$^{-1}$)' # yaxis = 'recip_drift_over_nuobs' # yaxis_lbl = 'nu_obs / drift' if type(markers) == list: markers = itertools.cycle(markers) if type(fitlines) == list: fitlines = itertools.cycle(fitlines) ax = frames[0].plot.scatter(x='tau_w_ms', y=yaxis, xerr=errorfunc(frames[0])[0], yerr=errorfunc(frames[0])[1], figsize=figsize, s=markersize, c='color', colorbar=False, fontsize=fontsize, logy=logscale, logx=logscale, marker=next(markers), edgecolors='k', label=labels[0]) for frame, lbl in zip(frames[1:], labels[1:]): frame.plot.scatter(ax=ax, x='tau_w_ms', y=yaxis, xerr=errorfunc(frame)[0], yerr=errorfunc(frame)[1], figsize=figsize, s=markersize, c='color', colorbar=False, fontsize=fontsize, logy=logscale, logx=logscale, marker=next(markers), edgecolors='k', label=lbl) if type(annotatei) == int: annotatei =[annotatei] for ai in annotatei: if ai < len(frames): for k, v in frames[ai].iterrows(): if v[yaxis] > 0 or not logscale: ax.annotate(k, (v['tau_w_ms'], v[yaxis]), xytext=(-3,5), textcoords='offset points', weight='bold', size=annotsize) alldata = pd.concat([f for f in frames]) if not fitextents: fitextents = min(alldata['tau_w_ms'])*0.9, max(alldata['tau_w_ms'])*1.1 logfit = True if type(hidefit) == int: hidefit = [hidefit] fits = [] for fi, (frame, label, line) in enumerate(zip(frames, labels, fitlines)): x = np.linspace(fitextents[0], fitextents[1], num=1200) if logfit: fit = fitodr(frame, errorfunc=fiterrorfunc) param, 
err = np.exp(fit.beta[0]), np.exp(fit.beta[0])*(np.exp(fit.sd_beta[0])-1) else: fit = fitodr(frame, log=logfit) param, err = fit.beta[0], fit.sd_beta[0] ## compute reduced chisq # parameter error ex = frame['tau_w_error']*np.sqrt(frame['red_chisq']) ey = frame['drift error (mhz/ms)']/frame['center_f']*np.sqrt(frame['red_chisq']) data_err = np.sqrt(ex**2 + ey**2) residuals = frame['drift_over_nuobs'] - param/frame['tau_w_ms'] chisq = np.sum((residuals / data_err) ** 2) red_chisq = chisq / (len(frame) - 1) # print(residuals) fits.append([label, param, err, red_chisq, residuals, len(frame)]) lstr = '' if not hidefitlabel: lstr = '{} fit ({:.3f} $\\pm$ {:.3f}) $t_w^{{-1}}$'.format(label, param, err) if fi not in hidefit: plt.plot(x, param/x, line, label=lstr) if title: ax.set_title(title, size=fontsize) if dmtrace: sorteddata = pd.concat([frames[dmi] for dmi in np.argsort(labels)]) for bid in sorteddata.index.unique(): plt.plot(sorteddata.loc[bid]['tau_w_ms'], sorteddata.loc[bid]['drift_over_nuobs']) ax.set_xlabel('Sub-burst Duration $t_\\mathrm{w}$ (ms)', size=fontsize) ax.set_ylabel(yaxis_lbl, size=fontsize) plt.legend(fontsize='xx-large') # plt.legend() plt.tight_layout() return ax, fits def _plotAnglevsDM(frames, annotate=False, save=False, drops=[]): thetamodel = scipy.odr.Model(atanmodel) offsetmodel = scipy.odr.Model(offset_atanmodel) for frame in frames: frame = computeModelDetails(frame) frame['angle_clean'] = frame[['angle']].apply(cleanAngle, axis=1) - (np.pi/2) def errorexpr(frame): ex = frame['tau_w_error'] ey = frame['angle_error'] return ex, ey markersize = 125 #100 fontsize = 25 #18 annotsize = 14 logscale = False figsize = (15, 8) ax = frames[0].drop(drops).plot.scatter(x='tau_w_ms', y='angle_clean', xerr=errorexpr(frame[0])[0], yerr=errorexpr(frame[0])[0], figsize=figsize, s=markersize, c='b', colorbar=False, fontsize=fontsize, logy=logscale, logx=logscale, marker='X', edgecolors='k', label='$\\Delta$DM = 1/2 pc/cm$^3$') markers = ['o', 'p', 's'] for 
frame, c, label, mark in zip(frames[:3], ['r', 'c', 'g'], ['$\\Delta$DM = 0 pc/cm$^3$', '$\\Delta$DM = -1 pc/cm$^3$', '$\\Delta$DM = -2 pc/cm$^3$'], markers): frame.drop(drops).plot.scatter(ax=ax, x='tau_w_ms', y='angle_clean', xerr=errorexpr(frame)[0], yerr=errorexpr(frame)[1], figsize=figsize, s=markersize, c=c, colorbar=False, fontsize=fontsize, logy=logscale, logx=logscale, marker=mark, edgecolors='k', label=label) ax.set_xlabel('Sub-burst Duration $t_\\mathrm{w}$ (ms)', size=fontsize) #ax.set_ylabel('-$\pi/2 + $ Gaussian2d angle (rad)', size=fontsize) ax.set_ylabel('Sub-burst Drift Angle $\\theta$ (rad)', size=fontsize) ## Find Fits lstyles = ['-', '--', '-.', ':'] for frame, drops, pcol, beta, lstyle in zip(frames, [[15], [15], [15], [15]], ['r', 'c', 'g', 'b'], [-6, -4, -3, -9], lstyles): if frame.equals(frames[0]): model = thetamodel else: model = offsetmodel #model = thetamodel datafitter = scipy.odr.RealData(frame.drop(drop)['tau_w_ms'], frame.drop(drop)['angle_clean'], sx=errorexpr(frame)[0], sy=errorexpr(frame)[1]) anglefitter = scipy.odr.ODR(datafitter, model, beta0=[1]) anglefitter.set_job(fit_type=0) anglefit = anglefitter.run() tws = np.linspace(0, 8.5, num=80) print(anglefit.beta) #print(anglefit.beta[0]) if model == thetamodel: plt.plot(tws, np.arctan(tws/anglefit.beta[0]), c=pcol, label="$\\tan^{{-1}}(t_\\mathrm{{w}}/{:.2f})$".format(anglefit.beta[0]), linestyle=lstyle) elif model == offsetmodel: plt.plot(tws, np.arctan(tws/zero_ddm_fit) + anglefit.beta[0], c=pcol, label="$\\tan^{{-1}}(t_\\mathrm{{w}}/{:.2f}) {:+.2f}$ rad".format(zero_ddm_fit, anglefit.beta[0]), linestyle=lstyle) ## Point Annotations if annotate: for k, v in frames[0].iterrows(): ax.annotate(int(k) if k != 15.5 else k, (v['tau_w_ms'], v['angle_clean']), xytext=(-3,5), textcoords='offset points', weight='bold', size=annotsize) ax.set_xlim(0, 8.5) plt.title("Fit Angles for FRB180916 at different DMs", size=25) plt.legend(fontsize='xx-large') if save: for fformat in ['png', 'pdf', 
'eps']: plt.savefig('angleatdifferentDMs.{}'.format(fformat))
<reponame>choderalab/Protons # coding=utf-8 """Test the reading of forcefield files included with the package. Developer Notes --------------- Do not use protons.app classes for this test module. These files need to be tested to be compatible with original OpenMM. Note that the Z in the filename is necessary so that functions with class side effects get loaded last. """ import pytest from simtk import unit from simtk.openmm import app, openmm as mm import os from protons import GBAOABIntegrator, GHMCIntegrator from protons import ForceFieldProtonDrive from protons.app.proposals import UniformProposal from protons import app as protonsapp from protons.app.driver import SAMSApproach from protons import SAMSCalibrationEngine from . import get_test_data from .utilities import create_compound_gbaoab_integrator, SystemSetup # Patch topology to unload standard bond definitions def unloadStandardBonds(cls): """ Resets _standardBonds and _hasLoadedStandardBonds to original state. """ cls._hasLoadedStandardBonds = False cls._standardBonds = dict() app.Topology.unloadStandardBonds = classmethod(unloadStandardBonds) app_location = os.path.dirname(protonsapp.__file__) bonds_path = os.path.join(app_location, "data", "bonds-amber10-constph.xml") ffxml_path = os.path.join(app_location, "data", "amber10-constph.xml") ions_spce_path = os.path.join(app_location, "data", "ions_spce.xml") ions_tip3p_path = os.path.join(app_location, "data", "ions_tip3p.xml") ions_tip4pew_path = os.path.join(app_location, "data", "ions_tip4pew.xml") hydrogen_path = os.path.join(app_location, "data", "hydrogens-amber10-constph.xml") gaff_path = os.path.join(app_location, "data", "gaff.xml") gaff2_path = os.path.join(app_location, "data", "gaff2.xml") def test_reading_protons(): """Read parameters and templates protons.xml using OpenMM.""" parsed = app.ForceField(ffxml_path) def test_reading_bonds(): """Read bond definitions in bonds-protons.xml using OpenMM.""" 
app.Topology.loadBondDefinitions(bonds_path) # unit test specific errors might occur otherwise when loading files due # to class side effects app.Topology.unloadStandardBonds() def test_create_peptide_system_using_protons_xml(): """Test if protons.xml can be used to successfully create a peptide System object in OpenMM.""" app.Topology.loadBondDefinitions(bonds_path) # Load pdb file with protons compatible residue names pdbx = app.PDBxFile( get_test_data( "glu_ala_his-solvated-minimized-renamed.cif", "testsystems/tripeptides" ) ) forcefield = app.ForceField(ffxml_path, ions_spce_path, "spce.xml") # System Configuration nonbondedMethod = app.PME constraints = app.AllBonds rigidWater = True constraintTolerance = 1.0e-7 # Integration Options dt = 0.5 * unit.femtoseconds temperature = 300.0 * unit.kelvin friction = 1.0 / unit.picosecond pressure = 1.0 * unit.atmospheres barostatInterval = 25 # Simulation Options platform = mm.Platform.getPlatformByName("Reference") # Prepare the Simulation topology = pdbx.topology positions = pdbx.positions system = forcefield.createSystem( topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater, ) system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostatInterval)) integrator = GHMCIntegrator(temperature, friction, dt) integrator.setConstraintTolerance(constraintTolerance) # Clean up so that the classes remain unmodified # unit test specific errors might occur otherwise when loading files due # to class side effects app.Topology.unloadStandardBonds() def test_create_peptide_simulation_using_protons_xml(): """Test if protons.xml can be used to successfully create a peptide Simulation object in OpenMM and Instantiate a ForceFieldProtonDrive.""" sys_details = SystemSetup() sys_details.timestep = 0.5 * unit.femtoseconds sys_details.temperature = 300.0 * unit.kelvin sys_details.collision_rate = 1.0 / unit.picosecond sys_details.pressure = 1.0 * unit.atmospheres sys_details.barostatInterval = 25 
sys_details.constraint_tolerance = 1.0e-7 app.Topology.loadBondDefinitions(bonds_path) # Load pdb file with protons compatible residue names pdbx = app.PDBxFile( get_test_data( "glu_ala_his-solvated-minimized-renamed.cif", "testsystems/tripeptides" ) ) forcefield = app.ForceField(ffxml_path, ions_spce_path, "spce.xml") # System Configuration nonbondedMethod = app.PME constraints = app.AllBonds rigidWater = True sys_details.constraintTolerance = 1.0e-7 # Simulation Options platform = mm.Platform.getPlatformByName("Reference") # Prepare the Simulation topology = pdbx.topology positions = pdbx.positions system = forcefield.createSystem( topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater, ) system.addForce( mm.MonteCarloBarostat( sys_details.pressure, sys_details.temperature, sys_details.barostatInterval ) ) integrator = create_compound_gbaoab_integrator(testsystem=sys_details) driver = ForceFieldProtonDrive( sys_details.temperature, topology, system, forcefield, ffxml_path, pressure=sys_details.pressure, perturbations_per_trial=1, ) simulation = app.Simulation(topology, system, integrator, platform) simulation.context.setPositions(positions) simulation.context.setVelocitiesToTemperature(sys_details.temperature) # run one step and one update simulation.step(1) driver.attach_context(simulation.context) driver.update(UniformProposal(), nattempts=1) # Clean up so that the classes remain unmodified # unit test specific errors might occur otherwise when loading files due # to class side effects app.Topology.unloadStandardBonds() def test_create_peptide_simulation_with_residue_pools_using_protons_xml(): """Test if protons.xml can be used to successfully create a peptide Simulation object in OpenMM and Instantiate a ForceFieldProtonDrive, while using pools of residues to sample from.""" sys_details = SystemSetup() sys_details.timestep = 0.5 * unit.femtoseconds sys_details.temperature = 300.0 * unit.kelvin sys_details.collision_rate = 1.0 
/ unit.picosecond sys_details.pressure = 1.0 * unit.atmospheres sys_details.barostatInterval = 25 sys_details.constraint_tolerance = 1.0e-7 app.Topology.loadBondDefinitions(bonds_path) # Load pdb file with protons compatible residue names pdbx = app.PDBxFile( get_test_data( "glu_ala_his-solvated-minimized-renamed.cif", "testsystems/tripeptides" ) ) forcefield = app.ForceField(ffxml_path, ions_spce_path, "spce.xml") # System Configuration nonbondedMethod = app.PME constraints = app.AllBonds rigidWater = True sys_details.constraintTolerance = 1.0e-7 # Simulation Options platform = mm.Platform.getPlatformByName("Reference") # Prepare the Simulation topology = pdbx.topology positions = pdbx.positions system = forcefield.createSystem( topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater, ) system.addForce( mm.MonteCarloBarostat( sys_details.pressure, sys_details.temperature, sys_details.barostatInterval ) ) integrator = create_compound_gbaoab_integrator(testsystem=sys_details) driver = ForceFieldProtonDrive( sys_details.temperature, topology, system, forcefield, ffxml_path, pressure=sys_details.pressure, perturbations_per_trial=1, ) pools = {"glu": [0], "his": [1], "glu-his": [0, 1]} driver.define_pools(pools) simulation = app.Simulation(topology, system, integrator, platform) simulation.context.setPositions(positions) simulation.context.setVelocitiesToTemperature(sys_details.temperature) driver.attach_context(simulation.context) # run one step and one update simulation.step(1) driver.update(UniformProposal(), nattempts=1, residue_pool="his") # Clean up so that the classes remain unmodified # unit test specific errors might occur otherwise when loading files due # to class side effects app.Topology.unloadStandardBonds() def pattern_from_multiline(multiline, pattern): """Return only lines that contain the pattern Parameters ---------- multiline - multiline str pattern - str Returns ------- multiline str containing pattern """ return 
"\n".join([line for line in multiline.splitlines() if pattern in line]) def test_create_peptide_calibration_with_residue_pools_using_protons_xml(): """Test if protons.xml can be used to successfully create a peptide Simulation object in OpenMM and Instantiate a ForceFieldProtonDrive, while using pools of residues to sample from, and calibrate histidine.""" sys_details = SystemSetup() sys_details.timestep = 0.5 * unit.femtoseconds sys_details.temperature = 300.0 * unit.kelvin sys_details.collision_rate = 1.0 / unit.picosecond sys_details.pressure = 1.0 * unit.atmospheres sys_details.barostatInterval = 25 sys_details.constraint_tolerance = 1.0e-7 app.Topology.loadBondDefinitions(bonds_path) # Load pdb file with protons compatible residue names pdbx = app.PDBxFile( get_test_data( "glu_ala_his-solvated-minimized-renamed.cif", "testsystems/tripeptides" ) ) forcefield = app.ForceField(ffxml_path, ions_spce_path, "spce.xml") # System Configuration nonbondedMethod = app.PME constraints = app.AllBonds rigidWater = True sys_details.constraintTolerance = 1.0e-7 # Simulation Options platform = mm.Platform.getPlatformByName("Reference") # Prepare the Simulation topology = pdbx.topology positions = pdbx.positions system = forcefield.createSystem( topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater, ) system.addForce( mm.MonteCarloBarostat( sys_details.pressure, sys_details.temperature, sys_details.barostatInterval ) ) integrator = create_compound_gbaoab_integrator(testsystem=sys_details) driver = ForceFieldProtonDrive( sys_details.temperature, topology, system, forcefield, ffxml_path, pressure=sys_details.pressure, perturbations_per_trial=1, ) pools = {"glu": [0], "his": [1], "glu-his": [0, 1]} driver.define_pools(pools) driver.enable_calibration(SAMSApproach.ONESITE, group_index=1) sams_sampler = SAMSCalibrationEngine(driver) # SAMS on HIS simulation = app.Simulation(topology, system, integrator, platform) 
simulation.context.setPositions(positions) simulation.context.setVelocitiesToTemperature(sys_details.temperature) driver.attach_context(simulation.context) # run one step and one update simulation.step(1) driver.update(UniformProposal(), nattempts=1, residue_pool="his") sams_sampler.adapt_zetas() # Clean up so that the classes remain unmodified # unit test specific errors might occur otherwise when loading files due # to class side effects app.Topology.unloadStandardBonds() def test_peptide_system_integrity(): """ Set up peptide, and assure that the systems particles have not been modified after driver instantiation. """ sys_details = SystemSetup() sys_details.timestep = 0.5 * unit.femtoseconds sys_details.temperature = 300.0 * unit.kelvin sys_details.collision_rate = 1.0 / unit.picosecond sys_details.pressure = 1.0 * unit.atmospheres sys_details.barostatInterval = 25 sys_details.constraint_tolerance = 1.0e-7 app.Topology.loadBondDefinitions(bonds_path) # Load pdb file with protons compatible residue names pdbx = app.PDBxFile( get_test_data( "glu_ala_his-solvated-minimized-renamed.cif", "testsystems/tripeptides" ) ) forcefield = app.ForceField(ffxml_path, ions_spce_path, "spce.xml") # System Configuration nonbondedMethod = app.PME constraints = app.AllBonds rigidWater = True sys_details.constraintTolerance = 1.0e-7 # Simulation Options platform = mm.Platform.getPlatformByName("Reference") # Prepare the Simulation topology = pdbx.topology positions = pdbx.positions system = forcefield.createSystem( topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater, ) system.addForce( mm.MonteCarloBarostat( sys_details.pressure, sys_details.temperature, sys_details.barostatInterval ) ) integrator = create_compound_gbaoab_integrator(testsystem=sys_details) original_system = pattern_from_multiline( mm.XmlSerializer.serialize(system), "<Particle" ) driver = ForceFieldProtonDrive( sys_details.temperature, topology, system, forcefield, ffxml_path, 
pressure=sys_details.pressure, perturbations_per_trial=1, ) after_driver = pattern_from_multiline( mm.XmlSerializer.serialize(system), "<Particle" ) # Clean up so that the classes remain unmodified # unit test specific errors might occur otherwise when loading files due # to class side effects app.Topology.unloadStandardBonds() # Make sure there are no differences between the particles in each system assert original_system == after_driver def test_reading_hydrogens(): """Read hydrogen definitions in hydrogens-protons.xml using OpenMM.""" app.Modeller.loadHydrogenDefinitions(hydrogen_path) # Clean up so that the classes remain unmodified pass # implement unloadhydrogendefinitions if necessary def test_reading_ions_spce(): """Read parameters and templates in ions_spce.xml using OpenMM.""" parsed = app.ForceField(ions_spce_path) def test_reading_ions_tip3p(): """Read parameters and templates in ions_tip3p.xml using OpenMM.""" parsed = app.ForceField(ions_tip3p_path) def test_reading_ions_tip4pew(): """Read parameters and templates in ions_tip4pew.xml using OpenMM.""" parsed = app.ForceField(ions_tip4pew_path) def test_reading_gaff(): """Read parameters and templates in gaff.xml using OpenMM.""" parsed = app.ForceField(gaff_path) def test_reading_gaff2(): """Read parameters and templates in gaff2.xml using OpenMM.""" parsed = app.ForceField(gaff2_path)
<gh_stars>0 # # Copyright (c) 2013-2018 Quarkslab. # This file is part of IRMA project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the top-level directory # of this distribution and at: # # http://www.apache.org/licenses/LICENSE-2.0 # # No part of the project, including this file, may be copied, # modified, propagated, or distributed except according to the # terms contained in the LICENSE file. import logging import hashlib import unittest import os from irma.ftp.ftps import IrmaFTPS from irma.common.exceptions import IrmaFtpError from tempfile import TemporaryFile, mkstemp # ================= # Logging options # ================= def enable_logging(level=logging.INFO, handler=None, formatter=None): global log log = logging.getLogger() if formatter is None: formatter = logging.Formatter("%(asctime)s [%(name)s] " + "%(levelname)s: %(message)s") if handler is None: handler = logging.StreamHandler() handler.setFormatter(formatter) log.addHandler(handler) log.setLevel(level) # ============ # Test Cases # ============ class FTPSTestCase(unittest.TestCase): # Test config test_ftp_host = "irma.test" test_ftp_port = 21 test_ftp_auth = "password" test_ftp_key = None test_ftp_user = "testuser" test_ftp_passwd = "<PASSWORD>" test_ftp_vuser = None test_ftp_uploadpath = None def setUp(self): # check database is ready for test self.ftp = IrmaFTPS self.ftp_connect() self.flush_all() self.cwd = os.path.dirname(os.path.realpath(__file__)) def tearDown(self): # do the teardown self.flush_all() def flush_all(self): # check that ftp server is empty before running tests try: ftp = self.ftp_connect() ftp.deletepath("/") except IrmaFtpError as e: print("Testftp Error: {0}".format(e)) self.skipTest(FTPSTestCase) def ftp_connect(self, host=None, port=None, auth=None, key=None, user=None, passwd=None, vuser=None, upload_path=None): kwargs = {} if host is 
None: host = self.test_ftp_host if port is None: port = self.test_ftp_port if auth is None: auth = self.test_ftp_auth if key is None: key = self.test_ftp_key if user is None: user = self.test_ftp_user if passwd is None: passwd = <PASSWORD> if vuser is not None: kwargs['vuser'] = self.test_ftp_vuser, if upload_path is not None: kwargs['upload_path'] = self.test_ftp_uploadpath # check that ftp is up before running tests try: return self.ftp(host, port, auth, key, user, passwd, **kwargs) except IrmaFtpError as e: print("Testftp Error: {0}".format(e)) self.skipTest(FTPSTestCase) def test_ftp_upload_file(self): ftp = self.ftp_connect() filename = os.path.join(self.cwd, "test.ini") hashname = ftp.upload_file("/", filename) self.assertEqual(len(ftp.list("/")), 1) self.assertEqual(hashname, hashlib.sha256(open(filename).read()).hexdigest()) def test_ftp_upload_fobj(self): ftp = self.ftp_connect() t = TemporaryFile() data = "TEST TEST TEST TEST" t.write(data) hashname = ftp.upload_fobj("/", t) self.assertEqual(len(ftp.list("/")), 1) self.assertEqual(hashname, hashlib.sha256(data).hexdigest()) t.close() def test_ftp_create_dir(self): ftp = self.ftp_connect() ftp.mkdir("test1") self.assertEqual(len(ftp.list("/")), 1) def test_ftp_create_subdir(self): ftp = self.ftp_connect() ftp.mkdir("/test1") ftp.mkdir("/test1/test2") ftp.mkdir("/test1/test2/test3") self.assertEqual(len(ftp.list("/")), 1) self.assertEqual(len(ftp.list("/test1")), 1) self.assertEqual(len(ftp.list("/test1/test2")), 1) self.assertEqual(len(ftp.list("/test1/test2/test3")), 0) def test_ftp_upload_in_subdir(self): ftp = self.ftp_connect() ftp.mkdir("/test1") ftp.mkdir("/test1/test2") ftp.mkdir("/test1/test2/test3") filename = os.path.join(self.cwd, "test.ini") hashname = ftp.upload_file("/test1/test2/test3", filename) self.assertEqual(len(ftp.list("/test1/test2/test3")), 1) self.assertEqual(hashname, hashlib.sha256(open(filename).read()).hexdigest()) def test_ftp_remove_not_existing_file(self): ftp = 
self.ftp_connect() with self.assertRaises(IrmaFtpError): ftp.delete(".", "lkzndlkaznd") def test_ftp_remove_not_existing_dir(self): ftp = self.ftp_connect() with self.assertRaises(IrmaFtpError): ftp.deletepath("/test1", deleteParent=True) def test_ftp_modify_file_hash(self): ftp = self.ftp_connect() filename = os.path.join(self.cwd, "test.ini") hashname = ftp.upload_file("/", filename) altered_name = "0000" + hashname[4:] ftp.rename(hashname, altered_name) self.assertEqual(len(hashname), len(altered_name)) t = TemporaryFile() with self.assertRaises(IrmaFtpError): ftp.download_file("/", altered_name, t) t.close() def test_ftp_download_file(self): ftp = self.ftp_connect() t = TemporaryFile() data = "TEST TEST TEST TEST" t.write(data) hashname = ftp.upload_fobj("/", t) _, tmpname = mkstemp(prefix="test_ftp") ftp.download_file(".", hashname, tmpname) data2 = open(tmpname).read() os.unlink(tmpname) self.assertEqual(data, data2) t.close() def test_ftp_download_fobj(self): ftp = self.ftp_connect() t1, t2 = TemporaryFile(), TemporaryFile() data = "TEST TEST TEST TEST" t1.write(data) hashname = ftp.upload_fobj(".", t1) ftp.download_fobj(".", hashname, t2) self.assertEqual(t2.read(), data) t1.close() t2.close() def test_ftp_already_connected(self): ftp = self.ftp(self.test_ftp_host, self.test_ftp_port, self.test_ftp_auth, self.test_ftp_key, self.test_ftp_user, self.test_ftp_passwd) old_conn = ftp._conn ftp._connect() new_conn = ftp._conn # TODO when singleton will be done self.assertEqual(old_conn, new_conn) def test_ftp_double_connect(self): self.ftp(self.test_ftp_host, self.test_ftp_port, self.test_ftp_auth, self.test_ftp_key, self.test_ftp_user, self.test_ftp_passwd) self.ftp(self.test_ftp_host, self.test_ftp_port, self.test_ftp_auth, self.test_ftp_key, self.test_ftp_user, self.test_ftp_passwd) # TODO when singleton will be done # self.assertEquals(ftp1, ftp2) def test_ftp_wrong_port(self): with self.assertRaises(IrmaFtpError): self.ftp(self.test_ftp_host, 45000, 
self.test_ftp_auth, self.test_ftp_key, self.test_ftp_user, self.test_ftp_passwd) def test_ftp_wrong_user(self): with self.assertRaises(IrmaFtpError): self.ftp(self.test_ftp_host, self.test_ftp_port, self.test_ftp_auth, self.test_ftp_key, "random_foo", self.test_ftp_passwd) def test_ftp_wrong_passwd(self): with self.assertRaises(IrmaFtpError): self.ftp(self.test_ftp_host, self.test_ftp_port, self.test_ftp_auth, self.test_ftp_key, self.test_ftp_user, "random_bar") if __name__ == '__main__': enable_logging() unittest.main()
""" Unit test for GaussianGRUPolicy with Model. This test consists of four different GaussianGRUPolicy: P1, P2, P3 and P4. P1 and P2 are from GaussianGRUPolicy, which does not use garage.tf.models.GRUModel while P3 and P4 do use. This test ensures the outputs from all the policies are the same, for the transition from using GaussianGRUPolicy to GaussianGRUPolicyWithModel. It covers get_action, get_actions, dist_info_sym, kl_sym, log_likelihood_sym, entropy_sym and likelihood_ratio_sym. """ from unittest import mock import numpy as np import tensorflow as tf from garage.tf.envs import TfEnv from garage.tf.misc import tensor_utils from garage.tf.policies import GaussianGRUPolicy from garage.tf.policies import GaussianGRUPolicyWithModel from tests.fixtures import TfGraphTestCase from tests.fixtures.envs.dummy import DummyBoxEnv class TestGaussianGRUPolicyWithModelTransit(TfGraphTestCase): @mock.patch('tensorflow.random.normal') def setUp(self, mock_rand): mock_rand.return_value = 0.5 super().setUp() env = TfEnv(DummyBoxEnv(obs_dim=(1, ), action_dim=(1, ))) self.default_initializer = tf.constant_initializer(1) self.default_hidden_nonlinearity = tf.nn.tanh self.default_recurrent_nonlinearity = tf.nn.sigmoid self.default_output_nonlinearity = None self.time_step = 1 self.policy1 = GaussianGRUPolicy( env_spec=env.spec, hidden_dim=4, hidden_nonlinearity=self.default_hidden_nonlinearity, recurrent_nonlinearity=self.default_recurrent_nonlinearity, recurrent_w_x_init=self.default_initializer, recurrent_w_h_init=self.default_initializer, output_nonlinearity=self.default_output_nonlinearity, output_w_init=self.default_initializer, state_include_action=True, name='P1') self.policy2 = GaussianGRUPolicy( env_spec=env.spec, hidden_dim=4, hidden_nonlinearity=self.default_hidden_nonlinearity, recurrent_nonlinearity=self.default_recurrent_nonlinearity, recurrent_w_x_init=self.default_initializer, recurrent_w_h_init=self.default_initializer, 
output_nonlinearity=self.default_output_nonlinearity, output_w_init=tf.constant_initializer(2), state_include_action=True, name='P2') self.sess.run(tf.global_variables_initializer()) self.policy3 = GaussianGRUPolicyWithModel( env_spec=env.spec, hidden_dim=4, hidden_nonlinearity=self.default_hidden_nonlinearity, hidden_w_init=self.default_initializer, recurrent_nonlinearity=self.default_recurrent_nonlinearity, recurrent_w_init=self.default_initializer, output_nonlinearity=self.default_output_nonlinearity, output_w_init=self.default_initializer, state_include_action=True, name='P3') self.policy4 = GaussianGRUPolicyWithModel( env_spec=env.spec, hidden_dim=4, hidden_nonlinearity=self.default_hidden_nonlinearity, hidden_w_init=self.default_initializer, recurrent_nonlinearity=self.default_recurrent_nonlinearity, recurrent_w_init=self.default_initializer, output_nonlinearity=self.default_output_nonlinearity, output_w_init=tf.constant_initializer(2), state_include_action=True, name='P4') self.policy1.reset() self.policy2.reset() self.policy3.reset() self.policy4.reset() self.obs = [env.reset()] self.obs = np.concatenate([self.obs for _ in range(self.time_step)], axis=0) self.obs_ph = tf.placeholder( tf.float32, shape=(None, None, env.observation_space.flat_dim)) self.action_ph = tf.placeholder( tf.float32, shape=(None, None, env.action_space.flat_dim)) self.dist1_sym = self.policy1.dist_info_sym( obs_var=self.obs_ph, state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))}, name='p1_sym') self.dist2_sym = self.policy2.dist_info_sym( obs_var=self.obs_ph, state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))}, name='p2_sym') self.dist3_sym = self.policy3.dist_info_sym( obs_var=self.obs_ph, state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))}, name='p3_sym') self.dist4_sym = self.policy4.dist_info_sym( obs_var=self.obs_ph, state_info_vars={'prev_action': np.zeros((2, self.time_step, 1))}, name='p4_sym') def test_dist_info_sym_output(self): # 
batch size = 2 dist1 = self.sess.run( self.dist1_sym, feed_dict={self.obs_ph: [self.obs, self.obs]}) dist2 = self.sess.run( self.dist2_sym, feed_dict={self.obs_ph: [self.obs, self.obs]}) dist3 = self.sess.run( self.dist3_sym, feed_dict={self.obs_ph: [self.obs, self.obs]}) dist4 = self.sess.run( self.dist4_sym, feed_dict={self.obs_ph: [self.obs, self.obs]}) assert np.array_equal(dist1['mean'], dist3['mean']) assert np.array_equal(dist1['log_std'], dist3['log_std']) assert np.array_equal(dist2['mean'], dist4['mean']) assert np.array_equal(dist2['log_std'], dist4['log_std']) @mock.patch('numpy.random.normal') def test_get_action(self, mock_rand): mock_rand.return_value = 0.5 action1, agent_info1 = self.policy1.get_action(self.obs) action2, agent_info2 = self.policy2.get_action(self.obs) action3, agent_info3 = self.policy3.get_action(self.obs) action4, agent_info4 = self.policy4.get_action(self.obs) assert np.array_equal(action1, action3) assert np.array_equal(action2, action4) assert np.array_equal(agent_info1['mean'], agent_info3['mean']) assert np.array_equal(agent_info1['log_std'], agent_info3['log_std']) assert np.array_equal(agent_info2['mean'], agent_info4['mean']) assert np.array_equal(agent_info2['log_std'], agent_info4['log_std']) actions1, agent_infos1 = self.policy1.get_actions([self.obs]) actions2, agent_infos2 = self.policy2.get_actions([self.obs]) actions3, agent_infos3 = self.policy3.get_actions([self.obs]) actions4, agent_infos4 = self.policy4.get_actions([self.obs]) assert np.array_equal(actions1, actions3) assert np.array_equal(actions2, actions4) assert np.array_equal(agent_infos1['mean'], agent_infos3['mean']) assert np.array_equal(agent_infos1['log_std'], agent_infos3['log_std']) assert np.array_equal(agent_infos2['mean'], agent_infos4['mean']) assert np.array_equal(agent_infos2['log_std'], agent_infos4['log_std']) def test_kl_sym(self): kl_diff_sym1 = self.policy1.distribution.kl_sym( self.dist1_sym, self.dist2_sym) objective1 = 
tf.reduce_mean(kl_diff_sym1) kl_func = tensor_utils.compile_function([self.obs_ph], objective1) kl1 = kl_func([self.obs, self.obs]) kl_diff_sym2 = self.policy3.distribution.kl_sym( self.dist3_sym, self.dist4_sym) objective2 = tf.reduce_mean(kl_diff_sym2) kl_func = tensor_utils.compile_function([self.obs_ph], objective2) kl2 = kl_func([self.obs, self.obs]) assert np.array_equal(kl1, kl2) def test_log_likehihood_sym(self): log_prob_sym1 = self.policy1.distribution.log_likelihood_sym( self.action_ph, self.dist1_sym) log_prob_func = tensor_utils.compile_function( [self.obs_ph, self.action_ph], log_prob_sym1) log_prob1 = log_prob_func([self.obs, self.obs], np.ones((2, self.time_step, 1))) log_prob_sym2 = self.policy3.distribution.log_likelihood_sym( self.action_ph, self.dist3_sym) log_prob_func2 = tensor_utils.compile_function( [self.obs_ph, self.action_ph], log_prob_sym2) log_prob2 = log_prob_func2([self.obs, self.obs], np.ones((2, self.time_step, 1))) assert np.array_equal(log_prob1, log_prob2) log_prob_sym1 = self.policy2.distribution.log_likelihood_sym( self.action_ph, self.dist2_sym) log_prob_func = tensor_utils.compile_function( [self.obs_ph, self.action_ph], log_prob_sym1) log_prob1 = log_prob_func([self.obs, self.obs], np.ones((2, self.time_step, 1))) log_prob_sym2 = self.policy4.distribution.log_likelihood_sym( self.action_ph, self.dist4_sym) log_prob_func2 = tensor_utils.compile_function( [self.obs_ph, self.action_ph], log_prob_sym2) log_prob2 = log_prob_func2([self.obs, self.obs], np.ones((2, self.time_step, 1))) assert np.array_equal(log_prob1, log_prob2) def test_policy_entropy_sym(self): entropy_sym1 = self.policy1.distribution.entropy_sym( self.dist1_sym, name='entropy_sym1') entropy_func = tensor_utils.compile_function([self.obs_ph], entropy_sym1) entropy1 = entropy_func([self.obs, self.obs]) entropy_sym2 = self.policy3.distribution.entropy_sym( self.dist3_sym, name='entropy_sym1') entropy_func = tensor_utils.compile_function([self.obs_ph], entropy_sym2) 
entropy2 = entropy_func([self.obs, self.obs]) assert np.array_equal(entropy1, entropy2) def test_likelihood_ratio_sym(self): likelihood_ratio_sym1 = self.policy1.distribution.likelihood_ratio_sym( self.action_ph, self.dist1_sym, self.dist2_sym, name='li_ratio_sym1') likelihood_ratio_func = tensor_utils.compile_function( [self.action_ph, self.obs_ph], likelihood_ratio_sym1) likelihood_ratio1 = likelihood_ratio_func( np.ones((2, 1, 1)), [self.obs, self.obs]) likelihood_ratio_sym2 = self.policy3.distribution.likelihood_ratio_sym( self.action_ph, self.dist3_sym, self.dist4_sym, name='li_ratio_sym2') likelihood_ratio_func = tensor_utils.compile_function( [self.action_ph, self.obs_ph], likelihood_ratio_sym2) likelihood_ratio2 = likelihood_ratio_func( np.ones((2, 1, 1)), [self.obs, self.obs]) assert np.array_equal(likelihood_ratio1, likelihood_ratio2)
#-*- coding: UTF-8 -*- import pandas as pd import numpy as np import matplotlib.pyplot as plt # predicted_filename = '/Users/sunguangyu/Downloads/电表/1528641961.4759896df_dh.csv51220predicted_df.csv' # test_filename = '/Users/sunguangyu/Downloads/电表/1528641961.4759896df_dh.csv51220y_test_df.csv' predicted_filename = '/Users/sunguangyu/Downloads/电表/1536784794.03df_dh.csv51220predicted_df.csv' test_filename = '/Users/sunguangyu/Downloads/电表/1536784794.03df_dh.csv51220y_test_df.csv' l=4 t=0.5 def check(l=l, t=t, predicted_filename = predicted_filename, test_filename = test_filename): flag=False predicted = pd.read_csv(predicted_filename) test = pd.read_csv(test_filename) error = predicted - test #print(error) #error.reset_index(inplace=True) error.rename(columns={error.columns[0]: "index", error.columns[1]: "data", }, inplace=True) error['data']=error['data'].apply(lambda x: abs(x)) a = [0 for x in range(-1, 1000)] #print(error) def get_it(p,x): x=round(x) return p.iloc[x,0] for i in range(0, error.iloc[:, 0].size-1-l): for j in range(0, l-1): if error.loc[i+j, 'data'] < t: #print(i,i+j,error.loc[i+j, 'data']) continue else: a[i+j]=a[i+j-1]+1 if a[i+j] == l: print(i+j) flag=True if flag: break #print(a) test.rename(columns={test.columns[0]: "index", test.columns[1]: "data", }, inplace=True) test['data']=test['data'].apply(lambda x: abs(x)) predicted=predicted.drop('Unnamed: 0', axis=1) predicted=predicted['0'].tolist() test = test.drop('index', axis=1) test = test['data'].tolist() print(predicted) print(error) pp=[i+t for i in predicted] pm=[i-t for i in predicted] # for i in range(0,71): # print(get_it(predicted,i)) # if (get_it(error,i) > get_it(predicted,i)+t): plt.fill_between(np.linspace(i,i+1),get_it(error,i),get_it(predicted,i)+t,facecolor='purple') x=np.arange(0,72) y1=pp y2=test # plt.fill_between(x, y1, y2, where= (y1 > y2), facecolor='green',interpolate=True, alpha=1.5) # plt.fill_between(x, y1, 0, facecolor='white', interpolate=True, alpha=1) 
plt.fill_between(x, pm, test, where=(pm < test), facecolor='green', interpolate=True, alpha=1.5) plt.fill_between(x, test, -1, facecolor='white', interpolate=True, alpha=1) plt.plot(pp,'r--',label='Upper Bound') plt.plot(pm,'g--',label='Lower Bound') plt.plot(test,'b',label='Test(malfunction)') plt.legend() plt.xlim(0,71) plt.show() if __name__ == '__main__': check()
<gh_stars>1-10 # coding: utf-8 from __future__ import absolute_import import tempfile import os import shutil import logging import base64 import nacl.encoding import subprocess from bravado.client import SwaggerClient from bravado.exception import HTTPNotFound from requests.exceptions import ConnectionError from nose.tools import assert_equals from nose.tools import assert_true from testconfig import config from waiting import wait from nacl.hash import sha256 import base58 import rlp import integration.keys EXT_API = {} INT_API = {} def create_client(node_name, port): node_config = config['nodes'][node_name] url = 'http://' + node_config['host'] + ':' + str(node_config['ports'][port]) + '/api' client_config = {'validate_responses': False} return SwaggerClient.from_url(url, config=client_config) def external_api(name): if not name in EXT_API: client = create_client(name, 'external_api') api = client.external def get_model(model): return client.get_model(model) api.get_model = get_model EXT_API[name] = api return EXT_API[name] def internal_api(name): if not name in INT_API: client = create_client(name, 'internal_api') api = client.internal def get_model(model): return client.get_model(model) api.get_model = get_model INT_API[name] = api return INT_API[name] def node_online(name): def is_ext_online(): try: ext_api = external_api(name) top = ext_api.GetCurrentKeyBlock().response().result return top.height > -1 except ConnectionError as e: return False def is_int_online(): try: int_api = internal_api(name) key = int_api.GetNodePubkey().response().result return key.pub_key is not None except ConnectionError as e: return False return is_ext_online() and is_int_online() def setup_node(node): # prepare a dir to hold the configs root_dir = tempfile.mkdtemp() # setup the dir with non-mining node user_config = make_no_mining_user_config(root_dir, "aeternity.yaml") start_node(node, user_config) ext_api = external_api(node) int_api = internal_api(node) return (root_dir, node, 
ext_api, int_api) def setup_node_with_tokens(node, beneficiary, blocks_to_mine): # prepare a dir to hold the configs root_dir = tempfile.mkdtemp() # setup the dir with mining node user_config = make_mining_user_config(root_dir, beneficiary, "aeternity.yaml") start_node(node, user_config) ext_api = external_api(node) int_api = internal_api(node) top0 = ext_api.GetCurrentKeyBlock().response().result bal0 = get_account_balance(ext_api, beneficiary['enc_pubk']) # populate the chain so node had mined some blocks and has tokens # to spend wait_until_height(ext_api, top0.height + blocks_to_mine) top1 = ext_api.GetCurrentKeyBlock().response().result assert_true(top1.height >= top0.height) assert_true(top1.height >= blocks_to_mine) # Now the node has at least blocks_to_mine blocks mined bal1 = get_account_balance(ext_api, beneficiary['enc_pubk']) # The node received the reward for at least blocks_to_mine blocks assert_true(bal1 > bal0) return (root_dir, ext_api, int_api, top1) def install_user_config(root_dir, file_name, conf): user_config = os.path.join(root_dir, file_name) f = open(user_config, "w") f.write(conf) f.close() return user_config def make_no_mining_user_config(root_dir, file_name): conf = """\ --- mining: autostart: false expected_mine_rate: 100 beneficiary: "<KEY>" beneficiary_reward_delay: 2 cuckoo: edge_bits: 15 miners: - executable: mean15-generic extra_args: "" """ return install_user_config(root_dir, file_name, conf) def make_mining_user_config(root_dir, beneficiary, file_name): conf = """\ --- mining: autostart: true expected_mine_rate: 100 beneficiary: "{}" beneficiary_reward_delay: 2 cuckoo: edge_bits: 15 miners: - executable: mean15-generic extra_args: "" """.format(beneficiary['enc_pubk']) return install_user_config(root_dir, file_name, conf) def start_node(name, config_filename): if should_start_node(name): print("\nNode " + name + " starting") config_prefix = "" if config_filename[0] == "/": # absolute path config_prefix = 'AETERNITY_CONFIG="' + 
config_filename + '" ' else: config_prefix = 'AETERNITY_CONFIG="`pwd`/' + config_filename + '" ' print("Starting node with config prefix " + config_prefix) p = os.popen("(cd ../.. && " + config_prefix + "make " + name + "-start;)","r") while 1: line = p.readline() if not line: break wait(lambda: node_online(name), timeout_seconds=30, sleep_seconds=0.5) def stop_node(name): if should_start_node(name): print("Node " + name + " stopping") p = os.popen("(cd ../.. && make " + name + "-stop;)","r") while 1: line = p.readline() if not line: break def coinbase_reward(): return config["coinbase_reward"] def should_start_node(name): return config['nodes'][name]['start'] def node_config(name): return config['nodes'][name] def test_settings(test_name): return config['tests'][test_name] def tool_settings(test_name): return config['tools'][test_name] def wait_until_height(ext_api, height): wait(lambda: ext_api.GetCurrentKeyBlock().response().result.height >= height, timeout_seconds=120, sleep_seconds=0.25) def post_transaction(ext_api, signed_tx): Tx = ext_api.get_model('Tx') tx_object = Tx(tx=signed_tx) return ext_api.PostTransaction(body=tx_object).response().result.tx_hash def ensure_transaction_posted(ext_api, signed_tx, min_confirmations=1): tx_hash = post_transaction(ext_api, signed_tx) ensure_transaction_confirmed(ext_api, tx_hash, min_confirmations) def ensure_transaction_confirmed(ext_api, tx_hash, min_confirmations): wait(lambda: is_tx_confirmed(ext_api, tx_hash, min_confirmations), timeout_seconds=20, sleep_seconds=0.25) def is_tx_confirmed(ext_api, tx_hash, min_confirmations): res = ext_api.GetCurrentKeyBlockHeight().response().result top_key_height = res['height'] tx = ext_api.GetTransactionByHash(hash=tx_hash).response().result if "none" == tx.block_hash: return False return (top_key_height - tx.block_height) >= min_confirmations def get_account_balance(api, pub_key): AccountModel = api.get_model('Account') return _balance_from_get_account(AccountModel, lambda: 
api.GetAccountByPubkey(pubkey=pub_key).response().result, pub_key) def send_tokens(sender, address, tokens, fee, ext_api, int_api): SpendTx = int_api.get_model('SpendTx') spend_tx_obj = SpendTx( sender_id=sender['enc_pubk'], recipient_id=address, amount=tokens, fee=fee, ttl=100, payload="sending tokens" ) spend_tx = int_api.PostSpend(body=spend_tx_obj).response().result.tx unsigned_tx = api_decode(spend_tx) signed_tx = integration.keys.sign_encode_tx(unsigned_tx, sender['privk']) return post_transaction(ext_api, signed_tx) def ensure_send_tokens(sender, address, tokens, fee, ext_api, int_api, min_confirmations): tx_hash = send_tokens(sender, address, tokens, fee, ext_api, int_api) ensure_transaction_confirmed(ext_api, tx_hash, min_confirmations) def _balance_from_get_account(AccountModel, get_account_fun, pub_key): account = AccountModel(id=pub_key, balance=0, nonce=0) try: account = get_account_fun() except HTTPNotFound as e: assert_equals(e.status_code, 404) # no account yet return account.balance def api_decode(encoded): if encoded[2] != '_': raise ValueError('Invalid hash') prefix = encoded[0:2] if api_encode_type(prefix) == 64: return base64decode_check(encoded[3:]) return base58.b58decode_check(encoded[3:]) def api_encode(prefix, decoded): if api_encode_type(prefix) == 64: return prefix + '_' + base64encode_check(decoded) return prefix + '_' + base58.b58encode_check(decoded) def api_encode_type(prefix): if len(prefix) != 2: raise ValueError('Invalid prefix: ' + prefix) base64Prefixes = {'cb', 'tx', 'ov', 'or', 'st', 'pi', 'ss'} if prefix in base64Prefixes: return 64 return 58 def base64decode_check(encoded): decoded = base64.b64decode(encoded) check = decoded[-4:] body = decoded[:-4] shaHash = sha256(sha256(body, encoder=nacl.encoding.RawEncoder), encoder=nacl.encoding.RawEncoder) if shaHash[0:4] != check: raise ValueError('Invalid hash') return body def base64encode_check(decoded): shaHash = sha256(sha256(decoded, encoder=nacl.encoding.RawEncoder), 
encoder=nacl.encoding.RawEncoder) return base64.b64encode(decoded + shaHash[0:4]) def hexstring_to_contract_bytearray(hexstring): if (hexstring.startswith("0x")): hexstring = hexstring[2:] return "cb_" + base64encode_check(hexstring.decode("hex")) def encode_signed_tx(encoded_tx, signatures): tag = bytearray([11]) vsn = bytearray([1]) payload = rlp.encode([tag, vsn, signatures, encoded_tx]) return api_encode("tx", payload) def decode_unsigned_tx(encoded_tx): decoded = rlp.decode(encoded_tx) tag, vsn, fields = decoded[0], decoded[1], decoded[2:] # This is minimally what we need for now if (tag == bytes(bytearray([42])) and vsn == bytes(bytearray([1]))): ownerid = decode_id(fields[0]) return {'type': 'contract_create_tx', 'owner_id': ownerid['pubkey'], 'nonce': bytes_to_int(fields[1]), 'code': fields[2], 'vm_version': bytes_to_int(fields[3]) >> 16, 'abi_version': bytes_to_int(fields[3]) & 65535, 'fee': bytes_to_int(fields[4]), 'ttl': bytes_to_int(fields[5]), 'deposit': bytes_to_int(fields[6]), 'amount': bytes_to_int(fields[7]), 'gas': bytes_to_int(fields[8]), 'gas_price': bytes_to_int(fields[9]), 'call_data': fields[10] } def decode_id(encoded): return { 'type': encoded[0], 'pubkey': encoded[1:] } def bytes_to_int(x): return int(x.encode('hex'), 16) def encode_pubkey(pubkey): return api_encode("ak", pubkey) def encode_name(name): return api_encode("nm", name) def encode_tx_hash(txhash): return api_encode("th", txhash) def setup_beneficiary(): ben_priv = integration.keys.new_private() ben_pub = integration.keys.public_key(ben_priv) beneficiary = {'privk': ben_priv, 'pubk': ben_pub, 'enc_pubk': integration.keys.address(ben_pub)} return beneficiary def compile_contract(file): compiler_cmd = os.path.join(os.getcwd(), 'integration', 'aesophia_cli') bytecode = subprocess.check_output([compiler_cmd, file]).splitlines()[-1] return bytecode def encode_calldata(file, function, args): compiler_cmd = os.path.join(os.getcwd(), 'integration', 'aesophia_cli') calldata = 
subprocess.check_output([compiler_cmd, '--create_calldata', file, '--calldata_fun', function, '--calldata_args', args]).splitlines()[-1] return calldata def decode_data(type, data): compiler_cmd = os.path.join(os.getcwd(), 'integration', 'aesophia_cli') value = subprocess.check_output([compiler_cmd, '--decode_data', data, '--decode_type', type]).splitlines()[-1] return value
import argparse import time import numpy as np import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data as data import torch.utils.data.distributed import torchvision import torchvision.transforms as transforms import torchvision.datasets as datasets import resnet_multigpu as resnet import resnet_multigpu_maxpool as resnet_max import alexnet_multigpu as alexnet import os import cv2 from PIL import Image import pdb import datasets as pointing_datasets """ Here, we evaluate using the stochastic pointing game metriic on imagenet dataset by using the bbox annotations on the val dataset. """ model_names = ['resnet18', 'resnet50', 'alexnet'] parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)') parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', help='number of data loading workers (default: 16)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 96)') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('-g', '--num-gpus', default=1, type=int, metavar='N', help='number of GPUs to match (default: 4)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--input_resize', default=224, type=int, metavar='N', help='Resize for smallest side of input (default: 224)') parser.add_argument('--maxpool', dest='maxpool', action='store_true', help='use maxpool variant of ResNet') def main(): global args args = parser.parse_args() if args.pretrained: print("=> using pre-trained 
model '{}'".format(args.arch)) if args.arch.startswith('resnet'): if args.maxpool: model = resnet_max.__dict__[args.arch](pretrained=True) else: model = resnet.__dict__[args.arch](pretrained=True) elif args.arch.startswith('alexnet'): model = alexnet.__dict__[args.arch](pretrained=True) else: assert False, 'Unsupported architecture: {}'.format(args.arch) else: # create the model print("=> creating model '{}'".format(args.arch)) if args.arch.startswith('resnet'): if args.maxpool: model = resnet_max.__dict__[args.arch]() else: model = resnet.__dict__[args.arch]() elif args.arch.startswith('alexnet'): model = alexnet.__dict__[args.arch]() else: assert False, 'Unsupported architecture: {}'.format(args.arch) model = torch.nn.DataParallel(model).cuda() if args.resume: print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) model.load_state_dict(checkpoint['state_dict']) if (not args.resume) and (not args.pretrained): assert False, "Model checkpoint not specified for evaluation" cudnn.benchmark = True normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Here, we don't resize the images. Instead, we feed the full image and use AdaptivePooling before FC. # We will resize Gradcam heatmap to image size and compare the actual bbox co-ordinates val_dataset = pointing_datasets.ImageNetDetection(args.data, transform=transforms.Compose([ transforms.Resize(args.input_resize), transforms.ToTensor(), normalize, ])) # we set batch size=1 since we are loading full resolution images. val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True) validate_multi(val_loader, val_dataset, model) def validate_multi(val_loader, val_dataset, model): batch_time = AverageMeter() # switch to evaluate mode model.eval() expected_value_list = [] # We take 100 stochastic samples for our evaluation. 
# NOTE(review): the lines down to the first bare `return` are the tail of an
# enclosing evaluation function whose `def` is outside this view; the 4-space
# indent below is reconstructed — confirm against the full file.
    NUM_SAMPLES = 100   # stochastic pointing-game samples drawn per image
    TOLERANCE = 15      # hit radius (pixels) around each sampled point

    end = time.time()
    for i, (images, annotation, targets) in enumerate(val_loader):
        images = images.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        # we assume batch size == 1 and unwrap the first elem of every list in annotation object
        annotation = unwrap_dict(annotation)
        w, h = val_dataset.as_image_size(annotation)
        class_id = targets[0].item()
        # compute output
        # we have added return_feats=True to get the output as well as layer4 conv feats
        output, feats = model(images, return_feats=True)
        output_gradcam = compute_gradcam(output, feats, targets)
        output_gradcam_np = output_gradcam.data.cpu().numpy()[0]  # since we have batch size==1
        resized_output_gradcam = cv2.resize(output_gradcam_np, (w, h))
        spatial_sum = resized_output_gradcam.sum()
        if spatial_sum <= 0:
            # We ignore images with zero Grad-CAM
            continue
        # Normalize so the resized map sums to 1 and can be used as a sampling
        # distribution below.
        resized_output_gradcam = resized_output_gradcam / spatial_sum
        # Now, we obtain the mask corresponding to the ground truth bounding boxes
        # Skip if all boxes for class_id are marked difficult.
        objs = annotation['annotation']['object']
        if not isinstance(objs, list):
            objs = [objs]
        objs = [obj for obj in objs if pointing_datasets._IMAGENET_CLASS_TO_INDEX[obj['name']] == class_id]
        if all([bool(int(obj['difficult'])) for obj in objs]):
            continue
        gt_mask = pointing_datasets.imagenet_as_mask(annotation, class_id)
        # output_gradcam is now normalized and can be considered as probabilities
        sample_indices = np.random.choice(np.arange(h*w), NUM_SAMPLES, p=resized_output_gradcam.ravel())
        curr_image_hits = []
        for sample_index in sample_indices:
            # unravel over (h, w): first coordinate is the row, second the column
            sample_x, sample_y = np.unravel_index(sample_index, (h, w))
            # Squared distance grid from the sampled point over the mask extent
            v, u = torch.meshgrid((
                (torch.arange(gt_mask.shape[0], dtype=torch.float32) - sample_x) ** 2,
                (torch.arange(gt_mask.shape[1], dtype=torch.float32) - sample_y) ** 2,
            ))
            # A sample "hits" if any ground-truth pixel lies within TOLERANCE px
            accept = (v + u) < TOLERANCE ** 2
            hit = (gt_mask & accept).view(-1).any()
            if hit:
                hit = +1
            else:
                hit = -1
            # map {-1, +1} -> {0, 1} before accumulating
            curr_image_hits.append((hit+1)/2)
        curr_image_hits_arr = np.array(curr_image_hits)
        # we have a bernoulli distribution for the hits, so we compute mean and variance
        pos_prob = float(curr_image_hits_arr.sum())/float(NUM_SAMPLES)
        expected_value_list.append(pos_prob*100)
        if i % 1000 == 0:
            # periodic progress report of the running mean expectation
            print('\n{} val images:'.format(i+1))
            expected_value_arr = np.array(expected_value_list)
            mean_expectation = expected_value_arr.mean()
            print('Mean - Expected value for 100 stochastic samples/image for hits(1) and misses(0): {}'.format(
                mean_expectation))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    # final summary over all validation images
    expected_value_arr = np.array(expected_value_list)
    mean_expectation = expected_value_arr.mean()
    print('Mean - Expected value for 100 stochastic samples/image for hits(1) and misses(0): {}'.format(
        mean_expectation))
    return


def compute_gradcam(output, feats, target):
    """
    Compute the gradcam for the given target

    :param output: model logits, shape (batch, ..., num_classes) — indexed as
        output.shape[0] (batch) and output.shape[-1] (classes) below
    :param feats: conv feature maps the gradient is taken w.r.t.
    :param: target: per-example target class indices (tensor)
    :return: spatially-normalized Grad-CAM maps, one per example
    """
    eps = 1e-8  # avoids division by zero in the spatial normalization
    relu = nn.ReLU(inplace=True)
    target = target.cpu().numpy()
    # one-hot selection of the target logit per example
    one_hot = np.zeros((output.shape[0], output.shape[-1]), dtype=np.float32)
    indices_range = np.arange(output.shape[0])
    one_hot[indices_range, target[indices_range]] = 1
    one_hot = torch.from_numpy(one_hot)
    one_hot.requires_grad = True
    # Compute the Grad-CAM for the original image
    one_hot_cuda = torch.sum(one_hot.cuda() * output)
    # create_graph=True keeps the graph so the Grad-CAM itself stays differentiable
    dy_dz1, = torch.autograd.grad(one_hot_cuda, feats, grad_outputs=torch.ones(one_hot_cuda.size()).cuda(),
                                  retain_graph=True, create_graph=True)
    # Changing to dot product of grad and features to preserve grad spatial locations
    gcam512_1 = dy_dz1 * feats
    gradcam = gcam512_1.sum(dim=1)
    gradcam = relu(gradcam)
    # normalize each map by its spatial sum (plus eps to keep it strictly positive)
    spatial_sum1 = gradcam.sum(dim=[1, 2]).unsqueeze(-1).unsqueeze(-1)
    gradcam = (gradcam / (spatial_sum1 + eps)) + eps
    return gradcam


def unwrap_dict(dict_object):
    """Recursively unwrap a batch-collated annotation dict.

    Single-element lists are replaced by their element (batch size == 1),
    except under the 'object' key, whose list structure is preserved with
    each element unwrapped individually.
    """
    new_dict = {}
    for k, v in dict_object.items():
        if k == 'object':
            # keep the list of objects, but unwrap each object dict
            new_v_list = []
            for elem in v:
                new_v_list.append(unwrap_dict(elem))
            new_dict[k] = new_v_list
            continue
        if isinstance(v, dict):
            new_v = unwrap_dict(v)
        elif isinstance(v, list) and len(v) == 1:
            new_v = v[0]
        else:
            new_v = v
        new_dict[k] = new_v
    return new_dict


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: last reported value; avg: running mean; sum/count: accumulators
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # n is the number of samples `val` represents (weighted update)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


if __name__ == '__main__':
    main()
<gh_stars>1-10
import datetime
from enum import unique, Enum
import json

import related
import requests

from adaptive_alerting_detector_build.config import get_datasource_config
from adaptive_alerting_detector_build.datasources import datasource
from adaptive_alerting_detector_build.detectors import build_detector, DetectorClient
from adaptive_alerting_detector_build.profile.metric_profiler import build_profile


@unique
class MetricType(Enum):
    """Closed set of metric kinds understood by the detector builder."""
    REQUEST_COUNT = "REQUEST_COUNT"
    ERROR_COUNT = "ERROR_COUNT"
    SUCCESS_RATE = "SUCCESS_RATE"
    LATENCY = "LATENCY"


@related.immutable
class MetricConfig:
    """Immutable description of a metric (name, type, datasource tags)."""
    name = related.StringField()
    type = related.ChildField(MetricType)
    tags = related.ChildField(dict)
    description = related.StringField(required=False)
    # NOTE(review): get_datasource_config() is evaluated once at import time,
    # not per instance — confirm that is the intended behavior.
    datasource = related.ChildField(
        dict, default=get_datasource_config(), required=False
    )

    @property
    def tag_key(self):
        # Canonical (sorted) JSON form of the tags, usable as a dict key.
        return json.dumps(self.tags, sort_keys=True)


class Metric:
    """A monitored metric plus the detectors managed for it via the model service."""

    def __init__(
        self, config, datasource_config, model_service_url=None, model_service_user=None
    ):
        # config: dict-like with at least "tags" and "type" keys (see usages below)
        self.config = config
        self._datasource = datasource(datasource_config)
        self._detector_client = DetectorClient(
            model_service_url=model_service_url, model_service_user=model_service_user
        )
        self._sample_data = None  # lazy cache for sample_data property
        self._profile = None      # lazy cache for profile property

    def query(self):
        """Fetch this metric's data from the datasource, filtered by its tags."""
        return self._datasource.query(tags=self.config["tags"])

    @property
    def detectors(self):
        """Detectors currently mapped to this metric (always fetched fresh)."""
        # removed optimization due to possible consistancy issues
        # if not self._detectors:
        #     self._detectors = self._detector_client.list_detectors_for_metric(self.config["tags"])
        # return self._detectors
        return self._detector_client.list_detectors_for_metric(self.config["tags"])

    def build_detectors(self, selected_detectors=None):
        """
        Creates selected detectors if they don't exist in the service.
        """
        _selected_detectors = []
        if selected_detectors:
            _selected_detectors = selected_detectors
        else:
            _selected_detectors = self.select_detectors()
        existing_detector_types = [d.type for d in self.detectors]
        new_detectors = list()
        for selected_detector in _selected_detectors:
            # only create detector types not already present for this metric
            if selected_detector["type"] not in existing_detector_types:
                detector = build_detector(**selected_detector)
                detector.train(data=self.query(), metric_type=self.config["type"])
                new_detector = self._detector_client.create_detector(detector)
                self._detector_client.save_metric_detector_mapping(
                    new_detector.uuid, self
                )
                new_detectors.append(new_detector)
        return new_detectors

    def delete_detectors(self):
        """
        Deletes all detectors and mappings for the metric.
        """
        deleted_detectors = []
        for detector in self.detectors:
            detector_mappings = self._detector_client.list_detector_mappings(
                detector.uuid
            )
            # remove every metric->detector mapping before the detector itself
            for detector_mapping in detector_mappings:
                self._detector_client.delete_metric_detector_mapping(
                    detector_mapping.id
                )
            self._detector_client.delete_detector(detector.uuid)
            deleted_detectors.append(detector)
        return deleted_detectors

    def disable_detectors(self):
        """
        Sets 'enabled=false' for all detectors/mappings for the metric.
        """
        disabled_detectors = []
        for detector in self.detectors:
            detector_mappings = self._detector_client.list_detector_mappings(
                detector.uuid
            )
            for detector_mapping in detector_mappings:
                self._detector_client.disable_metric_detector_mapping(
                    detector_mapping.id
                )
            self._detector_client.disable_detector(detector.uuid)
            disabled_detectors.append(detector)
        return disabled_detectors

    def select_detectors(self):
        """
        TODO: Use metric profile data to determine which detectors to use

        Currently returns a single hard-coded constant-threshold detector spec.
        """
        constant_threshold_detector = dict(
            type="constant-detector",
            config=dict(
                hyperparams=dict(
                    strategy="highwatermark",
                    lower_weak_multiplier=3.0,
                    lower_strong_multiplier=4.0,
                    upper_weak_multiplier=1.05,
                    upper_strong_multiplier=1.10,
                    hampel_window_size=10,
                    hampel_n_sigma=3
                )
            ),
        )
        return [constant_threshold_detector]

    def train_detectors(self):
        """
        Trains all detectors for the metric, if needed.
        """
        updated_detectors = []
        for detector in self.detectors:
            if detector.needs_training:
                detector.train(data=self.query(), metric_type=self.config["type"])
                detector.enabled = True
                updated_detector = self._detector_client.update_detector(detector)
                updated_detectors.append(updated_detector)
        return updated_detectors

    @property
    def sample_data(self):
        """Datasource sample for profiling; queried once and cached."""
        if self._sample_data is None:
            self._sample_data = self.query()
        return self._sample_data

    @property
    def profile(self):
        """Metric profile built lazily from sample_data."""
        if not self._profile:
            self._profile = build_profile(self.sample_data)
        return self._profile
"""Plots histograms for GridRad dataset.

Specifically, this script plots two histograms:

- number of convective days per month
- number of tornado reports in each convective day
"""

import os.path
import argparse
import numpy
import pandas
import matplotlib
matplotlib.use('agg')  # non-interactive backend; must run before pyplot import
from matplotlib import pyplot
from gewittergefahr.gg_io import tornado_io
from gewittergefahr.gg_io import gridrad_io
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import plotting_utils

LARGE_INTEGER = int(1e6)
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'

# GridRad files are searched at this time interval within each convective day.
TIME_INTERVAL_SEC = 300
NUM_MONTHS_IN_YEAR = 12

FACE_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
EDGE_COLOUR = numpy.full(3, 0.)
EDGE_WIDTH = 1.5

FIGURE_RESOLUTION_DPI = 300
FIGURE_WIDTH_INCHES = FIGURE_HEIGHT_INCHES = 15

TORNADO_DIR_ARG_NAME = 'input_tornado_dir_name'
GRIDRAD_DIR_ARG_NAME = 'input_gridrad_dir_name'
FIRST_DATE_ARG_NAME = 'first_spc_date_string'
LAST_DATE_ARG_NAME = 'last_spc_date_string'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'

TORNADO_DIR_HELP_STRING = (
    'Name of directory with tornado reports.  Files therein will be found by '
    '`tornado_io.find_processed_file` and read by '
    '`tornado_io.read_processed_file`.'
)
GRIDRAD_DIR_HELP_STRING = (
    'Name of top-level GridRad directory, used to determine which convective '
    'days are covered.  Files therein will be found by `gridrad_io.find_file`.'
)
SPC_DATE_HELP_STRING = (
    'SPC date or convective day (format "yyyymmdd").  This script will look for'
    ' GridRad files in the period `{0:s}`...`{1:s}`.'
).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME)

OUTPUT_DIR_HELP_STRING = (
    'Name of output directory.  Figures will be saved here.'
)

INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + TORNADO_DIR_ARG_NAME, type=str, required=True,
    help=TORNADO_DIR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + GRIDRAD_DIR_ARG_NAME, type=str, required=True,
    help=GRIDRAD_DIR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + FIRST_DATE_ARG_NAME, type=str, required=True,
    help=SPC_DATE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + LAST_DATE_ARG_NAME, type=str, required=True,
    help=SPC_DATE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING
)


def _find_gridrad_file_for_date(top_gridrad_dir_name, spc_date_string):
    """Tries to find one GridRad file for given SPC date.

    :param top_gridrad_dir_name: See documentation at top of file.
    :param spc_date_string: SPC date or convective day (format "yyyymmdd").
    :return: gridrad_file_name: Path to GridRad file.  If no files were found
        for the given SPC date, returns None.
    """

    first_time_unix_sec = time_conversion.get_start_of_spc_date(spc_date_string)
    last_time_unix_sec = time_conversion.get_end_of_spc_date(spc_date_string)

    all_times_unix_sec = time_periods.range_and_interval_to_list(
        start_time_unix_sec=first_time_unix_sec,
        end_time_unix_sec=last_time_unix_sec,
        time_interval_sec=TIME_INTERVAL_SEC, include_endpoint=True)

    # Return the first file that exists on disk for the day.
    for this_time_unix_sec in all_times_unix_sec:
        this_gridrad_file_name = gridrad_io.find_file(
            unix_time_sec=this_time_unix_sec,
            top_directory_name=top_gridrad_dir_name,
            raise_error_if_missing=False)

        if os.path.isfile(this_gridrad_file_name):
            return this_gridrad_file_name

    return None


def _spc_dates_to_years(spc_date_strings):
    """Finds first and last years in set of SPC dates.

    :param spc_date_strings: 1-D list of SPC dates (format "yyyymmdd").
    :return: first_year: First year.
    :return: last_year: Last year.
    """

    start_times_unix_sec = numpy.array(
        [time_conversion.get_start_of_spc_date(d) for d in spc_date_strings],
        dtype=int
    )
    end_times_unix_sec = numpy.array(
        [time_conversion.get_end_of_spc_date(d) for d in spc_date_strings],
        dtype=int
    )

    start_years = numpy.array([
        int(time_conversion.unix_sec_to_string(t, '%Y'))
        for t in start_times_unix_sec
    ], dtype=int)

    end_years = numpy.array([
        int(time_conversion.unix_sec_to_string(t, '%Y'))
        for t in end_times_unix_sec
    ], dtype=int)

    # An SPC date can straddle a calendar-year boundary, so both the start and
    # end year of every date are considered.
    all_years = numpy.concatenate((start_years, end_years))
    return numpy.min(all_years), numpy.max(all_years)


def _read_tornado_reports(tornado_dir_name, first_year, last_year):
    """Reads tornado reports from the given years.

    :param tornado_dir_name: See documentation at top of file.
    :param first_year: First year.
    :param last_year: Last year.
    :return: tornado_table: pandas DataFrame in format returned by
        `tornado_io.read_processed_file`.
    """

    list_of_tornado_tables = []

    for this_year in range(first_year, last_year + 1):
        this_file_name = tornado_io.find_processed_file(
            directory_name=tornado_dir_name, year=this_year)

        print('Reading data from: "{0:s}"...'.format(this_file_name))
        list_of_tornado_tables.append(
            tornado_io.read_processed_file(this_file_name)
        )

        if len(list_of_tornado_tables) == 1:
            continue

        # Align columns of each new table with those of the first table.
        list_of_tornado_tables[-1] = list_of_tornado_tables[-1].align(
            list_of_tornado_tables[0], axis=1
        )[0]

    return pandas.concat(list_of_tornado_tables, axis=0, ignore_index=True)


def _get_num_tornadoes_in_day(tornado_table, spc_date_string):
    """Returns number of tornado reports for given SPC date (convective day).

    :param tornado_table: pandas DataFrame in format returned by
        `tornado_io.read_processed_file`.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :return: num_tornadoes: Number of tornado reports.
    """

    first_time_unix_sec = time_conversion.get_start_of_spc_date(spc_date_string)
    last_time_unix_sec = time_conversion.get_end_of_spc_date(spc_date_string)

    # A report counts if either its start point or end point falls in the day.
    good_start_point_flags = numpy.logical_and(
        tornado_table[tornado_io.START_TIME_COLUMN].values >=
        first_time_unix_sec,
        tornado_table[tornado_io.START_TIME_COLUMN].values <= last_time_unix_sec
    )
    good_end_point_flags = numpy.logical_and(
        tornado_table[tornado_io.END_TIME_COLUMN].values >= first_time_unix_sec,
        tornado_table[tornado_io.END_TIME_COLUMN].values <= last_time_unix_sec
    )

    return numpy.sum(numpy.logical_or(
        good_start_point_flags, good_end_point_flags
    ))


def _plot_tornado_histogram(num_tornadoes_by_day, output_file_name):
    """Plots histogram for daily number of tornado reports.

    D = number of SPC dates with GridRad data

    :param num_tornadoes_by_day: length-D numpy array with number of tornado
        reports by day.
    :param output_file_name: Path to output file (figure will be saved here).
    """

    # Bins: singletons 0..6, then width-10 bins 11-20..91-100, then 101+.
    lower_bin_edges = numpy.concatenate((
        numpy.linspace(0, 6, num=7, dtype=int),
        numpy.linspace(11, 101, num=10, dtype=int)
    ))
    upper_bin_edges = numpy.concatenate((
        numpy.linspace(0, 5, num=6, dtype=int),
        numpy.linspace(10, 100, num=10, dtype=int),
        numpy.array([LARGE_INTEGER], dtype=int)
    ))

    num_bins = len(lower_bin_edges)
    num_days_by_bin = numpy.full(num_bins, -1, dtype=int)
    x_tick_labels = [''] * num_bins

    for k in range(num_bins):
        num_days_by_bin[k] = numpy.sum(numpy.logical_and(
            num_tornadoes_by_day >= lower_bin_edges[k],
            num_tornadoes_by_day <= upper_bin_edges[k]
        ))

        if lower_bin_edges[k] == upper_bin_edges[k]:
            x_tick_labels[k] = '{0:d}'.format(lower_bin_edges[k])
        elif k == num_bins - 1:
            x_tick_labels[k] = '{0:d}+'.format(lower_bin_edges[k])
        else:
            x_tick_labels[k] = '{0:d}-{1:d}'.format(
                lower_bin_edges[k], upper_bin_edges[k]
            )

    figure_object, axes_object = pyplot.subplots(
        1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
    )

    x_tick_coords = 0.5 + numpy.linspace(
        0, num_bins - 1, num=num_bins, dtype=float
    )

    axes_object.bar(
        x=x_tick_coords, height=num_days_by_bin, width=1., color=FACE_COLOUR,
        edgecolor=EDGE_COLOUR, linewidth=EDGE_WIDTH)

    axes_object.set_xlim([
        x_tick_coords[0] - 0.5, x_tick_coords[-1] + 0.5
    ])
    axes_object.set_xticks(x_tick_coords)
    axes_object.set_xticklabels(x_tick_labels, rotation=90.)

    axes_object.set_title(
        'Histogram of daily tornado reports'
    )
    axes_object.set_ylabel('Number of convective days')
    axes_object.set_xlabel('Tornado reports')
    plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')

    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    figure_object.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)


def _plot_month_histogram(spc_date_strings, output_file_name):
    """Plots histogram of months.

    :param spc_date_strings: 1-D list of SPC dates (format "yyyymmdd") with
        GridRad data.
    :param output_file_name: Path to output file (figure will be saved here).
    """

    start_times_unix_sec = numpy.array(
        [time_conversion.get_start_of_spc_date(d) for d in spc_date_strings],
        dtype=int
    )
    end_times_unix_sec = numpy.array(
        [time_conversion.get_end_of_spc_date(d) for d in spc_date_strings],
        dtype=int
    )

    start_month_by_date = numpy.array([
        int(time_conversion.unix_sec_to_string(t, '%m'))
        for t in start_times_unix_sec
    ], dtype=int)

    end_month_by_date = numpy.array([
        int(time_conversion.unix_sec_to_string(t, '%m'))
        for t in end_times_unix_sec
    ], dtype=int)

    num_days_by_month = numpy.full(NUM_MONTHS_IN_YEAR, numpy.nan)

    for k in range(NUM_MONTHS_IN_YEAR):
        # A convective day may straddle a month boundary, so each date
        # contributes half a count for its start month and half for its end
        # month (the two halves coincide for most dates).
        num_days_by_month[k] = 0.5 * (
            numpy.sum(start_month_by_date == k + 1) +
            numpy.sum(end_month_by_date == k + 1)
        )

    figure_object, axes_object = pyplot.subplots(
        1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
    )

    x_tick_coords = 0.5 + numpy.linspace(
        0, NUM_MONTHS_IN_YEAR - 1, num=NUM_MONTHS_IN_YEAR, dtype=float
    )
    x_tick_labels = [
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
        'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
    ]

    axes_object.bar(
        x=x_tick_coords, height=num_days_by_month, width=1., color=FACE_COLOUR,
        edgecolor=EDGE_COLOUR, linewidth=EDGE_WIDTH)

    axes_object.set_xlim([
        x_tick_coords[0] - 0.5, x_tick_coords[-1] + 0.5
    ])
    axes_object.set_xticks(x_tick_coords)
    axes_object.set_xticklabels(x_tick_labels, rotation=90.)

    axes_object.set_title('Histogram of months')
    axes_object.set_ylabel('Number of convective days')
    axes_object.set_xlabel('Month')
    plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')

    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    figure_object.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)


def _run(tornado_dir_name, top_gridrad_dir_name, first_spc_date_string,
         last_spc_date_string, output_dir_name):
    """Plots histograms for GridRad dataset.

    This is effectively the main method.

    :param tornado_dir_name: See documentation at top of file.
    :param top_gridrad_dir_name: Same.
    :param first_spc_date_string: Same.
    :param last_spc_date_string: Same.
    :param output_dir_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    all_spc_date_strings = time_conversion.get_spc_dates_in_range(
        first_spc_date_string=first_spc_date_string,
        last_spc_date_string=last_spc_date_string)

    # Keep only SPC dates for which at least one GridRad file exists.
    spc_date_strings = []

    for this_spc_date_string in all_spc_date_strings:
        this_gridrad_file_name = _find_gridrad_file_for_date(
            top_gridrad_dir_name=top_gridrad_dir_name,
            spc_date_string=this_spc_date_string)

        if this_gridrad_file_name is None:
            continue

        spc_date_strings.append(this_spc_date_string)

    first_year, last_year = _spc_dates_to_years(spc_date_strings)

    tornado_table = _read_tornado_reports(
        tornado_dir_name=tornado_dir_name, first_year=first_year,
        last_year=last_year
    )
    print(SEPARATOR_STRING)

    num_days = len(spc_date_strings)
    num_tornadoes_by_day = numpy.full(num_days, -1, dtype=int)

    for i in range(num_days):
        num_tornadoes_by_day[i] = _get_num_tornadoes_in_day(
            tornado_table=tornado_table, spc_date_string=spc_date_strings[i]
        )

        print('Number of tornadoes on SPC date "{0:s}" = {1:d}'.format(
            spc_date_strings[i], num_tornadoes_by_day[i]
        ))

    print(SEPARATOR_STRING)

    _plot_tornado_histogram(
        num_tornadoes_by_day=num_tornadoes_by_day,
        output_file_name='{0:s}/tornado_histogram.jpg'.format(output_dir_name)
    )

    _plot_month_histogram(
        spc_date_strings=spc_date_strings,
        output_file_name='{0:s}/month_histogram.jpg'.format(output_dir_name)
    )


if __name__ == '__main__':
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()

    _run(
        tornado_dir_name=getattr(INPUT_ARG_OBJECT, TORNADO_DIR_ARG_NAME),
        top_gridrad_dir_name=getattr(INPUT_ARG_OBJECT, GRIDRAD_DIR_ARG_NAME),
        first_spc_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME),
        last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME),
        output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
"""
NCL_panel_15.py
===============
This script illustrates the following concepts:
   - Paneling three plots vertically
   - Making a color bar span over two axes
   - Selecting a different colormap to abide by best practices. See the
     `color examples <https://geocat-examples.readthedocs.io/en/latest/gallery/index.html#colors>`_
     for more information.

See following URLs to see the reproduced NCL plot & script:
    - Original NCL script: http://www.ncl.ucar.edu/Applications/Scripts/panel_15.ncl
    - Original NCL plot: http://www.ncl.ucar.edu/Applications/Images/panel_15_lg.png
"""

##############################################################################
# Import packages:

import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LongitudeFormatter, LatitudeFormatter
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import xarray as xr

import geocat.datafiles as gdf
import geocat.viz.util as gvutil

##############################################################################
# Read in data:

# Open a netCDF data file using xarray default engine and load the data into
# xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/h_avg_Y0191_D000.00.nc"),
                     decode_times=False)

# Ensure longitudes range from 0 to 360 degrees
t = gvutil.xr_add_cyclic_longitudes(ds.T, "lon_t")

# Selecting the first time step and then the three levels of interest
t = t.isel(time=0)
t_1 = t.isel(z_t=0)
t_2 = t.isel(z_t=1)
t_6 = t.isel(z_t=5)

##############################################################################
# Plot:

fig = plt.figure(figsize=(8, 12))
# 3 rows x 1 column: one map per vertical level
grid = gridspec.GridSpec(nrows=3, ncols=1, figure=fig)

# Choose the map projection
proj = ccrs.PlateCarree()

# Add the subplots
ax1 = fig.add_subplot(grid[0], projection=proj)  # upper cell of grid
ax2 = fig.add_subplot(grid[1], projection=proj)  # middle cell of grid
ax3 = fig.add_subplot(grid[2], projection=proj)  # lower cell of grid

# Apply identical cosmetic styling to all three axes
for (ax, title) in [(ax1, 'level 0'), (ax2, 'level 1'), (ax3, 'level 6')]:
    # Use geocat.viz.util convenience function to set axes tick values
    gvutil.set_axes_limits_and_ticks(ax=ax,
                                     xlim=(-180, 180),
                                     ylim=(-90, 90),
                                     xticks=np.linspace(-180, 180, 13),
                                     yticks=np.linspace(-90, 90, 7))

    # Use geocat.viz.util convenience function to make plots look like NCL
    # plots by using latitude, longitude tick labels
    gvutil.add_lat_lon_ticklabels(ax)

    # Remove the degree symbol from tick labels
    ax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))
    ax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))

    # Use geocat.viz.util convenience function to add minor and major ticks
    gvutil.add_major_minor_ticks(ax)

    # Draw coastlines
    ax.coastlines(linewidth=0.5)

    # Use geocat.viz.util convenience function to set titles
    gvutil.set_titles_and_labels(ax,
                                 lefttitle=t_1.long_name,
                                 righttitle=t_1.units,
                                 lefttitlefontsize=10,
                                 righttitlefontsize=10)

    # Add center title
    ax.set_title(title, loc='center', y=1.04, fontsize=10)

# Select an appropriate colormap
cmap = 'magma'

# Plot data
# ax1 and ax2 share contour levels (0..28), so one colorbar can span both
C = ax1.contourf(t_1['lon_t'],
                 t_1['lat_t'],
                 t_1.data,
                 levels=np.arange(0, 30, 2),
                 cmap=cmap,
                 extend='both')
ax2.contourf(t_2['lon_t'],
             t_2['lat_t'],
             t_2.data,
             levels=np.arange(0, 30, 2),
             cmap=cmap,
             extend='both')
# ax3 uses its own (narrower) level range and gets its own colorbar
C_2 = ax3.contourf(t_6['lon_t'],
                   t_6['lat_t'],
                   t_6.data,
                   levels=np.arange(0, 22, 2),
                   cmap=cmap,
                   extend='both')

# Add colorbars
# By specifying two axes for `ax` the colorbar will span both of them
plt.colorbar(C,
             ax=[ax1, ax2],
             ticks=range(0, 30, 2),
             extendrect=True,
             extendfrac='auto',
             shrink=0.85,
             aspect=13,
             drawedges=True)
plt.colorbar(C_2,
             ax=ax3,
             ticks=range(0, 22, 2),
             extendrect=True,
             extendfrac='auto',
             shrink=0.85,
             aspect=5.5,
             drawedges=True)

plt.show()
<reponame>plataKwon/KPRN
#***********************************************************
#Copyright 2018 eBay Inc.
#Use of this source code is governed by a MIT-style
#license that can be found in the LICENSE file or at
#https://opensource.org/licenses/MIT.
#***********************************************************

# -*- coding:utf-8 -*-
import codecs
import time
import sys

# relation dict
# Entity-id prefixes encode entity type: "u"=user, "s"=song/item, "p"=?, "t"=?
# (prefix semantics inferred from usage below — TODO confirm against the dataset).
relation_dict = {"rate": "r1", "belong": "r2", "category": "r3",
                 "_rate": "r4", "_belong": "r5", "_category": "r6"}


# Find Paths between head entity and tail entity
def get_relation(head_entity, end_entity):
    """Map a (head, tail) entity pair to a relation id via the type prefixes
    embedded in the entity ids.

    NOTE(review): falls through and implicitly returns None for unsupported
    prefix combinations; callers below assume every consecutive pair is valid.
    """
    if "s" in head_entity:
        if "u" in end_entity:
            return relation_dict["_rate"]
        elif "p" in end_entity:
            return relation_dict["_category"]
        elif "t" in end_entity:
            return relation_dict["_belong"]
        else:
            pass
    elif "u" in head_entity:
        if "s" in end_entity:
            return relation_dict["rate"]
        else:
            pass
    elif "p" in head_entity:
        if "s" in end_entity:
            return relation_dict["category"]
        else:
            pass
    elif "t" in head_entity:
        if "s" in end_entity:
            return relation_dict["belong"]
        else:
            pass
    else:
        pass


if __name__ == "__main__":
    # input of positive(user,movie)file
    user_rate_reader = codecs.open(sys.argv[1], mode="r", encoding="utf-8")
    head_line = user_rate_reader.readline()  # skip header row
    ground_truth_list = []
    line = user_rate_reader.readline()
    while line:
        line_list = line.strip().split("\t")
        ground_truth_list.append((line_list[0], line_list[1]))
        line = user_rate_reader.readline()
    user_rate_reader.close()
    # set for O(1) membership tests in the labeling loop below
    ground_truth_list = set(ground_truth_list)
    print(len(ground_truth_list))

    # input and output path
    path_reader = codecs.open(sys.argv[2], mode="r", encoding="utf-8")
    pos_writer = codecs.open(sys.argv[3], mode="w", encoding="utf-8")
    neg_writer = codecs.open(sys.argv[4], mode="w", encoding="utf-8")

    line = path_reader.readline()
    count_num = 0
    start_time = time.time()
    pos_path_num = 0
    neg_path_num = 0
    pos_pair_num = 0
    neg_pair_num = 0
    while line:
        # Each input line: head \t tail \t path1###path2###...  where a path
        # is a "/"-separated list of intermediate entity ids.
        line_list = line.strip().split("\t")
        entity_pair = (line_list[0], line_list[1])
        start_node = line_list[0]
        end_node = line_list[1]
        # add relation
        # Rewrite each path as "r-e-r-e-...-r" by inserting a relation id
        # between every consecutive entity pair (including start and end).
        path_with_relation_list = []
        path_list = line_list[2].split("###")
        for path in path_list:
            temp_path = []
            node_list = path.split("/")
            # node_list.index(0, start_node)
            pre_node = start_node
            for node in node_list:
                re_id = get_relation(pre_node, node)
                temp_path.append(re_id)
                temp_path.append(node)
                pre_node = node
            re_id = get_relation(pre_node, end_node)
            temp_path.append(re_id)
            path_with_relation_list.append("-".join(temp_path))
        # add label
        # Label 1 if the pair appears in the positive (ground-truth) set,
        # otherwise -1; write to the corresponding output file.
        if entity_pair in ground_truth_list:
            pos_writer.write("\t".join(entity_pair)+"\t"+"###".join(path_with_relation_list)+"\t1\n")
            pos_pair_num += 1
            pos_path_num += len(path_with_relation_list)
        else:
            neg_writer.write("\t".join(entity_pair)+"\t"+"###".join(path_with_relation_list)+"\t-1\n")
            neg_pair_num += 1
            neg_path_num += len(path_with_relation_list)
        # read next line
        line = path_reader.readline()
        count_num += 1
        if count_num % 10000 == 0:
            # progress: lines processed and average seconds per 10k lines
            print(count_num, (time.time()-start_time)/(count_num/10000))
            # break
    path_reader.close()
    pos_writer.close()
    neg_writer.close()
    print("total cost time:", time.time()-start_time)
    print("pos pair nums:", pos_pair_num, "pos path nums:", pos_path_num)
    print("neg pair nums:", neg_pair_num, "neg path nums:", neg_path_num)
# Generated by Django 2.0 on 2018-01-23 17:05
# NOTE: auto-generated initial migration for the blog app (Article, Category,
# Links, SideBar, Tag). Do not hand-edit applied migrations; create a new
# migration for schema changes instead.

import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True, default='no-slug', max_length=160)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('last_mod_time', models.DateTimeField(auto_now=True)),
                ('title', models.CharField(max_length=200, unique=True, verbose_name='Title')),
                ('body', models.TextField()),
                ('pub_time', models.DateTimeField(blank=True, null=True)),
                ('status', models.CharField(choices=[('d', 'Draft'), ('p', 'Posted')], default='p', max_length=1)),
                ('comment_status', models.CharField(choices=[('o', 'Turn on'), ('c', 'Shut down')], default='o', max_length=1)),
                ('type', models.CharField(choices=[('a', 'Article'), ('p', 'Page')], default='a', max_length=1)),
                ('views', models.PositiveIntegerField(default=0)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Articles',
                'verbose_name_plural': 'Articles',
                'ordering': ['-pub_time'],
                'get_latest_by': 'created_time',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True, default='no-slug', max_length=160)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('last_mod_time', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=30, unique=True)),
                ('parent_category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Category',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Links',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('link', models.URLField()),
                ('sequence', models.IntegerField(unique=True)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('last_mod_time', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Links',
                'verbose_name_plural': 'Links',
                'ordering': ['sequence'],
            },
        ),
        migrations.CreateModel(
            name='SideBar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('sequence', models.IntegerField(unique=True)),
                ('is_enable', models.BooleanField(default=True)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('last_mod_time', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'SideBar',
                'verbose_name_plural': 'SideBar',
                'ordering': ['sequence'],
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True, default='no-slug', max_length=160)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('last_mod_time', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=30, unique=True)),
            ],
            options={
                'verbose_name': 'Tags',
                'verbose_name_plural': 'Tags',
                'ordering': ['name'],
            },
        ),
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
        ),
        migrations.AddField(
            model_name='article',
            name='tags',
            field=models.ManyToManyField(blank=True, to='blog.Tag'),
        ),
    ]
"""Tests for AWS Braket backends."""
import unittest
from unittest import TestCase
from unittest.mock import Mock

from qiskit import QuantumCircuit, transpile, BasicAer
from qiskit.algorithms import VQE, VQEResult
from qiskit.algorithms.optimizers import (
    SLSQP,
)
from qiskit.circuit.library import TwoLocal
from qiskit.circuit.random import random_circuit
from qiskit.opflow import (
    I,
    X,
    Z,
)
from qiskit.result import Result
from qiskit.transpiler import Target
from qiskit.utils import QuantumInstance

from qiskit_braket_provider import AWSBraketProvider
from qiskit_braket_provider.providers import AWSBraketBackend, BraketLocalBackend
from qiskit_braket_provider.providers.adapter import aws_device_to_target
from tests.providers.mocks import RIGETTI_MOCK_GATE_MODEL_QPU_CAPABILITIES


class TestAWSBraketBackend(TestCase):
    """Tests BraketBackend."""

    def test_device_backend(self):
        """Tests device backend.

        Uses a Mock device carrying real Rigetti capability data so no AWS
        call is made; pulse-style channel accessors must be unsupported.
        """
        device = Mock()
        device.properties = RIGETTI_MOCK_GATE_MODEL_QPU_CAPABILITIES
        backend = AWSBraketBackend(device)
        self.assertTrue(backend)
        self.assertIsInstance(backend.target, Target)
        self.assertIsNone(backend.max_circuits)
        # Braket backends expose no pulse channels.
        with self.assertRaises(NotImplementedError):
            backend.drive_channel(0)
        with self.assertRaises(NotImplementedError):
            backend.acquire_channel(0)
        with self.assertRaises(NotImplementedError):
            backend.measure_channel(0)
        with self.assertRaises(NotImplementedError):
            backend.control_channel([0, 1])

    def test_local_backend(self):
        """Tests local backend.

        Same contract as the device backend, but against the local simulator.
        """
        backend = BraketLocalBackend(name="default")
        self.assertTrue(backend)
        self.assertIsInstance(backend.target, Target)
        self.assertIsNone(backend.max_circuits)
        with self.assertRaises(NotImplementedError):
            backend.drive_channel(0)
        with self.assertRaises(NotImplementedError):
            backend.acquire_channel(0)
        with self.assertRaises(NotImplementedError):
            backend.measure_channel(0)
        with self.assertRaises(NotImplementedError):
            backend.control_channel([0, 1])

    def test_local_backend_circuit(self):
        """Tests local backend with circuit."""
        backend = BraketLocalBackend(name="default")
        circuits = []

        # Circuit 0: |00> -> X(0), CX -> deterministically |11>
        q_c = QuantumCircuit(2)
        q_c.x(0)
        q_c.cx(0, 1)
        circuits.append(q_c)

        # Circuit 1: Bell state -> only "00"/"11" outcomes possible
        q_c = QuantumCircuit(2)
        q_c.h(0)
        q_c.cx(0, 1)
        circuits.append(q_c)

        results = []
        for circuit in circuits:
            results.append(backend.run(circuit).result())

        # Result 0: all 1024 default shots land on "11".
        self.assertEqual(results[0].get_counts(), {"11": 1024})
        # Result 1: Bell-state counts must sum to the shot total.
        _00 = results[1].get_counts()["00"]
        _11 = results[1].get_counts()["11"]
        self.assertEqual(_00 + _11, 1024)

    def test_vqe(self):
        """Tests VQE.

        Runs a single SLSQP iteration on the standard H2 operator; only the
        plumbing (result type, parameter count) is checked, not convergence.
        """
        local_simulator = BraketLocalBackend(name="default")
        h2_op = (
            (-1.052373245772859 * I ^ I)
            + (0.39793742484318045 * I ^ Z)
            + (-0.39793742484318045 * Z ^ I)
            + (-0.01128010425623538 * Z ^ Z)
            + (0.18093119978423156 * X ^ X)
        )
        quantum_instance = QuantumInstance(
            local_simulator, seed_transpiler=42, seed_simulator=42
        )
        ansatz = TwoLocal(rotation_blocks="ry", entanglement_blocks="cz")
        slsqp = SLSQP(maxiter=1)
        vqe = VQE(ansatz, optimizer=slsqp, quantum_instance=quantum_instance)
        result = vqe.compute_minimum_eigenvalue(h2_op)
        self.assertIsInstance(result, VQEResult)
        self.assertEqual(len(result.optimal_parameters), 8)
        self.assertEqual(len(list(result.optimal_point)), 8)

    def test_random_circuits(self):
        """Tests with random circuits.

        Cross-checks the Braket simulator against BasicAer on seeded random
        circuits of 1..9 qubits, comparing only the dominant outcomes.
        """
        backend = BraketLocalBackend(name="braket_sv")
        aer_backend = BasicAer.get_backend("statevector_simulator")
        for i in range(1, 10):
            with self.subTest(f"Random circuit with {i} qubits."):
                circuit = random_circuit(i, 5, seed=42)
                braket_transpiled_circuit = transpile(
                    circuit, backend=backend, seed_transpiler=42
                )
                braket_result = (
                    backend.run(braket_transpiled_circuit, shots=1000)
                    .result()
                    .get_counts()
                )
                transpiled_aer_circuit = transpile(
                    circuit, backend=aer_backend, seed_transpiler=42
                )
                aer_result = (
                    aer_backend.run(transpiled_aer_circuit, shots=1000)
                    .result()
                    .get_counts()
                )
                # NOTE(review): the thresholds differ (50 vs 0.05) --
                # presumably Braket reports shot counts out of 1000 while the
                # statevector backend reports probabilities; confirm.
                self.assertEqual(
                    sorted([k for k, v in braket_result.items() if v > 50]),
                    sorted([k for k, v in aer_result.items() if v > 0.05]),
                )
                self.assertIsInstance(braket_result, dict)

    @unittest.skip("Call to external resources.")
    def test_retrieve_job(self):
        """Tests retrieve job by id.

        Skipped by default: submits real jobs to the SV1 managed simulator.
        """
        backend = AWSBraketProvider().get_backend("SV1")
        circuits = [
            transpile(
                random_circuit(3, 2, seed=seed), backend=backend, seed_transpiler=42
            )
            for seed in range(3)
        ]
        job = backend.run(circuits, shots=10)
        job_id = job.job_id()
        retrieved_job = backend.retrieve_job(job_id)

        job_result: Result = job.result()
        retrieved_job_result: Result = retrieved_job.result()

        # The retrieved job must describe the same execution as the original.
        self.assertEqual(job_result.job_id, retrieved_job_result.job_id)
        self.assertEqual(job_result.status, retrieved_job_result.status)
        self.assertEqual(
            job_result.backend_version, retrieved_job_result.backend_version
        )
        self.assertEqual(job_result.backend_name, retrieved_job_result.backend_name)


class TestAWSBackendTarget(TestCase):
    """Tests target for AWS Braket backend."""

    def test_target(self):
        """Tests target.

        Expected sizes come from the Rigetti mock capability fixture.
        """
        mock_device = Mock()
        mock_device.properties = RIGETTI_MOCK_GATE_MODEL_QPU_CAPABILITIES

        target = aws_device_to_target(mock_device)
        self.assertEqual(target.num_qubits, 30)
        self.assertEqual(len(target.operations), 2)
        self.assertEqual(len(target.instructions), 31)
        self.assertIn("Target for AWS Device", target.description)
from __future__ import print_function
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *


class VolumesLogicCompareVolumeGeometryTesting(ScriptedLoadableModuleTest):
    """Exercises vtkSlicerVolumesLogic CompareVolumeGeometry with different
    epsilon / precision settings, using the MRHead sample volume."""

    def setUp(self):
        # No scene setup needed; the test downloads its own sample data.
        pass

    def test_VolumesLogicCompareVolumeGeometry(self):
        """
        Load a volume, then call the compare volume geometry test
        with different values of epsilon and precision.
        """
        self.delayDisplay("Starting the test")

        #
        # first, get some sample data
        #
        import SampleData
        head = SampleData.downloadSample("MRHead")

        #
        # get the volumes logic and print out default epsilon and precision
        #
        volumesLogic = slicer.modules.volumes.logic()
        print('Compare volume geometry epsilon: ',
              volumesLogic.GetCompareVolumeGeometryEpsilon())
        print('Compare volume geometry precision: ',
              volumesLogic.GetCompareVolumeGeometryPrecision())
        # Factory defaults: epsilon 1e-6 and a matching print precision of 6.
        self.assertAlmostEqual(volumesLogic.GetCompareVolumeGeometryEpsilon(), 1e-6)
        self.assertEqual(volumesLogic.GetCompareVolumeGeometryPrecision(), 6)

        #
        # compare the head against itself, this shouldn't produce any warning
        # string
        #
        warningString = volumesLogic.CompareVolumeGeometry(head, head)
        if len(warningString) != 0:
            print('Error in checking MRHead geometry against itself')
            print(warningString)
            return False
        else:
            print('Success in comparing MRHead vs itself with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())

        #
        # see if you can get it to fail with a tighter epsilon
        #
        volumesLogic.SetCompareVolumeGeometryEpsilon(1e-10)
        # The logic derives the print precision from the epsilon exponent.
        precision = volumesLogic.GetCompareVolumeGeometryPrecision()
        if precision != 10:
            print('Error in calculating precision from epsilon of ',
                  volumesLogic.GetCompareVolumeGeometryEpsilon(),
                  ', expected 10, got ', precision)
            return False
        warningString = volumesLogic.CompareVolumeGeometry(head, head)
        if len(warningString) != 0:
            print('Error in checking MRHead geometry against itself with strict epsilon')
            print(warningString)
            return False
        else:
            print('Success in comparing MRHead vs itself with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())

        #
        # clone the volume so can test for mismatches in geometry with
        # that operation
        #
        head2 = volumesLogic.CloneVolume(head, 'head2')
        warningString = volumesLogic.CompareVolumeGeometry(head, head2)
        if len(warningString) != 0:
            print('Error in checking MRHead geometry against itself with epsilon ',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())
            print(warningString)
            return False
        else:
            print('Success in comparing MRHead vs clone with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())

        #
        # now try with a label map volume
        #
        headLabel = volumesLogic.CreateAndAddLabelVolume(head, "label vol")
        warningString = volumesLogic.CompareVolumeGeometry(head, headLabel)
        if len(warningString) != 0:
            print('Error in comparing MRHead geometry against a label map of itself with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())
            print(warningString)
            return False
        else:
            print('Success in comparing MRHead vs label map with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())

        #
        # adjust the geometry and make it fail
        #
        # Perturb one RAS->IJK matrix element and the spacing so the clone no
        # longer matches within the (strict) current epsilon.
        head2Matrix = vtk.vtkMatrix4x4()
        head2.GetRASToIJKMatrix(head2Matrix)
        val = head2Matrix.GetElement(2, 0)
        head2Matrix.SetElement(2, 0, val + 0.25)
        head2.SetRASToIJKMatrix(head2Matrix)
        head2.SetSpacing(0.12345678901234567890, 2.0, 3.4)
        warningString = volumesLogic.CompareVolumeGeometry(head, head2)
        if len(warningString) == 0:
            print('Error in comparing MRHead geometry against an updated clone, with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())
            return False
        else:
            print('Success in making the comparison fail, with with epsilon',
                  volumesLogic.GetCompareVolumeGeometryEpsilon())
            print(warningString)

        #
        # reset the epsilon with an invalid negative number
        #
        # The logic is expected to take the absolute value of a negative
        # epsilon and recompute the precision accordingly.
        volumesLogic.SetCompareVolumeGeometryEpsilon(-0.01)
        epsilon = volumesLogic.GetCompareVolumeGeometryEpsilon()
        if epsilon != 0.01:
            print('Failed to use the absolute value for an epsilon of -0.01: ', epsilon)
            return False
        precision = volumesLogic.GetCompareVolumeGeometryPrecision()
        if precision != 2:
            print('Failed to set the precision to 2: ', precision)
            return False
        warningString = volumesLogic.CompareVolumeGeometry(head, head2)
        print(warningString)

        self.delayDisplay('Test passed')
        return True
"""
Load tests for course import from studio.

By default, this tests loading a relatively small course. I recommend
exporting a large course from edX and using it here.
"""
import os
import sys
# due to locust sys.path manipulation, we need to re-add the project root.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

import random
import time

from locust import HttpLocust, TaskSet, task, events

from helpers import settings, markers

settings.init(
    __name__,
    required_data=[
        'CMS_USER_EMAIL',
        'TEST_FILE',
        'NUM_PARALLEL_COURSES',
    ],
    required_secrets=[
        'CMS_USER_PASSWORD',
    ],
)

markers.install_event_markers()


class CourseImport(TaskSet):
    "Course import task set -- creates course and imports tarballs."

    def on_start(self):
        "Setup method; log in to studio and create courses."
        self.login()
        for i in xrange(settings.data['NUM_PARALLEL_COURSES']):
            self.create_course(i)

    def login(self):
        "Log in to CMS."
        if settings.data.get('BASIC_AUTH_USER') is not None:
            self.client.auth = (
                settings.data['BASIC_AUTH_USER'],
                settings.data['BASIC_AUTH_PASS'],
            )
        self.client.get("/logout")
        self.client.get("/signin")
        response = self.client.post(
            "/login_post",
            data={
                'email': settings.data['CMS_USER_EMAIL'],
                # BUG FIX: the secret must be read under the same key that is
                # declared in required_secrets above ('CMS_USER_PASSWORD');
                # the previous, mangled key did not exist and raised KeyError
                # before the login request was ever sent.
                'password': settings.secrets['CMS_USER_PASSWORD'],
                'honor_code': 'true',
                'csrfmiddlewaretoken': self.client.cookies['csrftoken'],
            },
            headers={'referer': '{0}/signin'.format(self.client.base_url)})
        if response.status_code != 200:
            raise Exception('Login failed: ' + response.text)
        # FIXME: While running this test, I needed to comment the following
        # line.  My goal is not to debug this test, so I will leave it
        # uncommented.  --@TroySankey
        self.client.auth = None
        response = self.client.get("/home/")
        if response.text.find("Currently signed in as:") < 0:
            raise Exception('Login failed.')

    def create_course(self, num):
        """Create a course with run number 'num'

        Arguments:
            num - the rerun id to use.
        """
        self.client.get("/home/")
        response = self.client.post(
            "/course/",
            json={"org": "LocustX",
                  "number": "Soup101",
                  "display_name": "Soup is Delicious, etc.",
                  "run": "X{0:02d}".format(num)},
            headers={'referer': '{0}/home/'.format(self.client.base_url),
                     'accept': 'application/json',
                     'X-CSRFToken': self.client.cookies['csrftoken']})
        if response.status_code != 200:
            raise Exception('Course creation failed: ' + response.text)

    def import_course(self, num):
        "Import a course over run number 'num'."
        with open(settings.data['TEST_FILE'], "rb") as test_fp:
            cid = "course-v1:LocustX+Soup101+X{0:02d}".format(num)
            import_url = "/import/{0}".format(cid)
            # Random upload filename so parallel imports do not collide.
            ifname = "some{0:08d}.tar.gz".format(int(random.random() * 1e8))
            self.client.get(import_url, name="/import")
            start_time = time.time()
            resp = self.client.post(
                import_url,
                name="/import",
                headers={'referer': "{0}{1}".format(self.client.base_url,
                                                    import_url),
                         'accept': 'application/json',
                         'X-CSRFToken': self.client.cookies['csrftoken']},
                files={'course-data': (ifname, test_fp,
                                       "application/x-compressed")})
            if resp.status_code != 200:
                raise Exception('Course import failed.')
            # Poll the import status endpoint for up to ~10 seconds.
            # NOTE(review): "4" appears to be the terminal-success state and
            # "0" a terminal-failure/unknown state -- confirm against the
            # studio import_status API.
            for _ in xrange(100):
                resp = self.client.get(
                    "/import_status/{0}/{1}".format(cid, ifname),
                    name="/import_status/")
                if resp.text.find("4") >= 0 or resp.text.find("0") >= 0:
                    break
                time.sleep(0.1)
            # Report the whole import (upload + polling) as one synthetic
            # locust request named "course_import".
            if resp.text.find("4") >= 0:
                events.request_success.fire(
                    request_type="http",
                    name="course_import",
                    response_time=(time.time() - start_time) * 1000,
                    response_length=0)
            else:
                events.request_failure.fire(
                    request_type="http",
                    name="course_import",
                    response_time=(time.time() - start_time) * 1000)

    @task
    def import_random_course(self):
        "Import a course, overwriting a random course."
        num = random.randrange(settings.data['NUM_PARALLEL_COURSES'])
        self.import_course(num)


class WebsiteUser(HttpLocust):
    "Locust user class."
    task_set = CourseImport
    min_wait = 10
    max_wait = 50
from scapy.all import *
from PyQt5 import QtCore
import threading


def GetProtocol(pkt: Packet):
    """Best-effort protocol name for a sniffed packet.

    Parses scapy's one-line summary ("Ether / IP / TCP ...") instead of
    walking the layer objects, so the result mirrors what scapy prints.
    """
    PktSummary = pkt.summary()
    PktSumList = PktSummary.split("/")
    # Protocols identified directly from the second summary segment.
    ProtocolList1 = ['ARP', 'RARP', 'DHCP']
    for prtcl in ProtocolList1:
        if prtcl in PktSumList[1]:
            return prtcl
    if 'IPv6' in PktSumList[1]:
        # eg. Ether / IPv6 / UDP fe80::c14c:d0f3:10a:92de:64073 > fdf8:f53e:61e4::18:3:llmnr / LLMNRQuery
        return 'IPv6/' + PktSumList[2].strip().split(' ')[0]
    elif 'IP' in PktSumList[1]:
        # eg. Ether / IP / TCP 172.16.58.3:https > 192.168.1.109:62028 PA / Raw
        # Skip trailing Raw/Padding segments to report the real top protocol.
        if 'Raw' in PktSumList[-1] or 'Padding' in PktSumList[-1]:
            UpperPrtcl = PktSumList[-2]
        else:
            UpperPrtcl = PktSumList[-1]
        return UpperPrtcl.strip().split(' ')[0]
    else:
        Prtcl = PktSumList[2].split(' ')[0].strip()
        if Prtcl != '':
            Prtcl = Prtcl + '/'
        Prtcl = Prtcl + PktSumList[2].split(' ')[1]
        return Prtcl


def SrcAndDst(pkt):
    """Return (src, dst): IP addresses when an IP layer exists, otherwise
    the first layer's (link-layer) addresses."""
    try:
        src = pkt[IP].src
        dst = pkt[IP].dst
    except:
        src = pkt[0].src
        dst = pkt[0].dst
    return src, dst


def Reassemble_packet(plist):
    """Reassemble fragmented IP packets.

    Groups packets by IP id, then stitches the Raw payloads onto the first
    fragment (frag == 0) in fragment order.  Returns {ip_id: packet}, or
    None when a group is missing its first fragment.
    """
    id_dict = {}
    for pkt in plist:
        if str(pkt[IP].id) not in id_dict.keys():
            id_dict[str(pkt[IP].id)] = PacketList()
            id_dict[str(pkt[IP].id)].append(pkt)
        else:
            id_dict[str(pkt[IP].id)].append(pkt)
    result_dict = {}
    for id_key in id_dict.keys():
        tmp_dict = {}
        for pkt in id_dict[id_key]:
            tmp_dict[str(pkt[IP].frag)] = pkt
        try:
            # The frag == 0 packet carries the headers for the whole datagram.
            result_dict[id_key] = tmp_dict['0']
        except:
            return None
        loads = b''
        # NOTE(review): fragment offsets are sorted as *strings*, so e.g.
        # "10" sorts before "2" -- verify ordering for large payloads.
        for frag in sorted(tmp_dict.keys()):
            loads = loads + tmp_dict[frag].getlayer(Raw).load
        # Fix up total length, payload, and clear MF flag / fragment offset.
        result_dict[id_key].len += len(loads) - len(result_dict[id_key][Raw].load)
        result_dict[id_key][Raw].load = loads
        result_dict[id_key].flags = 2
        result_dict[id_key].frag = 0
    return result_dict


class SnifferThread(QtCore.QThread):
    """Qt thread wrapping scapy's sniff(); emits every captured packet."""

    # (running packet count, packet) emitted once per captured packet.
    Signal_UpdateShow = QtCore.pyqtSignal(int, Packet)

    def __init__(self, Signal_SniffStop=None, Interf=None, PktCount=0,
                 Filter=None, *args, **kwargs):
        super(SnifferThread, self).__init__()
        self.SniffPkt = None      # capture result populated by run()
        self.Interf = Interf      # interface name to sniff on
        self.Filter = Filter      # BPF filter expression
        self.PktCount = PktCount  # running packet counter
        self.SniffPkt = None
        self.Event_Stop = threading.Event()  # To Block Other Threadings
        self.Signal_SniffStop = Signal_SniffStop
        self.Signal_SniffStop.connect(self.join)  # Signal Connect to Function

    def run(self):
        # Blocking capture loop; stop_filter polls the stop event per packet.
        print("### Sniffer ###")
        self.SniffPkt = sniff(iface=self.Interf, filter=self.Filter,
                              prn=self.Callback,
                              stop_filter=lambda p: self.Event_Stop.is_set())
        print("### Sniffer Stop###")

    # Redefine Functions
    def join(self, Flag_Stop):
        # Slot: a truthy Flag_Stop sets the event so sniff() exits after the
        # next captured packet.
        if (Flag_Stop):
            self.Event_Stop.set()
            print("### Sniffer Stop -- ThreadID: %d ###" % self.currentThreadId())

    def Callback(self, pkt: Packet):
        # Invoked by sniff() per packet; forward it to the GUI via the signal.
        self.PktCount += 1
        self.Signal_UpdateShow.emit(self.PktCount, pkt)
# ---------------------------------------------------------------------
# Object segmentation
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------

# Python modules
import operator
import threading

# Third-party modules
import cachetools
import jinja2

# NOC modules
from noc.inv.models.networksegment import NetworkSegment
from noc.services.discovery.jobs.base import DiscoveryCheck

# Guards the shared class-level template cache across discovery threads.
tpl_lock = threading.Lock()


class SegmentationCheck(DiscoveryCheck):
    """
    Autosegmentation discovery check.

    Moves managed objects seen on this object's interfaces into a target
    network segment according to the object's autosegmentation policy.
    """

    name = "segmentation"
    required_artefacts = ["seen_objects"]
    # Class-level cache of compiled segment-name templates (100 entries, 5 min TTL).
    tpl_cache = cachetools.TTLCache(100, 300)

    def is_enabled(self):
        # Run only when autosegmentation is enabled on the managed object.
        return self.object.enable_autosegmentation

    def handler(self):
        # Per-run cache for ensure_segment lookups.
        self.seg_cache = {}
        seen_objects = self.get_artefact("seen_objects")
        # Keep only interfaces whose profile allows autosegmentation.
        s_objects = {}
        for iface in seen_objects:
            if iface.get_profile().allow_autosegmentation:
                s_objects[iface] = seen_objects[iface]
        return self.segmentation(s_objects)

    def segmentation(self, if_map):
        """
        Perform segmentation of seen objects.

        :param if_map: mapping of interface -> iterable of managed objects
                       seen behind that interface
        :return: None; segment changes are persisted via mo.save()
        """
        # Policy: "o" -> adopt this object's own segment,
        #         "c" -> create/find a child segment from a name template.
        sp = self.object.get_autosegmentation_policy()
        max_level = self.object.object_profile.autosegmentation_level_limit
        for iface in if_map:
            # Move all related objects to target segment
            for mo in if_map[iface]:
                # Detect target segment
                if sp == "o":
                    new_segment = self.object.segment
                elif sp == "c":
                    new_segment = self.get_segment(
                        object=self.object, interface=iface, remote_object=mo
                    )
                else:
                    # Unknown/disabled policy: leave the object untouched.
                    continue
                if not new_segment:
                    self.logger.debug("[%s|%s] No target segment. Skipping",
                                      mo.name, mo.address)
                    continue
                if new_segment != mo.segment:
                    # Objects above the configured level limit are never moved.
                    if max_level and mo.object_profile.level > max_level:
                        self.logger.info(
                            "[%s|%s] Object level too high (%s > %s). Skipping",
                            mo.name,
                            mo.address,
                            mo.object_profile.level,
                            max_level,
                        )
                        continue
                    if mo.allow_autosegmentation:
                        self.logger.info(
                            "[%s|%s] Changing segment: %s -> %s",
                            mo.name,
                            mo.address,
                            mo.segment.name,
                            new_segment.name,
                        )
                        mo.segment = new_segment
                        mo.save()
                    else:
                        self.logger.info(
                            "[%s|%s] Autosegmentation is disabled, skipping",
                            mo.name, mo.address
                        )

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("tpl_cache"), lock=lambda _: tpl_lock)
    def get_template(self, tpl):
        # Compile (and cache) a Jinja2 template for segment-name rendering.
        # The cache lives on the class (tpl_cache) and is guarded by tpl_lock.
        return jinja2.Template(tpl)

    def get_segment(self, **kwargs):
        # Render the segment name from the profile's template, then
        # find-or-create that segment under the current object's segment.
        tpl = self.get_template(self.object.object_profile.autosegmentation_segment_name)
        name = tpl.render(**kwargs)
        return self.ensure_segment(name)

    @cachetools.cachedmethod(operator.attrgetter("seg_cache"))
    def ensure_segment(self, name):
        """Return the child segment with the given name, creating it (with an
        auto-derived profile) when it does not exist yet."""
        ns = NetworkSegment.objects.filter(parent=self.object.segment.id, name=name).first()
        if not ns:
            root = self.object.segment
            if root.profile:
                # Prefer the profile designated for autocreated children.
                profile = root.profile.autocreated_profile or root.profile
            else:
                profile = None
            ns = NetworkSegment(
                parent=self.object.segment,
                name=name,
                profile=profile,
                description="Autocreated by segmentation",
            )
            ns.save()
        return ns
# From the original file example_evaluator.py by <NAME> (https://github.com/AICrowd/aicrowd-example-evaluator)
# Adapted for MEDIQA 2019 by <NAME> --Accuracy for Tasks 1 and 2 (NLI and RQE) & MRR, Accuracy,
# Precision, and Spearman's rank correlation coefficient.
# Last update on April 16, 2019.
import pdb
import pandas as pd
import numpy as np
import scipy
import scipy.stats
from collections import defaultdict


class MediqaEvaluator:
    """AICrowd-style evaluator for the three MEDIQA 2019 tasks:
    1 = NLI, 2 = RQE (both accuracy), 3 = QA ranking (accuracy + Spearman,
    with MRR/P@k in the meta block)."""

    def __init__(self, answer_file_path, task=1, round=1):
        """
        `round` : Holds the round for which the evaluation is being done.
        can be 1, 2...upto the number of rounds the challenge has.
        Different rounds will mostly have different ground truth files.
        """
        self.answer_file_path = answer_file_path  # path to the ground-truth CSV
        self.round = round
        self.task = task  # selects which _evaluate_task_N is dispatched

    def _evaluate(self, client_payload, _context={}):
        # Dispatch on the task chosen at construction time.
        if self.task == 1:
            return self._evaluate_task_1(client_payload, _context)
        elif self.task == 2:
            return self._evaluate_task_2(client_payload, _context)
        elif self.task == 3:
            return self._evaluate_task_3(client_payload, _context)

    def _evaluate_task_1(self, client_payload, _context={}):
        """
        `client_payload` will be a dict with (atleast) the following keys :
          - submission_file_path : local file path of the submitted file
          - aicrowd_submission_id : A unique id representing the submission
          - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)

        Accuracy = fraction of (pair_id, label) rows that exactly match the
        ground truth.
        """
        submission_file_path = client_payload["submission_file_path"]
        # Result file format: pair_id,label (csv file)
        col_names = ['pair_id', 'label']
        submission = pd.read_csv(submission_file_path, header=None, names=col_names)
        gold_truth = pd.read_csv(self.answer_file_path, header=None, names=col_names)
        # Drop duplicates except for the first occurrence.
        submission = submission.drop_duplicates(['pair_id'])
        submission.label = submission.label.astype(str)
        gold_truth.label = gold_truth.label.astype(str)
        # Join id and label into a single comparable key per row.
        submission['entry'] = submission.apply(lambda x: '_'.join(x), axis=1)
        gold_truth['entry'] = gold_truth.apply(lambda x: '_'.join(x), axis=1)
        s1 = submission[submission['entry'].isin(gold_truth['entry'])]
        accuracy = s1.size / gold_truth.size
        _result_object = {
            "score": accuracy,
            "score_secondary": 0.0
        }
        return _result_object

    def _evaluate_task_2(self, client_payload, _context={}):
        """
        `client_payload` will be a dict with (atleast) the following keys :
          - submission_file_path : local file path of the submitted file
          - aicrowd_submission_id : A unique id representing the submission
          - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)

        Same accuracy computation as task 1, but ids/labels are read as
        strings explicitly (RQE pair ids are not numeric-safe).
        """
        submission_file_path = client_payload["submission_file_path"]
        # Result file format: pair_id,label (csv file)
        col_names = ['pair_id', 'label']
        submission = pd.read_csv(submission_file_path, header=None, names=col_names,
                                 dtype={'pair_id': str, "label": str})
        gold_truth = pd.read_csv(self.answer_file_path, header=None, names=col_names,
                                 dtype={'pair_id': str, "label": str})
        # Drop duplicates except for the first occurrence.
        submission = submission.drop_duplicates(['pair_id'])
        submission.label = submission.label.astype(str)
        gold_truth.label = gold_truth.label.astype(str)
        submission['entry'] = submission.apply(lambda x: '_'.join(x), axis=1)
        gold_truth['entry'] = gold_truth.apply(lambda x: '_'.join(x), axis=1)
        s1 = submission[submission['entry'].isin(gold_truth['entry'])]
        accuracy = s1.size / gold_truth.size
        _result_object = {
            "score": accuracy,
            "score_secondary": 0.0
        }
        return _result_object

    def _evaluate_task_3(self, client_payload, _context={}):
        """
        `client_payload` will be a dict with (atleast) the following keys :
          - submission_file_path : local file path of the submitted file
          - aicrowd_submission_id : A unique id representing the submission
          - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)

        QA ranking: primary score is row-accuracy, secondary is the mean
        Spearman rank correlation; MRR and P@1/P@5/P@10 go into "meta".
        """
        submission_file_path = client_payload["submission_file_path"]
        # Result file format: q_id,a_id,label{0/1}
        col_names = ['question_id', 'answer_id', 'label']
        submission = pd.read_csv(submission_file_path, header=None, names=col_names)
        gold_truth = pd.read_csv(self.answer_file_path, header=None, names=col_names)
        # Drop duplicates except for the first occurrence.
        submission = submission.drop_duplicates(['question_id', 'answer_id'])
        submission.label = submission.label.astype(str)
        gold_truth.label = gold_truth.label.astype(str)
        submission['entry'] = submission.apply(lambda x: '_'.join(map(str, x)), axis=1)
        gold_truth['entry'] = gold_truth.apply(lambda x: '_'.join(map(str, x)), axis=1)
        s1 = submission[submission['entry'].isin(gold_truth['entry'])]
        accuracy = s1.size / gold_truth.size

        # Collect, per question, the set of gold-relevant answer ids.
        question_ids = []
        correct_answers = {}
        for index, row in gold_truth.iterrows():
            qid = row['question_id']
            if qid not in question_ids:
                question_ids.append(qid)
            if row['label'] == '1':
                if qid not in correct_answers:
                    correct_answers[qid] = []
                correct_answers[qid].append(row['answer_id'])

        P1 = 0.
        P5 = 0.
        P10 = 0.
        spearman = 0.
        pv = 0.
        ref_sizeAt5 = 0.
        ref_sizeAt10 = 0.
        mrr = 0.
        sp_nan_ignoredQs = 0
        for qid in question_ids:
            submitted_correct_answers = []
            if qid not in correct_answers:
                # Question with no relevant answer: excluded from Spearman.
                sp_nan_ignoredQs += 1
                continue
            # `index` is the 1-based rank among the answers the submission
            # marked relevant (label == '1'), in file order.
            index = 1
            first = True
            for _, row in submission[submission['question_id'] == qid].iterrows():
                aid = row['answer_id']
                if row['label'] == '1':
                    if first:
                        # Reciprocal rank of the first predicted-relevant answer.
                        mrr += 1. / index
                        first = False
                    if aid in correct_answers[qid]:
                        submitted_correct_answers.append(aid)
                        if index == 1:
                            P1 += 1
                        if index <= 5:
                            P5 += 1
                        if index <= 10:
                            P10 += 1
                    index += 1
            # Spearman correlation between the submitted order of correct
            # answers and their gold order.
            matched_gold_subset = []
            for x in correct_answers[qid]:
                if x in submitted_correct_answers:
                    matched_gold_subset.append(x)
            rho, p_value = scipy.stats.spearmanr(submitted_correct_answers,
                                                 matched_gold_subset)
            if np.isnan(rho):
                # Degenerate case (e.g. fewer than two matches): skip question.
                rho = 0.0
                sp_nan_ignoredQs += 1
            spearman += rho
            pv += p_value
            # Denominators for P@5/P@10: at most k relevant answers exist.
            ref_sizeAt5 += min(5, len(correct_answers[qid]))
            ref_sizeAt10 += min(10, len(correct_answers[qid]))

        question_nb = len(question_ids)
        q_nb_spearman = question_nb - sp_nan_ignoredQs
        spearman = spearman / q_nb_spearman if q_nb_spearman != 0 else 0.0
        P1 = P1 / question_nb
        if ref_sizeAt5 != 0:
            P5 = P5 / ref_sizeAt5
        else:
            P5 = 0.
        if ref_sizeAt10 != 0:
            P10 = P10 / ref_sizeAt10
        else:
            P10 = 0.
        # print(mrr, question_nb)
        if question_nb != 0:
            mrr = mrr / question_nb
        else:
            mrr = 0.
        if np.isnan(spearman):
            spearman = 0.0
        _result_object = {
            "score": accuracy,
            "score_secondary": spearman,
            "meta": {
                "MRR": mrr,
                "P@1": P1,
                "P@5": P5,
                "P@10": P10
            }
        }
        return _result_object


def load_qa_pred(pred_path='/pylon5/db5fp9p/yichongx/data/mediqa/task3_qa/gt_dev.csv'):
    """Load a task-3 prediction/gold CSV into {question_id: [(answer_id, label), ...]}.

    NOTE(review): `headline` starts False, so the header-skipping branch is
    dead code -- a header row, if present, would be parsed as data. Confirm
    the files are headerless.
    """
    pred_dict = defaultdict(list)
    headline = False
    with open(pred_path) as f:
        for line in f:
            if headline:
                headline = False
                continue
            qid, aid, label = line.split(',')
            # if '102' in aid:
            #     print(line, qid, aid, label)
            # uid = '{}____{}'.format(qid,aid)
            label = int(label)
            pred_dict[qid].append((aid, label))
            # pred_dict[uid]=label
    return pred_dict


def eval_mediqa_official(pred_path, ground_truth_path='../data/mediqa/task3_qa/gt_dev.csv',
                         task=3, eval_qa_more=False):
    """Run the official evaluator on `pred_path`; for task 3 with
    `eval_qa_more`, additionally compute MAP from the raw rankings."""
    _client_payload = {}
    _client_payload["submission_file_path"] = pred_path
    _client_payload["aicrowd_submission_id"] = 1123
    _client_payload["aicrowd_participant_id"] = 1234
    # Instantiate a dummy context
    _context = {}
    # Instantiate an evaluator
    aicrowd_evaluator = MediqaEvaluator(ground_truth_path, task=task)
    # Evaluate
    result = aicrowd_evaluator._evaluate(_client_payload, _context)
    if task == 3 and eval_qa_more:
        pred_dict = load_qa_pred(pred_path)
        gt_dict = load_qa_pred(ground_truth_path)
        cnt = 0
        feed_list = []
        for qid in gt_dict:
            # Score each answer by its (inverted) predicted rank so earlier
            # answers get higher scores.
            pred_scores = {}
            for rank, (aid, pred_label) in enumerate(pred_dict[qid]):
                pred_scores[aid] = 100 - rank
            this_list = []
            # if 1 not in [item[1] for item in gt_dict[qid]]:
            #     pdb.set_trace()
            for aid, gt_label in gt_dict[qid]:
                this_list.append((gt_label, pred_scores[aid]))
            feed_list.append(this_list)
        maps, mrrs, pa1s = get_score(feed_list)
        # result['MRR']=mrrs
        result['MAP'] = maps
        # result['P@1']=pa1s
    return result


def get_score(total_list):
    """Compute (MAP, MRR, P@1), each scaled to 0-100, from a list of
    per-question lists of (gold_label, predicted_score) tuples.

    Questions with no gold-relevant answer are skipped entirely.
    """
    correct, wrong = 0, 0  # for p@1
    pred = []              # for MAP
    inv_rank = []          # for MRR
    for this_list in total_list:
        # this_list is a list of tuples (y, yp)
        ys = [l[0] for l in this_list]    # true answers
        yps = [l[1] for l in this_list]   # prob of true answer
        if not 1 in ys:
            # print(this_list)
            continue  # remove cases of no answer
        # following previous works
        my_preds = [yp for (y, yp) in zip(ys, yps) if y == 1]
        yps.sort(reverse=True)
        # Rank of the best-scored relevant answer in the sorted score list.
        rank = len(yps)
        for i in my_preds:
            if rank > yps.index(i):
                rank = yps.index(i)
        rank += 1  # model set groundtruth which rank
        inv_rank.append(1.0 / float(rank))  # for MRR
        if rank == 1:
            correct += 1
        else:
            wrong += 1  # for P@1
        # Average precision over the relevant answers' positions.
        precs = []
        for i, ypi in enumerate(yps):
            if ypi in my_preds:
                prec = (1.0 + len(precs)) / (i + 1.0)
                precs.append(prec)
        if len(precs) == 0:
            pred.append(0.0)
        else:
            pred.append(np.mean(precs))
    MAP = np.mean(pred) * 100
    # print(np.sum(inv_rank), len(inv_rank))
    MRR = np.mean(inv_rank) * 100
    P1 = float(correct) / float(correct + wrong) * 100
    return (MAP, MRR, P1)


if __name__ == "__main__":
    # Ad-hoc driver: evaluate a task-2 (RQE) dev prediction file.
    task = 2
    print("Testing Task (Round-1) : {}".format(task))
    answer_file_path = '../../data/mediqa/task2_rqe/gt_dev.csv'
    _client_payload = {}
    # _client_payload["submission_file_path"] = "/pylon5/db5fp9p/yichongx/model_data/mt_dnn_mediqa/scibert_predict/mediqa_dev_scores_0.csv"
    _client_payload["submission_file_path"] = "../../tmp/my_pred_mediqa/task2/rqe_dev_scores_1.csv"
    # Instantiate a dummy context
    _context = {}
    result = eval_mediqa_official(_client_payload["submission_file_path"],
                                  answer_file_path, task, True)
    print(result)
    # (Removed a large block of commented-out round-1/round-2 test
    # scaffolding that duplicated the driver above with other file paths.)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
import copy

import utils
import measures as ms


def normal(nHyperplanes, nDimensions):
    """
    Returns a set of hyperplanes with random orientations.
    nHyperplanes is the number of hyperplanes to return, and nDimensions the
    number of dimensions of the space.

    The hyperplanes are simply generated by setting their coordinates
    (nDimensions + 1 coefficients each, including the offset term) to random
    values following a normal distribution.
    """
    return np.random.normal(0, 10, (nHyperplanes, nDimensions + 1))


def doublePoint(nHyperplanes, nDimensions, distrib):
    """
    Returns a set of hyperplanes with random orientations.
    nHyperplanes is the number of hyperplanes to return, and nDimensions the
    number of dimensions of the space.

    Here for each hyperplane, nDimensions random points are generated
    following the distribution distrib, and the unique hyperplane passing by
    all these points is kept.
    """
    hyperplanes = []
    for k in range(nHyperplanes):
        # NOTE(review): `dataset` is not defined anywhere in this module, so
        # this line raises NameError as written -- check utils.f's expected
        # signature and either define `dataset` or drop the argument.
        points = np.array([utils.f(nDimensions, distrib, dataset) for n in range(nDimensions)])
        hyperplanes.append(utils.hyperplaneFromPoints(points))
    return np.array(hyperplanes)


def poolSelect(nHyperplanes, nDimensions, pCentroids, pMeasure, poolSize,
               distrib, m, initType='doublePoint'):
    '''
    Initialize hyperplanes by generating a pool of poolSize random
    configurations and selecting the one with lowest measure of distortion.
    '''
    for k in range(poolSize):
        if initType == 'normal':
            hps = normal(nHyperplanes, nDimensions)
        elif initType == 'doublePoint':
            hps = doublePoint(nHyperplanes, nDimensions, distrib)
        else:
            print("ERROR! invalid initialization type")
        e = ms.measure(m, hps, pCentroids, pMeasure, distrib)
        # Keep the configuration with the lowest distortion seen so far
        # (ties go to the most recent candidate).
        if k < 1:
            minDistortion = e
            minConfig = hps
        else:
            if minDistortion >= e:
                minDistortion = e
                minConfig = hps
    return minConfig


def genetic(nHyperplanes, nDimensions, pCentroids, pMeasureInit, distrib, m,
            nConfigs, pGenetic, crossover, mutation, order='dissimilarity',
            selection='rank', initType='doublePoint', mutationPercentage=50):
    '''
    Generates a partially optimized configuration of hyperplanes, with the
    goal of having an approximatively equal repartition of the input
    distribution over the regions.
    Here one hyperplane = one gene.
    At every iteration, one half of the old configs is kept and used to
    generate the next generation by crossover.
    -nConfigs is the number of configurations to generate and cross
    -pGenetic is the number of iterations
    -order is the type of ordering used to order hyperplanes before crossover
    -crossover is the number of crossing points in the crossover operations
    -mutation is the mutation method and intensity
    -selection is the selection method used to chose the configs that are reproduced

    Returns (best configuration, list of best-measure values per iteration).
    '''
    print('start initialisation (genetic)')
    configs = []
    measures = []
    geneticMeasureEvolution = []
    # Step 1: generate random configurations
    for k in range(nConfigs):
        if initType == 'normal':
            config = normal(nHyperplanes, nDimensions)
        elif initType == 'doublePoint':
            config = doublePoint(nHyperplanes, nDimensions, distrib)
        else:
            print("ERROR! invalid initialization type")
        configs.append(config)
    print('finished generating random configurations')
    for k in range(pGenetic):
        # The measure precision grows with the iteration count, so later
        # generations are evaluated more accurately.
        pMeasure = (k + 1) * pMeasureInit
        print('genetic: iteration ' + str(k + 1) + ' of ' + str(pGenetic))
        measures = [ms.measure(m, config, pCentroids, pMeasure, distrib) for config in configs]
        geneticMeasureEvolution.append(np.min(measures))
        # Step 2: selecting configs to reproduce
        configs, measures = select(selection, configs, measures, percentageToKeep=80)
        # Step 3: crossing configurations (deepcopy: cross() mutates its input)
        newConfigs = cross(nDimensions, distrib, crossover, copy.deepcopy(configs),
                           order, outputSize=nConfigs)
        configs = np.concatenate((configs, newConfigs), axis=0)
        # Step 4: mutation
        if mutationPercentage == 100:
            configs = mutateAll(mutation, configs)
        else:
            measures = [ms.measure(m, config, pCentroids, pMeasure, distrib) for config in configs]
            configs = mutate(mutation, configs, measures, mutationPercentage)
    # Step 5: return the best config
    measures = [ms.measure(m, config, pCentroids, pMeasure, distrib) for config in configs]
    print('final: ', np.min(measures))
    print('end initialisation')
    return configs[np.argmin(measures)], geneticMeasureEvolution

#print(genetic(3, 2, 1000, 10000, 'gaussian', 'mse',10, 5, 1, 1)) #test


## Genetic algorithm subfunctions

def select(selection, configs, measures, percentageToKeep=50):
    '''
    Returns the selected configurations that are kept for the next
    generation.  percentageToKeep is the percentage of the total number of
    configurations that will be kept.
    '''
    n = int(len(configs) * percentageToKeep / 100)
    if selection == 'rank':
        # sort by distortion measure and keep the lowest
        # NOTE(review): sorting zip(measures, configs) falls back to
        # comparing the numpy configs when two measures are equal -- may
        # raise; confirm measures are always distinct.
        configs = [x for _, x in sorted(zip(measures, configs))]
        measures = sorted(measures)
        return configs[:n + 1], measures[:n + 1]
    elif selection == 'random':
        # Keep the first n+1 in current order.
        return configs[:n + 1], measures[:n + 1]
    else:
        print('ERROR: unknown selection method')


def cross(nDimensions, distrib, crossover, configs, order, outputSize='default'):
    '''
    Crosses the configs 'configs', repeating the operation 'outputSize'
    times, with 'crossover' crossing points each time.
    Hyperplanes can be ordered before the crossing.
    '''
    newGen = []  # next generation
    if outputSize == 'default':
        outputSize = len(configs)
    if order == 'distanceToDistribCenter':
        # Pre-sort each config's hyperplanes by distance to the
        # distribution's center so crossover aligns comparable genes.
        distribCenter = utils.distribCenter(nDimensions, distrib)
        for k in range(len(configs)):
            config = configs[k]
            ranks = [utils.distancePoint2Hp(distribCenter, hp) for hp in config]
            # order hyperplanes according to ranks
            ordConfig = [hp for _, hp in sorted(zip(ranks, config))]
            configs[k] = ordConfig
    for k in range(outputSize):
        # select 2 configs to cross
        i, j = np.random.randint(len(configs)), np.random.randint(len(configs))
        if order == 'distanceToDistribCenter' or order == 'noOrder':
            # Classic k-point crossover: alternate parents at the crosspoints.
            crosspoints = np.random.randint(len(configs[0]), size=crossover)  # chose crossing points
            newConfig = []
            useI = True  # whether to include i or j genes
            for l in range(len(configs[0])):
                if useI:
                    newConfig.append(configs[i][l])
                else:
                    newConfig.append(configs[j][l])
                if l in crosspoints:
                    useI = not useI
        elif order == 'dissimilarity':
            # Pair up the most similar hyperplanes across the two parents and
            # swap that half into parent i's config.
            dissimilarities, hpPairs = [], []  # list to store dissimilarity values and associated hyperplane pairs
            # NOTE(review): the loop variable k shadows the outer offspring
            # index k -- harmless here but fragile; rename if touched.
            for k in range(1, len(configs[i])):
                for l in range(k):
                    dissimilarities.append(utils.dissimilarityHps(configs[j][l], configs[i][k], distrib))
                    hpPairs.append([k, l])
            hpPairs = [hpPair for _, hpPair in sorted(zip(dissimilarities, hpPairs))]
            newConfig = configs[i]
            for pair in hpPairs[:int(len(hpPairs) / 2)]:
                # swap the most similar half of hyperplane pairs
                newConfig[pair[0]] = configs[j][pair[1]]
        elif order == 'completeRandom':
            # Uniform crossover: each gene picked from either parent at 50%.
            newConfig = []
            for l in range(len(configs[0])):
                nextHp = configs[i][l] if np.random.uniform() > 0.5 else configs[j][l]
                newConfig.append(nextHp)
        newGen.append(newConfig)
    return newGen


def mutateAll(mutation, configs):
    '''Applies a random mutation to all configs'''
    # Multiplicative Gaussian noise (mean 1, sigma 0.2) on every coefficient.
    configs *= np.random.normal(1., 0.2, np.shape(configs))
    return configs


def mutate(mutation, configs, measures, percentageToMutate=50):
    '''
    Applies a random mutation to a certain percentage of configs.
    For now, only multiplies every matrix coefficient with a random normal
    value.
    '''
    #newConfigs = []
    # mutate only the worst configs: sort ascending by measure, then leave
    # the best nC configs untouched (multiplied by ones) and perturb the rest.
    configs = [x for _, x in sorted(zip(measures, configs))]  # Sort configs according to measure
    nHp, nDim1, nC = len(configs[0]), len(configs[0][0]), int(len(configs) / 100 * percentageToMutate)
    configs *= np.concatenate((np.ones((nC, nHp, nDim1)),
                               np.random.normal(1., 0.2, (len(configs) - nC, nHp, nDim1))), axis=0)
    return configs
<reponame>derekhoward/EmbEval
import numpy as np
import pandas as pd
import scipy
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from goatools.base import download_ncbi_associations
from goatools.anno.genetogo_reader import Gene2GoReader
import utils
import os

# Module-level side effect: downloads the NCBI gene2go associations file on
# import and builds the GO lookup tables used by the functions below.
gene2go = download_ncbi_associations()
objanno = Gene2GoReader(gene2go, taxids=[9606], go2geneids=True)
go2geneIDs = objanno.get_goid2dbids(objanno.associations) # this is a dict. Keys are GO IDs, values are gene_IDs of the genes that are associated to that GO term
geneID2GO = objanno.get_dbid2goids(objanno.associations)
goID2goTerm = {item.GO_ID :item.GO_term for item in objanno.associations}
genes_in_GO = list(geneID2GO.keys()) # these are entrez_ids


def distance_df(emb_df, metric='euclidean'):
    """Creates a distance matrix for a given embedding DataFrame.

    Args:
        emb_df (DataFrame): A DataFrame of shape (n_probes, n_features)
        metric (str, optional): Distance metric, defaults to 'euclidean'.
            Can also compute cosine similarity.

    Returns:
        dist (DataFrame): A square DataFrame of shape (n_probes, n_probes)
    """
    if metric == 'euclidean':
        #dist = euclidean_distances(emb_df)
        #dist = euclidean_distances(emb_df, emb_df)
        # for ISH embeddings, needs to be done this way to avoid calculating distance between the IDs
        # NOTE(review): this drops column 0 only on the euclidean path, while
        # the cosine path uses all columns — TODO confirm intended asymmetry.
        dist = euclidean_distances(emb_df.iloc[:, 1:], emb_df.iloc[:, 1:])
    elif metric == 'cosine':
        dist = cosine_similarity(emb_df)
    dist = pd.DataFrame(dist)
    dist.index = emb_df.index
    dist.columns = emb_df.index
    return dist


def get_proportion_first_match(emb_df, metric='euclidean'):
    """Operates on probe embedding and checks to see if nearest probe judged by
    distance metric is of another probe for the same gene.

    Args:
        emb_df (pd.DataFrame): A DataFrame of shape (n_samples, n_features)
        metric (str, optional): Distance metric, defaults to 'euclidean'.
            Can also compute cosine similarity.

    Returns:
        float: proportion of probes that match to another probe of the same gene
    """
    dist = distance_df(emb_df, metric=metric)
    if metric == 'euclidean':
        # Exclude self-matches: a probe's distance to itself becomes +inf.
        np.fill_diagonal(dist.values, float('inf'))
        closest_indexes = dist.idxmin(axis=1, skipna=True).reset_index()
    elif metric == 'cosine':
        # For similarity, self-similarity is zeroed and the max is taken.
        np.fill_diagonal(dist.values, float('0'))
        closest_indexes = dist.idxmax(axis=1, skipna=True).reset_index()
    closest_indexes.columns = ['probe_id', 'neighbor']
    # NOTE(review): `probe_map` is a module-level name that is never defined in
    # this file's visible code — this will raise NameError unless it is
    # injected elsewhere. TODO confirm / pass it in as a parameter.
    closest_indexes['gene_id'] = closest_indexes.probe_id.map(probe_map)
    closest_indexes['nearest_gene'] = closest_indexes.neighbor.map(probe_map)
    proportion_closest_match = closest_indexes[closest_indexes.gene_id == closest_indexes.nearest_gene].shape[0] / closest_indexes.shape[0]
    return proportion_closest_match


def get_closest_probes(emb_df, probe_id, metric='euclidean'):
    # Returns all probes sorted by distance to `probe_id`, annotated with the
    # mapped gene symbol.
    # NOTE(review): depends on the undefined module-level `probe_map` (see
    # get_proportion_first_match).
    dist = distance_df(emb_df, metric=metric)
    dist.index.name = 'probe_id'
    result = dist[dist.index == probe_id].iloc[0, :].sort_values()
    result = result.reset_index()
    result['gene'] = result.probe_id.map(probe_map)
    return result


def calc_probe_match_auc(emb_df, mask, probe_map='default', metric='euclidean'):
    """Calculates AUC where matches are for different probes of same gene symbol.

    Args:
        emb_df (pd.DataFrame): A DataFrame of shape (n_samples, n_features)
        mask: currently unused (its use is commented out below).
        probe_map (str, optional): Mapping of probes to gene symbols.
            Default is from Allen fetal brain.
        metric (str, optional): Defaults to 'euclidean'.

    Returns:
        auc (float)
    """
    if probe_map == 'default':
        probe_ids = pd.read_csv('./data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv', usecols=['probeset_name', 'gene_symbol'])
        probe_ids = probe_ids.set_index('probeset_name').to_dict()['gene_symbol']
    elif probe_map == 'reannotator':
        # the following is to map probes in the same manner as was done while training NN/embeddings
        probe_ids = pd.read_table('./data/raw/gene_symbol_annotations/AllenInstitute_custom_Agilent_Array.txt', sep='\t')
        probe_ids = probe_ids.rename(columns={'#PROBE_ID': 'probe_id', 'Gene_symbol': 'gene_symbol'}).loc[:, ['probe_id', 'gene_symbol']]
        probe_ids.gene_symbol = probe_ids.gene_symbol.str.split(';').str[0]
        probe_ids = probe_ids.set_index('probe_id').to_dict()['gene_symbol']
    else:
        raise ValueError("Error: specify probe_map as either 'default' or 'reannotator'.")
    # NOTE(review): distance_df is called without `metric`, so distances are
    # always euclidean even when metric='cosine' — TODO confirm intended.
    dist = distance_df(emb_df)
    dist.index.name = 'probe_id'
    np.fill_diagonal(dist.values, float('inf'))
    #dist.drop('#na#', inplace=True)
    #dist.drop('#na#', axis=1, inplace=True)
    dist = dist.sort_index(axis=0).sort_index(axis=1)
    #mask = mask.sort_index(axis=0).sort_index(axis=1)
    values = dist.values
    # Lower triangle only: each unordered probe pair counted once.
    i, j = np.tril_indices_from(values, k=-1)
    pairwise_dists = pd.DataFrame.from_dict({'probe_id1':dist.index[i], 'probe_id2': dist.columns[j], 'distance': values[i, j]})
    pairwise_dists['gene1'] = pairwise_dists['probe_id1'].map(probe_ids)
    pairwise_dists['gene2'] = pairwise_dists['probe_id2'].map(probe_ids)
    pairwise_dists['same_gene'] = pairwise_dists['gene1'] == pairwise_dists['gene2']
    y_score = pairwise_dists.distance
    y_true = pairwise_dists.same_gene
    # NOTE(review): `metrics` is not imported at module level (only
    # `roc_auc_score` is) — these two calls will raise NameError. Should be
    # `roc_auc_score(...)` or `from sklearn import metrics` at the top.
    if metric == 'euclidean':
        # Smaller distance should mean same gene, hence 1 - AUC of distance.
        auc = 1 - metrics.roc_auc_score(y_true, y_score)
    else:
        auc = metrics.roc_auc_score(y_true, y_score)
    return auc


def get_GO_presence_labels(genes_of_interest, min_GO_size=200, max_GO_size=300):
    """Creates a dataframe of GO-group presence for a list of genes.

    Args:
        genes_of_interest : must be iterable of entrez_gene_ids
        min_GO_size (int, optional): Min num of genes in GO group to be included. Defaults to 200.
        max_GO_size (int, optional): Max num of genes in GO group to be included. Defaults to 300.

    Returns:
        pd.DataFrame : df where index is entrezgene, columns are GO group with TRUE/FALSE presence values.
    """
    genes = pd.Series(genes_of_interest)
    go_group_presence = {}
    for GO in go2geneIDs:
        gene_ids = go2geneIDs[GO]
        # boolean vector (length is num of genes in embedding)
        in_go_group_vector = genes.isin(gene_ids)
        # Size filter counts only genes present in this embedding, not the
        # full GO group size (strict inequalities on both bounds).
        if (in_go_group_vector.sum() > min_GO_size) & (in_go_group_vector.sum() < max_GO_size):
            go_group_presence[GO] = in_go_group_vector
    result = pd.DataFrame(go_group_presence)
    result.index = genes
    result.index.name = 'entrezgene'
    return result


def filter_embedding_for_genes_in_GO(embedding, index_type='gene_symbol'):
    """Filters an embedding to only keep rows where genes have an annotation in GO.

    Args:
        embedding (pd.DataFrame): A DataFrame of shape (n_genes, n_dims)
        index_type (str, optional): Defaults to 'gene_symbol'.
            NOTE(review): currently unused — both paths behave identically.

    Returns:
        embedding (pd.DataFrame): A DataFrame of shape (n_genes, n_dims)
    """
    #gene_entrez_map = pd.read_csv( './data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv', usecols=['entrez_id', 'gene_symbol'])
    #gene_entrez_map = gene_entrez_map.dropna(subset=['entrez_id']).drop_duplicates(subset=['entrez_id'])
    gene_entrez_map = embedding.dropna(subset=['entrez_id']).drop_duplicates(subset=['entrez_id'])
    gene_entrez_map = gene_entrez_map[gene_entrez_map.entrez_id.isin( genes_in_GO)]
    """
    if index_type == 'gene_symbol':
        return embedding[embedding.index.isin(gene_entrez_map.gene_symbol)]
    else:
        return embedding[embedding.index.isin(gene_entrez_map.entrez_id)]
    """
    return gene_entrez_map


def merge_embedding_with_GO_labels(emb_df, GO_df):
    """Merges a gene_embedding with GO group presence df.

    Embedding cols are prefixed with emb_, while potential GO presence columns
    are prefixed with GO:

    Args:
        emb_df (pd.DataFrame): emb_df.index is gene_symbol
        GO_df (pd.DataFrame): GO_df.index is entrezgene

    Returns:
        (pd.DataFrame): Multi-index gene embedding with columns for GO presence concatenated.
    """
    # get df with gene_symbols and entrez_ids from fetal data (more updated than adult probes data)
    #all_genes = pd.read_csv('./data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv')
    #all_genes = all_genes[~((all_genes.gene_symbol.str.startswith('A_')) | (
    #all_genes.gene_symbol.str.startswith('CUST_')))].gene_symbol.drop_duplicates()
    #all_genes_w_entrez = utils.genesymbols_2_entrezids(all_genes)
    emb_df = emb_df.add_prefix('emb_')
    #df = emb_df.merge(all_genes_w_entrez, left_index=True,right_on='gene_symbol')
    # Undo the prefix on the two ID columns so the merge keys keep their names.
    emb_df = emb_df.rename(columns={"emb_gene_symbol": "gene_symbol", "emb_entrez_id": "entrez_id"})
    df = emb_df.merge(GO_df, left_on='entrez_id', right_index=True)
    return df.set_index(['entrez_id', 'gene_symbol'])


def perform_GOclass_eval(embedding_df, index_type='gene_symbol', min_GO_size=200, max_GO_size=300, n_splits=5, n_jobs=-1):
    # Evaluates an embedding by training a logistic-regression classifier per
    # GO group (presence/absence) with stratified k-fold CV, reporting mean
    # F1 and AUC per group.
    if index_type == 'gene_symbol':
        embedding_df = filter_embedding_for_genes_in_GO(
            embedding_df, index_type='gene_symbol')
        #entrez_genelist = utils.genesymbols_2_entrezids(embedding_df.index)
        #GO_df = get_GO_presence_labels(genes_of_interest=entrez_genelist.entrez_id, min_GO_size=min_GO_size, max_GO_size=max_GO_size)
        emb_entrez_id = embedding_df['entrez_id']
        GO_df= get_GO_presence_labels(emb_entrez_id, min_GO_size=min_GO_size, max_GO_size=max_GO_size)
        gene_count_per_GO_group ={col: GO_df[col].sum() for col in GO_df.columns}
    elif index_type == 'entrez_id':
        embedding_df = filter_embedding_for_genes_in_GO(
            embedding_df, index_type='entrez_id')
        # NOTE(review): `gene_count_per_GO_group` is not built on this path but
        # is read below — the 'entrez_id' branch will raise NameError.
        GO_df = get_GO_presence_labels(
            genes_of_interest=embedding_df.index, min_GO_size=min_GO_size, max_GO_size=max_GO_size)
    else:
        raise ValueError(
            "Error: specify index type as either 'gene_symbol' or 'entrez_id'.")
    # merge the embedding and GO_df to ensure they have same index
    # returns a multi-index df with gene_symbol and entrez_id
    merged_df = merge_embedding_with_GO_labels(emb_df=embedding_df, GO_df=GO_df)
    X = merged_df.loc[:, merged_df.columns.str.startswith('emb_')]
    y = merged_df.loc[:, merged_df.columns.str.startswith('GO:')]
    GO_SCORES = []
    skf = StratifiedKFold(n_splits=n_splits)
    for GOlabel in y:
        #y_test_total = pd.Series([])
        #preds_total = []
        #probas_total = pd.DataFrame()
        f1_score_values = []
        auc_values = []
        print('--'*50)
        print(GOlabel)
        y_GO = y.loc[:, GOlabel]
        GO_term = goID2goTerm[GOlabel]
        GO_group_size = len(go2geneIDs[GOlabel])
        for i, (train_idx, test_idx) in enumerate(skf.split(X, y_GO)):
            model = LogisticRegression(penalty='none', n_jobs=n_jobs)
            X_train = X.iloc[train_idx, :]
            y_train = y_GO.iloc[train_idx]
            X_test = X.iloc[test_idx, :]
            y_test = y_GO.iloc[test_idx]
            model.fit(X_train, y_train)
            # Extract predictions from fitted model
            preds = list(model.predict(X_test))
            # probs for classes ordered in same manner as model.classes_
            # model.classes_ >> array([False, True])
            probas = pd.DataFrame(model.predict_proba(
                X_test), columns=model.classes_)
            # Get metrics for each model
            f1 = f1_score(y_test, preds)
            auc = roc_auc_score(y_test, probas[True])
            f1_score_values.append(f1)
            auc_values.append(auc)
            #y_test_total = y_test_total.append(y_test)
            #preds_total += preds
            #probas_total = probas_total.append(probas)
            print("Fold")
        #preds_total = np.array(preds_total)
        #f1 = f1_score(y_test_total, preds_total)
        #auc = roc_auc_score(y_test_total, probas_total[True])
        # Mean across folds (rather than pooled predictions, see commented code).
        f1 = np.mean(f1_score_values)
        auc = np.mean(auc_values)
        measures = {'GO_group': GOlabel, 'GO_group_title': GO_term, 'GO_group_size': GO_group_size, 'number of used genes':gene_count_per_GO_group[GOlabel], 'f1': f1, 'AUC': auc}
        GO_SCORES.append(measures)
    return pd.DataFrame(GO_SCORES)


if __name__ == "__main__":
    # The driver code below is disabled: it is wrapped in a bare string
    # expression (a no-op), presumably kept as a usage example.
    """
    general_path = "/Users/pegah_abed/Documents/old_Human_ISH/after_segmentation/dummy_3"
    ts_list = ["1603427490", "1603427156"]

    for ts in ts_list:
        embed_file_name = ts + "_triplet_no_sz_all_training_embeddings_gene_level_with_info.csv"
        path_to_embed = os.path.join(general_path, ts, embed_file_name)
        embed_df = pd.read_csv(path_to_embed)

        min_GO_size = 40
        max_GO_size = 200

        go_scores = perform_GOclass_eval(embed_df, index_type='gene_symbol', min_GO_size=min_GO_size, max_GO_size=max_GO_size, n_splits=5, n_jobs=-1)
        go_scores = go_scores.sort_values(by=['AUC'], ascending=False)
        go_scores = go_scores.reset_index(drop=True)
        print (len(go_scores))
        print (np.mean(go_scores['AUC']))
        print ("*"*50)

        #go_scores.to_csv(os.path.join(general_path, ts, ts +"_new_go_scores_" + str(min_GO_size) + "_" + str(max_GO_size) + ".csv"))
    """
from __future__ import division
import os
import time
import math
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from dataloader.supervise_data_loader import DataLoader
from model.net import Net
from utils.tools import *
import matplotlib as mpl
import matplotlib.cm as cm
from tensorflow.python.ops import control_flow_ops


class MonoDepth2Learner(object):
    """Supervised depth-training harness (TF1 graph mode) around model.net.Net.

    Builds the data pipeline, disparity network and L1 depth loss, and runs the
    training loop with checkpointing via tf.train.Supervisor.
    """

    def __init__(self, **config):
        # config is a nested dict with at least 'dataset' and 'model' sections.
        self.config = config
        self.preprocess = self.config['dataset']['preprocess']
        # NOTE(review): np.float/np.int are removed in NumPy >= 1.24; use the
        # builtins float()/int() when upgrading.
        self.min_depth = np.float(self.config['dataset']['min_depth'])
        self.max_depth = np.float(self.config['dataset']['max_depth'])
        self.root_dir = self.config['model']['root_dir']
        self.pose_type = self.config['model']['pose_type']
        self.resize_bilinear = False

    def preprocess_image(self, image):
        # Normalize with fixed mean/std constants (0.45 / 0.225) — presumably
        # ImageNet-style normalization; confirm against the data loader.
        image = (image - 0.45) / 0.225
        return image

    def compute_loss_l1(self, output, label):
        # L1 loss over valid pixels only; labels <= 0.001 are treated as
        # missing ground truth and masked out.
        valid_mask = label > 0.001
        diff = tf.abs(output - label)
        diff_valid = tf.boolean_mask(diff, valid_mask)
        loss = tf.reduce_mean(diff_valid)
        return loss

    def build_train(self):
        """Builds the full training graph: data loading, network, loss, optimizer."""
        self.start_learning_rate = np.float(
            self.config['model']['learning_rate'])
        self.total_epoch = np.int(self.config['model']['epoch'])
        self.beta1 = np.float(self.config['model']['beta1'])
        self.continue_ckpt = self.config['model']['continue_ckpt']
        self.torch_res18_ckpt = self.config['model']['torch_res18_ckpt']
        self.summary_freq = self.config['model']['summary_freq']

        loader = DataLoader(trainable=True, **self.config)
        with tf.name_scope('data_loading'):
            src_image_stack, src_depth_stack = loader.load_batch()
            src_image_stack = tf.image.convert_image_dtype(
                src_image_stack, dtype=tf.float32)
            # Depth scaling constant 65.536 — presumably rescales 16-bit depth
            # PNG values back to metric units; TODO confirm against the loader.
            src_depth_stack = tf.image.convert_image_dtype(
                src_depth_stack, dtype=tf.float32) * 65.536
            if self.preprocess:
                src_image_stack_net = self.preprocess_image(
                    src_image_stack)
            else:
                src_image_stack_net = src_image_stack

        with tf.variable_scope('monodepth2_model', reuse=tf.AUTO_REUSE) as scope:
            net_builder = Net(True, **self.config)
            res18_tc, skips_tc = net_builder.build_resnet18(src_image_stack_net)
            if self.resize_bilinear:
                pred_disp = net_builder.build_disp_net_bilinear(res18_tc, skips_tc)
            else:
                # [0] selects the full-resolution disparity from the multi-scale output.
                pred_disp = net_builder.build_disp_net(res18_tc, skips_tc)[0]
            pred_depth_rawscale = disp_to_depth(pred_disp, self.min_depth, self.max_depth)

        with tf.name_scope('compute_loss'):
            curr_proj_error = tf.abs(pred_depth_rawscale - src_depth_stack)
            total_loss = self.compute_loss_l1(pred_depth_rawscale, src_depth_stack)

        with tf.name_scope('train_op'):
            self.total_step = self.total_epoch * loader.steps_per_epoch
            self.global_step = tf.Variable(
                0, name='global_step', trainable=False)
            # Piecewise schedule: drop LR by 10x for the final quarter of training.
            learning_rates = [self.start_learning_rate, self.start_learning_rate / 10]
            boundaries = [np.int(self.total_step * 3 / 4)]
            self.learning_rate = tf.train.piecewise_constant(
                self.global_step, boundaries, learning_rates)
            optimizer = tf.train.AdamOptimizer(self.learning_rate, self.beta1)
            # Ensure batch-norm moving averages update alongside the train op.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.minimize(
                    total_loss, global_step=self.global_step)
            self.incr_global_step = tf.assign(
                self.global_step, self.global_step + 1)

        # Collect tensors that are useful later (e.g. tf summary)
        self.pred_depth = pred_depth_rawscale
        self.pred_disp = pred_disp
        self.steps_per_epoch = loader.steps_per_epoch
        self.total_loss = total_loss
        self.src_image_stack_all = src_image_stack
        self.src_depth_stack_all = src_depth_stack
        self.pred_depth_stack_all = pred_depth_rawscale
        self.proj_error_stack_all = curr_proj_error

    def collect_summaries(self):
        # Only the scalar loss is summarized; image summaries are disabled.
        tf.summary.scalar("total_loss", self.total_loss)
        # tf.summary.image('src_image', self.src_image_stack_all[0])
        # tf.summary.image('depth_color_image', colorize(self.pred_depth_stack_all[0], cmap='plasma'))
        # tf.summary.image('gt_depth_color_image', colorize(self.src_depth_stack_all[0], cmap='plasma'))
        # tf.summary.image('proj_error', self.proj_error_stack_all[0])

    def train(self, ckpt_dir):
        """Runs the training loop, saving checkpoints into ckpt_dir."""
        self.build_train()
        init = tf.global_variables_initializer()
        self.collect_summaries()
        # load weights from pytorch resnet 18 model
        if self.torch_res18_ckpt != '':
            assign_ops = load_resnet18_from_file(self.torch_res18_ckpt)
        with tf.name_scope("parameter_count"):
            parameter_count = tf.reduce_sum(
                [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
        # Save moving-average (batch-norm) variables in addition to trainables.
        var_list = [var for var in tf.global_variables() if "moving" in var.name]
        var_list += tf.trainable_variables()
        self.saver = tf.train.Saver(
            var_list + [self.global_step], max_to_keep=10)
        sv = tf.train.Supervisor(
            logdir=ckpt_dir, save_summaries_secs=0, saver=None)
        # print('/n/n/nCollections=====================',tf.get_collection(tf.GraphKeys.UPDATE_OPS))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with sv.managed_session(config=config) as sess:
            # print('Trainable variables: ')
            # for var in var_list:
            #     print(var.name)
            #
            # print('\n\n==========================================')
            # print('Model variables:')
            # for var in tf.model_variables():
            #     print(var.name)
            #
            # print('\n\n==========================================')
            # print('Global variables:')
            # for var in tf.global_variables():
            #     print(var.name)
            print("parameter_count =", sess.run(parameter_count))
            sess.run(init)
            if self.continue_ckpt != '':
                print("Resume training from previous checkpoint: %s" %
                      self.continue_ckpt)
                # ckpt = tf.train.latest_checkpoint('{}/{}'.format(self.root_dir,self.continue_ckpt))
                self.saver.restore(sess, self.continue_ckpt)
            elif self.torch_res18_ckpt != '':
                sess.run(assign_ops)
            start_time = time.time()
            try:
                for step in range(0, self.total_step):
                    fetches = {
                        "train": self.train_op,
                        "global_step": self.global_step,
                        "incr_global_step": self.incr_global_step
                    }
                    if step % self.summary_freq == 0:
                        fetches["loss"] = self.total_loss
                        fetches["summary"] = sv.summary_op
                        fetches["lr"] = self.learning_rate
                    print('Process step: ', step)
                    results = sess.run(fetches)
                    gs = results["global_step"]
                    print('End Process step: ', step)
                    if step % self.summary_freq == 0:
                        sv.summary_writer.add_summary(results["summary"], gs)
                        train_epoch = math.ceil(gs / self.steps_per_epoch)
                        train_step = gs - (train_epoch - 1) * \
                            self.steps_per_epoch
                        print("Epoch: [{}] | [{}/{}] | time: {:.4f} s/it | loss: {:.4f} | lr: {:.5f}".format
                              (train_epoch, train_step, self.steps_per_epoch,
                               (time.time() - start_time) / self.summary_freq,
                               results["loss"], results["lr"]))
                        start_time = time.time()
                    # Periodic checkpoint every two epochs' worth of steps.
                    if step != 0 and step % (self.steps_per_epoch * 2) == 0:
                        self.save(sess, ckpt_dir, gs)
            # NOTE(review): bare `except:` silently swallows every exception
            # (including KeyboardInterrupt) after saving — consider catching
            # specific exceptions and re-raising, or at least logging.
            except:
                self.save(sess, ckpt_dir, 'latest')
            self.save(sess, ckpt_dir, 'latest')

    def save(self, sess, checkpoint_dir, step):
        # step may be an int global step or the literal string 'latest'.
        model_name = 'model'
        print(" [*] Saving checkpoint to {}...".format(checkpoint_dir))
        if step == 'latest':
            self.saver.save(sess, os.path.join(
                checkpoint_dir, model_name + '.latest'))
        else:
            self.saver.save(sess, os.path.join(
                checkpoint_dir, model_name), global_step=step)
import os
import pickle

import matplotlib as mpl
import numpy as np
import seaborn as sns
from cartopy import crs as ccrs
from matplotlib import pyplot as plt
from matplotlib import ticker

from mosaiks import config as c
from mosaiks.plotting.general_plotter import scatter_preds
from mosaiks.utils.io import get_us_from_shapefile

# Embed TrueType fonts in PDF output (editable text in vector editors).
mpl.rcParams["pdf.fonttype"] = 42


def setup_plotting_context(scale):
    # Configures a seaborn context where all line widths / tick sizes grow
    # linearly with `scale`, and returns the resulting plotting context dict.
    sns.set(
        context="talk",
        style="white",
        palette="colorblind",
        font_scale=scale / 2,
        rc={
            "axes.linewidth": 1.0 * scale,
            "xtick.major.width": 1.0 * scale,
            "xtick.minor.width": 0.5 * scale,
            "ytick.major.width": 1.0 * scale,
            "ytick.minor.width": 0.5 * scale,
            "xtick.major.size": 4.5 * scale,
            "lines.linewidth": 0.75 * scale,
        },
    )
    return sns.plotting_context()


def plot_figure_2(
    tasks, data_by_task, marker_scale=0.4, scale=3, plot_error=False, is_ACS=False
):
    """
    plots figure 2 from the main text.

    input:
        tasks: list of task names to consider
        data_by_task: formatted lists of data as specified by the output of
            'aggregrate_and_bin_data' (below)
        marker_scale: scatter marker size factor
        scale: overall figure/typography scale
        plot_error: if True, the middle column shows prediction error instead
            of predictions, and the scatter column is omitted
        is_ACS: if True, per-task colormap/scatter bounds come from
            data_by_task['bounds_by_task'] instead of the config file
    returns:
        the matplotlib Figure (one row per task: labels map, predictions/error
        map, scatter of predicted vs observed).
    """
    # unpack data
    truth_by_task = data_by_task["truth_by_task"]
    preds_by_task = data_by_task["preds_by_task"]
    lon_points_by_task = data_by_task["lon_points_by_task"]
    lat_points_by_task = data_by_task["lat_points_by_task"]
    truth_binned_by_task = data_by_task["truth_binned_by_task"]
    preds_binned_by_task = data_by_task["preds_binned_by_task"]
    if is_ACS:
        bounds_by_task = data_by_task["bounds_by_task"]
    num_tasks = len(tasks)

    # set up the figure with sizes
    num_plot_types = 3
    fig_width = 7.2 * scale
    fig_height = 2.0 * num_tasks * scale
    figsize = (fig_width, fig_height)
    fig = plt.figure(figsize=figsize)

    # relative figure sizes
    gs = fig.add_gridspec(
        num_tasks, num_plot_types, width_ratios=[1, 1, 0.4], wspace=0.01, hspace=0.05
    )

    pc = setup_plotting_context(scale)
    mpl.rcParams["pdf.fonttype"] = 42

    # plotting config variables
    pa = c.plotting
    c_by_app = [getattr(c, i) for i in c.app_order]
    disp_names = [config["disp_name"] for config in c_by_app]
    units = [config["units_disp"] for config in c_by_app]
    colors = [config["color"] for config in c_by_app]
    cmap_fxn = pa["cmap_fxn"]
    cmaps = [cmap_fxn(color) for color in colors]
    scatter_bounds = pa["scatter_bounds"]
    cmap_bounds = pa["cmap_bounds"]
    cbar_extend = pa["cbar_extend"]
    pa["bg_color"] = "lightgrey"

    # get bounds for us
    us = get_us_from_shapefile(simplify=0.1)

    for t in range(num_tasks):
        app = tasks[t]
        ## get colormap/scatter bounds
        if is_ACS:
            scatter_bounds_this = bounds_by_task[t][0]
            cmap_bounds_this = bounds_by_task[t][0]
        else:
            scatter_bounds_this = scatter_bounds[app]
            cmap_bounds_this = cmap_bounds[app]

        ### Make the maps:
        ax_truth = fig.add_subplot(gs[t, 0], projection=ccrs.PlateCarree())
        ax_truth.outline_patch.set_visible(False)
        ax_truth.background_patch.set_visible(False)
        ax_truth.add_geometries(
            [us],
            crs=ccrs.PlateCarree(),
            facecolor=pa["bg_color"],
            edgecolor="none",
            zorder=-100,
        )
        # NOTE(review): the first two pcolormesh args are named lat_points /
        # lon_points but are passed as (x, y) — presumably lat_points holds
        # the x (longitude) bin edges from points_to_bin; TODO confirm naming.
        sc_truth = ax_truth.pcolormesh(
            lat_points_by_task[t],
            lon_points_by_task[t],
            truth_binned_by_task[t],
            cmap=cmaps[t],
            vmin=cmap_bounds_this[0],
            vmax=cmap_bounds_this[1],
            edgecolors="none",
        )
        # Row label: the task's display name, rotated along the left edge.
        ax_truth.text(
            0,
            0.6,
            disp_names[t].replace(" ", "\n"),
            va="bottom",
            ha="center",
            rotation="vertical",
            rotation_mode="anchor",
            transform=ax_truth.transAxes,
            weight="bold",
        )

        # set up axes
        ax_pred = fig.add_subplot(gs[t, 1], projection=ccrs.PlateCarree())
        ax_pred.outline_patch.set_visible(False)
        ax_pred.background_patch.set_visible(False)
        ax_pred.add_geometries(
            [us],
            crs=ccrs.PlateCarree(),
            facecolor=pa["bg_color"],
            edgecolor="none",
            zorder=-100,
        )
        if t == 0:
            ax_truth.set_title("Labels", weight="bold")
            ax_pred.set_title("Predictions", weight="bold")
            if plot_error:
                ax_truth.set_title("Labels", weight="bold")
                ax_pred.set_title("Prediction Errors", weight="bold")

        # If not plotting error, then the right column is the mosaiks predictions
        if not plot_error:
            ## plot preds
            sc_pred = ax_pred.pcolormesh(
                lat_points_by_task[t],
                lon_points_by_task[t],
                preds_binned_by_task[t],
                cmap=cmaps[t],
                vmin=cmap_bounds_this[0],
                vmax=cmap_bounds_this[1],
                edgecolors="none",
            )
        # If we are plotting errors, then the right column is model error. Here, the
        # name is kept as ax_pred for compatibility with future lines.
        else:
            ## plot preds
            cmap_brownteal = sns.diverging_palette(
                53, 188, s=90, l=70, sep=25, center="light", as_cmap=True
            )
            diverging_palette = cmap_brownteal
            mask_diff = (
                preds_binned_by_task[t] - truth_binned_by_task[t]
            )  # this looks good.
            sc_pred = ax_pred.pcolormesh(
                lat_points_by_task[t],
                lon_points_by_task[t],
                mask_diff,
                cmap=diverging_palette,  # makes it teal and brown
                vmin=-mask_diff.std(),  # sets the bounds for the color scales
                vmax=mask_diff.std(),
                edgecolors="none",
            )

        ### Make the scatter plots of predicted and observed
        if not plot_error:
            ## scatter preds and obs
            ax_scatter = fig.add_subplot(gs[t, 2])
            ax_scatter = scatter_preds(
                preds_by_task[t],
                truth_by_task[t],
                app,
                ax=ax_scatter,
                bounds=scatter_bounds_this,
                c="dimgrey",
                s=marker_scale * (scale ** 2),
                linewidth=pc["lines.linewidth"],
                fontsize=pc["font.size"] * 1,
                rasterize=True,
                despine=True,
                axis_visible=True,
                is_ACS=is_ACS,
            )

            # clip the scatter plot at bounds specified by the config file
            min_point = scatter_bounds_this[0]
            if min_point is None:
                min_point = np.min(np.hstack((preds_by_task[t], truth_by_task[t])))
            max_point = scatter_bounds_this[1]
            if max_point is None:
                max_point = np.max(np.hstack((preds_by_task[t], truth_by_task[t])))

            # format tick marks on the scatter plot to show the bounds of colormaps
            # of the left two plots with minor_ticks.
            major_ticks = [max_point, min_point]
            minor_ticks = []
            if not cmap_bounds_this[0] is None:
                minor_ticks.append(cmap_bounds_this[0])
            if not cmap_bounds_this[1] is None:
                minor_ticks.append(cmap_bounds_this[1])

            def tick_formatter(x, pos):
                # Compact tick labels: 1 decimal below 10, integers below 1000,
                # and "k" suffix for thousands.
                if x == 0 or x == 100:
                    return str(int(x))
                if abs(x) < 10:
                    return f"{x:.1f}"
                if abs(x) < 1000:
                    return str(int(x))
                if abs(x) < 100000:
                    return f"{x/1000:.1f}k"
                return str(int(x / 1000)) + "k"

            ax_scatter.xaxis.set_major_locator(mpl.ticker.FixedLocator(major_ticks))
            ax_scatter.xaxis.set_major_formatter(
                mpl.ticker.FuncFormatter(tick_formatter)
            )
            ax_scatter.yaxis.set_major_locator(mpl.ticker.FixedLocator(major_ticks))
            ax_scatter.xaxis.set_minor_locator(mpl.ticker.FixedLocator(minor_ticks))
            ax_scatter.yaxis.set_minor_locator(mpl.ticker.FixedLocator(minor_ticks))
            # major
            ax_scatter.tick_params(
                axis="x",
                which="major",
                direction="out",
                bottom=True,
                length=5,
                color="black",
            )
            # minor
            ax_scatter.tick_params(
                axis="x",
                which="minor",
                direction="in",
                bottom=True,
                length=5,
                color="black",
            )
            ax_scatter.tick_params(
                axis="y",
                which="minor",
                direction="in",
                left=True,
                length=5,
                color="black",
            )
            ax_scatter.yaxis.set_ticklabels([])
            sns.despine(ax=ax_scatter, left=False, bottom=False)

        ### Make C-Axis:
        # Observations and predictions share the same c-axis so make one big one:
        ## colorbar for the first two
        bb_truth = ax_truth.get_position()
        bb_pred = ax_pred.get_position()
        height = bb_truth.height * 0.05
        width = (bb_pred.x1 - bb_truth.x0) * 0.95
        # Need to have a smaller c-axis for the error plot
        if plot_error:
            width = (bb_pred.x1 - bb_pred.x0) * 0.95
        y0 = bb_truth.y0 - height
        x0 = bb_truth.x0 + width * 0.025
        ax_cbar = fig.add_axes((x0, y0, width, height))
        cb = fig.colorbar(
            sc_truth, cax=ax_cbar, orientation="horizontal", extend=cbar_extend[app]
        )
        cb.locator = ticker.MaxNLocator(nbins=6, integer=True)
        cb.update_ticks()
        ax_cbar.set_xlabel(units[t], labelpad=1.0, weight="bold")

        # If you are plotting error then we need a separate c-axis for the truth and the
        # error
        if plot_error:
            ## colorbar for the error
            bb_diff = ax_pred.get_position()
            height = bb_diff.height * 0.05
            width = (bb_diff.x1 - bb_diff.x0) * 0.95
            y0 = bb_diff.y0 - height
            x0 = bb_diff.x0 + width * 0.025
            ax_cbar2 = fig.add_axes((x0, y0, width, height))
            # Plots COLOR BAR IN FIGURE
            # NOTE(review): the second colorbar is not captured in a variable;
            # the two lines below re-touch `cb` (the FIRST colorbar) instead of
            # the error colorbar — looks like a bug, TODO confirm.
            fig.colorbar(sc_pred, cax=ax_cbar2, orientation="horizontal", extend="both")
            cb.locator = ticker.MaxNLocator(nbins=6, integer=True)
            cb.update_ticks()
            ax_cbar2.set_xlabel(units[t], labelpad=1.0, weight="bold")

    return fig


def points_to_bin(x, y, vals, scale=10.0):
    """bins points over 2d space with bin sizes specified by scale

    args:
        x,y: nx1 arrays of locations in 1 dimension each
        vals: nx1 array of values to be averaged per bin
        scale: the edge of a bin/box in {x,y} units.
    returns:
        x0, y0: kx1, mx1 arrays of the x and y gridpoints (bin edges)
        vals_grid: (m-1)x(k-1) resulting aggregated (mean) values, with empty
            bins masked out.
    """
    x_range = np.max(x) - np.min(x)
    y_range = np.max(y) - np.min(y)
    bin_shapes = [int(y_range / scale), int(x_range / scale)]
    # Mean per bin = weighted histogram (sum of vals) / count histogram.
    sums_grid, y0, x0 = np.histogram2d(y, x, bins=bin_shapes, weights=vals)
    counts, _, _ = np.histogram2d(y, x, bins=bin_shapes)
    vals_grid = sums_grid / counts
    # Empty bins produce NaN (0/0); mask them for plotting.
    vals_grid = np.ma.masked_invalid(vals_grid)
    return x0, y0, vals_grid


def aggregrate_and_bin_data(agg_scale=0.2, is_ACS=False):
    """Aggregated labels from the saved output of the primary analysis notebooks.
    Aggregate to 'agg_scale' for visualization"""
    c_by_app = [getattr(c, i) for i in c.app_order]
    tasks = [config["application"] for config in c_by_app]
    num_tasks = len(tasks)
    variables = [config["variable"] for config in c_by_app]
    sample_types = [config["sampling"] for config in c_by_app]

    # get file paths for data
    file_name_template = (
        "outcomes_scatter_obsAndPred_{0}_{1}_CONTUS_16_640_{2}_100000_0_random_features"
        "_3_0.data"
    )
    file_names_by_task = [
        file_name_template.format(tasks[i], variables[i], sample_types[i])
        for i in range(len(tasks))
    ]
    path_name_template = os.path.join(
        c.out_dir, "applications/{0}/figures/primary_analysis/{1}"
    )
    file_paths_local = [
        path_name_template.format(tasks[i], file_names_by_task[i])
        for i in range(len(tasks))
    ]

    # store aggregated data in lists
    lat_points_by_task, lon_points_by_task = [], []
    truth_binned_by_task, preds_binned_by_task = [], []
    truth_by_task, preds_by_task = [], []
    bounds_by_task = []  # For ACS

    for t in range(num_tasks):
        # grab the entire data
        f = file_paths_local[t]
        with open(f, "rb") as file_this:
            # NOTE(review): pickle.load on these analysis artifacts — only safe
            # for files produced by this project's own notebooks.
            data_this = pickle.load(file_this)
        truth = data_this["truth"]
        preds = data_this["preds"]

        # store unbinned data
        truth_by_task.append(truth)
        preds_by_task.append(preds)

        # store bounds for ACS
        if is_ACS:
            # bounds_by_task.append(data_this["bounds"])
            # Set better bounds for display
            # print(data_this["bounds"])
            boundMin = np.amin([np.amin(truth), np.amin(preds)])
            boundMax = np.amax([np.amax(truth), np.amax(preds)])
            # print([np.array([boundMin,boundMax])])
            bounds_by_task.append([np.array([boundMin, boundMax])])

        # aggregate the data into averaged bins
        lat_points, lon_points, truth_binned = points_to_bin(
            data_this["lon"], data_this["lat"], truth, scale=agg_scale
        )
        _, _, preds_binned = points_to_bin(
            data_this["lon"], data_this["lat"], preds, scale=agg_scale
        )

        # store binned data
        lat_points_by_task.append(lat_points)
        lon_points_by_task.append(lon_points)
        truth_binned_by_task.append(truth_binned)
        preds_binned_by_task.append(preds_binned)

    if is_ACS:
        return {
            "truth_by_task": truth_by_task,
            "preds_by_task": preds_by_task,
            "lat_points_by_task": lat_points_by_task,
            "lon_points_by_task": lon_points_by_task,
            "truth_binned_by_task": truth_binned_by_task,
            "preds_binned_by_task": preds_binned_by_task,
            "bounds_by_task": bounds_by_task,
        }
    else:
        return {
            "truth_by_task": truth_by_task,
            "preds_by_task": preds_by_task,
            "lat_points_by_task": lat_points_by_task,
            "lon_points_by_task": lon_points_by_task,
            "truth_binned_by_task": truth_binned_by_task,
            "preds_binned_by_task": preds_binned_by_task,
        }
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for the dnsmasq PXE filter driver
# (ironic_inspector.pxe_filter.dnsmasq). Everything that touches the
# filesystem, dnsmasq, or ironic is mocked out.

try:
    import errno
except ImportError:
    import os.errno as errno

import datetime
import os

import fixtures
from ironicclient import exc as ironic_exc
import mock
from oslo_config import cfg
import six

from ironic_inspector.common import ironic as ir_utils
from ironic_inspector import node_cache
from ironic_inspector.pxe_filter import dnsmasq
from ironic_inspector.test import base as test_base

CONF = cfg.CONF


class DnsmasqTestBase(test_base.BaseTest):
    """Base class providing a fresh DnsmasqFilter driver per test."""

    def setUp(self):
        super(DnsmasqTestBase, self).setUp()
        self.driver = dnsmasq.DnsmasqFilter()


class TestShouldEnableUnknownHosts(DnsmasqTestBase):
    """Tests for dnsmasq._should_enable_unknown_hosts()."""

    def setUp(self):
        super(TestShouldEnableUnknownHosts, self).setUp()
        self.mock_introspection_active = self.useFixture(
            fixtures.MockPatchObject(node_cache, 'introspection_active')).mock

    def test_introspection_active(self):
        self.mock_introspection_active.return_value = True
        self.assertTrue(dnsmasq._should_enable_unknown_hosts())

    def test_introspection_not_active(self):
        self.mock_introspection_active.return_value = False
        self.assertFalse(dnsmasq._should_enable_unknown_hosts())


class TestDnsmasqDriverAPI(DnsmasqTestBase):
    """Tests for the public driver API: init/sync/tear_down/reset."""

    def setUp(self):
        super(TestDnsmasqDriverAPI, self).setUp()
        self.mock__execute = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_execute')).mock
        # internal driver hooks are replaced so only the API wiring is tested
        self.driver._sync = mock.Mock()
        self.driver._tear_down = mock.Mock()
        self.mock__purge_dhcp_hostsdir = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_purge_dhcp_hostsdir')).mock
        self.mock_ironic = mock.Mock()
        get_client_mock = self.useFixture(
            fixtures.MockPatchObject(ir_utils, 'get_client')).mock
        get_client_mock.return_value = self.mock_ironic
        self.start_command = '/far/boo buzz -V --ack 42'
        CONF.set_override('dnsmasq_start_command', self.start_command,
                          'dnsmasq_pxe_filter')
        self.stop_command = '/what/ever'
        CONF.set_override('dnsmasq_stop_command', self.stop_command,
                          'dnsmasq_pxe_filter')

    def test_init_filter(self):
        self.driver.init_filter()

        self.mock__purge_dhcp_hostsdir.assert_called_once_with()
        self.driver._sync.assert_called_once_with(self.mock_ironic)
        self.mock__execute.assert_called_once_with(self.start_command)

    def test_sync(self):
        self.driver.init_filter()
        # NOTE(milan) init_filter performs an initial sync
        self.driver._sync.reset_mock()
        self.driver.sync(self.mock_ironic)

        self.driver._sync.assert_called_once_with(self.mock_ironic)

    def test_tear_down_filter(self):
        mock_reset = self.useFixture(
            fixtures.MockPatchObject(self.driver, 'reset')).mock
        self.driver.init_filter()
        self.driver.tear_down_filter()

        mock_reset.assert_called_once_with()

    def test_reset(self):
        self.driver.init_filter()
        # NOTE(milan) init_filter calls _base_cmd
        self.mock__execute.reset_mock()
        self.driver.reset()

        self.mock__execute.assert_called_once_with(
            self.stop_command, ignore_errors=True)


class TestExclusiveWriteOrPass(test_base.BaseTest):
    """Tests for dnsmasq._exclusive_write_or_pass() flock-based writes."""

    def setUp(self):
        super(TestExclusiveWriteOrPass, self).setUp()
        self.mock_open = self.useFixture(fixtures.MockPatchObject(
            six.moves.builtins, 'open', new=mock.mock_open())).mock
        self.mock_fd = self.mock_open.return_value
        self.mock_fcntl = self.useFixture(fixtures.MockPatchObject(
            dnsmasq.fcntl, 'flock', autospec=True)).mock
        self.path = '/foo/bar/baz'
        self.buf = 'spam'
        self.fcntl_lock_call = mock.call(
            self.mock_fd, dnsmasq.fcntl.LOCK_EX | dnsmasq.fcntl.LOCK_NB)
        self.fcntl_unlock_call = mock.call(self.mock_fd,
                                           dnsmasq.fcntl.LOCK_UN)
        self.mock_log = self.useFixture(fixtures.MockPatchObject(
            dnsmasq.LOG, 'debug')).mock
        self.mock_sleep = self.useFixture(fixtures.MockPatchObject(
            dnsmasq.time, 'sleep')).mock

    def test_write(self):
        # happy path: lock acquired on first try, buffer written
        wrote = dnsmasq._exclusive_write_or_pass(self.path, self.buf)
        self.assertTrue(wrote)
        self.mock_open.assert_called_once_with(self.path, 'w', 1)
        self.mock_fcntl.assert_has_calls(
            [self.fcntl_lock_call, self.fcntl_unlock_call])
        self.mock_fd.write.assert_called_once_with(self.buf)
        self.mock_log.assert_not_called()

    def test_write_would_block(self):
        # first lock attempt blocks, second succeeds
        err = IOError('Oops!')
        err.errno = errno.EWOULDBLOCK
        # lock/unlock paired calls
        self.mock_fcntl.side_effect = [
            # first try
            err, None,
            # second try
            None, None]
        wrote = dnsmasq._exclusive_write_or_pass(self.path, self.buf)

        self.assertTrue(wrote)
        self.mock_open.assert_called_once_with(self.path, 'w', 1)
        # NOTE(review): the second positional argument of assert_has_calls
        # is `any_order`, not another call list — the second list here is
        # silently interpreted as a truthy `any_order`. Verify intent.
        self.mock_fcntl.assert_has_calls(
            [self.fcntl_lock_call, self.fcntl_unlock_call],
            [self.fcntl_lock_call, self.fcntl_unlock_call])
        self.mock_fd.write.assert_called_once_with(self.buf)
        self.mock_log.assert_called_once_with(
            '%s locked; will try again (later)', self.path)
        self.mock_sleep.assert_called_once_with(
            dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS_DELAY)

    def test_write_would_block_too_many_times(self):
        # with the attempt budget reduced to 1, a single EWOULDBLOCK
        # exhausts all retries and nothing is written
        self.useFixture(fixtures.MonkeyPatch(
            'ironic_inspector.pxe_filter.dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS',
            1))
        err = IOError('Oops!')
        err.errno = errno.EWOULDBLOCK
        self.mock_fcntl.side_effect = [err, None]

        wrote = dnsmasq._exclusive_write_or_pass(self.path, self.buf)
        self.assertFalse(wrote)

        self.mock_open.assert_called_once_with(self.path, 'w', 1)
        self.mock_fcntl.assert_has_calls(
            [self.fcntl_lock_call, self.fcntl_unlock_call])
        self.mock_fd.write.assert_not_called()
        retry_log_call = mock.call('%s locked; will try again (later)',
                                   self.path)
        failed_log_call = mock.call(
            'Failed to write the exclusively-locked path: %(path)s for '
            '%(attempts)s times', {
                'attempts': dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS,
                'path': self.path
            })
        self.mock_log.assert_has_calls([retry_log_call, failed_log_call])
        self.mock_sleep.assert_called_once_with(
            dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS_DELAY)

    def test_write_custom_ioerror(self):
        # any IOError other than EWOULDBLOCK must propagate unchanged
        err = IOError('Oops!')
        err.errno = errno.EBADF
        self.mock_fcntl.side_effect = [err, None]

        self.assertRaisesRegex(
            IOError, 'Oops!', dnsmasq._exclusive_write_or_pass,
            self.path, self.buf)

        self.mock_open.assert_called_once_with(self.path, 'w', 1)
        self.mock_fcntl.assert_has_calls(
            [self.fcntl_lock_call, self.fcntl_unlock_call])
        self.mock_fd.write.assert_not_called()
        self.mock_log.assert_not_called()


class TestMACHandlers(test_base.BaseTest):
    """Tests for the per-MAC hostsdir record helpers."""

    def setUp(self):
        super(TestMACHandlers, self).setUp()
        self.mac = 'ff:ff:ff:ff:ff:ff'
        self.dhcp_hostsdir = '/far'
        CONF.set_override('dhcp_hostsdir', self.dhcp_hostsdir,
                          'dnsmasq_pxe_filter')
        self.mock_join = self.useFixture(
            fixtures.MockPatchObject(os.path, 'join')).mock
        self.mock_join.return_value = "%s/%s" % (self.dhcp_hostsdir, self.mac)
        self.mock__exclusive_write_or_pass = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_exclusive_write_or_pass')).mock
        self.mock_stat = self.useFixture(
            fixtures.MockPatchObject(os, 'stat')).mock
        self.mock_listdir = self.useFixture(
            fixtures.MockPatchObject(os, 'listdir')).mock
        self.mock_remove = self.useFixture(
            fixtures.MockPatchObject(os, 'remove')).mock
        self.mock_log = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, 'LOG')).mock
        self.mock_introspection_active = self.useFixture(
            fixtures.MockPatchObject(node_cache, 'introspection_active')).mock

    def test__whitelist_unknown_hosts(self):
        self.mock_join.return_value = "%s/%s" % (self.dhcp_hostsdir,
                                                 dnsmasq._UNKNOWN_HOSTS_FILE)
        self.mock_introspection_active.return_value = True
        dnsmasq._configure_unknown_hosts()

        self.mock_join.assert_called_once_with(self.dhcp_hostsdir,
                                               dnsmasq._UNKNOWN_HOSTS_FILE)
        self.mock__exclusive_write_or_pass.assert_called_once_with(
            self.mock_join.return_value,
            '%s' % dnsmasq._WHITELIST_UNKNOWN_HOSTS)
        self.mock_log.debug.assert_called_once_with(
            'A %s record for all unknown hosts using wildcard mac '
            'created', 'whitelist')

    def test__blacklist_unknown_hosts(self):
        self.mock_join.return_value = "%s/%s" % (self.dhcp_hostsdir,
                                                 dnsmasq._UNKNOWN_HOSTS_FILE)
        self.mock_introspection_active.return_value = False
        dnsmasq._configure_unknown_hosts()

        self.mock_join.assert_called_once_with(self.dhcp_hostsdir,
                                               dnsmasq._UNKNOWN_HOSTS_FILE)
        self.mock__exclusive_write_or_pass.assert_called_once_with(
            self.mock_join.return_value,
            '%s' % dnsmasq._BLACKLIST_UNKNOWN_HOSTS)
        self.mock_log.debug.assert_called_once_with(
            'A %s record for all unknown hosts using wildcard mac '
            'created', 'blacklist')

    def test__configure_removedlist_whitelist(self):
        # record size matching a blacklist entry gets rewritten whitelisted
        self.mock_introspection_active.return_value = True
        self.mock_stat.return_value.st_size = dnsmasq._MACBL_LEN

        dnsmasq._configure_removedlist({self.mac})

        self.mock_join.assert_called_with(self.dhcp_hostsdir, self.mac)
        self.mock__exclusive_write_or_pass.assert_called_once_with(
            self.mock_join.return_value, '%s\n' % self.mac)

    def test__configure_removedlist_blacklist(self):
        # record size matching a whitelist entry gets rewritten blacklisted
        self.mock_introspection_active.return_value = False
        self.mock_stat.return_value.st_size = dnsmasq._MACWL_LEN

        dnsmasq._configure_removedlist({self.mac})

        self.mock_join.assert_called_with(self.dhcp_hostsdir, self.mac)
        self.mock__exclusive_write_or_pass.assert_called_once_with(
            self.mock_join.return_value, '%s,ignore\n' % self.mac)

    def test__whitelist_mac(self):
        dnsmasq._whitelist_mac(self.mac)

        self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac)
        self.mock__exclusive_write_or_pass.assert_called_once_with(
            self.mock_join.return_value, '%s\n' % self.mac)

    def test__blacklist_mac(self):
        dnsmasq._blacklist_mac(self.mac)

        self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac)
        self.mock__exclusive_write_or_pass.assert_called_once_with(
            self.mock_join.return_value, '%s,ignore\n' % self.mac)

    def test__get_blacklist(self):
        # membership in black/white list is inferred from the file size
        self.mock_listdir.return_value = [self.mac]
        self.mock_stat.return_value.st_size = len('%s,ignore\n' % self.mac)

        blacklist, whitelist = dnsmasq._get_black_white_lists()

        self.assertEqual({self.mac}, blacklist)
        self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir)
        self.mock_join.assert_called_with(self.dhcp_hostsdir, self.mac)
        self.mock_stat.assert_called_with(self.mock_join.return_value)

    def test__get_whitelist(self):
        self.mock_listdir.return_value = [self.mac]
        self.mock_stat.return_value.st_size = len('%s\n' % self.mac)

        blacklist, whitelist = dnsmasq._get_black_white_lists()

        self.assertEqual({self.mac}, whitelist)
        self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir)
        self.mock_join.assert_called_with(self.dhcp_hostsdir, self.mac)
        self.mock_stat.assert_called_with(self.mock_join.return_value)

    def test__get_no_blacklist(self):
        self.mock_listdir.return_value = [self.mac]
        self.mock_stat.return_value.st_size = len('%s\n' % self.mac)

        blacklist, whitelist = dnsmasq._get_black_white_lists()

        self.assertEqual(set(), blacklist)
        self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir)
        self.mock_join.assert_called_with(self.dhcp_hostsdir, self.mac)
        self.mock_stat.assert_called_with(self.mock_join.return_value)

    def test__get_no_whitelist(self):
        self.mock_listdir.return_value = [self.mac]
        self.mock_stat.return_value.st_size = len('%s,ignore\n' % self.mac)

        blacklist, whitelist = dnsmasq._get_black_white_lists()

        self.assertEqual(set(), whitelist)
        self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir)
        self.mock_join.assert_called_with(self.dhcp_hostsdir, self.mac)
        self.mock_stat.assert_called_with(self.mock_join.return_value)

    def test__purge_dhcp_hostsdir(self):
        self.mock_listdir.return_value = [self.mac]
        dnsmasq._purge_dhcp_hostsdir()

        self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir)
        self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac)
        self.mock_remove.assert_called_once_with('%s/%s' % (self.dhcp_hostsdir,
                                                            self.mac))

    def test_disabled__purge_dhcp_hostsdir(self):
        CONF.set_override('purge_dhcp_hostsdir', False,
                          'dnsmasq_pxe_filter')
        # NOTE(dtantsur): set_override uses os.path internally
        self.mock_join.reset_mock()

        dnsmasq._purge_dhcp_hostsdir()
        self.mock_listdir.assert_not_called()
        self.mock_join.assert_not_called()
        self.mock_remove.assert_not_called()


class TestSync(DnsmasqTestBase):
    """Tests for the driver's _sync() reconciliation loop."""

    def setUp(self):
        super(TestSync, self).setUp()
        self.mock__get_black_white_lists = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_get_black_white_lists')).mock
        self.mock__whitelist_mac = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_whitelist_mac')).mock
        self.mock__blacklist_mac = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_blacklist_mac')).mock
        self.mock__configure_unknown_hosts = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_configure_unknown_hosts')).mock
        self.mock__configure_removedlist = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, '_configure_removedlist')).mock
        self.mock_ironic = mock.Mock()
        self.mock_utcnow = self.useFixture(
            fixtures.MockPatchObject(dnsmasq.timeutils, 'utcnow')).mock
        # two utcnow() samples: sync start and sync end, 42 s apart
        self.timestamp_start = datetime.datetime.utcnow()
        self.timestamp_end = (self.timestamp_start +
                              datetime.timedelta(seconds=42))
        self.mock_utcnow.side_effect = [self.timestamp_start,
                                        self.timestamp_end]
        self.mock_log = self.useFixture(
            fixtures.MockPatchObject(dnsmasq, 'LOG')).mock
        get_client_mock = self.useFixture(
            fixtures.MockPatchObject(ir_utils, 'get_client')).mock
        get_client_mock.return_value = self.mock_ironic
        self.mock_active_macs = self.useFixture(
            fixtures.MockPatchObject(node_cache, 'active_macs')).mock
        self.ironic_macs = {'new_mac', 'active_mac'}
        self.active_macs = {'active_mac'}
        self.blacklist = {'gone_mac', 'active_mac'}
        # NOTE(review): `{}` is an empty dict, not an empty set; the other
        # list fixtures are sets — confirm whether set() was intended.
        self.whitelist = {}
        self.mock__get_black_white_lists.return_value = (self.blacklist,
                                                         self.whitelist)
        self.mock_ironic.port.list.return_value = [
            mock.Mock(address=address) for address in self.ironic_macs]
        self.mock_active_macs.return_value = self.active_macs
        self.mock_should_enable_unknown_hosts = self.useFixture(
            fixtures.MockPatchObject(dnsmasq,
                                     '_should_enable_unknown_hosts')).mock
        self.mock_should_enable_unknown_hosts.return_value = True

    def test__sync_enable_unknown_hosts(self):
        self.mock_should_enable_unknown_hosts.return_value = True
        self.driver._sync(self.mock_ironic)
        self.mock__configure_unknown_hosts.assert_called_once_with()

    def test__sync_not_enable_unknown_hosts(self):
        self.mock_should_enable_unknown_hosts.return_value = False
        self.driver._sync(self.mock_ironic)
        self.mock__configure_unknown_hosts.assert_called_once_with()

    def test__sync(self):
        self.driver._sync(self.mock_ironic)

        self.mock__whitelist_mac.assert_called_once_with('active_mac')
        self.mock__blacklist_mac.assert_called_once_with('new_mac')
        self.mock_ironic.port.list.assert_called_once_with(limit=0,
                                                           fields=['address'])
        self.mock_active_macs.assert_called_once_with()
        self.mock__get_black_white_lists.assert_called_once_with()
        self.mock__configure_unknown_hosts.assert_called_once_with()
        self.mock__configure_removedlist.assert_called_once_with({'gone_mac'})
        self.mock_log.debug.assert_has_calls([
            mock.call('Syncing the driver'),
            mock.call('The dnsmasq PXE filter was synchronized (took %s)',
                      self.timestamp_end - self.timestamp_start)
        ])

    @mock.patch('time.sleep', lambda _x: None)
    def test__sync_with_port_list_retries(self):
        # first port.list call fails with a connection error; _sync retries
        self.mock_ironic.port.list.side_effect = [
            ironic_exc.ConnectionRefused('boom'),
            [mock.Mock(address=address) for address in self.ironic_macs]
        ]
        self.driver._sync(self.mock_ironic)

        self.mock__whitelist_mac.assert_called_once_with('active_mac')
        self.mock__blacklist_mac.assert_called_once_with('new_mac')
        self.mock_ironic.port.list.assert_called_with(limit=0,
                                                      fields=['address'])
        self.mock_active_macs.assert_called_once_with()
        self.mock__get_black_white_lists.assert_called_once_with()
        self.mock__configure_removedlist.assert_called_once_with({'gone_mac'})
        self.mock_log.debug.assert_has_calls([
            mock.call('Syncing the driver'),
            mock.call('The dnsmasq PXE filter was synchronized (took %s)',
                      self.timestamp_end - self.timestamp_start)
        ])


class Test_Execute(test_base.BaseTest):
    """Tests for dnsmasq._execute() rootwrap command invocation."""

    def setUp(self):
        super(Test_Execute, self).setUp()
        self.mock_execute = self.useFixture(
            fixtures.MockPatchObject(dnsmasq.processutils, 'execute')
        ).mock
        CONF.set_override('rootwrap_config', '/path/to/rootwrap.conf')
        self.rootwrap_cmd = dnsmasq._ROOTWRAP_COMMAND.format(
            rootwrap_config=CONF.rootwrap_config)
        self.useFixture(fixtures.MonkeyPatch(
            'ironic_inspector.pxe_filter.dnsmasq._ROOTWRAP_COMMAND',
            self.rootwrap_cmd))
        self.command = 'foobar baz'

    def test__execute(self):
        dnsmasq._execute(self.command)
        self.mock_execute.assert_called_once_with(
            self.command, run_as_root=True, shell=True,
            check_exit_code=True, root_helper=self.rootwrap_cmd)

    def test__execute_ignoring_errors(self):
        dnsmasq._execute(self.command, ignore_errors=True)
        self.mock_execute.assert_called_once_with(
            self.command, run_as_root=True, shell=True,
            check_exit_code=False, root_helper=self.rootwrap_cmd)

    def test__execute_empty(self):
        # no command configured -> nothing executed
        dnsmasq._execute()
        self.mock_execute.assert_not_called()
"""
File: boggle.py
----------------------------------------
Reads a 4x4 boggle board from the user (four rows of four space-separated
letters) and prints every dictionary word of four or more letters that can
be built by walking between neighboring cells without reusing a cell.
"""

# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'

# List for storing words in FILE
dict_list = []

# Boggle board (one 4-letter string per row)
boggle_board = []

# Neighbor coordinate offsets (dx, dy) for the 8 surrounding cells
neighbors = [(-1, -1), (0, -1), (1, -1),
             (-1, 0), (1, 0),
             (-1, 1), (0, 1), (1, 1)]

# To prevent the same combination of words being printed out
# and to count how many words were found
words_found = []


def main():
    """
    This program first requires the user to type in 4 rows of letters,
    and it will help find the existing word combinations which have four
    characters or more among those letters.
    """
    read_dictionary()
    user_input()
    letter_input_machine()


def letter_input_machine():
    """
    Walk through the boggle board and use each letter as the starter
    for words_finder to find the matching words.
    """
    for y in range(len(boggle_board)):
        for x in range(len(boggle_board[y])):
            words_finder(boggle_board[y][x], y, x, [(y, x)])
    if len(words_found) != 0:
        print(f'There are {len(words_found)} words in total.')


def words_finder(word, y, x, used_coord):
    """
    Recursively extend `word` from cell (y, x) and report every
    dictionary word of 4+ letters reached.

    :param word: str, the letters accumulated so far
    :param y, x: int, coordinates of the last cell used
    :param used_coord: list of (y, x) cells already visited on this path
    """
    # prune: no dictionary word starts with this prefix
    if len(word) >= 3 and not has_prefix(word):
        return
    if word in dict_list and len(word) >= 4 and word not in words_found:
        # FIX: removed an unnecessary `global words_found` — the list is
        # only mutated in place, never rebound.
        words_found.append(word)
        print(f'Found "{word}"')
    # keep looping new coordinates to find new character combinations
    for dx, dy in neighbors:
        new_x = x + dx
        new_y = y + dy
        if (new_y, new_x) not in used_coord and is_in_board(new_x, new_y):
            used_coord.append((new_y, new_x))
            words_finder(word + boggle_board[new_y][new_x],
                         new_y, new_x, used_coord)
            used_coord.pop()  # backtrack


def is_in_board(nx, ny):
    """
    Return True if (nx, ny) lies on the board.

    FIX: the x bound now uses the row's own length instead of the number
    of rows, so the check stays correct for non-square boards too
    (identical behavior on the 4x4 board).
    """
    return 0 <= ny < len(boggle_board) and 0 <= nx < len(boggle_board[ny])


def user_input():
    """
    Prompt for the four board rows.

    FIX: the original five-level nested if/else pyramid with duplicated
    'Illegal format' handling is flattened into one loop; the prompts,
    validation (each raw row must be exactly 7 characters, i.e. 4 letters
    separated by spaces) and the stop-on-first-bad-row behavior are
    unchanged.
    """
    rows = []
    for i in range(1, 5):
        row = input(f'{i} row of letters: ').lower()
        if len(row) != 7:
            print('Illegal format')
            return
        rows.append(row)
    bogl_board(*rows)


def bogl_board(r1, r2, r3, r4):
    """
    Strip the spaces out of each typed row and append the resulting
    4-letter strings to the global boggle_board. Returns nothing.
    """
    for row in (r1, r2, r3, r4):
        boggle_board.append(''.join(row.split()))


def read_dictionary():
    """
    Read FILE and append its words into dict_list, keeping only words of
    four or more letters (shorter words can never be reported anyway).
    """
    with open(FILE, 'r') as f:
        for line in f:
            word = line.split()
            # delete unnecessary words for word prefix search
            if len(word[0]) >= 4:
                dict_list += word


def has_prefix(sub_s):
    """
    :param sub_s: (str) A substring constructed from neighboring letters
        on the 4x4 grid
    :return: (bool) whether any dictionary word starts with sub_s
    """
    for word in dict_list:
        if word.startswith(sub_s):
            return True
    return False


if __name__ == '__main__':
    main()
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdeploy.core import FUNCTION_REWRITER, RewriterContext
from mmdeploy.core.rewriters.function_rewriter import FunctionRewriter
from mmdeploy.utils.constants import Backend


def test_function_rewriter():
    """Registered rewriters replace the target function only inside a
    RewriterContext with a matching backend, and are restored on exit."""
    x = torch.tensor([1, 2, 3, 4, 5])
    y = torch.tensor([2, 4, 6, 8, 10])

    # one rewrite function registered for two targets (torch.add/torch.mul)
    @FUNCTION_REWRITER.register_rewriter(
        func_name='torch.mul', backend='tensorrt')
    @FUNCTION_REWRITER.register_rewriter(
        func_name='torch.add', backend='tensorrt')
    def sub_func(rewriter, x, y):
        assert hasattr(rewriter, 'cfg')
        assert hasattr(rewriter, 'origin_func')
        return x - y

    cfg = dict()
    with RewriterContext(cfg, backend='tensorrt'):
        result = torch.add(x, y)
        # replace add with sub
        torch.testing.assert_allclose(result, x - y)
        result = torch.mul(x, y)
        # replace add with sub
        torch.testing.assert_allclose(result, x - y)

    result = torch.add(x, y)
    # recovery origin function
    torch.testing.assert_allclose(result, x + y)

    with RewriterContext(cfg):
        result = torch.add(x, y)
        # replace should not happen with wrong backend
        torch.testing.assert_allclose(result, x + y)

    # test different config: method-level rewrite on torch.Tensor.add,
    # 'default' backend applies under any backend
    @FUNCTION_REWRITER.register_rewriter(
        func_name='torch.Tensor.add', backend='default')
    def mul_func_class(rewriter, x, y):
        return x * y

    with RewriterContext(cfg, backend='tensorrt'):
        result = x.add(y)
        # replace add with multi
        torch.testing.assert_allclose(result, x * y)

    result = x.add(y)
    # recovery origin function
    torch.testing.assert_allclose(result, x + y)

    with RewriterContext(cfg):
        result = x.add(y)
        # replace add with multi
        torch.testing.assert_allclose(result, x * y)

    # test origin_func: the rewriter can delegate to the original function
    @FUNCTION_REWRITER.register_rewriter(
        func_name='torch.add', backend='default')
    def origin_add_func(rewriter, x, y, **kwargs):
        return rewriter.origin_func(x, y, **kwargs) + 1

    with RewriterContext(cfg):
        result = torch.add(x, y)
        # replace with origin + 1
        torch.testing.assert_allclose(result, x + y + 1)

    # remove torch.add
    # NOTE(review): reaches into FUNCTION_REWRITER's private state to drop
    # the last registered record so later tests see a clean torch.add.
    del FUNCTION_REWRITER._origin_functions[-1]
    torch.testing.assert_allclose(torch.add(x, y), x + y)


def test_rewrite_empty_function():
    """Registering a rewriter for a nonexistent target is a no-op."""
    function_rewriter = FunctionRewriter()

    @function_rewriter.register_rewriter(func_name='torch.abcdefghijklmn')
    def func(rewriter, x, y):
        return x + y

    function_rewriter.enter()
    assert len(function_rewriter._origin_functions) == 0
    function_rewriter.exit()


class TestHomonymicRewriter:
    """Rewrites of the same object reachable through two import paths."""

    def test_rewrite_homonymic_functions(self):
        import package
        path1 = 'package.func'
        path2 = 'package.module.func'
        assert package.func() == 1
        assert package.module.func() == 1

        function_rewriter = FunctionRewriter()
        function_rewriter.add_backend(Backend.NCNN.value)

        @function_rewriter.register_rewriter(func_name=path1)
        def func_2(ctx):
            return 2

        @function_rewriter.register_rewriter(
            func_name=path2, backend=Backend.NCNN.value)
        def func_3(ctx):
            return 3

        function_rewriter.enter(backend=Backend.NCNN.value)
        # This is a feature: each import path keeps its own rewrite
        assert package.func() == 2
        assert package.module.func() == 3
        function_rewriter.exit()

        assert package.func() == 1
        assert package.module.func() == 1

        function_rewriter2 = FunctionRewriter()
        function_rewriter2.add_backend(Backend.NCNN.value)

        @function_rewriter2.register_rewriter(
            func_name=path1, backend=Backend.NCNN.value)
        def func_4(ctx):
            return 4

        @function_rewriter2.register_rewriter(func_name=path2)
        def func_5(ctx):
            return 5

        function_rewriter2.enter(backend=Backend.NCNN.value)
        # This is a feature
        assert package.func() == 4
        assert package.module.func() == 5
        function_rewriter2.exit()

        assert package.func() == 1
        assert package.module.func() == 1

    def test_rewrite_homonymic_methods(self):
        import package
        path1 = 'package.C.method'
        path2 = 'package.module.C.method'
        c = package.C()

        function_rewriter = FunctionRewriter()
        function_rewriter.add_backend(Backend.NCNN.value)

        assert c.method() == 1

        @function_rewriter.register_rewriter(func_name=path1)
        def func_2(ctx, self):
            return 2

        @function_rewriter.register_rewriter(
            func_name=path2, backend=Backend.NCNN.value)
        def func_3(ctx, self):
            return 3

        # backend-specific registration wins for the shared class object
        function_rewriter.enter(backend=Backend.NCNN.value)
        assert c.method() == 3
        function_rewriter.exit()

        assert c.method() == 1

        function_rewriter2 = FunctionRewriter()
        function_rewriter2.add_backend(Backend.NCNN.value)

        @function_rewriter2.register_rewriter(
            func_name=path1, backend=Backend.NCNN.value)
        def func_4(ctx, self):
            return 4

        @function_rewriter2.register_rewriter(func_name=path2)
        def func_5(ctx, self):
            return 5

        function_rewriter2.enter(backend=Backend.NCNN.value)
        assert c.method() == 4
        function_rewriter2.exit()

        assert c.method() == 1


def test_rewrite_derived_methods():
    """A rewrite on a base-class method is seen by derived instances
    unless the derived class has its own backend-specific rewrite."""
    import package
    path1 = 'package.C.method'
    path2 = 'package.C2.method'
    base_obj = package.C()
    derived_obj = package.C2()

    assert base_obj.method() == 1
    assert derived_obj.method() == 1

    function_rewriter = FunctionRewriter()
    function_rewriter.add_backend(Backend.NCNN.value)

    @function_rewriter.register_rewriter(func_name=path1)
    def func_2(ctx, self):
        return 2

    @function_rewriter.register_rewriter(
        func_name=path2, backend=Backend.NCNN.value)
    def func_3(ctx, self):
        return 3

    function_rewriter.enter()
    assert base_obj.method() == 2
    # derived method inherits the base rewrite under the default backend
    assert derived_obj.method() == 2
    function_rewriter.exit()

    function_rewriter.enter(backend=Backend.NCNN.value)
    assert base_obj.method() == 2
    assert derived_obj.method() == 3
    function_rewriter.exit()

    assert base_obj.method() == 1
    assert derived_obj.method() == 1

    # Check if the recovery is correct
    function_rewriter.enter()
    assert base_obj.method() == 2
    assert derived_obj.method() == 2
    function_rewriter.exit()

    assert base_obj.method() == 1
    assert derived_obj.method() == 1
# Copyright (c) 2011-2015 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
#    the names of its contributors may be used to endorse or promote
#    products derived from this software without specific prior
#    written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""Abstract syntax tree classes for LTL.

Syntax taken originally roughly from:
http://spot.lip6.fr/wiki/LtlSyntax
"""
import logging


logger = logging.getLogger(__name__)


# prototype for flattening to a "canonical" string
OPMAP = {
    'False': 'False', 'True': 'True',
    '!': '!',
    '|': '|', '&': '&', '->': '->', '<->': '<->', '^': '^', 'ite': 'ite',
    'X': 'X', 'G': 'G', 'F': 'F',
    'U': 'U', 'W': 'W', 'V': 'V',
    '<': '<', '<=': '<=', '=': '=', '>=': '>=', '>': '>', '!=': '!=',
    '+': '+', '-': '-', '*': '*', '/': '/',
    '<<>>': '<<>>'  # arithmetic
}
# this mapping is based on SPIN documentation:
#   http://spinroot.com/spin/Man/ltl.html
FULL_OPERATOR_NAMES = {
    'next': 'X',
    'always': '[]',
    'eventually': '<>',
    'until': 'U',
    'stronguntil': 'U',
    'weakuntil': 'W',
    'unless': 'W',  # see Baier - Katoen
    'release': 'V',
    'implies': '->',
    'equivalent': '<->',
    'not': '!',
    'and': '&&',
    'or': '||',
}


def make_nodes(opmap=None):
    """Return class with attributes the AST node classes.

    The tree is defined recursively,
    not with a graph data structure.
    L{Tree} is a graph data structure for that purpose.

    :param opmap: optional operator -> canonical-string mapping used by
        ``flatten``; defaults to the module-level ``OPMAP``.
    """
    if opmap is None:
        opmap = OPMAP

    class Node(object):
        """Base class for AST nodes."""

        # Shared operator map; assigned after the class body below so the
        # closed-over `opmap` is used.
        opmap = None

        def __init__(self):
            pass

        def __repr__(self):
            pass

        def flatten(self):
            pass

    Node.opmap = opmap

    # Do not confuse "term" with the abbreviation of "terminal".
    # A "term" in FOL can consist of terminals,
    # for example a function together with parentheses and its args.
    class Terminal(Node):
        """Terminal symbols of grammar.

        Include:

          - 0-ary function constants (numbers, strings)
          - 0-ary function variables (integer or string variable)
          - 0-ary connectives (Boolean constants)
          - 0-ary predicate constants
          - 0-ary predicate variables
        """

        def __init__(self, value):
            # duck-type check that `value` is string-like
            try:
                value + 'a'
            except TypeError:
                raise TypeError(
                    'value must be a string, got: {v}'.format(
                        v=value))
            self.type = 'terminal'
            self.value = value

        def __repr__(self):
            return '{t}({v})'.format(t=type(self).__name__,
                                     v=repr(self.value))

        def __hash__(self):
            # identity-based hash: distinct node objects never collide,
            # even when equal by __eq__
            return id(self)

        def __str__(self, *arg, **kw):
            # *arg accommodates "depth" arg of Operator.__str__
            return self.value

        def __len__(self):
            """Return the number of operators and terminals.

            Note that this definition differs from the
            theoretical definition that a formula's length
            is the number of operators it contains.
            """
            return 1

        def __eq__(self, other):
            return (isinstance(other, type(self)) and
                    self.value == other.value)

        def flatten(self, *arg, **kw):
            return self.value

    class Operator(Node):
        """Takes a non-zero number of operands and returns a result.

        Cases:

          - function (arithmetic):
              maps (terms)^n to terms
          - predicate (relational operator):
              maps (terms)^n to atomic formulas
          - connective (logical operator):
              maps (wff)^n to wff
        """

        def __init__(self, operator, *operands):
            try:
                operator + 'a'
            except TypeError:
                raise TypeError(
                    'operator must be string, got: {op}'.format(
                        op=operator))
            self.type = 'operator'
            self.operator = operator
            self.operands = list(operands)

        # ''.join would be faster, but __repr__ is for debugging,
        # not for flattening, so readability takes precedence
        def __repr__(self):
            return '{t}({op}, {xyz})'.format(
                t=type(self).__name__,
                op=repr(self.operator),
                xyz=', '.join(repr(x) for x in self.operands))

        # more readable recursive counterpart of __repr__
        # depth allows limiting recursion to see a shallower view
        def __str__(self, depth=None):
            if depth is not None:
                depth = depth - 1
            if depth == 0:
                return '...'
            return '({op} {xyz})'.format(
                op=self.operator,
                xyz=' '.join(x.__str__(depth=depth)
                             for x in self.operands))

        def __len__(self):
            return 1 + sum(len(x) for x in self.operands)

        def flatten(self, *arg, **kw):
            # prefix form: "( op operand1, operand2 )"
            return ' '.join([
                '(',
                self.opmap[self.operator],
                ', '.join(x.flatten(*arg, **kw)
                          for x in self.operands),
                ')'])

    # Distinguish operators by arity
    class Unary(Operator):
        pass

    class Binary(Operator):
        def flatten(self, *arg, **kw):
            """Infix flattener for consistency with parser.

            Override it if you want prefix or postfix.
            """
            return ' '.join([
                '(',
                self.operands[0].flatten(*arg, **kw),
                self.opmap[self.operator],
                self.operands[1].flatten(*arg, **kw),
                ')'])

    class Nodes(object):
        """AST nodes for a generic grammar."""

    nodes = Nodes()
    nodes.Node = Node
    nodes.Terminal = Terminal
    nodes.Operator = Operator
    nodes.Unary = Unary
    nodes.Binary = Binary
    return nodes


def make_fol_nodes(opmap=None):
    """AST classes for fragment of first-order logic.

    Extends :func:`make_nodes` with typed terminals (variables, Booleans,
    numbers, strings) and binary comparator/arithmetic operators.
    """
    nodes = make_nodes(opmap)

    class Var(nodes.Terminal):
        """A 0-ary variable.

        Two cases:

          - 0-ary function variable (integer or string variable)
          - 0-ary propositional variable (atomic proposition)
        """

        def __init__(self, value):
            super(Var, self).__init__(value)
            self.type = 'var'

    class Bool(nodes.Terminal):
        """A 0-ary connective."""

        def __init__(self, value):
            try:
                value + 'a'
            except TypeError:
                raise TypeError(
                    'value must be string, got: {v}'.format(v=value))
            if value.lower() not in {'true', 'false'}:
                raise TypeError(
                    'value must be "true" or "false" '
                    '(case insensitive), got: {v}'.format(v=value))
            # normalize to canonical capitalization
            self.value = 'True' if (value.lower() == 'true') else 'False'
            self.type = 'bool'

        def flatten(self, *arg, **kw):
            return self.opmap[self.value]

    class Num(nodes.Terminal):
        """A 0-ary function."""

        # self.value is str,
        # use int(self.value) if you need to
        def __init__(self, value):
            super(Num, self).__init__(value)
            self.type = 'num'

    class Str(nodes.Terminal):
        """A 0-ary function."""

        # parser ensures that value has no quotes
        def __init__(self, value):
            super(Str, self).__init__(value)
            self.type = 'str'

    class Comparator(nodes.Binary):
        """Binary relational operator (2-ary predicate)."""

    class Arithmetic(nodes.Binary):
        """Binary function.

        Maps terms to terms.
        """

    nodes.Var = Var
    nodes.Bool = Bool
    nodes.Num = Num
    nodes.Str = Str
    nodes.Comparator = Comparator
    nodes.Arithmetic = Arithmetic
    return nodes


# default node set used by the LTL parser
nodes = make_fol_nodes()
############################################ # Copyright (c) 2016 Microsoft Corporation # # MSS enumeration based on maximal resolution. # # Author: <NAME> (nbjorner) ############################################ """ The following is a procedure for enumerating maximal satisfying subsets. It uses maximal resolution to eliminate cores from the state space. Whenever the hard constraints are satisfiable, it finds a model that satisfies the maximal number of soft constraints. During this process it collects the set of cores that are encountered. It then reduces the set of soft constraints using max-resolution in the style of [Narodytska & Bacchus, AAAI'14]. In other words, let F1, ..., F_k be a core among the soft constraints F1,...,F_n Replace F1,.., F_k by F1 or F2, F3 or (F2 & F1), F4 or (F3 & (F2 & F1)), ..., F_k or (F_{k-1} & (...)) Optionally, add the core ~F1 or ... or ~F_k to F The current model M satisfies the new set F, F1,...,F_{n-1} if the core is minimal. Whenever we modify the soft constraints by the core reduction any assignment to the reduced set satisfies a k-1 of the original soft constraints. 
""" from z3 import * def main(): x, y = Reals('x y') soft_constraints = [x > 2, x < 1, x < 0, Or(x + y > 0, y < 0), Or(y >= 0, x >= 0), Or(y < 0, x < 0), Or(y > 0, x < 0)] hard_constraints = BoolVal(True) solver = MSSSolver(hard_constraints, soft_constraints) for lits in enumerate_sets(solver): print("%s" % lits) def enumerate_sets(solver): while True: if sat == solver.s.check(): MSS = solver.grow() yield MSS else: break class MSSSolver: s = Solver() varcache = {} idcache = {} def __init__(self, hard, soft): self.n = len(soft) self.soft = soft self.s.add(hard) self.soft_vars = set([self.c_var(i) for i in range(self.n)]) self.orig_soft_vars = set([self.c_var(i) for i in range(self.n)]) self.s.add([(self.c_var(i) == soft[i]) for i in range(self.n)]) def c_var(self, i): if i not in self.varcache: v = Bool(str(self.soft[abs(i)])) self.idcache[v] = abs(i) if i >= 0: self.varcache[i] = v else: self.varcache[i] = Not(v) return self.varcache[i] # Retrieve the latest model # Add formulas that are true in the model to # the current mss def update_unknown(self): self.model = self.s.model() new_unknown = set([]) for x in self.unknown: if is_true(self.model[x]): self.mss.append(x) else: new_unknown.add(x) self.unknown = new_unknown # Create a name, propositional atom, # for formula 'fml' and return the name. def add_def(self, fml): name = Bool("%s" % fml) self.s.add(name == fml) return name # replace Fs := f0, f1, f2, .. by # Or(f1, f0), Or(f2, And(f1, f0)), Or(f3, And(f2, And(f1, f0))), ... def relax_core(self, Fs): assert(Fs <= self.soft_vars) prefix = BoolVal(True) self.soft_vars -= Fs Fs = [ f for f in Fs ] for i in range(len(Fs)-1): prefix = self.add_def(And(Fs[i], prefix)) self.soft_vars.add(self.add_def(Or(prefix, Fs[i+1]))) # Resolve literals from the core that # are 'explained', e.g., implied by # other literals. 
def resolve_core(self, core): new_core = set([]) for x in core: if x in self.mcs_explain: new_core |= self.mcs_explain[x] else: new_core.add(x) return new_core # Given a current satisfiable state # Extract an MSS, and ensure that currently # encountered cores are avoided in next iterations # by weakening the set of literals that are # examined in next iterations. # Strengthen the solver state by enforcing that # an element from the MCS is encountered. def grow(self): self.mss = [] self.mcs = [] self.nmcs = [] self.mcs_explain = {} self.unknown = self.soft_vars self.update_unknown() cores = [] while len(self.unknown) > 0: x = self.unknown.pop() is_sat = self.s.check(self.mss + [x] + self.nmcs) if is_sat == sat: self.mss.append(x) self.update_unknown() elif is_sat == unsat: core = self.s.unsat_core() core = self.resolve_core(core) self.mcs_explain[Not(x)] = {y for y in core if not eq(x,y)} self.mcs.append(x) self.nmcs.append(Not(x)) cores += [core] else: print("solver returned %s" % is_sat) exit() mss = [x for x in self.orig_soft_vars if is_true(self.model[x])] mcs = [x for x in self.orig_soft_vars if not is_true(self.model[x])] self.s.add(Or(mcs)) core_literals = set([]) cores.sort(key=lambda element: len(element)) for core in cores: if len(core & core_literals) == 0: self.relax_core(core) core_literals |= core return mss main()
# Copyright 2021 <NAME> <<EMAIL>>.
# SPDX-License-Identifier: MIT

from pytest import fixture, mark
import pytest
from ganjoor import Ganjoor, GanjoorException
from dotenv import load_dotenv
from os import environ
import vcr

from ganjoor.models import Category, Poem, Poet


class TestGanjoor:
    """End-to-end tests for the Ganjoor API client (HTTP traffic is
    replayed from VCR cassettes under tests/vcr_cassettes/)."""

    @fixture()
    def ganjoor(self):
        # Fresh client per test.
        return Ganjoor()

    @fixture
    def ganjoor_keys(self):
        # Attributes every freshly constructed client must carry.
        return ['token', 'language', 'app_name', 'base_url']

    @fixture
    def poem_keys_complete(self):
        # Private attributes expected on a Poem fetched with complete=True.
        return ['_id', '_title', '_full_title', '_url_slug', '_full_url',
                '_plain_text', '_html_text', '_ganjoor_metre',
                '_rhyme_letters', '_source_name', '_source_url_slug',
                '_category', '_next', '_previous', '_verses',
                '_recitations', '_images', '_songs', '_comments']

    @fixture
    def poem_keys_incomplete(self):
        # Subset present on a Poem fetched with complete=False.
        return ['_id', '_title', '_full_title', '_url_slug', '_full_url',
                '_plain_text', '_html_text', '_ganjoor_metre']

    def test_init(self, ganjoor_keys, ganjoor):
        assert isinstance(ganjoor, Ganjoor)
        assert set(ganjoor_keys).issubset(ganjoor.__dict__.keys())

    @vcr.use_cassette('tests/vcr_cassettes/ganjoor_login.yml')
    def test_log_in(self, ganjoor):
        load_dotenv()
        username = environ.get('GANJOOR_USERNAME')
        password = environ.get('GANJOOR_PASSWORD')
        # SECURITY FIX: removed print(username)/print(password) — never
        # echo credentials into test output or CI logs.
        if username and password:
            ganjoor.log_in(username=username, password=password)
            assert ganjoor.auth_token
        else:
            with pytest.raises(GanjoorException):
                ganjoor.log_in(username=username, password=password)

    # Poet Tests
    @vcr.use_cassette('tests/vcr_cassettes/ganjoor_find_poet_id.yml')
    def test_find_poet_by_id(self, ganjoor):
        poet = ganjoor.find_poet_by_id(2)
        assert isinstance(poet, Poet)
        assert poet.id == 2

    @vcr.use_cassette('tests/vcr_cassettes/ganjoor_find_poet_url.yml')
    def test_find_poet_by_url(self, ganjoor):
        poet = ganjoor.find_poet_by_url('/hafez')
        assert isinstance(poet, Poet)
        assert poet.full_url == '/hafez'

    @vcr.use_cassette('tests/vcr_cassettes/ganjoor_get_all_poets.yml')
    def test_get_all_poets(self, ganjoor):
        poets = ganjoor.get_all_poets()
        # BUGFIX: the old `assert [isinstance(...) for ...]` only checked
        # that the list literal was truthy; `all(...)` actually verifies
        # every element's type.
        assert all(isinstance(poet, Poet) for poet in poets)

    # Category Tests
    @mark.parametrize("vcr_cassette, with_poems", [
        ('tests/vcr_cassettes/ganjoor_find_category_by_id_with_poems.yml',
         True),
        ('tests/vcr_cassettes/ganjoor_find_category_by_id_without_poems.yml',
         False)])
    def test_find_category_by_id(self, vcr_cassette: str, with_poems,
                                 ganjoor):
        with vcr.use_cassette(vcr_cassette):
            category = ganjoor.find_category_by_id(24, with_poems)
            assert isinstance(category, Category)
            assert category.id == 24
            assert isinstance(category.poems, list)
            if with_poems:
                assert len(category.poems) > 0
            else:
                assert len(category.poems) == 0

    @mark.parametrize("vcr_cassette, with_poems", [
        ('tests/vcr_cassettes/ganjoor_find_category_by_url_with_poems.yml',
         True),
        ('tests/vcr_cassettes/ganjoor_find_category_by_url_without_poems.yml',
         False)])
    def test_find_category_by_url(self, vcr_cassette, with_poems, ganjoor):
        with vcr.use_cassette(vcr_cassette):
            category = ganjoor.find_category_by_url(
                '/hafez/ghazal', with_poems)
            assert isinstance(category, Category)
            assert category.full_url == '/hafez/ghazal'
            assert isinstance(category.poems, list)
            if with_poems:
                assert len(category.poems) > 0
            # else:
            #     assert len(category.poems) == 0  TODO: API Problem

    # Poem Tests
    @mark.parametrize("id,vcr_cassette, complete", [
        (2131,
         'tests/vcr_cassettes/ganjoor_find_poem_by_id_complete.yml',
         True),
        (2131,
         'tests/vcr_cassettes/ganjoor_find_poem_by_id_incomplete.yml',
         False)
    ])
    def test_ganjoor_find_poem_by_id(self, id, vcr_cassette, complete,
                                     poem_keys_complete,
                                     poem_keys_incomplete, ganjoor):
        with vcr.use_cassette(vcr_cassette):
            poem = ganjoor.find_poem_by_id(id, complete=complete)
            assert isinstance(poem, Poem)
            assert poem.id == id
            if complete:
                for attr in poem_keys_complete:
                    assert (getattr(poem, attr, []))
            else:
                for attr in poem_keys_incomplete:
                    assert (getattr(poem, attr, []))

    @mark.parametrize("url,vcr_cassette, complete", [
        ('/hafez/ghazal/sh2',
         'tests/vcr_cassettes/ganjoor_find_poem_by_url_complete.yml',
         True),
        ('/hafez/ghazal/sh2',
         'tests/vcr_cassettes/ganjoor_find_poem_by_url_incomplete.yml',
         False)
    ])
    def test_ganjoor_find_poem_by_url(self, url, vcr_cassette, complete,
                                      poem_keys_complete,
                                      poem_keys_incomplete, ganjoor):
        with vcr.use_cassette(vcr_cassette):
            poem = ganjoor.find_poem_by_url(url, complete=complete)
            assert isinstance(poem, Poem)
            assert poem.full_url == url
            if complete:
                for attr in poem_keys_complete:
                    assert (getattr(poem, attr, []))
            else:
                for attr in poem_keys_incomplete:
                    assert (getattr(poem, attr, []))

    @vcr.use_cassette('tests/vcr_cassettes/ganjoor_hafez_faal.yml')
    def test_ganjoor_hafez_faal(self, poem_keys_incomplete, ganjoor):
        faal = ganjoor.hafez_faal()
        assert isinstance(faal, Poem)
        for attr in poem_keys_incomplete:
            assert (getattr(faal, attr, []))
        assert faal.get_poet_name_from_url() == "hafez"

    @mark.parametrize("poet_id,vcr_cassette", [
        (0, 'tests/vcr_cassettes/ganjoor_random_poem.yml'),
        (2, 'tests/vcr_cassettes/ganjoor_random_poem_by_hafez.yml')
    ])
    def test_ganjoor_random_poem(self, poem_keys_incomplete, poet_id,
                                 vcr_cassette, ganjoor):
        with vcr.use_cassette(vcr_cassette):
            poem = ganjoor.random_poem(poet_id)
            assert isinstance(poem, Poem)
            if poet_id == 2:
                assert poem.get_poet_name_from_url() == "hafez"
            for attr in poem_keys_incomplete:
                assert getattr(poem, attr, [])

    @mark.parametrize("metre,rhyme,vcr_cassette", [
        ('مفعول مفاعلن فعولن (هزج مسدس اخرب مقبوض محذوف)', 'ست',
         'tests/vcr_cassettes/ganjoor_similar_poem_w_rhyme.yml'),
        ('فعولن فعولن فعولن فعل (متقارب مثمن محذوف یا وزن شاهنامه)', None,
         'tests/vcr_cassettes/ganjoor_similar_poem_wo_rhyme.yml')
    ])
    def test_ganjoor_find_similar_poems(self, poem_keys_incomplete, metre,
                                        rhyme, vcr_cassette, ganjoor):
        with vcr.use_cassette(vcr_cassette):
            similar_poems = ganjoor.find_similar_poems(
                metre=metre, rhyme=rhyme)
            assert len(similar_poems) == 5
            for poem in similar_poems:
                assert poem.ganjoor_metre.rhythm == metre
                if rhyme:
                    assert poem.rhyme_letters == rhyme
                for attr in poem_keys_incomplete:
                    assert getattr(poem, attr, [])

    @mark.parametrize("term,poet_id,cat_id,vcr_cassette", [
        ('شیراز', 0, 0,
         'tests/vcr_cassettes/ganjoor_search_poems_wo_cat.yml'),
        ('شیراز', 2, 24,
         'tests/vcr_cassettes/ganjoor_search_poems_w_cat.yml')
    ])
    def test_ganjoor_search_poems(self, poem_keys_incomplete, term, poet_id,
                                  cat_id, ganjoor: Ganjoor, vcr_cassette):
        with vcr.use_cassette(vcr_cassette):
            poems = ganjoor.search_poems(term, poet_id=poet_id,
                                         cat_id=cat_id)
            assert len(poems) == 5
            for poem in poems:
                assert term in str(poem)
                if poet_id == 2 and cat_id == 24:
                    assert poem.poet.id == poet_id
                    assert (poem.category == cat_id) or (
                        cat_id in [child.id
                                   for child in poem.category.children])
                for attr in poem_keys_incomplete:
                    assert getattr(poem, attr, [])
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import unittest
from hashlib import sha256

import pytest
from dogpile.cache import make_region

from rucio.common.config import config_get
from rucio.common.config import config_get_bool
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import generate_uuid as uuid
from rucio.core.account_limit import set_local_account_limit
from rucio.core.did import add_did, attach_dids
from rucio.core.lock import successful_transfer, failed_transfer, get_replica_locks
from rucio.core.replica import get_replica
from rucio.core.request import cancel_request_did
from rucio.core.transfer import cancel_transfers
from rucio.core.rse import add_rse_attribute, add_rse, update_rse, get_rse_id
from rucio.core.rule import get_rule, add_rule
from rucio.daemons.judge.evaluator import re_evaluator
from rucio.daemons.judge.repairer import rule_repairer
from rucio.db.sqla import models
from rucio.db.sqla.constants import DIDType, RuleState, ReplicaState
from rucio.db.sqla.session import get_session
from rucio.tests.common import rse_name_generator
from rucio.tests.common_server import get_vo
from rucio.tests.test_rule import create_files, tag_generator


@pytest.mark.dirty
@pytest.mark.noparallel(reason='uses pre-defined rses, sets rse attributes, sets account limits')
class TestJudgeRepairer(unittest.TestCase):
    """Integration tests for the judge-repairer daemon: rules are driven
    into STUCK via simulated failed transfers, then `rule_repairer` is run
    and the resulting rule/replica/lock state is asserted."""

    @classmethod
    def setUpClass(cls):
        # Use a VO qualifier only when the server runs in multi-VO mode.
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            cls.vo = {'vo': get_vo()}
        else:
            cls.vo = {}

        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'

        cls.rse1_id = get_rse_id(rse=cls.rse1, **cls.vo)
        cls.rse3_id = get_rse_id(rse=cls.rse3, **cls.vo)
        cls.rse4_id = get_rse_id(rse=cls.rse4, **cls.vo)
        cls.rse5_id = get_rse_id(rse=cls.rse5, **cls.vo)

        # Add Tags
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1_id, cls.T1, True)
        add_rse_attribute(cls.rse3_id, cls.T1, True)
        add_rse_attribute(cls.rse4_id, cls.T2, True)
        add_rse_attribute(cls.rse5_id, cls.T1, True)

        # Add fake weights
        add_rse_attribute(cls.rse1_id, "fakeweight", 10)
        add_rse_attribute(cls.rse3_id, "fakeweight", 0)
        add_rse_attribute(cls.rse4_id, "fakeweight", 0)
        add_rse_attribute(cls.rse5_id, "fakeweight", 0)

        # Add quota (-1 means unlimited)
        cls.jdoe = InternalAccount('jdoe', **cls.vo)
        cls.root = InternalAccount('root', **cls.vo)
        set_local_account_limit(cls.jdoe, cls.rse1_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse3_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse4_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse5_id, -1)

        set_local_account_limit(cls.root, cls.rse1_id, -1)
        set_local_account_limit(cls.root, cls.rse3_id, -1)
        set_local_account_limit(cls.root, cls.rse4_id, -1)
        set_local_account_limit(cls.root, cls.rse5_id, -1)

    def test_to_repair_a_rule_with_NONE_grouping_whose_transfer_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""
        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse4_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        failed_rse_id = get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['state'] == ReplicaState.COPYING)
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['lock_cnt'] == 1)

        # NOTE(review): locks for files[0]/files[1] are looked up via
        # files[2]['name'] here and in the tests below — verify this is
        # intentional (it works when all locks land on the same RSE).
        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['state'] == ReplicaState.UNAVAILABLE)
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['lock_cnt'] == 0)

    def test_to_repair_a_rule_with_ALL_grouping_whose_transfer_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""
        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(4, scope, self.rse4_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.T1, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None, activity='DebugJudge')[0]

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
        # ALL grouping: after repair every lock must sit on the same RSE.
        assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        assert(get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

    def test_to_repair_a_rule_with_DATASET_grouping_whose_transfer_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""
        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(4, scope, self.rse4_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, activity='DebugJudge')[0]

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
        # DATASET grouping keeps the whole dataset on one RSE as well.
        assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        assert(get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

    def test_repair_a_rule_with_missing_locks(self):
        """ JUDGE EVALUATOR: Test the judge when a rule gets STUCK from re_evaluating and there are missing locks"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse4_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)

        # Add a first rule to the DS
        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        attach_dids(scope, dataset, files, self.jdoe)

        # Fake judge
        re_evaluator(once=True)

        # Check if the Locks are created properly
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)

        # Add more files to the DID
        files2 = create_files(3, scope, self.rse4_id)
        attach_dids(scope, dataset, files2, self.jdoe)

        # Mark the rule STUCK to fake that the re-evaluation failed
        session = get_session()
        rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
        rule.state = RuleState.STUCK
        session.commit()

        rule_repairer(once=True)

        # The repairer must create the missing locks on the same RSE pair.
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
        for file in files2:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
            assert(len(set([lock.rse_id for lock in get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])]).intersection(set([lock.rse_id for lock in get_replica_locks(scope=file['scope'], name=file['name'])]))) == 2)
        assert(12 == get_rule(rule_id)['locks_replicating_cnt'])

    def test_repair_a_rule_with_source_replica_expression(self):
        """ JUDGE EVALUATOR: Test the judge when a with two rules with source_replica_expression"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse4_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        # Add a first rule to the DS
        rule_id1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
        rule_id2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, source_replica_expression=self.rse1)[0]

        # rule_id2 is STUCK until its source replicas exist on rse1.
        assert(RuleState.REPLICATING == get_rule(rule_id1)['state'])
        assert(RuleState.STUCK == get_rule(rule_id2)['state'])

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse1_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse1_id, nowait=False)
        successful_transfer(scope=scope, name=files[2]['name'], rse_id=self.rse1_id, nowait=False)
        # Also make replicas AVAILABLE
        session = get_session()
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[0]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[1]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[2]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        session.commit()

        rule_repairer(once=True)

        assert(RuleState.OK == get_rule(rule_id1)['state'])
        assert(RuleState.REPLICATING == get_rule(rule_id2)['state'])

    def test_to_repair_a_rule_with_only_1_rse_whose_transfers_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose transfers failed (lock)"""
        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(4, scope, self.rse4_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        # Cancel outstanding requests so the repairer sees no pending work.
        transfs = cancel_request_did(scope=scope, name=files[2]['name'], dest_rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        cancel_transfers(transfs)
        transfs = cancel_request_did(scope=scope, name=files[3]['name'], dest_rse_id=get_replica_locks(scope=files[3]['scope'], name=files[2]['name'])[0].rse_id)
        cancel_transfers(transfs)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)

        # Stil assert STUCK because of delays:
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        # assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
        # assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

    def test_to_repair_a_rule_with_NONE_grouping_whose_transfer_failed_and_flipping_to_other_rse(self):
        """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer and flip to other rse(lock)"""
        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(4, scope, self.rse4_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

        old_rse_id = get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
        # NONE grouping lets the repairer pick a different target RSE.
        assert(get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id != old_rse_id)

    def test_to_repair_a_rule_with_only_1_rse_whose_site_is_blocklisted(self):
        """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose site is blocklisted"""
        rse = rse_name_generator()
        rse_id = add_rse(rse, **self.vo)
        set_local_account_limit(self.jdoe, rse_id, -1)
        rule_repairer(once=True)  # Clean out the repairer

        region = make_region().configure(
            'dogpile.cache.pymemcache',
            expiration_time=900,
            arguments={'url': config_get('cache', 'url', False, '127.0.0.1:11211'), 'distributed_lock': True}
        )

        def change_availability(new_value):
            # Toggle the RSE's write availability and invalidate the
            # cached RSE record so the daemons see the change.
            update_rse(rse_id, {'availability_write': new_value})
            # clear cache
            region.delete(sha256(rse.encode()).hexdigest())

        # Exercise every grouping with and without ignore_availability.
        for grouping, ignore_availability in itertools.product(["NONE", "DATASET", "ALL"], [True, False]):
            scope = InternalScope('mock', **self.vo)
            files = create_files(1, scope, self.rse4_id, bytes_=100)
            dataset = 'dataset_' + str(uuid())
            add_did(scope, dataset, DIDType.DATASET, self.jdoe)
            attach_dids(scope, dataset, files, self.jdoe)

            if ignore_availability:
                change_availability(False)
                rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=rse, grouping=grouping, weight=None, lifetime=None, locked=False, subscription_id=None, ignore_availability=ignore_availability, activity='DebugJudge')[0]
                assert(RuleState.STUCK == get_rule(rule_id)['state'])
                rule_repairer(once=True)
                # ignore_availability lets the repairer proceed despite
                # the blocklisted site.
                assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
                change_availability(True)
            else:
                rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=rse, grouping=grouping, weight=None, lifetime=None, locked=False, subscription_id=None, ignore_availability=ignore_availability, activity='DebugJudge')[0]
                failed_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])[0].rse_id)
                change_availability(False)
                assert(RuleState.STUCK == get_rule(rule_id)['state'])
                rule_repairer(once=True)
                # Without ignore_availability the rule stays STUCK while
                # the only candidate site is unavailable for writes.
                assert(RuleState.STUCK == get_rule(rule_id)['state'])
                change_availability(True)
                rule_repairer(once=True)
                assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random

from jinja2 import Environment

from .sendmessage import *
from .splunkutils import *
from .timeutils import *
import pytest

env = Environment()

# Raw Aruba ClearPass (CPPM) syslog samples. The {{ mark }}, {{ aruba_time }}
# and {{ host }} placeholders are rendered with jinja2 per test run, so each
# parametrized case sends a uniquely-addressed message.
test_data_cppm = [
    "{{ mark }}{{ aruba_time }} {{ host }} CPPM_System_Events 973 1 0 event_source=SnmpService,level=ERROR,category=Trap,description=Switch IP=10.17.8.67. Ignore v2c trap. Bad security name in trap,action_key=Failed,timestamp=2014-06-03 13:05:30.023+05:30",
    "{{ mark }}{{ aruba_time }} {{ host }} TEST filter 0 1 0 Common.Alerts=WebAuthService: User 'bbb' not present in [Local User Repository](localhost)User 'bbb' not present in ClearPass Lab AD(adisam.arubapoc.local),Common.Alerts-Present=0,Common.Audit-Posture-Token=UNKNOWN,Common.Auth-Type=,Common.Enforcement-Profiles=[Deny Application Access Profile],Common.Error-Code=201,Common.Host-MAC- Address=,Common.Login-Status=REJECT,Common.Monitor-Mode=Enabled,Common.Request-Id=W0000002e-01-533557ec,Common.Request-Timestamp=2014-03- 28 16:37:24.417+05:30,Common.Roles=,Common.Service=EAI ClearPass Identity Provider (SAML IdP Service),Common.Source=Application,Common.System-Posture-Token=UNKNOWN,Common.Username=bbb,WEBAUTH.Auth-Source=,WEBAUTH.Host-IP- Address=127.0.0.1,",
    "{{ mark }}{{ aruba_time }} {{ host }} All Session Log Fields 4 1 0 Common.Alerts-Present=0,Common.Audit-Posture-Token=UNKNOWN,Common.Auth-Type=,Common.Enforcement-Profiles=EAI ClearPass Identity Provider (SAML IdP Service) Profile,Common.Error-Code=0,Common.Host-MAC- Address=,Common.Login-Status=ACCEPT,Common.Monitor-Mode=Disabled,Common.Request-Id=W00000032-01- 5335874b,Common.Request-Timestamp=2014-03- 28 19:59:31.533+05:30,Common.Roles=[Employee], [User Authenticated],Common.Service=EAI ClearPass Identity Provider (SAML IdP Service),Common.Source=Application,Common.System-Posture-Token=UNKNOWN,Common.Username=prem1,WEBAUTH.Auth-Source=[Local User Repository],WEBAUTH.Host-IP- Address=127.0.0.1,",
    "{{ mark }}{{ aruba_time }} {{ host }} All Events 710 1 0 Timestamp=Mar 28, 2014 19:59:39 IST,Source=Endpoint Context Server,Level=ERROR,Category=MaaS360: Communication Error,Action=Failed,Description=Failed to fetch Endpoint details from MaaS360 - verify Proxy settings, Server credentials and retry.",
    "{{ mark }}{{ aruba_time }} {{ host }} All Audits 30 1 0 Timestamp=Mar 28, 2014 16:46:59 IST,Source=All Audits,Category=Syslog Export Data,Action=MODIFY,User=admin",
]


@pytest.mark.parametrize("event", test_data_cppm)
def test_aruba_clearpass_CPPM(
    record_property, setup_wordlist, setup_splunk, setup_sc4s, get_host_key, event
):
    """Send one rendered CPPM sample through SC4S and assert that exactly
    one event lands in Splunk with sourcetype aruba:clearpass."""
    # Unique host per test run so the Splunk search isolates this event.
    host = "aruba-cp-" + get_host_key

    dt = datetime.datetime.now()
    iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)

    # ClearPass timestamp format: "%Y-%m-%d %H:%M:%S,%f" truncated to ms.
    aruba_time = dt.strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
    # Truncate epoch to second+millisecond precision to match _time.
    epoch = epoch[:-3]

    mt = env.from_string(event + "\n")
    message = mt.render(
        mark="<46>", bsd=bsd, host=host, date=date, aruba_time=aruba_time
    )

    # Ship the rendered syslog line to the SC4S listener on UDP/TCP 514.
    sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])

    st = env.from_string(
        'search _time={{ epoch }} index=netops host="{{ host }}" sourcetype="aruba:clearpass"'
    )
    search = st.render(epoch=epoch, host=host)

    resultCount, eventCount = splunk_single(setup_splunk, search)

    record_property("host", host)
    record_property("resultCount", resultCount)
    record_property("message", message)

    assert resultCount == 1
# awx/sso/backends.py
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Python
import logging
import uuid

import ldap
import six

# Django
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.conf import settings as django_settings
from django.core.signals import setting_changed

# django-auth-ldap
from django_auth_ldap.backend import LDAPSettings as BaseLDAPSettings
from django_auth_ldap.backend import LDAPBackend as BaseLDAPBackend
from django_auth_ldap.backend import populate_user

# radiusauth
from radiusauth.backends import RADIUSBackend as BaseRADIUSBackend

# tacacs+ auth
import tacacs_plus

# social
from social_core.backends.saml import OID_USERID
from social_core.backends.saml import SAMLAuth as BaseSAMLAuth
from social_core.backends.saml import SAMLIdentityProvider as BaseSAMLIdentityProvider

# Ansible Tower
from awx.conf.license import feature_enabled
from awx.sso.models import UserEnterpriseAuth

logger = logging.getLogger('awx.sso.backends')


class LDAPSettings(BaseLDAPSettings):
    """LDAP settings container that adds Tower-specific defaults
    (organization/team maps and group-type parameters) on top of the
    django-auth-ldap base settings."""

    # NOTE(review): `dict.items() + dict.items()` only works on Python 2
    # (py3 items() views do not support +) — this module appears to
    # predate a py3 port; confirm the target interpreter.
    defaults = dict(BaseLDAPSettings.defaults.items() + {
        'ORGANIZATION_MAP': {},
        'TEAM_MAP': {},
        'GROUP_TYPE_PARAMS': {},
    }.items())

    def __init__(self, prefix='AUTH_LDAP_', defaults={}):
        super(LDAPSettings, self).__init__(prefix, defaults)

        # If a DB-backed setting is specified that wipes out the
        # OPT_NETWORK_TIMEOUT, fall back to a sane default
        if ldap.OPT_NETWORK_TIMEOUT not in getattr(self, 'CONNECTION_OPTIONS', {}):
            options = getattr(self, 'CONNECTION_OPTIONS', {})
            options[ldap.OPT_NETWORK_TIMEOUT] = 30
            self.CONNECTION_OPTIONS = options


class LDAPBackend(BaseLDAPBackend):
    '''
    Custom LDAP backend for AWX.

    Lazily reloads its settings whenever an ``AUTH_LDAP_*`` Django
    setting changes, gates authentication on the LDAP license feature,
    and disables all LDAP-derived authorization.
    '''

    settings_prefix = 'AUTH_LDAP_'

    def __init__(self, *args, **kwargs):
        # Unique uid so each backend instance can disconnect its own
        # signal handler in __del__.
        self._dispatch_uid = uuid.uuid4()
        super(LDAPBackend, self).__init__(*args, **kwargs)
        setting_changed.connect(self._on_setting_changed,
                                dispatch_uid=self._dispatch_uid)

    def __del__(self):
        setting_changed.disconnect(dispatch_uid=self._dispatch_uid)

    def _on_setting_changed(self, sender, **kwargs):
        # If any AUTH_LDAP_* setting changes, force settings to be reloaded for
        # this backend instance.
        if kwargs.get('setting', '').startswith(self.settings_prefix):
            self._settings = None

    def _get_settings(self):
        # Lazily (re)build the settings object; _on_setting_changed
        # invalidates the cache by setting it to None.
        if self._settings is None:
            self._settings = LDAPSettings(self.settings_prefix)
        return self._settings

    def _set_settings(self, settings):
        self._settings = settings

    settings = property(_get_settings, _set_settings)

    def authenticate(self, username, password):
        """Authenticate against LDAP; returns a User or None.

        Returns None when LDAP is unconfigured, unlicensed, or the
        username already exists locally without an LDAP DN (so local
        accounts are never hijacked by LDAP).
        """
        if self.settings.START_TLS and ldap.OPT_X_TLS_REQUIRE_CERT in self.settings.CONNECTION_OPTIONS:
            # with python-ldap, if you want to set connection-specific TLS
            # parameters, you must also specify OPT_X_TLS_NEWCTX = 0
            # see: https://stackoverflow.com/a/29722445
            # see: https://stackoverflow.com/a/38136255
            self.settings.CONNECTION_OPTIONS[ldap.OPT_X_TLS_NEWCTX] = 0
        if not self.settings.SERVER_URI:
            return None
        if not feature_enabled('ldap'):
            logger.error("Unable to authenticate, license does not support LDAP authentication")
            return None
        try:
            user = User.objects.get(username=username)
            # Existing non-LDAP user: refuse to authenticate it via LDAP.
            if user and (not user.profile or not user.profile.ldap_dn):
                return None
        except User.DoesNotExist:
            pass
        try:
            return super(LDAPBackend, self).authenticate(username, password)
        except Exception:
            # Swallow and log so a broken LDAP server falls through to
            # the next configured auth backend instead of erroring out.
            logger.exception("Encountered an error authenticating to LDAP")
            return None

    def get_user(self, user_id):
        if not self.settings.SERVER_URI:
            return None
        if not feature_enabled('ldap'):
            logger.error("Unable to get_user, license does not support LDAP authentication")
            return None
        return super(LDAPBackend, self).get_user(user_id)

    # Disable any LDAP based authorization / permissions checking.

    def has_perm(self, user, perm, obj=None):
        return False

    def has_module_perms(self, user, app_label):
        return False

    def get_all_permissions(self, user, obj=None):
        return set()

    def get_group_permissions(self, user, obj=None):
        return set()


# Up to five additional, independently-configured LDAP servers; each
# reads its own AUTH_LDAP_<n>_* settings namespace.
class LDAPBackend1(LDAPBackend):
    settings_prefix = 'AUTH_LDAP_1_'


class LDAPBackend2(LDAPBackend):
    settings_prefix = 'AUTH_LDAP_2_'


class LDAPBackend3(LDAPBackend):
    settings_prefix = 'AUTH_LDAP_3_'


class LDAPBackend4(LDAPBackend):
    settings_prefix = 'AUTH_LDAP_4_'


class LDAPBackend5(LDAPBackend):
    settings_prefix = 'AUTH_LDAP_5_'


def _decorate_enterprise_user(user, provider):
    """Mark ``user`` as externally authenticated: no usable local
    password, plus a UserEnterpriseAuth row for ``provider``."""
    user.set_unusable_password()
    user.save()
    enterprise_auth, _ = UserEnterpriseAuth.objects.get_or_create(user=user, provider=provider)
    return enterprise_auth


def _get_or_set_enterprise_user(username, password, provider):
    """Fetch-or-create the enterprise user for ``provider``.

    Returns the User when it was just created or already belongs to this
    provider's enterprise category; otherwise logs a warning and
    (implicitly) returns None so an existing local/other-provider account
    is not taken over.
    """
    created = False
    try:
        user = User.objects.all().prefetch_related('enterprise_auth').get(username=username)
    except User.DoesNotExist:
        user = User(username=username)
        enterprise_auth = _decorate_enterprise_user(user, provider)
        logger.debug("Created enterprise user %s via %s backend." %
                     (username, enterprise_auth.get_provider_display()))
        created = True
    if created or user.is_in_enterprise_category(provider):
        return user
    # NOTE(review): logger.warn is deprecated in favor of logger.warning.
    logger.warn("Enterprise user %s already defined in Tower."
                % username)


class RADIUSBackend(BaseRADIUSBackend):
    '''
    Custom Radius backend to verify license status
    '''

    def authenticate(self, request, username, password):
        if not django_settings.RADIUS_SERVER:
            return None
        if not feature_enabled('enterprise_auth'):
            logger.error("Unable to authenticate, license does not support RADIUS authentication")
            return None
        return super(RADIUSBackend, self).authenticate(request, username, password)

    def get_user(self, user_id):
        if not django_settings.RADIUS_SERVER:
            return None
        if not feature_enabled('enterprise_auth'):
            logger.error("Unable to get_user, license does not support RADIUS authentication")
            return None
        user = super(RADIUSBackend, self).get_user(user_id)
        # Only hand back users with an unusable (i.e. external-only)
        # password; users with a local password implicitly yield None.
        if not user.has_usable_password():
            return user

    def get_django_user(self, username, password=None):
        # Map the RADIUS identity onto a Tower enterprise user.
        return _get_or_set_enterprise_user(username, password, 'radius')


class TACACSPlusBackend(object):
    '''
    Custom TACACS+ auth backend for AWX
    '''

    def authenticate(self, username, password):
        """Authenticate ``username``/``password`` against the configured
        TACACS+ server; returns an enterprise User or None."""
        if not django_settings.TACACSPLUS_HOST:
            return None
        if not feature_enabled('enterprise_auth'):
            logger.error("Unable to authenticate, license does not support TACACS+ authentication")
            return None
        try:
            # Upstream TACACS+ client does not accept non-string, so convert if needed.
            auth = tacacs_plus.TACACSClient(
                django_settings.TACACSPLUS_HOST.encode('utf-8'),
                django_settings.TACACSPLUS_PORT,
                django_settings.TACACSPLUS_SECRET.encode('utf-8'),
                timeout=django_settings.TACACSPLUS_SESSION_TIMEOUT,
            ).authenticate(
                # NOTE(review): '<PASSWORD>' is not a valid codec name —
                # this looks like a redaction artifact; presumably it
                # should be 'utf-8' like the surrounding encodes. Left
                # as-is pending confirmation against upstream.
                username.encode('utf-8'), password.encode('<PASSWORD>'),
                authen_type=tacacs_plus.TAC_PLUS_AUTHEN_TYPES[django_settings.TACACSPLUS_AUTH_PROTOCOL],
            )
        except Exception as e:
            # NOTE(review): e.message is Python 2-only; py3 would raise
            # AttributeError here.
            logger.exception("TACACS+ Authentication Error: %s" % (e.message,))
            return None
        if auth.valid:
            return _get_or_set_enterprise_user(username, password, 'tacacs+')

    def get_user(self, user_id):
        if not django_settings.TACACSPLUS_HOST:
            return None
        if not feature_enabled('enterprise_auth'):
            logger.error("Unable to get user, license does not support TACACS+ authentication")
            return None
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None


class TowerSAMLIdentityProvider(BaseSAMLIdentityProvider):
    '''
    Custom Identity Provider to make attributes to what we expect.
    '''

    def get_user_permanent_id(self, attributes):
        # The permanent id attribute may arrive as a bare string or a
        # single-element list depending on the IdP.
        uid = attributes[self.conf.get('attr_user_permanent_id', OID_USERID)]
        if isinstance(uid, six.string_types):
            return uid
        return uid[0]

    def get_attr(self, attributes, conf_key, default_attribute):
        """
        Get the attribute 'default_attribute' out of the attributes,
        unless self.conf[conf_key] overrides the default by specifying
        another attribute to use.
        """
        key = self.conf.get(conf_key, default_attribute)
        value = attributes[key] if key in attributes else None
        # In certain implementations (like https://pagure.io/ipsilon) this value is a string, not a list
        if isinstance(value, (list, tuple)):
            value = value[0]
        if conf_key in ('attr_first_name', 'attr_last_name', 'attr_username', 'attr_email') and value is None:
            logger.warn("Could not map user detail '%s' from SAML attribute '%s'; "
                        "update SOCIAL_AUTH_SAML_ENABLED_IDPS['%s']['%s'] with the correct SAML attribute.",
                        conf_key[5:], key, self.name, conf_key)
        return six.text_type(value) if value is not None else value


class SAMLAuth(BaseSAMLAuth):
    '''
    Custom SAMLAuth backend to verify license status
    '''

    def get_idp(self, idp_name):
        idp_config = self.setting('ENABLED_IDPS')[idp_name]
        return TowerSAMLIdentityProvider(idp_name, **idp_config)

    def authenticate(self, *args, **kwargs):
        # All SAML SP settings must be present before attempting auth.
        if not all([django_settings.SOCIAL_AUTH_SAML_SP_ENTITY_ID,
                    django_settings.SOCIAL_AUTH_SAML_SP_PUBLIC_CERT,
                    django_settings.SOCIAL_AUTH_SAML_SP_PRIVATE_KEY,
                    django_settings.SOCIAL_AUTH_SAML_ORG_INFO,
                    django_settings.SOCIAL_AUTH_SAML_TECHNICAL_CONTACT,
                    django_settings.SOCIAL_AUTH_SAML_SUPPORT_CONTACT,
                    django_settings.SOCIAL_AUTH_SAML_ENABLED_IDPS]):
            return None
        if not feature_enabled('enterprise_auth'):
            logger.error("Unable to authenticate, license does not support SAML authentication")
            return None
        user = super(SAMLAuth, self).authenticate(*args, **kwargs)
        # Comes from https://github.com/omab/python-social-auth/blob/v0.2.21/social/backends/base.py#L91
        if getattr(user, 'is_new', False):
            _decorate_enterprise_user(user, 'saml')
        elif user and not user.is_in_enterprise_category('saml'):
            # Existing non-SAML account: refuse the login.
            return None
        return user

    def get_user(self, user_id):
        if not all([django_settings.SOCIAL_AUTH_SAML_SP_ENTITY_ID,
                    django_settings.SOCIAL_AUTH_SAML_SP_PUBLIC_CERT,
                    django_settings.SOCIAL_AUTH_SAML_SP_PRIVATE_KEY,
                    django_settings.SOCIAL_AUTH_SAML_ORG_INFO,
                    django_settings.SOCIAL_AUTH_SAML_TECHNICAL_CONTACT,
                    django_settings.SOCIAL_AUTH_SAML_SUPPORT_CONTACT,
                    django_settings.SOCIAL_AUTH_SAML_ENABLED_IDPS]):
            return None
        if not feature_enabled('enterprise_auth'):
            logger.error("Unable to get_user, license does not support SAML authentication")
            return None
        return super(SAMLAuth, self).get_user(user_id)


def _update_m2m_from_groups(user, ldap_user, rel, opts, remove=True):
    '''
    Helper function to update m2m relationship based on LDAP group membership.

    opts semantics: None = no-op; falsy (but not None) = never add;
    True = always add; string or list of group DNs = add when the user
    is a member of any listed group. When ``remove`` is set, users who
    no longer qualify are removed from the relation.
    '''
    should_add = False
    if opts is None:
        return
    elif not opts:
        pass
    elif opts is True:
        should_add = True
    else:
        if isinstance(opts, six.string_types):
            opts = [opts]
        for group_dn in opts:
            if not isinstance(group_dn, six.string_types):
                continue
            if ldap_user._get_groups().is_member_of(group_dn):
                should_add = True
    if should_add:
        rel.add(user)
    elif remove and user in rel.all():
        rel.remove(user)


@receiver(populate_user, dispatch_uid='populate-ldap-user')
def on_populate_user(sender, **kwargs):
    '''
    Handle signal from LDAP backend to populate the user object.  Update user
    organization/team memberships according to their LDAP groups.
    '''
    from awx.main.models import Organization, Team
    user = kwargs['user']
    ldap_user = kwargs['ldap_user']
    backend = ldap_user.backend

    # Prefetch user's groups to prevent LDAP queries for each org/team when
    # checking membership.
    ldap_user._get_groups().get_group_dns()

    # Update organization membership based on group memberships.
    org_map = getattr(backend.settings, 'ORGANIZATION_MAP', {})
    for org_name, org_opts in org_map.items():
        org, created = Organization.objects.get_or_create(name=org_name)
        remove = bool(org_opts.get('remove', True))
        admins_opts = org_opts.get('admins', None)
        remove_admins = bool(org_opts.get('remove_admins', remove))
        _update_m2m_from_groups(user, ldap_user, org.admin_role.members, admins_opts,
                                remove_admins)
        users_opts = org_opts.get('users', None)
        remove_users = bool(org_opts.get('remove_users', remove))
        _update_m2m_from_groups(user, ldap_user, org.member_role.members, users_opts,
                                remove_users)

    # Update team membership based on group memberships.
    team_map = getattr(backend.settings, 'TEAM_MAP', {})
    for team_name, team_opts in team_map.items():
        # Teams without an organization mapping are skipped entirely.
        if 'organization' not in team_opts:
            continue
        org, created = Organization.objects.get_or_create(name=team_opts['organization'])
        team, created = Team.objects.get_or_create(name=team_name, organization=org)
        users_opts = team_opts.get('users', None)
        remove = bool(team_opts.get('remove', True))
        _update_m2m_from_groups(user, ldap_user, team.member_role.members, users_opts,
                                remove)

    # Update user profile to store LDAP DN.
    profile = user.profile
    if profile.ldap_dn != ldap_user.dn:
        profile.ldap_dn = ldap_user.dn
        profile.save()
# scapy/contrib/ppi_cace.py
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.

# author: <<EMAIL>>

# scapy.contrib.description = Parallel Peripheral Interface CACE (PPI CACE)
# scapy.contrib.status = loads

"""
CACE PPI types
"""
from scapy.packet import Packet
from scapy.fields import ByteField, Field, FlagsField, LELongField, \
    LEShortField
from scapy.layers.ppi import addPPIType

# PPI field-type identifiers assigned by CACE.
PPI_DOT11COMMON = 2
PPI_DOT11NMAC = 3
PPI_DOT11NMACPHY = 4
PPI_SPECTRUMMAP = 5
PPI_PROCESSINFO = 6
PPI_CAPTUREINFO = 7
PPI_AGGREGATION = 8
PPI_DOT3 = 9

# PPI 802.11 Common Field Header Fields


class dBmByteField(Field):
    """Signed byte rendered as a dBm power value in packet displays."""

    def __init__(self, name, default):
        # "b" = signed char struct format.
        Field.__init__(self, name, default, "b")

    def i2repr(self, pkt, val):
        if (val is not None):
            val = "%4d dBm" % val
        return val


class PPITSFTField(LELongField):
    """TSF timer stored as a little-endian 64-bit count.

    The unit depends on the sibling Pkt_Flags field: bit 0x02 set means
    the counter is in milliseconds, otherwise microseconds. i2h converts
    the raw count to seconds; h2i converts seconds back to a raw count.
    """

    def i2h(self, pkt, val):
        flags = 0
        if (pkt):
            flags = pkt.getfieldval("Pkt_Flags")
        if not flags:
            flags = 0
        if (flags & 0x02):
            scale = 1e-3
        else:
            scale = 1e-6
        tout = scale * float(val)
        return tout

    def h2i(self, pkt, val):
        scale = 1e6
        if pkt:
            flags = pkt.getfieldval("Pkt_Flags")
            if flags:
                if (flags & 0x02):
                    scale = 1e3
        # +0.5 rounds to the nearest integer tick.
        tout = int((scale * val) + 0.5)
        return tout


# Bit names for the channel-flags and packet-flags bitfields below.
_PPIDot11CommonChFlags = ['', '', '', '', 'Turbo', 'CCK', 'OFDM', '2GHz', '5GHz',  # noqa: E501
                          'PassiveOnly', 'Dynamic CCK-OFDM', 'GSFK']

_PPIDot11CommonPktFlags = ['FCS', 'TSFT_ms', 'FCS_Invalid', 'PHY_Error']


# PPI 802.11 Common Field Header
class Dot11Common(Packet):
    """PPI 802.11-Common field: fixed 20-byte radio metadata header."""
    name = "PPI 802.11-Common"
    fields_desc = [LEShortField('pfh_type', PPI_DOT11COMMON),
                   LEShortField('pfh_length', 20),
                   PPITSFTField('TSF_Timer', 0),
                   FlagsField('Pkt_Flags', 0, -16, _PPIDot11CommonPktFlags),
                   LEShortField('Rate', 0),
                   LEShortField('Ch_Freq', 0),
                   FlagsField('Ch_Flags', 0, -16, _PPIDot11CommonChFlags),
                   ByteField('FHSS_Hop', 0),
                   ByteField('FHSS_Pat', 0),
                   dBmByteField('Antsignal', -128),
                   dBmByteField('Antnoise', -128)]

    def extract_padding(self, p):
        # Fixed-size header: everything after it belongs to the next layer.
        return b"", p

# Hopefully other CACE defined types will be added here.


# Add the dot11common layer to the PPI array
addPPIType(PPI_DOT11COMMON, Dot11Common)
# PrincetonUniversity/mcpib
#
# Copyright (c) 2014, <NAME>
# All rights reserved.
#
# mcpib is distributed under a simple BSD-like license;
# see the LICENSE file that should be present in the root
# of the source distribution.
#

import unittest
import os
import sys

# Prefer the in-tree build of the python package when running from a
# source checkout.
buildPythonPath = os.path.join(os.path.split(__file__)[0], "..", "python")
if os.path.exists(buildPythonPath):
    sys.path.insert(0, buildPythonPath)

import mcpib
import builtin_numeric_mod as mod

# NOTE(review): the use of `long(...)` below is Python 2-only; this test
# suite appears to target py2 — confirm before running under py3.


class BuiltinNumericTestCase(unittest.TestCase):
    """Exercises mcpib's converters for the built-in C++ numeric types."""

    def checkModifiedTypes(self, name, value):
        """Test pointer and references to numeric types.

        Const pointers and references should both be converted from Python to C++,
        while non-const pointers and references should not (because those imply that
        the function may modify them, which we can't support in Python).

        No pointers or references should be converted from C++ to Python, since
        we don't know how the user would want us to manage their lifetimes.
        """
        self.assertRaises(mcpib.FromPythonError, getattr(mod, "accept_%s_ref" % name), value)
        self.assertEqual(getattr(mod, "accept_%s_cref" % name)(value), None)
        self.assertRaises(mcpib.FromPythonError, getattr(mod, "accept_%s_ptr" % name), value)
        self.assertEqual(getattr(mod, "accept_%s_cptr" % name)(value), None)
        self.assertRaises(mcpib.ToPythonError, getattr(mod, "return_%s_ref" % name))
        self.assertRaises(mcpib.ToPythonError, getattr(mod, "return_%s_cref" % name))
        self.assertRaises(mcpib.ToPythonError, getattr(mod, "return_%s_ptr" % name))
        self.assertRaises(mcpib.ToPythonError, getattr(mod, "return_%s_cptr" % name))

    def checkInteger(self, minimum, maximum, name):
        """Round-trip an integer type and check its full range plus
        overflow behaviour one past each bound."""
        func = getattr(mod, "passthru_%s" % name)
        self.assertEqual(func(int(5)), 5)
        # bool is an int subclass and must pass through unchanged.
        self.assertEqual(func(True), True)
        self.assertEqual(func(long(minimum)), long(minimum))
        self.assertEqual(func(long(maximum)), long(maximum))
        self.assertRaises(OverflowError, func, maximum + 1)
        self.assertRaises(OverflowError, func, minimum - 1)
        self.assertRaises(mcpib.FromPythonError, func, "5")
        self.checkModifiedTypes(name, 4)

    def testIntegers(self):
        self.checkInteger(-2**(mod.bits_signed_char - 1), 2**(mod.bits_signed_char - 1) - 1,
                          name="signed_char")
        self.assertEqual(type(mod.passthru_signed_char(5)), int)
        self.checkInteger(0, 2**mod.bits_unsigned_char - 1, name="unsigned_char")
        self.assertEqual(type(mod.passthru_unsigned_char(5)), int)
        for t in ("int", "short", "long", "long_long"):
            bits = getattr(mod, "bits_%s" % t)
            self.checkInteger(-2**(bits - 1), 2**(bits - 1) - 1, t)
            self.checkInteger(0, 2**bits - 1, "unsigned_%s" % t)
            passthru_signed = getattr(mod, "passthru_%s" % t)
            passthru_unsigned = getattr(mod, "passthru_unsigned_%s" % t)
            # The Python-side return type depends on whether the C type
            # fits in a C long (py2 int) or needs a py2 long.
            if bits < mod.bits_long:
                self.assertEqual(type(passthru_signed(5)), int)
                self.assertEqual(type(passthru_unsigned(5)), int)
            elif bits > mod.bits_long:
                self.assertEqual(type(passthru_signed(5)), long)
                self.assertEqual(type(passthru_unsigned(5)), long)
            else:
                # Same width as long: unsigned values may exceed a
                # signed long, so they come back as py2 long.
                self.assertEqual(type(passthru_signed(5)), int)
                self.assertEqual(type(passthru_unsigned(5)), long)

    def checkFloat(self, name):
        """Round-trip a floating-point type, including bool coercion."""
        func = getattr(mod, "passthru_%s" % name)
        self.assertEqual(func(2.5), 2.5)
        self.assertEqual(func(1.0), 1.0)
        self.assertEqual(func(True), 1.0)
        self.assertRaises(mcpib.FromPythonError, func, "5")
        self.checkModifiedTypes(name, 1.5)

    def testFloats(self):
        for name in ("float", "double", "long_double"):
            self.checkFloat(name)


if __name__ == "__main__":
    unittest.main()
import torch
from fastprogress.fastprogress import master_bar, progress_bar
import numpy as np
from time import time
from time import strftime, gmtime
import pdb  # NOTE(review): unused import

__all__ = ["LearnerCallback", "Learner"]


class LearnerCallback():
    """No-op callback base class: subclass and override the hooks you
    need; Learner calls every hook at the corresponding point of the
    training loop."""

    def get_metric_names(self):
        # Names of the per-epoch metrics returned by on_epoch_end.
        return []

    def on_train_begin(self):
        return

    def on_train_end(self):
        return

    def on_epoch_begin(self):
        return

    def on_epoch_end(self):
        return

    def on_batch_begin(self, features, target, train):
        return

    def on_batch_end(self, output, target, train):
        return


class Learner:
    """Minimal fastai-style training harness binding a model, data
    loaders, optimizer and loss function, with optional per-epoch
    sample prediction / checkpoint callbacks and early stopping.

    NOTE(review): the parameter names ``predict_smaple_func`` (sample),
    ``one_cylce`` (cycle) and ``parrent_bar`` (parent) are misspelled but
    part of the public interface, so they are documented as-is.
    """

    def __init__(self, model, loss_func, train_dl, valid_dl, optimizer, learner_callback=None,
                 gpu_id=0, predict_smaple_func=None, masks_and_hulls=True, save_func=None,
                 stacked_net=False, early_stopping_metric=None):
        # NOTE(review): assumes a CUDA device is available — no CPU fallback.
        self.device = torch.device("cuda:"+str(gpu_id))
        # Optional per-epoch hooks; default to no-ops.
        self.predict_smaple_func = predict_smaple_func if predict_smaple_func is not None else lambda epoch : None
        self.save_func = save_func if save_func is not None else lambda epoch : None
        # (metric_name, threshold): stop when the metric exceeds threshold.
        self.early_stopping_metric = early_stopping_metric
        self.model = model
        self.train_dl = train_dl
        self.valid_dl = valid_dl
        self.optimizer = optimizer
        self.loss_func = loss_func
        self.learner_callback = learner_callback if learner_callback is not None else LearnerCallback()
        self.metric_names = self.learner_callback.get_metric_names()
        # Populated by fit().
        self.metrics = None
        self.train_losses = None
        self.valid_losses = None
        # When True, batches are (names, x, y, masks, hulls) 5-tuples and
        # the loss function takes masks/hulls; otherwise 3-tuples.
        self.masks_and_hulls = masks_and_hulls
        # When True, the model returns a sequence of outputs (stacked
        # hourglass style) whose losses are summed.
        self.stacked_net = stacked_net

    def set_loss_func(self, loss_func):
        self.loss_func = loss_func

    def loss_batch(self, x_data, y_data, masks, hulls, scheduler=None, is_train=True):
        "Calculate loss and metrics for a batch."
        self.learner_callback.on_batch_begin(x_data, y_data, train=is_train)
        # NOTE(review): zero_grad is also called in eval mode; harmless
        # under torch.no_grad but redundant.
        self.optimizer.zero_grad()
        out = self.model(x_data)
        if self.masks_and_hulls:
            if self.stacked_net:
                # Sum the loss across every intermediate stack output.
                loss = sum(self.loss_func(o, y_data, masks, hulls) for o in out)
            else:
                loss = self.loss_func(out, y_data, masks, hulls)
        else:
            if self.stacked_net:
                # NOTE(review): passes out[1].shape rather than y_data —
                # looks intentional for this loss signature but confirm
                # against the loss function used with stacked nets.
                loss = sum(self.loss_func(o, out[1].shape) for o in out)
            else:
                loss = self.loss_func(out, y_data)
        if is_train:
            loss.backward()
            self.optimizer.step()
            # OneCycleLR steps per batch, not per epoch.
            if scheduler is not None:
                scheduler.step()
        if self.stacked_net:
            # Report only the final stack output to the callback.
            self.learner_callback.on_batch_end(out[-1].detach().cpu(), y_data.detach().cpu(), train=is_train)
        else:
            self.learner_callback.on_batch_end(out.detach().cpu(), y_data.detach().cpu(), train=is_train)
        return float(loss.detach().cpu())

    def validate(self, parrent_bar):
        "Calculate `loss_func` in evaluation mode."
        self.model.eval()
        with torch.no_grad():
            val_losses = []
            for data_zip in progress_bar(self.valid_dl, parent=parrent_bar):
                if self.masks_and_hulls:
                    names,x_data,y_data, masks, hulls = (data_zip[0],data_zip[1],data_zip[2],data_zip[3],data_zip[4])
                    masks = masks.to(self.device)
                    # Broadcast hulls across the mask channel dimension.
                    hulls = hulls[:,None].repeat(1,masks.shape[1],1,1).to(self.device)
                else:
                    names,x_data,y_data,masks,hulls = (data_zip[0],data_zip[1],data_zip[2],None,None)
                x_data = x_data.to(self.device)
                y_data = y_data.to(self.device)
                val_loss = self.loss_batch(x_data, y_data, masks, hulls, is_train=False)
                parrent_bar.child.comment = str(round(val_loss,4))
                val_losses.append(val_loss)
        return np.array(val_losses).mean()

    def get_losses(self, train=True, valid=True):
        """Return recorded per-epoch losses as numpy arrays: one array if
        only train or only valid is requested, else [train, valid]."""
        assert (train or valid), "train or valid must be True"
        losses = []
        if train:
            losses.append(np.array(self.train_losses))
        if valid:
            losses.append(np.array(self.valid_losses))
        return losses[0] if len(losses) == 1 else losses

    def predict(self, inp, eval_mode=True):
        """Run a forward pass; in eval mode gradients are disabled."""
        if eval_mode:
            self.model.eval()
            with torch.no_grad():
                return self.model(inp)
        else:
            return self.model(inp)

    def get_metrics(self):
        return np.array(self.metrics)

    def fit(self, epochs, one_cylce=True):
        """Train for ``epochs`` epochs, validating after each.

        Records train/valid losses and callback metrics, updates the
        fastprogress table/graph, and optionally stops early when
        ``early_stopping_metric[0]`` exceeds ``early_stopping_metric[1]``.
        """
        assert self.loss_func is not None, "loss function not definied!"
        self.train_losses = []
        self.valid_losses = []
        self.metrics = []
        # One-cycle policy peaks at 10x the configured base LR.
        scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer,
                                                        max_lr=self.optimizer.param_groups[0]["lr"]*10,
                                                        steps_per_epoch=len(self.train_dl),
                                                        epochs=epochs) if one_cylce else None
        pbar = master_bar(range(epochs))
        pbar.write(["Epoch","train_loss","valid_loss"] + self.metric_names + ["time"], table=True)
        self.learner_callback.on_train_begin()
        Stop = False
        for epoch in pbar:
            if Stop:
                break
            t1 = time()
            self.model.train()
            self.learner_callback.on_epoch_begin()
            for data_zip in progress_bar(self.train_dl, parent=pbar):
                if self.masks_and_hulls:
                    names,x_data,y_data, masks, hulls = (data_zip[0],data_zip[1],data_zip[2],data_zip[3],data_zip[4])
                    masks = masks.to(self.device)
                    hulls = hulls[:,None].repeat(1,masks.shape[1],1,1).to(self.device)
                else:
                    names,x_data,y_data,masks,hulls = (data_zip[0],data_zip[1],data_zip[2],None,None)
                x_data = x_data.to(self.device)
                y_data = y_data.to(self.device)
                train_loss = self.loss_batch(x_data, y_data, masks, hulls, scheduler=scheduler, is_train=True)
                pbar.child.comment = str(round(train_loss,4))
            valid_loss = self.validate(parrent_bar=pbar)
            # NOTE(review): only the LAST batch's train loss is recorded
            # per epoch, not an epoch average.
            self.train_losses.append(train_loss)
            self.valid_losses.append(valid_loss)
            self.predict_smaple_func(epoch)
            self.save_func(epoch)
            met = self.learner_callback.on_epoch_end()
            t2 = time()
            metrics_table = []
            if met is not None:
                self.metrics.append(met)
                for name,value in zip(self.metric_names, met):
                    metrics_table.append(f'{round(value,4):.4f}')
            # The elapsed-time column is always the last table entry.
            metrics_table.append(strftime('%M:%S', gmtime(round(t2-t1))))
            graphs = [[np.arange(len(self.train_losses)),np.array(self.train_losses)],
                      [np.arange(len(self.valid_losses)),np.array(self.valid_losses)]]
            pbar.update_graph(graphs, [0,epochs], [0,np.array([self.train_losses,self.valid_losses]).max()])
            pbar.write([f'{epoch:04d}',f'{round(train_loss,4):.4f}',f'{round(valid_loss,4):.4f}'] + metrics_table, table=True)
            if self.early_stopping_metric is not None:
                idx = self.metric_names.index(self.early_stopping_metric[0])
                # Compare against the formatted table value (string->float).
                if float(metrics_table[idx])>self.early_stopping_metric[1]:
                    Stop=True
        self.learner_callback.on_train_end()
from baconian.test.tests.set_up.setup import TestWithAll
from baconian.common.logging import Logger, ConsoleLogger, Recorder, record_return_decorator
import numpy as np
from baconian.core.core import Basic, EnvSpec
from baconian.algo.dqn import DQN
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent


class Foo(Basic):
    """Minimal Basic subclass used as a recording target in the tests
    below: carries its own Recorder and a decorated method whose return
    values get logged."""

    def __init__(self, name='foo'):
        super().__init__(name=name)
        self.loss = 1.0
        self.recorder = Recorder(flush_by_split_status=False, default_obj=self)

    def get_status(self):
        return dict(x=1)

    def get_val(self):
        # Non-deterministic value; tests only check that it is recorded.
        return np.random.random()

    @record_return_decorator(which_recorder='self')
    def get_by_return(self, res, num=2, *args, **kwargs):
        # The returned dict keys become recorded attribute names.
        return dict(val=res * num, val2=res)

    @property
    def name(self):
        # NOTE(review): always returns 'foo', ignoring the name passed to
        # __init__ — the Foo(name='foo2') instance below still reports
        # 'foo'. Presumably intentional for these tests; confirm.
        return 'foo'


class TestLogger(TestWithAll):
    """Unit tests for Recorder registration and return-value recording."""

    def test_register(self):
        # Attributes can be registered either via a getter (dynamic) or
        # as a static attribute read.
        obj = Foo()
        a = Recorder(flush_by_split_status=False, default_obj=obj)
        a.register_logging_attribute_by_record(obj=obj, attr_name='val',
                                               get_method=lambda x: x['obj'].get_val(),
                                               static_flag=False)
        a.register_logging_attribute_by_record(obj=obj, attr_name='loss', static_flag=True)
        a.record()
        print(a._obj_log)
        self.assertTrue('val' in a._obj_log[obj])
        self.assertTrue('loss' in a._obj_log[obj])
        obj.loss = 10.0
        a.record()
        # A second Recorder must keep fully independent logs/registries.
        b = Recorder(flush_by_split_status=False, default_obj=obj)
        b.register_logging_attribute_by_record(obj=obj, attr_name='val',
                                               get_method=lambda x: x['obj'].get_val(),
                                               static_flag=False)
        b.register_logging_attribute_by_record(obj=obj, attr_name='loss', static_flag=True)
        b.record()
        self.assertTrue('val' in b._obj_log[obj])
        self.assertTrue('loss' in b._obj_log[obj])
        obj.loss = 10.0
        b.record()
        self.assertTrue(b._obj_log is not a._obj_log)
        self.assertTrue(b._registered_log_attr_by_get_dict is not a._registered_log_attr_by_get_dict)

    def test_return_record(self):
        # Each decorated call appends one entry per returned dict key.
        obj = Foo(name='foo')
        obj.get_by_return(res=10, num=2)
        obj.get_by_return(res=1, num=2)
        obj.get_by_return(res=2, num=4)
        print(obj.recorder._obj_log)
        self.assertEqual(len(obj.recorder._obj_log), 1)
        self.assertTrue(obj in obj.recorder._obj_log)
        self.assertTrue('val' in obj.recorder._obj_log[obj])
        self.assertTrue(len(obj.recorder._obj_log[obj]['val']) == 3)
        # val = res * num for each call.
        self.assertTrue(obj.recorder._obj_log[obj]['val'][0]['value'] == 20)
        self.assertTrue(obj.recorder._obj_log[obj]['val'][1]['value'] == 2)
        self.assertTrue(obj.recorder._obj_log[obj]['val'][2]['value'] == 8)
        self.assertTrue('val2' in obj.recorder._obj_log[obj])
        self.assertTrue(len(obj.recorder._obj_log[obj]['val2']) == 3)
        # val2 = res (unmultiplied).
        self.assertTrue(obj.recorder._obj_log[obj]['val2'][0]['value'] == 10)
        self.assertTrue(obj.recorder._obj_log[obj]['val2'][1]['value'] == 1)
        self.assertTrue(obj.recorder._obj_log[obj]['val2'][2]['value'] == 2)
        # A fresh instance must start from an empty log.
        obj = Foo(name='foo2')
        obj.get_by_return(res=10, num=2)
        obj.get_by_return(res=1, num=2)
        obj.get_by_return(res=2, num=4)
        print(obj.recorder._obj_log)
        self.assertTrue(obj in obj.recorder._obj_log)
        self.assertTrue('val' in obj.recorder._obj_log[obj])
        self.assertTrue(len(obj.recorder._obj_log[obj]['val']) == 3)
        self.assertTrue(obj.recorder._obj_log[obj]['val'][0]['value'] == 20)
        self.assertTrue(obj.recorder._obj_log[obj]['val'][1]['value'] == 2)
        self.assertTrue(obj.recorder._obj_log[obj]['val'][2]['value'] == 8)
        self.assertTrue('val2' in obj.recorder._obj_log[obj])
        self.assertTrue(len(obj.recorder._obj_log[obj]['val2']) == 3)
        self.assertTrue(obj.recorder._obj_log[obj]['val2'][0]['value'] == 10)
        self.assertTrue(obj.recorder._obj_log[obj]['val2'][1]['value'] == 1)
        self.assertTrue(obj.recorder._obj_log[obj]['val2'][2]['value'] == 2)


class TesTLoggerWithDQN(TestWithAll):
    """End-to-end recording test: DQN training losses and agent rewards
    must land in their recorders and be retrievable via get_log."""

    def test_integration_with_dqn(self):
        env = make('Acrobot-v1')
        env_spec = EnvSpec(obs_space=env.observation_space,
                           action_space=env.action_space)
        mlp_q = MLPQValueFunction(env_spec=env_spec,
                                  name='mlp_q',
                                  name_scope='mlp_q',
                                  mlp_config=[
                                      {
                                          "ACT": "RELU",
                                          "B_INIT_VALUE": 0.0,
                                          "NAME": "1",
                                          "N_UNITS": 16,
                                          "TYPE": "DENSE",
                                          "W_NORMAL_STDDEV": 0.03
                                      },
                                      {
                                          "ACT": "LINEAR",
                                          "B_INIT_VALUE": 0.0,
                                          "NAME": "OUPTUT",
                                          "N_UNITS": 1,
                                          "TYPE": "DENSE",
                                          "W_NORMAL_STDDEV": 0.03
                                      }
                                  ])
        dqn = DQN(env_spec=env_spec,
                  name='dqn_test',
                  config_or_config_dict=dict(REPLAY_BUFFER_SIZE=1000,
                                             GAMMA=0.99,
                                             BATCH_SIZE=10,
                                             LEARNING_RATE=0.001,
                                             TRAIN_ITERATION=1,
                                             DECAY=0.5),
                  value_func=mlp_q)
        agent = Agent(env=env, env_spec=env_spec, algo=dqn, name='agent')
        agent.init()
        # dqn.init()
        st = env.reset()
        from baconian.common.sampler.sample_data import TransitionData
        a = TransitionData(env_spec)
        res = []
        # Two TRAIN-status sampling rounds populate the replay buffer and
        # the agent's sum_reward log.
        agent.sample(env=env,
                     sample_count=100,
                     in_which_status='TRAIN',
                     store_flag=True,
                     sample_type='transition')
        agent.sample(env=env,
                     sample_count=100,
                     in_which_status='TRAIN',
                     store_flag=True,
                     sample_type='transition')
        # Train once from the (empty) explicit batch and once from the
        # internal buffer; both losses must be recorded.
        res.append(dqn.train(batch_data=a, train_iter=10, sess=None, update_target=True)['average_loss'])
        res.append(dqn.train(batch_data=None, train_iter=10, sess=None, update_target=True)['average_loss'])
        self.assertTrue(dqn in dqn.recorder._obj_log)
        self.assertTrue('average_loss' in dqn.recorder._obj_log[dqn])
        self.assertTrue(len(dqn.recorder._obj_log[dqn]['average_loss']) == 2)
        self.assertTrue(
            np.equal(np.array(res), [x['value'] for x in dqn.recorder._obj_log[dqn]['average_loss']]).all())
        # The global Logger singleton must know about the DQN's recorder.
        self.assertTrue(len(Logger()._registered_recorders) > 0)
        self.assertTrue(dqn.recorder in Logger()._registered_recorders)

        res = dqn.recorder.get_log(attr_name='average_loss', filter_by_status=dict())
        self.assertEqual(len(res), 2)
        # Rewards were only gathered under TRAIN status.
        res = agent.recorder.get_log(attr_name='sum_reward', filter_by_status={'status': 'TRAIN'})
        self.assertEqual(len(res), 2)
        res = agent.recorder.get_log(attr_name='sum_reward', filter_by_status={'status': 'TEST'})
        self.assertEqual(len(res), 0)
        Logger().flush_recorder()

    def test_console_logger(self):
        # ConsoleLogger is a singleton: every construction returns the
        # same initialized instance.
        self.assertTrue(ConsoleLogger().inited_flag)
        logger = ConsoleLogger()
        self.assertTrue(logger.inited_flag)
        logger.print('info', 'this is for test %s', 'args')
        logger.print('info', 'this is for test {}'.format('args'))
        logger2 = ConsoleLogger()
        self.assertEqual(id(logger), id(logger2))
        logger.flush()


if __name__ == '__main__':
    import unittest
    unittest.main()
# pizzapi/order.py
import requests

from .menu import Menu
from .urls import Urls, COUNTRY_USA


# TODO: Add add_coupon and remove_coupon methods
class Order(object):
    """Core interface to the payments API.

    The Order is perhaps the second most complicated class - it wraps up all
    the logic for actually placing the order, after we've determined what
    we want from the Menu.
    """

    def __init__(self, store, customer, address, country=COUNTRY_USA):
        self.store = store
        self.menu = Menu.from_store(store_id=store.id, country=country)
        self.customer = customer
        self.address = address
        self.urls = Urls(country)
        # Skeleton of the JSON payload Domino's expects; line items are
        # appended by add_item/add_coupon and the remaining customer
        # fields are filled in by _send().
        self.data = {
            'Address': {'Street': self.address.street,
                        'City': self.address.city,
                        'Region': self.address.region,
                        'PostalCode': self.address.zip,
                        'Type': 'House'},
            'Coupons': [], 'CustomerID': '', 'Extension': '',
            'OrderChannel': 'OLO', 'OrderID': '', 'NoCombine': True,
            'OrderMethod': 'Web', 'OrderTaker': None, 'Payments': [],
            'Products': [], 'Market': '', 'Currency': '',
            'ServiceMethod': 'Delivery', 'Tags': {}, 'Version': '1.0',
            'SourceOrganizationURI': 'order.dominos.com', 'LanguageCode': 'en',
            'Partners': {}, 'NewUser': True, 'metaData': {}, 'Amounts': {},
            'BusinessDate': '', 'EstimatedWaitMinutes': '',
            'PriceOrderTime': '', 'AmountsBreakdown': {}
        }

    # TODO: Implement item options
    # TODO: Add exception handling for KeyErrors
    def add_item(self, code, qty=1, options=None):
        """Add ``qty`` of menu variant ``code`` to the order.

        ``options`` is accepted for future use and currently ignored
        (default was a shared mutable list; fixed to None).
        Returns the line-item dict that was appended.
        """
        # Fix: copy the menu entry so repeated add/remove cycles don't
        # mutate the shared Menu.variants data in place.
        item = dict(self.menu.variants[code])
        item.update(ID=1, isNew=True, Qty=qty, AutoRemove=False)
        self.data['Products'].append(item)
        return item

    # TODO: Raise Exception when index isn't found
    def remove_item(self, code):
        """Remove and return the first product whose Code equals ``code``.

        Raises ValueError (from list.index) when no such product exists.
        """
        codes = [x['Code'] for x in self.data['Products']]
        return self.data['Products'].pop(codes.index(code))

    def add_coupon(self, code, qty=1):
        """Add coupon ``code`` to the order; mirrors add_item."""
        item = dict(self.menu.variants[code])
        item.update(ID=1, isNew=True, Qty=qty, AutoRemove=False)
        self.data['Coupons'].append(item)
        return item

    def remove_coupon(self, code):
        """Remove and return the first coupon whose Code equals ``code``."""
        codes = [x['Code'] for x in self.data['Coupons']]
        return self.data['Coupons'].pop(codes.index(code))

    def _send(self, url, merge):
        """POST the order payload to ``url`` and return the parsed JSON.

        When ``merge`` is true, non-empty values from the API response's
        'Order' object are merged back into self.data.
        Raises for HTTP errors and for a payload missing Products,
        StoreID or Address.
        """
        self.data.update(
            StoreID=self.store.id,
            Email=self.customer.email,
            FirstName=self.customer.first_name,
            LastName=self.customer.last_name,
            Phone=self.customer.phone,
        )
        # Address=self.address.street

        for key in ('Products', 'StoreID', 'Address'):
            if key not in self.data or not self.data[key]:
                raise Exception('order has invalid value for key "%s"' % key)

        headers = {
            'Referer': 'https://order.dominos.com/en/pages/order/',
            'Content-Type': 'application/json',
        }

        r = requests.post(url=url, headers=headers, json={'Order': self.data})
        r.raise_for_status()
        json_data = r.json()

        if merge:
            for key, value in json_data['Order'].items():
                # Skip empty lists so we don't wipe Products/Coupons.
                if value or not isinstance(value, list):
                    self.data[key] = value
        return json_data

    # TODO: Figure out if this validates anything that self.urls.price_url() does not
    def validate(self):
        """Ask the API to validate the order; True unless Status == -1."""
        response = self._send(self.urls.validate_url(), True)
        return response['Status'] != -1

    # TODO: Actually test this
    def place(self, card=False):
        """Price/pay for the order, then place it for real."""
        self.pay_with(card)
        response = self._send(self.urls.place_url(), False)
        return response

    # TODO: Add self.price() and update whenever called and items were changed
    def pay_with(self, card=False):
        """Use this instead of self.place when testing.

        Prices the order; pays cash for any falsy ``card`` (fix: was
        ``card == False``, which crashed with AttributeError when callers
        passed None), otherwise pays with the given credit-card object.
        Raises Exception when the pricing call reports Status == -1.
        """
        # Get the price to check that everything worked okay.
        response = self._send(self.urls.price_url(), True)
        if response['Status'] == -1:
            raise Exception('get price failed: %r' % response)

        if not card:
            self.data['Payments'] = [{'Type': 'Cash'}]
        else:
            self.data['Payments'] = [{
                'Type': 'CreditCard',
                'Expiration': card.expiration,
                'Amount': self.data['Amounts'].get('Customer', 0),
                'CardType': card.card_type,
                'Number': int(card.number),
                'SecurityCode': int(card.cvv),
                'PostalCode': int(card.zip),
            }]
        return response
import json
import os
from copy import copy
from PyQt5 import QtCore
from PyQt5.QtWidgets import (
    QPushButton, QButtonGroup, QVBoxLayout, QGroupBox, QGridLayout,
    QCheckBox, QComboBox, QScrollArea, QTabBar, QHBoxLayout, QRadioButton)
from PyQt5.QtCore import Qt, pyqtSignal
from python.gui.widgets.worker import Worker
from python.zaf_plus.fishfeed import run


class ProgramTab(QTabBar):
    """One tab describing a feeding/washing program.

    Holds the program configuration (``program_settings``), mirrors it to a
    per-program JSON file on every change, and can run the program in a
    background worker thread.
    """

    # Emitted to ask a running worker to stop early.
    early_stop_signal = pyqtSignal()

    def __init__(self, parent, name=None):
        """Build the full tab UI and write the initial settings JSON.

        parent: the owning tab widget; expected to expose ``tabs``, ``cron``,
                ``log_tab``, ``status_bar``, ``threadpool`` and
                ``update_crontab_job`` (assumed from usage — TODO confirm).
        name:   program name; also used as the JSON file stem.
        """
        super().__init__()
        self.parent = parent
        self.name = name
        self.cron_job = None
        self.layout = QHBoxLayout()
        self.setMovable(True)
        self.is_running = False
        self.is_enabled_checkbox = QCheckBox(self.name)
        self.num_tanks = 30
        self.num_quantity = ["1", "2", "3", "4"]
        # Canonical program state; serialized verbatim by update_json().
        self.program_settings = {
            "Program_name": self.name,
            "Enabled": self.is_enabled_checkbox.isChecked(),
            "Type": "Feeding and washing",
            "Day": None,
            "Time": None,
            "Tanks": [None] * self.num_tanks
        }
        # Snapshot of the pristine settings, used by the Reset button.
        program_default = copy(self.program_settings)
        # Layout left
        self.left_layout = QVBoxLayout()
        self.left_layout.setAlignment(Qt.AlignTop)
        self.button_startstop = QPushButton("Run", self)
        self.button_startstop.setFixedSize(QtCore.QSize(100, 35))
        self.button_startstop.setCheckable(True)
        self.button_startstop.clicked.connect(lambda: self.start_program())
        self.button_reset = QPushButton("Reset", self)
        self.button_reset.setFixedSize(QtCore.QSize(100, 35))
        self.button_reset.clicked.connect(lambda: self.reset(program_default))
        self.button_reset.clicked.connect(lambda: self.repaint())
        self.button_duplicate = QPushButton("Duplicate", self)
        self.button_duplicate.setFixedSize(QtCore.QSize(100, 35))
        self.button_duplicate.clicked.connect(lambda: self.duplicate())
        self.button_delete = QPushButton("Delete", self)
        self.button_delete.setFixedSize(QtCore.QSize(100, 35))
        self.button_delete.clicked.connect(lambda: self.delete_tab())
        self.first_button_row_layout = QHBoxLayout()
        self.first_button_row_layout.setAlignment(Qt.AlignLeft)
        self.first_button_row_layout.addWidget(self.button_startstop)
        self.first_button_row_layout.addWidget(self.button_reset)
        self.second_button_row_layout = QHBoxLayout()
        self.second_button_row_layout.setAlignment(Qt.AlignLeft)
        self.second_button_row_layout.addWidget(self.button_duplicate)
        self.second_button_row_layout.addWidget(self.button_delete)
        self.left_layout.addLayout(self.first_button_row_layout)
        self.left_layout.addLayout(self.second_button_row_layout)
        # Create a button group for feed & washing
        self.bgroup1_1 = QButtonGroup(self)
        # self.bgroup1_1.setExclusive(False)
        self.button_feeding = QRadioButton("Feeding and washing", self)
        self.button_feeding.setCheckable(True)
        self.button_feeding.setChecked(True)
        self.button_washing = QRadioButton("Only washing", self)
        # self.button_washing.setEnabled(False)
        self.button_washing.setCheckable(True)
        self.bgroup1_1.addButton(self.button_feeding, 1)
        self.bgroup1_1.addButton(self.button_washing, 2)
        self.bgroup1_1.buttonClicked.connect(lambda: self.record_log("Type", self.bgroup1_1))
        # Create a group box for feeding & washing
        gpbox1_1 = QGroupBox("Program Type")
        grid = QVBoxLayout()
        grid.addWidget(self.button_feeding)
        grid.addWidget(self.button_washing)
        grid.setAlignment(Qt.AlignLeft)
        gpbox1_1.setLayout(grid)
        self.left_layout.addWidget(gpbox1_1)
        # Create a button group for day of week
        self.button_dow = [
            'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
            'Saturday', 'Sunday', 'Everyday',
        ]
        self.bgroup1_2 = QButtonGroup(self)
        self.bgroup1_2.setExclusive(False)
        for id, name in enumerate(self.button_dow):
            button = QCheckBox(name, self)
            button.stateChanged.connect(lambda: self.update_active_days())
            if name == 'Everyday':
                # 'Everyday' toggles all seven day checkboxes at once.
                button.clicked.connect(lambda: self.check_everyday())
            self.bgroup1_2.addButton(button, id)
        # adds each button to the layout
        gpbox1_2 = QGroupBox("Select day of week")
        gpbox1_2.setAlignment(Qt.AlignLeft)
        grid = QGridLayout()
        grid.setSpacing(5)
        for i, button in enumerate(self.bgroup1_2.buttons()):
            grid.addWidget(button, i % 4, i // 4)
        gpbox1_2.setLayout(grid)
        self.left_layout.addWidget(gpbox1_2)
        # Add time pulldown: 48 half-hour slots formatted as 12-hour AM/PM.
        gpbox1_2_1 = QGroupBox("Select time")
        self.pd_time = QComboBox(self)
        for h in range(24):
            for m in range(2):
                self.pd_time.addItem(f'{h // 12 * 12 + h % 12} : {m * 30 :02d} {"AM" if h < 12 else "PM"}')
        self.pd_time.currentIndexChanged.connect(lambda: self.update_time())
        self.update_time()
        gpbox1_2_1_layout = QVBoxLayout()
        gpbox1_2_1_layout.addWidget(self.pd_time)
        gpbox1_2_1.setLayout(gpbox1_2_1_layout)
        self.left_layout.addWidget(gpbox1_2_1)
        # Group box right
        # Box for food quantity =================================================
        gpbox2 = QGroupBox("Food Quantity")
        gpbox2.setStyleSheet(
            'QGroupBox:title {'
            'subcontrol-origin: margin;'
            'subcontrol-position: top center;'
            'padding-left: 10px;'
            'padding-right: 10px; }'
        )
        gpbox2_layout = QVBoxLayout()
        gpbox2.setLayout(gpbox2_layout)
        gpbox2_1 = QGroupBox()
        grid = QGridLayout()
        grid.setSpacing(15)
        scroll = QScrollArea()
        scroll.setWidget(gpbox2_1)
        # scroll.setFixedWidth(470)
        scroll.setWidgetResizable(True)
        self.bgroup2_1 = QButtonGroup(self)  # button group of tank selection
        self.bgroup2_1.setExclusive(False)
        self.bgroup2_2 = []  # one QButtonGroup of food-amount radios per row
        # Select/Unselect all tanks (header row, index 0 of bgroup2_1/2_2)
        self.select_unselect_all_checkbox = QCheckBox(f'All Tanks', self)
        self.select_unselect_all_checkbox.toggled.connect(self.select_unselect_all_tanks)
        self.select_unselect_all_checkbox.setChecked(True)
        self.bgroup2_1.addButton(self.select_unselect_all_checkbox, 1)
        grid.addWidget(self.select_unselect_all_checkbox, 0, 0)
        bg = QButtonGroup(self)
        for j, name in enumerate(self.num_quantity):
            b = QRadioButton(name, self)
            b.setFixedSize(QtCore.QSize(40, 20))
            b.setCheckable(True)
            bg.addButton(b, j + 1)
            grid.addWidget(b, 0, j + 1)
        bg.buttonClicked.connect(lambda: self.select_unselect_food_amount())
        self.bgroup2_2.append(bg)
        # Populate tank rows
        for i in range(1, self.num_tanks + 1):
            cb = QCheckBox(f'Tank {i}', self)
            cb.setChecked(True)
            self.bgroup2_1.addButton(cb, i + 1)
            grid.addWidget(cb, i, 0)
            bg = QButtonGroup(self)
            for j, name in enumerate(self.num_quantity):
                b = QRadioButton(name, self)
                b.setFixedSize(QtCore.QSize(40, 20))
                b.setCheckable(True)
                bg.addButton(b, j + 1)
                grid.addWidget(b, i, j + 1)
            bg.buttonClicked.connect(lambda: self.record_log())
            self.bgroup2_2.append(bg)
        self.bgroup2_1.buttonClicked.connect(lambda: self.record_log())
        grid.setAlignment(Qt.AlignCenter)
        gpbox2_1.setLayout(grid)
        gpbox2_layout.addWidget(scroll)
        # Add to layout
        self.layout.addLayout(self.left_layout)
        self.layout.addWidget(gpbox2)
        self.setLayout(self.layout)
        self.update_json()

    @property
    def json_path(self):
        """Absolute path of this program's settings JSON file."""
        return f'/home/pi/Dev/prod/zaf_data/{self.name}.json'

    def toggle_program_enabled(self):
        """Flip the Enabled flag, keep checkbox and settings in sync, persist."""
        self.is_enabled_checkbox.setChecked(not self.is_enabled_checkbox.isChecked())
        self.program_settings["Enabled"] = self.is_enabled_checkbox.isChecked()
        self.update_json()

    def check_everyday(self):
        """Mirror the 'Everyday' checkbox (index 8) onto the seven day boxes."""
        if self.bgroup1_2.buttons()[-1].isChecked():
            for i, bt in enumerate(self.bgroup1_2.buttons()):
                if i != 8:
                    bt.setChecked(True)
        else:
            for i, bt in enumerate(self.bgroup1_2.buttons()):
                if i != 8:
                    bt.setChecked(False)
        self.repaint()
        self.update_active_days()

    def update_time(self):
        """Copy the selected pulldown time into the settings and persist."""
        time = self.pd_time.currentText()
        self.program_settings["Time"] = time
        self.update_json()

    def update_active_days(self):
        """Store the checked days (3-letter abbreviations) and persist."""
        checked_dow = [i.isChecked() for i in self.bgroup1_2.buttons()]
        # 'Everyday' is excluded; it is only a UI shortcut.
        dow = [self.button_dow[i][:3] for i, ii in enumerate(checked_dow) if ii and self.button_dow[i] != "Everyday"]
        self.parent.tabs[0].update_program_list()
        self.program_settings["Day"] = dow
        self.update_json()

    def select_unselect_food_amount(self):
        """Copy the header row's food-amount choice to every checked tank row."""
        for idx, (tk, bt) in enumerate(zip(self.bgroup2_1.buttons(), self.bgroup2_2)):
            if tk.isChecked():
                # bgroup2_2[0] is the header row acting as the template.
                for index, i in enumerate(self.bgroup2_2[0].buttons()):
                    if i.isChecked():
                        bt.buttons()[index].setChecked(True)
        self.record_log()

    def select_unselect_all_tanks(self):
        """Check or uncheck every tank row to match the 'All Tanks' box."""
        if self.select_unselect_all_checkbox.isChecked():
            for tank in self.bgroup2_1.buttons():
                tank.setChecked(True)
        else:
            for tank in self.bgroup2_1.buttons():
                tank.setChecked(False)

    def record_log(self, key=None, obj=None):
        """Sync UI state into ``program_settings`` and persist.

        Three call shapes: (key, QButtonGroup) records the checked button's
        text under *key*; key == "Enabled" stores *obj* directly; no args
        re-reads every tank row's food amount.
        """
        if isinstance(obj, QButtonGroup):
            # For logging feeding or washing
            for i in obj.buttons():
                if i.isChecked():
                    self.program_settings[key] = i.text()
        elif key == "Enabled":
            self.program_settings[key] = obj
        else:
            # For logging food quantity; idx 0 is the 'All Tanks' header row.
            for idx, (tk, bt) in enumerate(zip(self.bgroup2_1.buttons(), self.bgroup2_2)):
                if idx != 0:
                    if tk.isChecked():
                        for i in bt.buttons():
                            if i.isChecked():
                                self.program_settings["Tanks"][int(tk.text().split()[1]) - 1] = i.text()
                    else:
                        self.program_settings["Tanks"][int(tk.text().split()[1]) - 1] = None
        self.update_json()

    def reset(self, preset):
        """Restore settings and all widgets from a *preset* settings dict.

        NOTE(review): string values are rehydrated with ``eval`` — unsafe if
        the JSON file can be edited by others; prefer json/ast.literal_eval.
        """
        self.program_settings = copy(preset)
        for key, val in preset.items():
            if key == "Enabled":
                if isinstance(val, str):
                    val = eval(val)
                self.is_enabled_checkbox.setChecked(bool(val))
                # self.button_onoff.setChecked(val)
            elif key == "Type":
                if val == "Feeding and washing":
                    self.button_feeding.setChecked(True)
                    self.button_washing.setChecked(False)
                elif val == "Only washing":
                    self.button_feeding.setChecked(False)
                    self.button_washing.setChecked(True)
                self.record_log("Type", self.bgroup1_1)
            elif key == "Day":
                # Reset all checkboxes
                for bt in self.bgroup1_2.buttons():
                    bt.setChecked(False)
                # Turn on checkboxes
                if val:
                    if isinstance(val, str):
                        val = eval(val)
                    for v in val:
                        for bt in self.bgroup1_2.buttons():
                            if v in bt.text():
                                bt.setChecked(True)
                                break
            elif key == "Time":
                if val:
                    self.pd_time.setCurrentIndex(self.pd_time.findText(val))
                else:
                    self.pd_time.setCurrentIndex(0)
            elif "Tanks" in key:
                for idx, tank in enumerate(val):
                    tankid = idx + 1
                    if tank:
                        self.bgroup2_1.buttons()[tankid].setChecked(True)
                        bg = self.bgroup2_2[tankid]
                        for bt in bg.buttons():
                            if tank == bt.text():
                                bt.setChecked(True)
                                break
                    else:
                        # NOTE(review): this also checks the tank box when the
                        # stored amount is None — confirm that is intended.
                        self.bgroup2_1.buttons()[tankid].setChecked(True)
                        bg = self.bgroup2_2[tankid]
                        bg.setExclusive(False)
                        for bt in bg.buttons():
                            bt.setChecked(False)
                        bg.setExclusive(True)

    def duplicate(self):
        """Create a new tab and copy this program's settings into it."""
        self.parent.addprogramtab()
        duplicated_program_settings = self.program_settings.copy()
        duplicated_program_settings["Program_name"] = self.parent.tabs[-1].name
        self.parent.tabs[-1].reset(duplicated_program_settings)

    def delete_tab(self):
        """Remove this program: its cron job, its tab, and its JSON file."""
        # Delete the associated crontab job
        self.parent.cron.remove(self.parent.cron.find_comment(self.name))
        self.parent.cron.write()
        # Scan for the current tab
        # NOTE(review): deletes from self.parent.tabs while iterating it;
        # safe only because at most one tab matches — confirm.
        for id, tab in enumerate(self.parent.tabs):
            if tab.name == self.name:
                del self.parent.tabs[id]
                self.parent.removeTab(id)
        if os.path.isfile(self.json_path):
            os.remove(self.json_path)
        else:
            ## Show an error ##
            print("Error: %s file not found" % self.json_path)

    def update_json(self):
        """Serialize ``program_settings`` to the JSON file and refresh cron."""
        # Serializing json
        json_object = json.dumps(self.program_settings, indent=4)
        # Writing to the per-program settings file
        with open(self.json_path, "w") as outfile:
            outfile.write(json_object)
        self.parent.update_crontab_job()

    def progress_fn(self, log_str):
        """Worker progress callback: append a line to the log tab."""
        self.parent.log_tab.infoTextBox.insertPlainText(log_str)

    def thread_complete(self):
        """Worker finished callback: restore the Run button and notify UI."""
        self.is_running = False
        self.button_startstop.setText("Start")
        self.button_startstop.setEnabled(True)
        self.parent.log_tab.program_name_label.setText(f"{self.name} is done")
        self.parent.status_bar.showMessage(f"{self.name} is done.")

    def start_program(self):
        """Toggle execution: start a worker thread, or signal an early stop."""
        if self.is_running:
            self.early_stop_signal.emit()
            self.is_running = False
        else:
            self.update_json()
            self.worker = Worker(
                run
            )  # Any other args, kwargs are passed to the run function
            self.worker.kwargs['food_amounts'] = self.program_settings["Tanks"]
            self.worker.kwargs['program_type'] = self.program_settings["Type"]
            # worker.signals.result.connect(self.result_callback)
            self.early_stop_signal.connect(self.worker.set_early_stop)
            self.worker.signals.finished.connect(self.thread_complete)
            self.worker.signals.progress.connect(self.progress_fn)
            self.parent.log_tab.clear_activity()
            # Execute
            self.parent.threadpool.start(self.worker)
            self.is_running = True
            self.parent.log_tab.program_name_label.setText(f"{self.name} is running now")
            self.parent.status_bar.showMessage(f"{self.name} is running now...")
            self.button_startstop.setText("Stop")
            # self.button_startstop.setEnabled(False)
# -*- coding: utf-8 -*-

"""
This module contains methods to parse MARC records in a form of
pymarc.Record objects
"""
from typing import List, Optional

from pymarc import Record, Field

from bookops_callno.errors import CallNoConstructorError


def get_audience(bib: Record = None) -> Optional[str]:
    """
    Determines audience based on MARC 008 tag.
    Possible returns: 'early juv', 'juv', 'young adult', 'adult'.

    Args:
        bib:                    pymarc.Record instance

    Returns:
        audn_code
    """
    if bib is None:
        return None

    # determine record has a format that carries an audience code
    if not has_audience_code(bib.leader):
        return None

    try:
        code = bib["008"].data[22]
    except (AttributeError, IndexError):
        # 008 field missing or too short to carry the audience byte
        return None

    if code in ("a", "b"):
        return "early juv"
    elif code == "c":
        # was `code in "c"`, which also (wrongly) matched the empty string
        return "juv"
    elif code == "j":
        # 'j' covers both picture books and longer juvenile works;
        # use physical description to tell them apart
        return "early juv" if is_short(bib) else "juv"
    elif code == "d":
        return "young adult"
    else:
        return "adult"


def get_callno_relevant_subjects(bib: Record = None) -> List[Field]:
    """
    Parses call number relevant subject MARC fields

    Args:
        bib:                    pymarc.Record instance

    Returns:
        subject_fields
    """
    if bib is None:
        return []
    elif not isinstance(bib, Record):
        raise CallNoConstructorError(
            "Invalid 'bib' argument used. Must be pymarc.Record instance."
        )

    # only personal/corporate names (600/610) and topical terms (650)
    # are relevant here; other 6xx fields are deliberately skipped
    subject_fields = []
    for field in bib.get_fields("600", "610"):
        if is_lc_subject(field):
            subject_fields.append(field)
    for field in bib.get_fields("650"):
        if is_lc_subject(field):
            subject_fields.append(field)

    return subject_fields


def get_field(bib: Record = None, tag: str = None) -> Optional[Field]:
    """
    Returns pymarc.Field instance of the the first given MARC tag in a bib

    Args:
        bib:                    pymarc.Record instance
        tag:                    MARC tag as str

    Returns:
        pymarc.Field instance
    """
    if bib is None:
        return None
    elif not isinstance(bib, Record):
        raise CallNoConstructorError(
            "Invalid 'bib' argument used. Must be pymarc.Record instance."
        )
    if not isinstance(tag, str):
        raise CallNoConstructorError("Invalid 'tag' argument used. Must be string.")
    return bib[tag]


def get_form_of_item_code(bib: Record = None) -> Optional[str]:
    """
    Parses form of item code in the 008 tag if exists

    Args:
        bib:                    pymarc.Record instance

    Returns:
        code
    """
    if bib is None:
        return None
    elif not isinstance(bib, Record):
        raise CallNoConstructorError(
            "Invalid 'bib' argument used. Must be pymarc.Record instance."
        )

    rec_type = bib.leader[6]
    try:
        # print, sound records, computer files keep the code at pos. 23
        if rec_type in ("a", "c", "d", "i", "j", "m", "t"):
            return bib["008"].data[23]
        # visual materials keep it at pos. 29
        elif rec_type == "g":
            return bib["008"].data[29]
        else:
            return None
    except (AttributeError, IndexError):
        # 008 missing or truncated
        return None


def get_language_code(bib: Record = None) -> Optional[str]:
    """
    Determines world lanugage code based on pos 35-37 of the 008 tag

    Args:
        bib:                    pymarc.Record instance

    Returns:
        3-letter language code
    """
    if bib is None:
        return None
    try:
        code = bib["008"].data[35:38].upper()
        # 'UND' = undetermined; treat it like no language at all
        if code not in (None, "", "UND"):
            return code
        else:
            return None
    except (AttributeError, IndexError):
        return None


def get_main_entry_tag(bib: Record = None) -> Optional[str]:
    """
    Determines MARC tag of the main entry

    Args:
        bib:                    pymarc.Record instance

    Returns:
        tag
    """
    if bib is None:
        return None
    elif not isinstance(bib, Record):
        raise CallNoConstructorError(
            "Invalid 'bib' argument used. Must be pymarc.Record instance."
        )

    # order matters: personal name wins over corporate name, etc.
    entry_tags = ["100", "110", "111", "245"]
    for tag in entry_tags:
        if has_tag(bib, tag):
            return tag


def get_physical_description(bib: Record = None) -> Optional[str]:
    """
    Returns value of MARC tag 300
    """
    if bib is None:
        return None
    try:
        return bib["300"].value()
    except AttributeError:
        # no 300 field present
        return None


def get_record_type_code(bib: Record = None) -> Optional[str]:
    """
    Parses MARC leader for record type code

    Args:
        bib:                    pymarc.Record instance

    Returns:
        rec_type_code
    """
    if bib is None:
        return None
    return bib.leader[6]


def has_audience_code(leader: str = None) -> bool:
    """
    Determines if MARC record has audience code in position 22
    of the 008 field, based on record type (leader pos. 6) and
    bibliographic level (leader pos. 7)

    Args:
        leader:                 MARC leader field

    Returns:
        boolean
    """
    if leader is None:
        return False
    elif not isinstance(leader, str):
        raise CallNoConstructorError(
            "Invalid 'leader' type used in argument. Must be a string."
        )
    try:
        if leader[6] in ("a", "c", "d", "g", "i", "j", "k", "m", "t") and leader[7] in (
            "a",
            "m",
        ):
            return True
    except IndexError:
        return False
    # previously the function fell through and implicitly returned None here
    return False


def has_tag(bib: Record = None, tag: str = None) -> bool:
    """
    Checks if tag exists in record

    Args:
        bib:                    pymarc.Record instance
        tag:                    MARC tag as string

    Returns:
        boolean
    """
    if bib is None:
        return False
    elif not isinstance(tag, str):
        raise CallNoConstructorError("Invalid 'tag' argument used. Must be string.")
    return bool(bib[tag])


def is_biography(bib: Record = None) -> bool:
    """
    Determines if material is autobiography or biography
    """
    if bib is None:
        return False

    rec_type = get_record_type_code(bib)
    if rec_type in ("a", "t"):  # print material
        try:
            code = bib["008"].data[34]
        except (AttributeError, IndexError):
            return False
        return code in ("a", "b")
    elif rec_type == "i":  # nonmusical sound recording
        # guard the 008 access just like the print branch does
        try:
            code = bib["008"].data[30]
        except (AttributeError, IndexError):
            return False
        return code in ("a", "b")
    else:
        return False


def is_dewey(bib: Record = None) -> bool:
    """
    Determines if material can be classified using Dewey

    Args:
        bib:                    pymarc.Record instance

    Returns:
        boolean
    """
    # TODO: not implemented yet; currently returns None
    pass


def is_dewey_plus_subject(bib: Record = None) -> bool:
    """
    Determines if material can be classified using Dewey + subject pattern

    Args:
        bib:                    pymarc.Record instance

    Returns:
        boolean
    """
    # TODO: not implemented yet; currently returns None
    pass


def is_fiction(bib: Record = None) -> bool:
    """
    Determines if material is fiction

    Args:
        bib:                    pymarc.Record instance

    Returns:
        boolean
    """
    # TODO: not implemented yet; currently returns None
    pass


def is_lc_subject(field: Field = None) -> bool:
    """
    Determies if subject belongs to LCSH

    Args:
        field:                  pymarc.Field instance

    Returns:
        boolean
    """
    if field is None:
        return False
    elif not isinstance(field, Field):
        raise CallNoConstructorError(
            "Invalid 'field' argument. Must be an instance of 'pymarc.Field'."
        )
    # second indicator '0' marks Library of Congress Subject Headings
    if field.tag in ("600", "610", "611", "630", "650", "651", "655"):
        return field.indicator2 == "0"
    else:
        return False


def is_libretto(subjects: List[Field]) -> bool:
    """
    Checks is material is a libretto
    """
    for s in subjects:
        if "Librettos" in s.value():
            return True
    return False


def is_short(bib: Record = None) -> Optional[bool]:
    """
    Determines if the print material is short (a single volume or
    fewer than 50 pages, judging by subfield $a of the 300 field)
    """
    if bib is None:
        return None
    try:
        t300 = bib["300"]["a"]
    except TypeError:
        return None
    if t300 is None:
        return None

    if "1 volume" in t300 or "1 v." in t300:
        return True
    # otherwise look for a page count below 50 among the words
    for word in t300.split(" "):
        try:
            if int(word) < 50:
                return True
        except ValueError:
            pass
    return False
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
"""Generated Python projection of the WinRT ``Windows.Media.Capture`` namespace."""

import enum

import winsdk

# Loads the native projection module backing this namespace.
_ns_module = winsdk._import_ns_module("Windows.Media.Capture")

# Best-effort imports of related namespaces referenced by these types;
# each is optional because a given winsdk build may not ship every namespace.
try:
    import winsdk.windows.devices.enumeration
except Exception:
    pass
try:
    import winsdk.windows.foundation
except Exception:
    pass
try:
    import winsdk.windows.foundation.collections
except Exception:
    pass
try:
    import winsdk.windows.graphics.directx.direct3d11
except Exception:
    pass
try:
    import winsdk.windows.graphics.imaging
except Exception:
    pass
try:
    import winsdk.windows.media
except Exception:
    pass
try:
    import winsdk.windows.media.capture.core
except Exception:
    pass
try:
    import winsdk.windows.media.capture.frames
except Exception:
    pass
try:
    import winsdk.windows.media.core
except Exception:
    pass
try:
    import winsdk.windows.media.devices
except Exception:
    pass
try:
    import winsdk.windows.media.effects
except Exception:
    pass
try:
    import winsdk.windows.media.mediaproperties
except Exception:
    pass
try:
    import winsdk.windows.security.credentials
except Exception:
    pass
try:
    import winsdk.windows.storage
except Exception:
    pass
try:
    import winsdk.windows.storage.streams
except Exception:
    pass
try:
    import winsdk.windows.ui.windowmanagement
except Exception:
    pass

# --- WinRT enums mirrored as Python IntEnum classes ---


class CameraCaptureUIMaxPhotoResolution(enum.IntEnum):
    HIGHEST_AVAILABLE = 0
    VERY_SMALL_QVGA = 1
    SMALL_VGA = 2
    MEDIUM_XGA = 3
    LARGE3_M = 4
    VERY_LARGE5_M = 5


class CameraCaptureUIMaxVideoResolution(enum.IntEnum):
    HIGHEST_AVAILABLE = 0
    LOW_DEFINITION = 1
    STANDARD_DEFINITION = 2
    HIGH_DEFINITION = 3


class CameraCaptureUIMode(enum.IntEnum):
    PHOTO_OR_VIDEO = 0
    PHOTO = 1
    VIDEO = 2


class CameraCaptureUIPhotoFormat(enum.IntEnum):
    JPEG = 0
    PNG = 1
    JPEG_X_R = 2


class CameraCaptureUIVideoFormat(enum.IntEnum):
    MP4 = 0
    WMV = 1


class KnownVideoProfile(enum.IntEnum):
    VIDEO_RECORDING = 0
    HIGH_QUALITY_PHOTO = 1
    BALANCED_VIDEO_AND_PHOTO = 2
    VIDEO_CONFERENCING = 3
    PHOTO_SEQUENCE = 4
    HIGH_FRAME_RATE = 5
    VARIABLE_PHOTO_SEQUENCE = 6
    HDR_WITH_WCG_VIDEO = 7
    HDR_WITH_WCG_PHOTO = 8
    VIDEO_HDR8 = 9
    COMPRESSED_CAMERA = 10


class MediaCaptureDeviceExclusiveControlStatus(enum.IntEnum):
    EXCLUSIVE_CONTROL_AVAILABLE = 0
    SHARED_READ_ONLY_AVAILABLE = 1


class MediaCaptureMemoryPreference(enum.IntEnum):
    AUTO = 0
    CPU = 1


class MediaCaptureSharingMode(enum.IntEnum):
    EXCLUSIVE_CONTROL = 0
    SHARED_READ_ONLY = 1


class MediaCaptureThermalStatus(enum.IntEnum):
    NORMAL = 0
    OVERHEATED = 1


class MediaCategory(enum.IntEnum):
    OTHER = 0
    COMMUNICATIONS = 1
    MEDIA = 2
    GAME_CHAT = 3
    SPEECH = 4
    FAR_FIELD_SPEECH = 5
    UNIFORM_SPEECH = 6
    VOICE_TYPING = 7


class MediaStreamType(enum.IntEnum):
    VIDEO_PREVIEW = 0
    VIDEO_RECORD = 1
    AUDIO = 2
    PHOTO = 3
    METADATA = 4


class PhotoCaptureSource(enum.IntEnum):
    AUTO = 0
    VIDEO_PREVIEW = 1
    PHOTO = 2


class PowerlineFrequency(enum.IntEnum):
    DISABLED = 0
    FIFTY_HERTZ = 1
    SIXTY_HERTZ = 2
    AUTO = 3


class StreamingCaptureMode(enum.IntEnum):
    AUDIO_AND_VIDEO = 0
    AUDIO = 1
    VIDEO = 2


class VideoDeviceCharacteristic(enum.IntEnum):
    ALL_STREAMS_INDEPENDENT = 0
    PREVIEW_RECORD_STREAMS_IDENTICAL = 1
    PREVIEW_PHOTO_STREAMS_IDENTICAL = 2
    RECORD_PHOTO_STREAMS_IDENTICAL = 3
    ALL_STREAMS_IDENTICAL = 4


class VideoRotation(enum.IntEnum):
    NONE = 0
    CLOCKWISE90_DEGREES = 1
    CLOCKWISE180_DEGREES = 2
    CLOCKWISE270_DEGREES = 3


# --- Runtime classes re-exported from the native namespace module ---

WhiteBalanceGain = _ns_module.WhiteBalanceGain
AdvancedCapturedPhoto = _ns_module.AdvancedCapturedPhoto
AdvancedPhotoCapture = _ns_module.AdvancedPhotoCapture
AppCapture = _ns_module.AppCapture
CameraCaptureUI = _ns_module.CameraCaptureUI
CameraCaptureUIPhotoCaptureSettings = _ns_module.CameraCaptureUIPhotoCaptureSettings
CameraCaptureUIVideoCaptureSettings = _ns_module.CameraCaptureUIVideoCaptureSettings
CapturedFrame = _ns_module.CapturedFrame
CapturedFrameControlValues = _ns_module.CapturedFrameControlValues
CapturedPhoto = _ns_module.CapturedPhoto
LowLagMediaRecording = _ns_module.LowLagMediaRecording
LowLagPhotoCapture = _ns_module.LowLagPhotoCapture
LowLagPhotoSequenceCapture = _ns_module.LowLagPhotoSequenceCapture
MediaCapture = _ns_module.MediaCapture
MediaCaptureDeviceExclusiveControlStatusChangedEventArgs = _ns_module.MediaCaptureDeviceExclusiveControlStatusChangedEventArgs
MediaCaptureFailedEventArgs = _ns_module.MediaCaptureFailedEventArgs
MediaCaptureFocusChangedEventArgs = _ns_module.MediaCaptureFocusChangedEventArgs
MediaCaptureInitializationSettings = _ns_module.MediaCaptureInitializationSettings
MediaCapturePauseResult = _ns_module.MediaCapturePauseResult
MediaCaptureRelativePanelWatcher = _ns_module.MediaCaptureRelativePanelWatcher
MediaCaptureSettings = _ns_module.MediaCaptureSettings
MediaCaptureStopResult = _ns_module.MediaCaptureStopResult
MediaCaptureVideoProfile = _ns_module.MediaCaptureVideoProfile
MediaCaptureVideoProfileMediaDescription = _ns_module.MediaCaptureVideoProfileMediaDescription
OptionalReferencePhotoCapturedEventArgs = _ns_module.OptionalReferencePhotoCapturedEventArgs
PhotoCapturedEventArgs = _ns_module.PhotoCapturedEventArgs
PhotoConfirmationCapturedEventArgs = _ns_module.PhotoConfirmationCapturedEventArgs
VideoStreamConfiguration = _ns_module.VideoStreamConfiguration
import paddle
import paddle.nn as nn
import paddle.vision.models as models
import paddle.nn.functional as F


class TVLoss(nn.Layer):
    """Total-variation loss: penalizes differences between adjacent pixels."""

    def __init__(self, TVLoss_weight=1):
        super(TVLoss, self).__init__()
        self.TVLoss_weight = TVLoss_weight

    def forward(self, x):
        """x: 4-D tensor (batch, channel, height, width)."""
        batch_size = x.shape[0]
        h_x = x.shape[2]
        w_x = x.shape[3]
        # number of vertical / horizontal neighbor pairs, for normalization
        count_h = (x.shape[2] - 1) * x.shape[3]
        count_w = x.shape[2] * (x.shape[3] - 1)
        h_tv = paddle.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
        w_tv = paddle.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
        return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size


class MaskRefineLoss(nn.Layer):
    """Loss for mask refinement: Tversky loss (default) or plain L1."""

    def __init__(self, type='tversky'):
        super(MaskRefineLoss, self).__init__()
        self.type = type
        if type == 'l1':
            self.criterion = paddle.nn.L1Loss()

    def __call__(self, x, y):
        """x: predicted mask, y: ground-truth mask."""
        if self.type == 'tversky':
            # beta > alpha weights false negatives more heavily
            beta = 0.9
            alpha = 1 - beta
            numerator = paddle.sum(x * y)
            denominator = x * y + alpha * (1 - y) * x + beta * y * (1 - x)
            return 1 - numerator / (paddle.sum(denominator) + 1e-7)
        elif self.type == 'l1':
            return self.criterion(x, y)
        else:
            # was a bare `raise` (RuntimeError with no context); make it explicit
            raise ValueError('unsupported MaskRefineLoss type: %r' % self.type)


class AdversarialLoss(nn.Layer):
    r"""
    Adversarial loss
    https://arxiv.org/abs/1711.10337
    """

    def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0):
        r"""
        type = nsgan | lsgan | hinge
        """
        super(AdversarialLoss, self).__init__()
        self.type = type
        self.register_buffer('real_label', paddle.to_tensor(target_real_label))
        self.register_buffer('fake_label', paddle.to_tensor(target_fake_label))

        if type == 'nsgan':
            self.criterion = nn.BCELoss()
        elif type == 'lsgan':
            self.criterion = nn.MSELoss()
        elif type == 'hinge':
            self.criterion = nn.ReLU()

    def __call__(self, outputs, is_real, is_disc=None):
        if self.type == 'hinge':
            if is_disc:
                if is_real:
                    outputs = -outputs
                return self.criterion(1 + outputs).mean()
            else:
                return (-outputs).mean()
        else:
            labels = (self.real_label if is_real else self.fake_label).expand_as(outputs)
            loss = self.criterion(outputs, labels)
            return loss


class StyleLoss(nn.Layer):
    r"""
    Perceptual loss, VGG-based
    https://arxiv.org/abs/1603.08155
    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
    """

    def __init__(self):
        super(StyleLoss, self).__init__()
        # self.add_sublayer('vgg', VGG19())
        self.criterion = paddle.nn.L1Loss()

    def compute_gram(self, x):
        """Gram matrix of feature map x, normalized by its element count."""
        b, ch, h, w = x.shape
        f = x.reshape([b, ch, w * h])
        f_T = f.transpose([0, 2, 1])
        G = f.bmm(f_T) / (h * w * ch)
        return G

    def __call__(self, x, y, mask=None):
        """x, y: dicts of VGG feature maps; mask (optional) restricts the loss."""
        x_vgg, y_vgg = x, y
        # assumes the network input was 512x512 — TODO confirm against caller
        input_size = 512
        style_loss = 0.0
        if mask is not None:
            # size must be integral: `/` produced floats, `//` is correct
            mask1 = F.interpolate(mask, size=(input_size // 2, input_size // 2), mode='nearest')
            mask2 = F.interpolate(mask, size=(input_size // 4, input_size // 4), mode='nearest')
            mask3 = F.interpolate(mask, size=(input_size // 8, input_size // 8), mode='nearest')
            mask4 = F.interpolate(mask, size=(input_size // 16, input_size // 16), mode='nearest')
        else:
            mask1 = 1
            mask2 = 1
            mask3 = 1
            mask4 = 1
        style_loss += self.criterion(self.compute_gram(x_vgg['relu2_2'] * mask1), self.compute_gram(y_vgg['relu2_2'] * mask1))
        style_loss += self.criterion(self.compute_gram(x_vgg['relu3_4'] * mask2), self.compute_gram(y_vgg['relu3_4'] * mask2))
        style_loss += self.criterion(self.compute_gram(x_vgg['relu4_4'] * mask3), self.compute_gram(y_vgg['relu4_4'] * mask3))
        style_loss += self.criterion(self.compute_gram(x_vgg['relu5_2'] * mask4), self.compute_gram(y_vgg['relu5_2'] * mask4))
        return style_loss


class PerceptualLoss(nn.Layer):
    r"""
    Perceptual loss, VGG-based
    https://arxiv.org/abs/1603.08155
    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
    """

    def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
        super(PerceptualLoss, self).__init__()
        # self.add_sublayer('vgg', VGG19())
        self.criterion = paddle.nn.L1Loss()
        self.weights = weights

    def __call__(self, x, y, mask):
        """x, y: dicts of VGG feature maps; mask (optional) restricts the loss."""
        x_vgg, y_vgg = x, y
        # assumes the network input was 512x512 — TODO confirm against caller
        input_size = 512
        content_loss = 0.0
        if mask is not None:
            mask1 = mask
            # size must be integral: `/` produced floats, `//` is correct
            mask2 = F.interpolate(mask, size=(input_size // 2, input_size // 2), mode='nearest')
            mask3 = F.interpolate(mask, size=(input_size // 4, input_size // 4), mode='nearest')
            mask4 = F.interpolate(mask, size=(input_size // 8, input_size // 8), mode='nearest')
            mask5 = F.interpolate(mask, size=(input_size // 16, input_size // 16), mode='nearest')
        else:
            mask1 = 1
            mask2 = 1
            mask3 = 1
            mask4 = 1
            mask5 = 1
        content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'] * mask1, y_vgg['relu1_1'] * mask1)
        content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'] * mask2, y_vgg['relu2_1'] * mask2)
        content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'] * mask3, y_vgg['relu3_1'] * mask3)
        content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'] * mask4, y_vgg['relu4_1'] * mask4)
        content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'] * mask5, y_vgg['relu5_1'] * mask5)
        return content_loss


# TODO add total vairation loss.
class VGG19(paddle.nn.Layer):
    """Frozen VGG-19 feature extractor exposing every relu stage by name."""

    # (attribute name, start index, end index) into vgg19().features;
    # the boundaries match the original hand-unrolled slices exactly.
    _STAGES = [
        ("relu1_1", 0, 2), ("relu1_2", 2, 4),
        ("relu2_1", 4, 7), ("relu2_2", 7, 9),
        ("relu3_1", 9, 12), ("relu3_2", 12, 14),
        ("relu3_3", 14, 16), ("relu3_4", 16, 18),
        ("relu4_1", 18, 21), ("relu4_2", 21, 23),
        ("relu4_3", 23, 25), ("relu4_4", 25, 27),
        ("relu5_1", 27, 30), ("relu5_2", 30, 32),
        ("relu5_3", 32, 34), ("relu5_4", 34, 36),
    ]

    def __init__(self):
        super(VGG19, self).__init__()
        features = models.vgg19(pretrained=True).features
        # build each relu stage as a Sequential over its slice of layers
        for stage_name, lo, hi in self._STAGES:
            stage = paddle.nn.Sequential()
            for x in range(lo, hi):
                stage.add_sublayer(str(x), features[x])
            setattr(self, stage_name, stage)

        # don't need the gradients, just want the features
        for param in self.parameters():
            param.stop_gradient = True

    def forward(self, x):
        """Run x through all stages; returns {stage_name: detached feature map}."""
        out = {}
        feat = x
        for stage_name, _, _ in self._STAGES:
            feat = getattr(self, stage_name)(feat)
            out[stage_name] = feat.detach()
        return out
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404, resolve_url
from django.contrib import messages
from django.templatetags.static import static
from django.contrib.auth import logout

from .forms import UserUpdateForm, ProfileUpdateForm, UpdateProfileEveryLoginConfigForm, UserDeleteAccountForm, \
    UserThemeConfigForm, UserSubpageConfigForm, UserSupportCreatorForm, UserHideEmailConfigForm
from .models import Profile, Tag, Config
from wiki.models import Ruleset
from allauth.socialaccount.models import SocialAccount
from wiki.function import fetch_created_ruleset


def _save_settings_forms(request, forms):
    """Save the given bound forms in order, flash success, redirect to settings.

    Shared tail of every successful POST branch of :func:`settings` (the
    original repeated this save/message/redirect sequence five times).

    :param request: WSGI request from user.
    :param forms: bound forms to persist, in the order they must be saved.
    :return: redirect to the settings page.
    """
    for form in forms:
        form.save()
    messages.success(request, f'Your settings has been updated!')
    return redirect('settings')


@login_required
def settings(request):
    """
    View for setting form and related user setting like website configuration
    that is from Account and Config model. User must be logged in before
    access this page. This view has many condition due to the
    UpdateProfileEveryLogin setting that need more condition requirement on save.

    :param request: WSGI request from user.
    :return: Render the settings page with the many account related form and
        pass the value from context to the template (settings.html)
    """
    hero_image = "img/settings-cover-night.jpeg"
    hero_image_light = 'img/settings-cover-light.png'
    if request.method == 'POST':
        user_form = UserUpdateForm(request.POST, instance=request.user)
        profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        profile_sync_form = UpdateProfileEveryLoginConfigForm(request.POST, instance=request.user.config)
        website_config_form = UserThemeConfigForm(request.POST, instance=request.user.config)
        subpage_config_form = UserSubpageConfigForm(request.POST, instance=request.user.config)
        hide_email_config_form = UserHideEmailConfigForm(request.POST, instance=request.user.config)
        support_form = UserSupportCreatorForm(request.POST, instance=request.user.profile)
        # Config-related forms that are saved in every successful branch,
        # listed in the original save order.
        config_forms = [website_config_form, subpage_config_form, support_form, hide_email_config_form]
        if SocialAccount.objects.filter(user=request.user).exists():
            # User that send request are login by social account, must check on
            # profile sync field: a synced profile must not be overwritten by
            # manual edits.
            sync_requested = profile_sync_form['update_profile_every_login'].value()
            if sync_requested == request.user.config.update_profile_every_login:
                # Form value and database value are the same: user did not
                # change this config.
                if not sync_requested:
                    # Both False -> user is editing the other forms by hand.
                    return _save_settings_forms(request, [user_form, profile_form] + config_forms)
                else:
                    # Both True -> profile is synced; only config forms may change.
                    return _save_settings_forms(request, config_forms)
            else:
                if not sync_requested and request.user.config.update_profile_every_login:
                    # Sync changed True -> False: save only the sync flag and
                    # the config forms.
                    return _save_settings_forms(request, [profile_sync_form] + config_forms)
                else:
                    # Sync changed False -> True: the profile edits submitted in
                    # this request are still valid, so save everything.
                    return _save_settings_forms(
                        request,
                        [user_form, profile_form, profile_sync_form] + config_forms)
        else:
            # Normal Django login: the profile sync system does not apply, so
            # profile_sync_form is never saved.
            return _save_settings_forms(request, [user_form, profile_form] + config_forms)
    else:
        user_form = UserUpdateForm(instance=request.user)
        profile_form = ProfileUpdateForm(instance=request.user.profile)
        profile_sync_form = UpdateProfileEveryLoginConfigForm(instance=request.user.config)
        website_config_form = UserThemeConfigForm(instance=request.user.config)
        subpage_config_form = UserSubpageConfigForm(instance=request.user.config)
        support_form = UserSupportCreatorForm(instance=request.user.profile)
        hide_email_config_form = UserHideEmailConfigForm(instance=request.user.config)
    if SocialAccount.objects.filter(user=request.user).exists():
        osu_confirm_username = SocialAccount.objects.get(user=request.user).extra_data['username']
    else:
        osu_confirm_username = None
    context = {
        'user_form': user_form,
        'profile_form': profile_form,
        'profile_sync_form': profile_sync_form,
        'website_config_form': website_config_form,
        'website_subpage_config_form': subpage_config_form,
        'support_form': support_form,
        'hide_email': hide_email_config_form,
        'title': 'settings',
        'social_account': SocialAccount.objects.filter(user=request.user).exists(),
        # Profile is hand-editable unless a social account with sync-on owns it.
        'can_edit_profile': (not SocialAccount.objects.filter(user=request.user).exists()) or (
            SocialAccount.objects.filter(user=request.user).exists() and (
                not request.user.config.update_profile_every_login)),
        'ruleset_creator': Ruleset.objects.filter(owner=str(request.user.id)).exists(),
        'osu_confirm_username': osu_confirm_username,
        'hero_image': static(hero_image),
        'hero_image_light': static(hero_image_light),
        'opengraph_description': 'All profile and website settings are visible here!',
        'opengraph_url': resolve_url('settings'),
    }
    return render(request, 'users/settings.html', context)


def profile_detail(request, pk):
    """
    View for user detail page or user profile page. Can access publicly but if
    user open your own profile page it will have 'Edit profile' button that
    navigate to setting page.

    :param request: WSGI request from user
    :param pk: User ID
    :type pk: int
    :return: Render the profile detail page and pass the value from context to
        the template (profile.html)
    """
    profile_object = get_object_or_404(Profile, pk=pk)
    config_object = get_object_or_404(Config, pk=pk)
    tag_list = profile_object.tag.split(',')
    tag_object_list = []
    for tag_id in tag_list:
        # Skip entries that are not integers or reference deleted tags.
        # (Previously a non-numeric entry raised ValueError past the inner
        # except and aborted the loop, silently dropping all remaining tags.)
        try:
            tag_object_list.append(Tag.objects.get(id=int(tag_id)))
        except (ValueError, Tag.DoesNotExist):
            pass
    context = {
        'profile_object': profile_object,
        'tag_list': tag_object_list,
        'hide_email': config_object.hide_email,
        'created_ruleset': fetch_created_ruleset(profile_object.id),
        'title': f"{profile_object.user.username}'s profile",
        'hero_image': profile_object.cover.url,
        'hero_image_light': profile_object.cover_light.url,
        'opengraph_description': f"{profile_object.user.username}'s profile page",
        'opengraph_url': resolve_url('profile', pk=profile_object.user.id),
        'opengraph_image': profile_object.cover.url
    }
    return render(request, 'users/profile.html', context)


@login_required
def delete_account(request):
    """
    View for delete account page. User must be logged in before access this
    page. This view has function for verification when the condition is met it
    will logout the user before delete the User, Profile and Config objects.

    :param request: WSGI request from user
    :return: Render the delete account page with delete account form and pass
        the value from context to the template (delete_account.html)
    """
    if request.method == 'POST':
        account_delete_form = UserDeleteAccountForm(request.POST)
        # Require the user to retype both username and password before deleting.
        if request.user.username == account_delete_form['confirm_username'].value() and request.user.check_password(
                account_delete_form['confirm_password'].value()):
            user = request.user
            # Logout before we delete. This will make request.user
            # unavailable (or actually, it points to AnonymousUser).
            logout(request)
            # Delete user (and any associated ForeignKeys, according to
            # on_delete parameters).
            user.delete()
            messages.success(request, 'Account successfully deleted.')
            return redirect('home')
        else:
            messages.error(request, f'Delete Account failed. Please check your username and password.')
            return redirect('settings')
    else:
        account_delete_form = UserDeleteAccountForm()
    context = {
        'form': account_delete_form
    }
    return render(request, 'users/delete_account.html', context)
# PyQt-Sudoku/ui.py
# File: ui.py
# Author: <NAME>
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sudoku


class SudokuUI(QWidget):
    """Main Sudoku window: a 9x9 grid of editable cells plus control buttons.

    Game logic lives in ``sudoku.Sudoku`` (self.sudokuGrid); this class only
    renders its state and forwards user input back to it.
    """

    def __init__(self):
        super().__init__()
        self.gridSize = 9    # board is gridSize x gridSize cells
        self.cellSize = 35   # fixed pixel size of one cell
        self.sudokuGrid = sudoku.Sudoku()   # game-logic backend
        self.vbox = QVBoxLayout()
        self.hbox = QHBoxLayout()
        self.grid = QGridLayout()
        # Label templates, filled in later via .format().
        self.diff_str = "Difficulty: {0} ({1} cells to fill)"
        self.mode_str = "Mode: {}"
        # True while the current puzzle is computer-generated ("Computer"
        # mode); False after Reset switches to human-entered ("Human") mode.
        self.isComputer = True
        self.diffLabel = QLabel(self.diff_str)
        self.modeLabel = QLabel(self.mode_str)
        self.init_ui()

    def init_ui(self):
        """
        structure of UI:
        QVBoxLayout() -> QHBoxLayout() stores buttons
                       & QGridLayout() stores Sudoku
                       & QLabel shows difficulty & mode
        :return:
        """
        self.vbox.addLayout(self.grid)
        self.vbox.addLayout(self.hbox)
        self.vbox.addWidget(self.diffLabel)
        self.vbox.addWidget(self.modeLabel)
        for i in range(self.gridSize):
            for j in range(self.gridSize):
                cell = SudokuCell()
                cell.setFixedSize(self.cellSize, self.cellSize)
                cell.setAlignment(Qt.AlignCenter)
                # Only a single digit 1-9 is accepted per cell.
                reg_exp = QRegExp("[1-9]")
                validator = QRegExpValidator(reg_exp)
                cell.setValidator(validator)
                cell.setMaxLength(1)
                cell.textEdited.connect(self.edit_cell)
                self.grid.addWidget(cell, i, j, Qt.AlignCenter)
        generate_button = QPushButton("Generate")
        solve_button = QPushButton("Solve")
        check_button = QPushButton("Check")
        clear_button = QPushButton("Clear")
        reset_button = QPushButton("Reset")
        # Number of holes (cells to fill) used when generating a puzzle.
        self.diffSpin = QSpinBox()
        self.diffSpin.setRange(1, 80)
        self.diffSpin.setValue(32)
        self.hbox.addWidget(generate_button)
        self.hbox.addWidget(solve_button)
        self.hbox.addWidget(check_button)
        self.hbox.addWidget(clear_button)
        self.hbox.addWidget(reset_button)
        self.hbox.addWidget(self.diffSpin)
        generate_button.clicked.connect(self.new_sudoku)
        solve_button.clicked.connect(self.solve_sudoku)
        check_button.clicked.connect(self.check_sudoku)
        clear_button.clicked.connect(self.clear_sudoku)
        reset_button.clicked.connect(self.reset_sudoku)
        self.setLayout(self.vbox)
        self.setWindowTitle("Sudoku! PyQt5")
        # Fixed-size window: no maximize button.
        self.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint | Qt.CustomizeWindowHint)
        self.new_sudoku()
        self.show()
        self.setFixedSize(self.size())

    def update_ui(self):
        """Repaint every cell from self.sudokuGrid's grid/isActive state."""
        for i in range(self.gridSize):
            for j in range(self.gridSize):
                cell = self.grid.itemAtPosition(i, j).widget()
                num = self.sudokuGrid.grid[i][j]
                active = self.sudokuGrid.isActive[i][j]
                if num:
                    cell.setText(str(num))
                else:
                    cell.setText("")
                # Inactive (given) cells are tinted and read-only.
                if not active:
                    cell.setStyleSheet("background-color:#c3d6e8;")
                    cell.setReadOnly(True)
                else:
                    cell.setStyleSheet("")
                    cell.setReadOnly(False)

    def new_sudoku(self):
        """Generate a fresh puzzle with diffSpin holes and enter Computer mode."""
        self.diffLabel.setText(self.diff_str.format(self.calculate_difficulty(), self.diffSpin.value()))
        self.sudokuGrid.generate_sudoku(self.diffSpin.value())  # generate a sudoku with 32 holes by default
        self.modeLabel.setText(self.mode_str.format("Computer"))
        self.isComputer = True
        self.update_ui()

    def solve_sudoku(self):
        """Solve the current board; in Human mode, validate the entries first."""
        valid = True
        if not self.isComputer:
            # Human mode: freeze the user's givens, count them for the
            # difficulty label, and check the board is consistent.
            cnt = 0
            for i in range(self.gridSize):
                for j in range(self.gridSize):
                    if self.sudokuGrid.grid[i][j] is not None:
                        self.sudokuGrid.isActive[i][j] = False
                        cnt += 1
            valid = self.sudokuGrid.check_all(True)
            self.diffLabel.setText(self.diff_str.format(self.calculate_difficulty(cnt), 9 * 9 - cnt))
        if valid:
            status = self.sudokuGrid.solve_sudoku(override=self.isComputer)
        if not valid or not status:
            QMessageBox.critical(self, "Error!", "This Sudoku is not solvable!")
        self.update_ui()

    def clear_sudoku(self):
        """Clear the user's entries, keeping the puzzle's givens."""
        self.sudokuGrid.clear_sudoku()
        self.update_ui()

    def reset_sudoku(self):
        """Empty the whole board and switch to Human (manual entry) mode."""
        self.sudokuGrid.reset_sudoku()
        self.modeLabel.setText(self.mode_str.format("Human"))
        self.diffLabel.setText("")
        self.isComputer = False
        self.update_ui()

    def check_sudoku(self):
        """Check the board and report success or failure in a dialog."""
        status = self.sudokuGrid.check_all()
        if status:
            msg = "You solved this Sudoku successfully!"
        else:
            msg = "You didn't solve this Sudoku. Try again!"
        QMessageBox.information(self, "Check Sudoku", msg)

    def edit_cell(self):
        """Sync the edited cell's text back into the backend grid.

        Identifies the sender by scanning the grid layout (81 widgets, cheap).
        """
        sender = self.sender()
        for i in range(self.gridSize):
            for j in range(self.gridSize):
                cell = self.grid.itemAtPosition(i, j).widget()
                if cell == sender:
                    try:
                        num = int(sender.text())
                    except ValueError:
                        # means None (cell was emptied)
                        num = None
                    self.sudokuGrid.grid[i][j] = num

    def calculate_difficulty(self, given=None):
        """
        Table:
            Extremely easy: given more than 50
            Easy:           given 36~49
            Medium:         given 32~35
            Difficult:      given 28~31
            Evil:           given 22~27
        by http://zhangroup.aporc.org/images/files/Paper_3485.pdf
            Unknown:        given < 22
        :param given: number of pre-filled cells; defaults to 81 - diffSpin.
        :return: str
        """
        if given is None:
            given = 9 * 9 - self.diffSpin.value()
        if given >= 50:
            ret = "Extremely easy"
        elif given >= 36:
            ret = "Easy"
        elif given >= 32:
            ret = "Medium"
        elif given >= 28:
            ret = "Difficult"
        elif given >= 22:
            ret = "Evil"
        else:
            ret = "Unknown"
        return ret


class SudokuCell(QLineEdit):
    """One Sudoku cell; selects its content on click for quick overwrite."""

    def __init__(self, parent=None):
        super(SudokuCell, self).__init__(parent)

    def mousePressEvent(self, e):
        # Select the existing digit so typing a new one replaces it directly.
        if not self.isReadOnly():
            self.selectAll()
from dataclasses import dataclass
from typing import Dict, List, Union

import pytest

import yahp as hp


@dataclass
class Foo(hp.Hparams):
    baz: int = hp.required(doc='int')


@dataclass
class Bar(hp.Hparams):
    baz: int = hp.required(doc='int')


@dataclass
class ParentListHP(hp.Hparams):
    # Registry maps the YAML keys 'foo'/'bar' to their Hparams classes.
    hparams_registry = {'foos': {'foo': Foo, 'bar': Bar}}

    foos: List[hp.Hparams] = hp.required(doc='All the foos')


@dataclass
class ParentListHPNoRegistry(hp.Hparams):
    foos: List[Foo] = hp.required(doc='All the foos')


@pytest.fixture
def baz() -> int:
    return 1


def get_data(baz: int, bar: bool = False, duplicate: bool = False, as_list: bool = False) -> Union[Dict, List]:
    """Build the ``foos`` payload consumed by the tests below.

    Args:
        baz (int): Foo param
        bar (bool, optional): Include bar? Defaults to False.
        duplicate (bool, optional): Include foo+1? Defaults to False.
        as_list (bool, optional): Return the dictionary as a list. Defaults to False.

    Returns:
        Union[Dict, List]: the payload, as a dict or (if ``as_list``) a list
        of 1-element dicts.
    """
    d = {'foo': {'baz': baz}}
    if bar:
        d['bar'] = {'baz': baz}
    if duplicate:
        # 'foo+1' is yahp's syntax for a second instance of the 'foo' entry.
        d['foo+1'] = {'baz': baz + 1}
    if as_list:
        d = dict_to_list(d)
    return d


def dict_to_list(data: Dict[str, Dict]) -> List[Dict]:
    """Convert a dictionary to a list of 1-element dictionaries."""
    return [{k: v} for k, v in data.items()]


def test_list_without_registry(baz):
    data = get_data(baz)
    hp = ParentListHPNoRegistry.create(data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 1
    assert hp.foos[0].baz == baz


@pytest.mark.filterwarnings('ignore:MalformedYAMLWarning')
def test_list_without_registry_passed_list(baz):
    data = get_data(baz, as_list=True)
    hp = ParentListHPNoRegistry.create(data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 1
    assert hp.foos[0].baz == baz


def test_list_without_registry_duplicate(baz):
    data = get_data(baz, duplicate=True)
    hp = ParentListHPNoRegistry.create(data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 2
    foo0 = hp.foos[0]
    assert foo0.baz == baz
    foo1 = hp.foos[1]
    assert foo1.baz == baz + 1


# Expected to fail while we do not allow CLI overrides of unregistered lists
@pytest.mark.xfail
def test_list_without_registry_cli_override(baz):
    data = get_data(baz)
    cli_args = ['--foos.foo.baz', str(baz + 2)]
    hp = ParentListHPNoRegistry.create(cli_args=cli_args, data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 1
    assert hp.foos[0].baz == baz + 2


# Expected to fail while we do not allow CLI overrides of unregistered lists
@pytest.mark.xfail
def test_list_without_registry_duplicate_cli_override(baz):
    data = get_data(baz, duplicate=True)
    cli_args = ['--foos.foo+1.baz', str(baz + 2)]
    hp = ParentListHPNoRegistry.create(cli_args=cli_args, data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 2
    foo0 = hp.foos[0]
    assert isinstance(foo0, Foo)
    assert foo0.baz == 1
    foo1 = hp.foos[1]
    assert isinstance(foo1, Foo)
    assert foo1.baz == baz + 2


def test_list_with_registry(baz):
    data = get_data(baz, bar=True)
    hp = ParentListHP.create(data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 2
    foo = hp.foos[0]
    assert isinstance(foo, Foo)
    assert foo.baz == baz
    bar = hp.foos[1]
    assert isinstance(bar, Bar)
    assert bar.baz == baz


@pytest.mark.filterwarnings('ignore:MalformedYAMLWarning')
def test_list_with_registry_passed_list(baz):
    data = get_data(baz, bar=True, as_list=True)
    hp = ParentListHP.create(data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 2
    foo = hp.foos[0]
    assert isinstance(foo, Foo)
    assert foo.baz == baz
    bar = hp.foos[1]
    assert isinstance(bar, Bar)
    assert bar.baz == baz


def test_list_with_registry_duplicate(baz):
    data = get_data(baz, bar=True, duplicate=True)
    hp = ParentListHP.create(data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 3
    foo0 = hp.foos[0]
    assert isinstance(foo0, Foo)
    assert foo0.baz == baz
    bar = hp.foos[1]
    assert isinstance(bar, Bar)
    assert bar.baz == baz
    foo1 = hp.foos[2]
    assert isinstance(foo1, Foo)
    assert foo1.baz == baz + 1


def test_list_with_registry_cli_override(baz):
    data = get_data(baz, bar=True)
    cli_args = ['--foos.foo.baz', str(baz + 2)]
    hp = ParentListHP.create(cli_args=cli_args, data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 2
    foo = hp.foos[0]
    assert isinstance(foo, Foo)
    assert foo.baz == baz + 2
    bar = hp.foos[1]
    assert isinstance(bar, Bar)
    assert bar.baz == baz


@pytest.mark.filterwarnings('ignore:MalformedYAMLWarning')
def test_list_with_registry_cli_override_custom_list(baz):
    data = get_data(baz, bar=True)
    cli_args = ['--foos', 'foo']
    hp = ParentListHP.create(cli_args=cli_args, data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 1
    foo = hp.foos[0]
    assert isinstance(foo, Foo)
    assert foo.baz == baz


# NOTE(review): an "<EMAIL>" redaction token appeared here in the extracted
# source -- almost certainly a decorator (e.g. a pytest mark) lost to
# redaction. It was an invalid token and has been removed; confirm against
# the original repository whether a mark belongs on this test.
def test_list_with_registry_duplicate_cli_override(baz):
    data = get_data(baz, bar=True, duplicate=True)
    cli_args = ['--foos.foo+1.baz', str(baz + 2)]
    hp = ParentListHP.create(cli_args=cli_args, data={'foos': data})
    assert isinstance(hp.foos, list)
    assert len(hp.foos) == 3
    foo = hp.foos[0]
    assert isinstance(foo, Foo)
    assert foo.baz == baz
    bar = hp.foos[1]
    assert isinstance(bar, Bar)
    assert bar.baz == baz
    foo1 = hp.foos[2]
    assert isinstance(foo1, Foo)
    assert foo1.baz == baz + 2
import random
from typing import List


class Solution:
    """LeetCode 912 "Sort an Array": one accepted answer plus a gallery of
    classic sorting algorithms for reference.

    Methods marked ``# TLE`` are the O(n^2) algorithms that exceed the judge's
    time limit on large inputs. Every method returns the sorted list; all but
    ``sortArray``, ``quick_sort_1`` and ``merge_sort`` also sort ``nums`` in
    place.
    """

    def sortArray(self, nums: List[int]) -> List[int]:
        """Accepted answer: delegate to the built-in Timsort."""
        return sorted(nums)

    # TLE
    def bubble_sort(self, nums):
        """O(n^2) bubble sort, in place."""
        n = len(nums)
        for i in range(n):
            # After pass i the largest i elements are already at the tail.
            for j in range(n - i - 1):
                if nums[j] > nums[j + 1]:
                    nums[j], nums[j + 1] = nums[j + 1], nums[j]
        return nums

    # TLE
    def insert_sort_1(self, nums):
        """O(n^2) insertion sort via pairwise swaps, in place."""
        n = len(nums)
        if n == 1:
            return nums
        for i in range(1, n):
            # Bubble nums[i] leftwards until it meets a smaller element.
            for j in range(i, 0, -1):
                if nums[j] < nums[j - 1]:
                    nums[j], nums[j - 1] = nums[j - 1], nums[j]
                else:
                    break
        return nums

    # TLE
    def insert_sort_2(self, nums):
        """O(n^2) insertion sort via element shifting, in place."""
        for i in range(1, len(nums)):
            key = nums[i]
            j = i - 1
            # Shift larger elements right, then drop key into the gap.
            while j >= 0 and key < nums[j]:
                nums[j + 1] = nums[j]
                j -= 1
            nums[j + 1] = key
        return nums

    # TLE
    # O(N^2)
    def selection_sort(self, nums):
        """O(n^2) selection sort, in place."""
        for i in range(len(nums)):
            # Find the minimum of the unsorted suffix and swap it to front.
            _min = min(nums[i:])
            min_index = nums[i:].index(_min)
            nums[i + min_index] = nums[i]
            nums[i] = _min
        return nums

    def quick_sort_1(self, nums):
        """Average O(n log n) quicksort building new lists (not in place)."""
        if len(nums) <= 1:
            return nums
        # Random pivot avoids the worst case on sorted input.
        pivot = random.choice(nums)
        ls = [n for n in nums if n < pivot]
        eq = [n for n in nums if n == pivot]
        gr = [n for n in nums if n > pivot]
        return self.quick_sort_1(ls) + eq + self.quick_sort_1(gr)

    def quick_sort_2(self, nums):
        """Average O(n log n) in-place quicksort (Hoare-style partition)."""
        def helper(head, tail):
            if head >= tail:
                return
            l, r = head, tail
            mid = (l + r) // 2
            pivot = nums[mid]
            # Move l/r inwards, swapping elements on the wrong side of pivot.
            while r >= l:
                while r >= l and nums[l] < pivot:
                    l += 1
                while r >= l and nums[r] > pivot:
                    r -= 1
                if r >= l:
                    nums[l], nums[r] = nums[r], nums[l]
                    l += 1
                    r -= 1
            helper(head, r)
            helper(l, tail)
        helper(0, len(nums) - 1)
        return nums

    def merge_sort(self, nums):
        """O(n log n) top-down merge sort (not in place).

        Fixed: the merge step previously consumed its inputs with
        ``list.pop(0)``, which is O(n) per call and made each merge pass
        quadratic; it now walks the two halves with index pointers for a
        true O(n) merge.
        """
        def merge(left, right):
            res = []
            i = j = 0
            while i < len(left) and j < len(right):
                # <= keeps the sort stable (left half wins ties).
                if left[i] <= right[j]:
                    res.append(left[i])
                    i += 1
                else:
                    res.append(right[j])
                    j += 1
            # At most one of these extends with a non-empty remainder.
            res.extend(left[i:])
            res.extend(right[j:])
            return res

        if len(nums) <= 1:
            return nums
        mid = len(nums) // 2
        return merge(self.merge_sort(nums[:mid]), self.merge_sort(nums[mid:]))

    def heap_sort(self, nums):
        """O(n log n) in-place heap sort (max-heap, iterative sift-down)."""
        def heapify(start, end):
            root = start
            while True:
                child = 2 * root + 1
                if child > end:
                    break
                # Pick the larger of the two children.
                if child + 1 <= end and nums[child] < nums[child + 1]:
                    child += 1
                if nums[root] < nums[child]:
                    nums[root], nums[child] = nums[child], nums[root]
                    root = child
                else:
                    break
        n = len(nums)
        # Build the max-heap bottom-up from the last internal node...
        for s in range((n - 2) // 2, -1, -1):
            heapify(s, n - 1)
        # ...then repeatedly move the max to the end and re-sift the root.
        for e in range(n - 1, 0, -1):
            nums[0], nums[e] = nums[e], nums[0]
            heapify(0, e - 1)
        return nums
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generate tf.data.Dataset object for deep speech training/evaluation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import scipy.io.wavfile as wavfile from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf # pylint: disable=g-bad-import-order from data.featurizer import AudioFeaturizer from data.featurizer import TextFeaturizer class AudioConfig(object): """Configs for spectrogram extraction from audio.""" def __init__(self, sample_rate, frame_length, frame_step, fft_length=None, normalize=False, spect_type="linear"): """Initialize the AudioConfig class. Args: sample_rate: an integer denoting the sample rate of the input waveform. frame_length: an integer for the length of a spectrogram frame, in ms. frame_step: an integer for the frame stride, in ms. fft_length: an integer for the number of fft bins. normalize: a boolean for whether apply normalization on the audio tensor. spect_type: a string for the type of spectrogram to be extracted. 
""" self.sample_rate = sample_rate self.frame_length = frame_length self.frame_step = frame_step self.fft_length = fft_length self.normalize = normalize self.spect_type = spect_type class DatasetConfig(object): """Config class for generating the DeepSpeechDataset.""" def __init__(self, audio_config, data_path, vocab_file_path): """Initialize the configs for deep speech dataset. Args: audio_config: AudioConfig object specifying the audio-related configs. data_path: a string denoting the full path of a manifest file. vocab_file_path: a string specifying the vocabulary file path. Raises: RuntimeError: file path not exist. """ self.audio_config = audio_config assert tf.gfile.Exists(data_path) assert tf.gfile.Exists(vocab_file_path) self.data_path = data_path self.vocab_file_path = vocab_file_path class DeepSpeechDataset(object): """Dataset class for training/evaluation of DeepSpeech model.""" def __init__(self, dataset_config): """Initialize the class. Each dataset file contains three columns: "wav_filename", "wav_filesize", and "transcript". This function parses the csv file and stores each example by the increasing order of audio length (indicated by wav_filesize). Args: dataset_config: DatasetConfig object. """ self.config = dataset_config # Instantiate audio feature extractor. self.audio_featurizer = AudioFeaturizer( sample_rate=self.config.audio_config.sample_rate, frame_length=self.config.audio_config.frame_length, frame_step=self.config.audio_config.frame_step, fft_length=self.config.audio_config.fft_length, spect_type=self.config.audio_config.spect_type) # Instantiate text feature extractor. 
self.text_featurizer = TextFeaturizer( vocab_file=self.config.vocab_file_path) self.speech_labels = self.text_featurizer.speech_labels self.features, self.labels = self._preprocess_data(self.config.data_path) self.num_feature_bins = ( self.features[0].shape[1] if len(self.features) else None) def _preprocess_data(self, file_path): """Generate a list of waveform, transcript pair. Note that the waveforms are ordered in increasing length, so that audio samples in a mini-batch have similar length. Args: file_path: a string specifying the csv file path for a data set. Returns: features and labels array processed from the audio/text input. """ with tf.gfile.Open(file_path, "r") as f: lines = f.read().splitlines() lines = [line.split("\t") for line in lines] # Skip the csv header. lines = lines[1:] # Sort input data by the length of waveform. lines.sort(key=lambda item: int(item[1])) features = [self._preprocess_audio(line[0]) for line in lines] labels = [self._preprocess_transcript(line[2]) for line in lines] return features, labels def _normalize_audio_tensor(self, audio_tensor): """Perform mean and variance normalization on the spectrogram tensor. Args: audio_tensor: a tensor for the spectrogram feature. Returns: a tensor for the normalized spectrogram. 
""" mean, var = tf.nn.moments(audio_tensor, axes=[0]) normalized = (audio_tensor - mean) / (tf.sqrt(var) + 1e-6) return normalized def _preprocess_audio(self, audio_file_path): """Load the audio file in memory.""" tf.logging.info( "Extracting spectrogram feature for {}".format(audio_file_path)) sample_rate, data = wavfile.read(audio_file_path) assert sample_rate == self.config.audio_config.sample_rate if data.dtype not in [np.float32, np.float64]: data = data.astype(np.float32) / np.iinfo(data.dtype).max feature = self.audio_featurizer.featurize(data) if self.config.audio_config.normalize: feature = self._normalize_audio_tensor(feature) return tf.Session().run( feature) # return a numpy array rather than a tensor def _preprocess_transcript(self, transcript): return self.text_featurizer.featurize(transcript) def input_fn(batch_size, deep_speech_dataset, repeat=1): """Input function for model training and evaluation. Args: batch_size: an integer denoting the size of a batch. deep_speech_dataset: DeepSpeechDataset object. repeat: an integer for how many times to repeat the dataset. Returns: a tf.data.Dataset object for model to consume. 
""" features = deep_speech_dataset.features labels = deep_speech_dataset.labels num_feature_bins = deep_speech_dataset.num_feature_bins def _gen_data(): for i in xrange(len(features)): feature = np.expand_dims(features[i], axis=2) input_length = [features[i].shape[0]] label_length = [len(labels[i])] yield { "features": feature, "labels": labels[i], "input_length": input_length, "label_length": label_length } dataset = tf.data.Dataset.from_generator( _gen_data, output_types={ "features": tf.float32, "labels": tf.int32, "input_length": tf.int32, "label_length": tf.int32 }, output_shapes={ "features": tf.TensorShape([None, num_feature_bins, 1]), "labels": tf.TensorShape([None]), "input_length": tf.TensorShape([1]), "label_length": tf.TensorShape([1]) }) # Repeat and batch the dataset dataset = dataset.repeat(repeat) # Padding the features to its max length dimensions. dataset = dataset.padded_batch( batch_size=batch_size, padded_shapes={ "features": tf.TensorShape([None, num_feature_bins, 1]), "labels": tf.TensorShape([None]), "input_length": tf.TensorShape([1]), "label_length": tf.TensorShape([1]) }) # Prefetch to improve speed of input pipeline. dataset = dataset.prefetch(1) return dataset
"""wal_vtop: generate a vtop theme from the current wal color palette."""
import os.path
import json
import sys
import argparse

# wal_vtop details
VERSION = "0.1.1"

# File name of the vtop theme generated from the wal palette.
vtop_file = "wal.json"


def setConfig():
    """Locate the wal color export and the vtop theme target path.

    Returns:
        (wal_colors, vtop_theme): path to wal's colors.json export and the
        path of the vtop theme file to write.

    Exits with status 1 when the wal export is missing or the platform is
    unsupported.
    """
    hostOS = getOS()
    # HOME on POSIX, USERPROFILE on Windows.
    home_dir = os.getenv("HOME", os.getenv("USERPROFILE"))
    # wal writes its palette export here.
    wal_colors = os.path.join(home_dir, ".cache", "wal", "colors.json")
    if not os.path.isfile(wal_colors):
        print("Wal colors could not be found. Try running `wal` again")
        sys.exit(1)
    # vtop ships its themes inside the globally installed npm package.
    if hostOS == "linux":
        vtop_path = "/usr/lib/node_modules/vtop/themes"
    elif hostOS == "darwin":
        vtop_path = "/usr/local/lib/node_modules/vtop/themes"
    elif hostOS == "win32":
        print("Windows platform not supported")
        sys.exit(1)
    else:
        print("Unsupported operating system")
        sys.exit(1)
    vtop_theme = os.path.join(vtop_path, vtop_file)
    return wal_colors, vtop_theme


def themeVtop(wal_colors_path, vtop_theme_path):
    """Translate a wal colors.json palette into a vtop theme file.

    Args:
        wal_colors_path: path to wal's colors.json export.
        vtop_theme_path: path of the vtop theme JSON file to write.

    Exits with status 1 when the theme file cannot be written.
    """
    # Fix: the file handle was previously leaked via json.load(open(...)).
    with open(wal_colors_path) as f:
        import_colors = json.load(f)

    colors = import_colors['colors']
    # Transfer wal colors to vtop theme json scheme
    walj = {
        "name": "Wal",
        "author": "epl",
        "title": {"fg": colors['color1']},
        "chart": {
            "fg": colors['color1'],
            "border": {"type": "line", "fg": colors['color1']}
        },
        "table": {
            "fg": colors['color15'],
            "items": {
                "selected": {
                    "bg": colors['color1'],
                    "fg": colors['color15']
                },
                "item": {"fg": colors['color1']}
            },
            "border": {"type": "line", "fg": colors['color1']}
        },
        "footer": {"fg": colors['color1']}
    }

    # Write theme json to vtop themes directory.
    # Fix: narrowed the former bare ``except:`` (which also swallowed
    # SystemExit/KeyboardInterrupt) to OSError — the only failure this
    # write can reasonably produce.
    try:
        with open(vtop_theme_path, 'w') as f:
            json.dump(walj, f)
        if os.path.isfile(vtop_theme_path):
            print("vtop theme written successfully. start vtop with "
                  "`vtop --theme wal` to view")
    except OSError:
        print("Error writing vtop theme file")
        sys.exit(1)


def getOS():
    """Return the host platform string (e.g. 'linux', 'win32', 'darwin')."""
    return sys.platform


def getArgs():
    """Parse command-line arguments."""
    description = "wal vtop"
    arg = argparse.ArgumentParser(description=description)
    arg.add_argument("-v", "--version", action="store_true",
                     help="Print wal_vtop version.")
    return arg.parse_args()


def main():
    """Entry point: resolve paths and write the vtop theme."""
    arguments = getArgs()
    if arguments.version:
        print("wal vtop", VERSION)
        sys.exit()
    wcp, vtp = setConfig()
    themeVtop(wcp, vtp)


if __name__ == '__main__':
    main()
<reponame>guillemcortes/neural-audio-fp<filename>run.py
# -*- coding: utf-8 -*-
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" run.py """
import os
import sys
import pathlib
import click
import yaml
import numpy as np
import pandas as pd
import faiss


def load_config(config_filepath, display=True):
    """Load a YAML configuration file into a dict.

    Args:
        config_filepath: path to the YAML file. NOTE(review): the click
            commands below pass the config *name* (default 'default'), not an
            obvious path — presumably resolved relative to the working
            directory or './config/'; confirm against the repo layout.
        display: when True, print the path being loaded and terminate the
            process if the file does not exist.

    Returns:
        The parsed configuration dict.
    """
    if display:
        if os.path.exists(config_filepath):
            print(f'cli: Configuration from {config_filepath}')
        else:
            sys.exit(f'cli: ERROR! Configuration file {config_filepath} is missing!!')
    with open(config_filepath, 'r', encoding='utf-8') as f:
        cfg = yaml.safe_load(f)
    return cfg


def update_config(cfg, key1: str, key2: str, val):
    """Set cfg[key1][key2] = val and return the (mutated) dict."""
    cfg[key1][key2] = val
    return cfg


def print_config(cfg):
    """Pretty-print the configuration in cyan."""
    os.system("")  # no-op shell call; enables ANSI escapes on Windows consoles
    print('\033[36m' + yaml.dump(cfg, indent=4, width=120, sort_keys=False) + '\033[0m')
    return


@click.group()
def cli():
    """ train-> generate-> evaluate.

    How to use each command: \b\n
        python run.py COMMAND --help

    """
    return


""" Train """
@cli.command()
@click.argument('checkpoint_name', required=True)
@click.option('--config', '-c', default='default', type=click.STRING,
              help="Name of model configuration located in './config/.'")
@click.option('--max_epoch', default=None, type=click.INT, help='Max epoch.')
def train(checkpoint_name, config, max_epoch):
    """ Train a neural audio fingerprinter.

    ex) python run.py train CHECKPOINT_NAME --max_epoch=100

    \b\n
    # with custom config file
    python run.py train CHECKPOINT_NAME --max_epoch=100 -c CONFIG_NAME

    NOTE: If './LOG_ROOT_DIR/checkpoint/CHECKPOINT_NAME already exists, the
    training will resume from the latest checkpoint in the directory.
    """
    # Heavy project imports are deferred so `--help` stays fast.
    from model.utils.config_gpu_memory_lim import allow_gpu_memory_growth
    from model.trainer import trainer

    cfg = load_config(config)
    if max_epoch:
        update_config(cfg, 'TRAIN', 'MAX_EPOCH', max_epoch)
    print_config(cfg)
    # allow_gpu_memory_growth()
    trainer(cfg, checkpoint_name)


# Generate fingerprint (after training)
@cli.command()
@click.argument('checkpoint_name', required=True)
@click.argument('checkpoint_index', required=False)
@click.option('--config', '-c', default='default', required=False,
              type=click.STRING,
              help="Path of the model configuration file located in 'config/'." +
              " Default is 'default'")
@click.option('--source', '-s', default=None, type=click.STRING, required=False,
              help="Custom source root directory. The source must be 16-bit "
              "8 Khz mono WAV. This is only useful when constructing a database"
              " without synthesizing queries.")
@click.option('--output', '-o', default=None, type=click.STRING, required=False,
              help="Root directory where the generated embeddings (uncompressed)" +
              " will be stored. Default is OUTPUT_ROOT_DIR/CHECKPOINT_NAME " +
              "defined in config.")
@click.option('--skip_dummy', default=False, is_flag=True,
              help='Exclude dummy-DB from the default source.')
def generate(checkpoint_name, checkpoint_index, config, source, output, skip_dummy):
    """ Generate fingerprints from a saved checkpoint.

    ex) python run.py generate CHECKPOINT_NAME

    With custom config: \b\n
    python run.py generate CHECKPOINT_NAME -c CONFIG_NAME

    • If CHECKPOINT_INDEX is not specified, the latest checkpoint in the
      OUTPUT_ROOT_DIR will be loaded.
    • The default value for the fingerprinting source is [TEST_DUMMY_DB] and
      [TEST_QUERY_DB] specified in config file.

    """
    from model.utils.config_gpu_memory_lim import allow_gpu_memory_growth
    from model.generate import generate_fingerprint

    cfg = load_config(config)
    allow_gpu_memory_growth()
    generate_fingerprint(cfg, checkpoint_name, checkpoint_index, source,
                         output, skip_dummy)


# Create index
@cli.command()
@click.argument('path_data', required=True)
@click.argument('path_shape', required=True)
@click.argument('index_path', required=True)
@click.option('--config', '-c', default='default', required=False,
              type=click.STRING,
              help="Path of the model configuration file located in 'config/'." +
              " Default is 'default'")
def index(path_data, path_shape, index_path, config):
    """ Create FAISS index from fingerprint memap file.

    ex) python run.py index PATH_DATA PATH_SHAPE INDEX_PATH -c CONFIG

    args:
        PATH_DATA: Path of the fp embeddings memap file.
        PATH_SHAPE: Path of the fp embeddings shape npy file.
        INDEX_PATH: Path where the index will be stored.
        CONFIG: Config file path
    """
    from model.utils.config_gpu_memory_lim import allow_gpu_memory_growth
    from model.index_creator import create_index

    cfg = load_config(config)
    allow_gpu_memory_growth()
    create_index(path_data, path_shape, index_path, cfg)


# match
@cli.command()
@click.argument('query_bname', required=True)
@click.argument('query_fp_path', required=True)
@click.argument('refs_fp_path', required=True)
@click.argument('index_path', required=True)
@click.option('--config', '-c', default='default', required=False,
              type=click.STRING,
              help="Path of the model configuration file located in 'config/' ." +
              " Default is 'default'")
@click.option('--extension', '-e', default='.mp3', required=False,
              type=click.STRING,
              help="Extension of the original audios.")
def match(query_bname, query_fp_path, refs_fp_path, index_path, config,
          extension):
    """ Match a query fingerprint against the reference DB via a FAISS index.

    ex) python run.py match QUERY_BNAME QUERY_FP_PATH REFS_FP_PATH INDEX_PATH
        -c CONFIG -e EXTENSION

    args:
        QUERY_BNAME: Audio file basename (without dir)
        QUERY_FP_PATH: Path of the query fp embedding npy file.
        REFS_FP_PATH: Path of the references fp embeddings memap file.
        INDEX_PATH: Path where the index will be stored.
        CONFIG: Config file path
        EXTENSION: Refs extension
    """
    from model.utils.config_gpu_memory_lim import allow_gpu_memory_growth
    from model.matcher import Matcher
    from eval.eval_faiss import load_memmap_data

    cfg = load_config(config, display=False)
    allow_gpu_memory_growth()
    #import pudb; pudb.set_trace()
    # Segment metadata is expected next to the reference fingerprint file.
    refs_segments_path = os.path.join(
        os.path.dirname(refs_fp_path), 'refs_segments.csv')
    references_segments = pd.read_csv(refs_segments_path)
    index = faiss.read_index(index_path, 2)  #2 == readonly
    query = np.load(query_fp_path)
    references_fp, _ = load_memmap_data(
        os.path.dirname(refs_fp_path), 'custom_source',
        append_extra_length=None, shape_only=False, display=False)
    matcher = Matcher(cfg, index, references_segments)
    formatted_matches = matcher.match(query, references_fp)
    # Emit the matches as CSV on stdout: header row, one empty row, then one
    # row per match.
    print('"Query","Query begin time","Query end time","Reference",'
          '"Reference begin time","Reference end time","Confidence"')
    print('"","","","","","",""')
    for qmatch in formatted_matches:
        qmatch.update({'query': query_bname.replace('.wav','')})
        print('"{}","{}","{}","{}","{}","{}","{}"'.format(
            qmatch['query'], qmatch['query_start'], qmatch['query_end'],
            qmatch['ref']+extension, qmatch['ref_start'], qmatch['ref_end'],
            qmatch['score']))


# Search and evalutation
@cli.command()
@click.argument('checkpoint_name', required=True)
@click.argument('checkpoint_index', required=True)
@click.option('--config', '-c', default='default', required=False,
              type=click.STRING,
              help="Name of the model configuration file located in 'config/'."
              + " Default is 'default'.")
@click.option('--index_type', '-i', default='ivfpq', type=click.STRING,
              help="Index type must be one of {'L2', 'IVF', 'IVFPQ', " +
              "'IVFPQ-RR', 'IVFPQ-ONDISK', HNSW'}")
@click.option('--test_seq_len', default='1 3 5 9 11 19', type=click.STRING,
              help="A set of different number of segments to test. " +
              "Numbers are separated by spaces. Default is '1 3 5 9 11 19'," +
              " which corresponds to '1s, 2s, 3s, 5s, 6s, 10s'.")
@click.option('--test_ids', '-t', default='icassp', type=click.STRING,
              help="One of {'all', 'icassp', 'path/file.npy', (int)}. If 'all', " +
              "test all IDs from the test. If 'icassp', use the 2,000 " +
              "sequence starting point IDs of 'eval/test_ids_icassp.npy' " +
              "located in ./eval directory. You can also specify the 1-D array "
              "file's location. Any numeric input N (int) > 0 will perform "
              "search test at random position (ID) N times. Default is 'icassp'.")
@click.option('--nogpu', default=False, is_flag=True,
              help='Use this flag to use CPU only.')
def evaluate(checkpoint_name, checkpoint_index, config, index_type,
             test_seq_len, test_ids, nogpu):
    """ Search and evalutation.

    ex) python run.py evaluate CHECKPOINT_NAME CHECKPOINT_INDEX

    With options: \b\n
    ex) python run.py evaluate CHECKPOINT_NAME CHEKPOINT_INDEX -i ivfpq -t 3000 --nogpu

    • Currently, the 'evaluate' command does not reference any information
      other than the output log directory from the config file.
    """
    from eval.eval_faiss import eval_faiss

    cfg = load_config(config)
    emb_dir = cfg['DIR']['OUTPUT_ROOT_DIR'] + checkpoint_name + '/' + \
        str(checkpoint_index) + '/'
    # eval_faiss consumes an argv-style list rather than keyword arguments.
    if nogpu:
        eval_faiss([emb_dir, "--index_type", index_type, "--test_seq_len",
                    test_seq_len, "--test_ids", test_ids, "--nogpu"])
    else:
        eval_faiss([emb_dir, "--index_type", index_type, "--test_seq_len",
                    test_seq_len, "--test_ids", test_ids])


if __name__ == '__main__':
    cli()
# CMSSW configuration fragment: B-physics (onia -> mu mu) skim.
# Builds PAT muons, selects muons/tracks, forms dimuon candidates, and
# defines the skim event content and sequence.
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Reconstruction_cff import *

# muons with trigger info
import PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi
oniaPATMuonsWithoutTrigger = PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi.patMuons.clone(
    muonSource = 'muons',
    # Embed the relevant reco information directly into the PAT muon.
    embedTrack = True,
    embedCombinedMuon = True,
    embedStandAloneMuon = True,
    embedPFCandidate = False,
    embedCaloMETMuonCorrs = cms.bool(False),
    embedTcMETMuonCorrs = cms.bool(False),
    embedPfEcalEnergy = cms.bool(False),
    embedPickyMuon = False,
    embedTpfmsMuon = False,
    userIsolation = cms.PSet(),   # no extra isolation beyond what's in reco::Muon itself
    isoDeposits = cms.PSet(),     # no heavy isodeposits
    addGenMatch = False,          # no mc
    addTriggerMatching = cms.bool(False)
)

# Soft-muon selection: TMOneStationTight ID, impact-parameter and
# tracker-quality cuts, plus eta-dependent pt thresholds.
oniaSelectedMuons = cms.EDFilter('PATMuonSelector',
    src = cms.InputTag('oniaPATMuonsWithoutTrigger'),
    cut = cms.string('muonID(\"TMOneStationTight\")'
                     ' && abs(innerTrack.dxy) < 0.3'
                     ' && abs(innerTrack.dz) < 20.'
                     ' && innerTrack.hitPattern.trackerLayersWithMeasurement > 5'
                     ' && innerTrack.hitPattern.pixelLayersWithMeasurement > 0'
                     ' && innerTrack.quality(\"highPurity\")'
                     ' && ((abs(eta) <= 0.9 && pt > 2.5) || (0.9 < abs(eta) <= 2.4 && pt > 1.5))'
    ),
    filter = cms.bool(True)
)

# tracks
oniaSelectedTracks=cms.EDFilter("TrackSelector",
    src = cms.InputTag("generalTracks"),
    cut = cms.string('pt > 0.7 && abs(eta) <= 3.0'
                     '&& charge !=0'
                     '&& quality(\"highPurity\")')
)

# dimuon = Onia2MUMU
from HeavyFlavorAnalysis.Onia2MuMu.onia2MuMuPAT_cfi import *
onia2MuMuPAT.muons=cms.InputTag('oniaSelectedMuons')
onia2MuMuPAT.primaryVertexTag=cms.InputTag('offlinePrimaryVertices')
onia2MuMuPAT.beamSpotTag=cms.InputTag('offlineBeamSpot')
# Loose mass floor plus a dz compatibility requirement between the two muons.
onia2MuMuPAT.dimuonSelection=cms.string("0.2 < mass && abs(daughter('muon1').innerTrack.dz - daughter('muon2').innerTrack.dz) < 25")
onia2MuMuPAT.addMCTruth = cms.bool(False)

# Keep only events with at least one dimuon candidate.
onia2MuMuPATCounter = cms.EDFilter('CandViewCountFilter',
    src = cms.InputTag('onia2MuMuPAT'),
    minNumber = cms.uint32(1),
)

# make photon candidate conversions for P-wave studies
from HeavyFlavorAnalysis.Onia2MuMu.OniaPhotonConversionProducer_cfi import PhotonCandidates as oniaPhotonCandidates

# add v0 with tracks embed
from HeavyFlavorAnalysis.Onia2MuMu.OniaAddV0TracksProducer_cfi import *

# Pick branches you want to keep
BPHSkim_EventContent = cms.PSet(
    outputCommands = cms.untracked.vstring(
        'drop *',
        'keep recoVertexs_offlinePrimaryVertices_*_*',
        'keep *_offlineBeamSpot_*_*',
        'keep *_TriggerResults_*_HLT',
        'keep *_hltGtStage2ObjectMap_*_HLT',
        'keep *_hltTriggerSummaryAOD_*_HLT',
        'keep *_gmtStage2Digis_Muon_RECO',
        'keep *_gtDigis_*_RECO',
        'keep *_oniaSelectedTracks_*_*',
        'keep *_oniaPhotonCandidates_*_*',
        'keep *_onia2MuMuPAT_*_*',
        'keep *_oniaV0Tracks_*_*',
        'keep PileupSummaryInfos_*_*_*'
    )
)

# Full skim sequence; modules run in the listed order.
BPHSkimSequence = cms.Sequence(
    oniaPATMuonsWithoutTrigger *
    oniaSelectedMuons *
    onia2MuMuPAT *
    onia2MuMuPATCounter *
    oniaPhotonCandidates *
    oniaV0Tracks *
    oniaSelectedTracks
)
"""Array transforms (normalize, pad, crop, resize, cap) for 2D/3D images."""
import numpy as np
import os
import pdb
import scipy
import scipy.ndimage  # fix: `import scipy` alone does not expose scipy.ndimage
import warnings
import pdb


def normalize(img):
    """Subtract mean, set STD to 1.0.

    NOTE: a constant input has std == 0 and will produce inf/nan — assumed
    not to occur for real image data.
    """
    result = img.astype(np.float64)
    result -= np.mean(result)
    result /= np.std(result)
    return result


def do_nothing(img):
    """Identity transform; only casts the input to float64."""
    # Fix: np.float was removed in NumPy 1.24; builtin float is the same
    # dtype (float64).
    return img.astype(float)


class Propper(object):
    """Padder + Cropper: '+' pads up to a multiple, '-' crops down to one."""

    def __init__(self, action='-', **kwargs):
        assert action in ('+', '-')
        self.action = action
        # Delegate to the matching transformer; kwargs are forwarded as-is.
        if self.action == '+':
            self.transformer = Padder('+', **kwargs)
        else:
            self.transformer = Cropper('-', **kwargs)

    def __repr__(self):
        return 'Propper({})'.format(self.action)

    def __str__(self):
        return '{} => transformer: {}'.format(self.__repr__(), self.transformer)

    def __call__(self, x_in):
        return self.transformer(x_in)

    def undo_last(self, x_in):
        return self.transformer.undo_last(x_in)


class Padder(object):
    def __init__(self, padding='+', by=16, mode='constant'):
        """
        padding: '+', int, sequence
          '+': pad dimensions up to multiple of "by"
          int: pad each dimension by this value
          sequence: pad each dimension by corresponding value in sequence
        by: int for use with '+' padding option
        mode: str passed to numpy.pad function
        """
        self.padding = padding
        self.by = by
        self.mode = mode
        self.pads = {}       # cache: input shape -> pad_width
        self.last_pad = None # record of the most recent __call__ for undo_last

    def __repr__(self):
        return 'Padder{}'.format((self.padding, self.by, self.mode))

    def _calc_pad_width(self, shape_in):
        """Compute the per-dimension (left, right) pad widths."""
        if isinstance(self.padding, (str, int)):
            paddings = (self.padding, )*len(shape_in)
        else:
            paddings = self.padding
        pad_width = []
        for i in range(len(shape_in)):
            if isinstance(paddings[i], int):
                pad_width.append((paddings[i],)*2)
            elif paddings[i] == '+':
                # Round this dimension up to the next multiple of self.by and
                # split the extra between left and right (right gets the odd 1).
                padding_total = int(np.ceil(1.*shape_in[i]/self.by)*self.by) - shape_in[i]
                pad_left = padding_total//2
                pad_right = padding_total - pad_left
                pad_width.append((pad_left, pad_right))
        assert len(pad_width) == len(shape_in)
        return pad_width

    def undo_last(self, x_in):
        """Crops input so its dimensions matches dimensions of last input to __call__."""
        assert x_in.shape == self.last_pad['shape_out']
        # Fix: index with a tuple of slices — indexing an ndarray with a
        # *list* of slices is an error on modern NumPy.
        slices = tuple(
            slice(a, -b) if (a, b) != (0, 0) else slice(None)
            for a, b in self.last_pad['pad_width']
        )
        return x_in[slices].copy()

    def __call__(self, x_in):
        shape_in = x_in.shape
        pad_width = self.pads.get(shape_in, self._calc_pad_width(shape_in))
        x_out = np.pad(x_in, pad_width, mode=self.mode)
        if shape_in not in self.pads:
            self.pads[shape_in] = pad_width
        self.last_pad = {'shape_in': shape_in, 'pad_width': pad_width,
                         'shape_out': x_out.shape}
        return x_out


class Cropper(object):
    def __init__(self, cropping, by=16, offset='mid', n_max_pixels=9732096):
        """Crop input array to given shape.

        cropping: '-', int, sequence ('-' crops down to a multiple of `by`,
          int crops that many pixels off the dimension, None keeps it)
        by: rounding unit for the '-' option
        offset: 'mid' to center the crop, or explicit per-dim offsets
        n_max_pixels: cap on total cropped size; the last two dims are
          shrunk alternately until the crop fits
        """
        self.cropping = cropping
        self.offset = offset
        self.by = by
        self.n_max_pixels = n_max_pixels
        self.crops = {}       # cache: input shape -> crop metadata
        self.last_crop = None # record of the most recent __call__ for undo_last

    def __repr__(self):
        return 'Cropper{}'.format((self.cropping, self.by, self.offset,
                                   self.n_max_pixels))

    def _adjust_shape_crop(self, shape_crop):
        """Shrink the crop until it fits within n_max_pixels."""
        key = tuple(shape_crop)
        shape_crop_new = list(shape_crop)
        prod_shape = np.prod(shape_crop_new)
        idx_dim_reduce = 0
        order_dim_reduce = list(range(len(shape_crop))[-2:])  # alternate between last two dimensions
        while prod_shape > self.n_max_pixels:
            dim = order_dim_reduce[idx_dim_reduce]
            if not (dim == 0 and shape_crop_new[dim] <= 64):
                shape_crop_new[dim] -= self.by
                prod_shape = np.prod(shape_crop_new)
            idx_dim_reduce += 1
            if idx_dim_reduce >= len(order_dim_reduce):
                idx_dim_reduce = 0
        value = tuple(shape_crop_new)
        print('DEBUG: cropper shape change', shape_crop, 'becomes', value)
        return value

    def _calc_shape_crop(self, shape_in):
        """Compute the target crop shape for a given input shape."""
        croppings = (self.cropping, )*len(shape_in) if isinstance(self.cropping, (str, int)) else self.cropping
        shape_crop = []
        for i in range(len(shape_in)):
            if croppings[i] is None:
                shape_crop.append(shape_in[i])
            elif isinstance(croppings[i], int):
                shape_crop.append(shape_in[i] - croppings[i])
            elif croppings[i] == '-':
                shape_crop.append(shape_in[i]//self.by*self.by)
            else:
                raise NotImplementedError
        if self.n_max_pixels is not None:
            shape_crop = self._adjust_shape_crop(shape_crop)
        self.crops[shape_in]['shape_crop'] = shape_crop
        return shape_crop

    def _calc_offsets_crop(self, shape_in, shape_crop):
        """Compute per-dimension crop start offsets ('mid' centers the crop)."""
        offsets = (self.offset, )*len(shape_in) if isinstance(self.offset, (str, int)) else self.offset
        offsets_crop = []
        for i in range(len(shape_in)):
            offset = (shape_in[i] - shape_crop[i])//2 if offsets[i] == 'mid' else offsets[i]
            if offset + shape_crop[i] > shape_in[i]:
                warnings.warn('Cannot crop outsize image dimensions ({}:{} for dim {}).'.format(offset, offset + shape_crop[i], i))
                raise AttributeError
            offsets_crop.append(offset)
        self.crops[shape_in]['offsets_crop'] = offsets_crop
        return offsets_crop

    def _calc_slices(self, shape_in):
        shape_crop = self._calc_shape_crop(shape_in)
        offsets_crop = self._calc_offsets_crop(shape_in, shape_crop)
        slices = [slice(offsets_crop[i], offsets_crop[i] + shape_crop[i])
                  for i in range(len(shape_in))]
        self.crops[shape_in]['slices'] = slices
        return slices

    def __call__(self, x_in):
        shape_in = x_in.shape
        if shape_in in self.crops:
            slices = self.crops[shape_in]['slices']
        else:
            self.crops[shape_in] = {}
            slices = self._calc_slices(shape_in)
        # Fix: tuple(...) — list-of-slices indexing errors on modern NumPy.
        x_out = x_in[tuple(slices)].copy()
        self.last_crop = {'shape_in': shape_in, 'slices': slices,
                          'shape_out': x_out.shape}
        return x_out

    def undo_last(self, x_in):
        """Pads input with zeros so its dimensions matches dimensions of last input to __call__."""
        assert x_in.shape == self.last_crop['shape_out']
        shape_out = self.last_crop['shape_in']
        slices = self.last_crop['slices']
        x_out = np.zeros(shape_out, dtype=x_in.dtype)
        # Fix: tuple(...) — see __call__.
        x_out[tuple(slices)] = x_in
        return x_out


class Resizer(object):
    def __init__(self, factors):
        """factors - tuple of resizing factors for each dimension of the input array"""
        self.factors = factors

    def __call__(self, x):
        return scipy.ndimage.zoom(x, (self.factors), mode='nearest')

    def __repr__(self):
        return 'Resizer({:s})'.format(str(self.factors))


class ReflectionPadder3d(object):
    def __init__(self, padding):
        """Return padded 3D numpy array by mirroring/reflection.

        Parameters:
        padding - (int or tuple) size of the padding. If padding is an int, pad
          all dimensions by the same value. If padding is a tuple, pad the
          (z, y, x) dimensions by values specified in the tuple.
        """
        self._padding = None
        if isinstance(padding, int):
            self._padding = (padding, )*3
        elif isinstance(padding, tuple):
            self._padding = padding
        # Fix: compare with `is None` rather than `== None`.
        if (self._padding is None) or any(i < 0 for i in self._padding):
            raise AttributeError

    def __call__(self, ar):
        return pad_mirror(ar, self._padding)


class Capper(object):
    def __init__(self, low=None, hi=None):
        """Clamp values into [low, hi]; either bound may be omitted."""
        self._low = low
        self._hi = hi

    def __call__(self, ar):
        result = ar.copy()
        if self._hi is not None:
            result[result > self._hi] = self._hi
        if self._low is not None:
            result[result < self._low] = self._low
        return result

    def __repr__(self):
        return 'Capper({}, {})'.format(self._low, self._hi)


def pad_mirror(ar, padding):
    """Pad 3d array using mirroring.

    Parameters:
    ar - (numpy.array) array to be padded
    padding - (tuple) per-dimension padding values; each must not exceed the
      corresponding dimension of `ar`
    """
    shape = tuple((ar.shape[i] + 2*padding[i]) for i in range(3))
    result = np.zeros(shape, dtype=ar.dtype)
    slices_center = tuple(slice(padding[i], padding[i] + ar.shape[i]) for i in range(3))
    result[slices_center] = ar
    # z-axis, centers
    if padding[0] > 0:
        result[0:padding[0], slices_center[1], slices_center[2]] = np.flip(ar[0:padding[0], :, :], axis=0)
        result[ar.shape[0] + padding[0]:, slices_center[1], slices_center[2]] = np.flip(ar[-padding[0]:, :, :], axis=0)
    # y-axis
    result[:, 0:padding[1], :] = np.flip(result[:, padding[1]:2*padding[1], :], axis=1)
    result[:, padding[1] + ar.shape[1]:, :] = np.flip(result[:, ar.shape[1]:ar.shape[1] + padding[1], :], axis=1)
    # x-axis
    result[:, :, 0:padding[2]] = np.flip(result[:, :, padding[2]:2*padding[2]], axis=2)
    result[:, :, padding[2] + ar.shape[2]:] = np.flip(result[:, :, ar.shape[2]:ar.shape[2] + padding[2]], axis=2)
    return result
# Copyright 2020 PCL & PKU
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Data operations, will be used in run_pretrain.py
"""
import os
import math
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
from .config import cfg


def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None):
    """Create the BERT pre-training dataset from TFRecord files.

    Args:
        device_num: number of shards (devices) for distributed training.
        rank: shard id of this device.
        do_shuffle: the string "true" enables file-level shuffling
            (string, not bool — it comes straight from the CLI).
        data_dir: directory containing the *tfrecord* files.
        schema_dir: optional schema path; empty string means no schema.

    Returns:
        A batched mindspore dataset.
    """
    # apply repeat operations
    files = os.listdir(data_dir)
    data_files = []
    for file_name in files:
        if "tfrecord" in file_name:
            data_files.append(os.path.join(data_dir, file_name))
    # Sort then reverse for a deterministic (descending) file order.
    data_files.sort()
    data_files.reverse()
    data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
                                  columns_list=["input_ids", "input_mask", "segment_ids",
                                                "next_sentence_labels", "masked_lm_positions",
                                                "masked_lm_ids", "masked_lm_weights"],
                                  shuffle=ds.Shuffle.FILES if do_shuffle == "true" else False,
                                  num_shards=device_num, shard_id=rank,
                                  shard_equal_rows=False)
    ori_dataset_size = data_set.get_dataset_size()
    print('origin dataset size: ', ori_dataset_size)
    # Cast all integer label/index columns to int32, as the model expects.
    type_cast_op = C.TypeCast(mstype.int32)
    data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions")
    data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels")
    data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
    # apply batch operations
    data_set = data_set.batch(cfg.batch_size, drop_remainder=True)
    print("train data size: {}".format(data_set.get_dataset_size()))
    print("train repeat count: {}".format(data_set.get_repeat_count()))
    return data_set


def create_bert_eval_dataset(batchsize=1, device_num=1, rank=0, data_dir=None,
                             schema_dir=None, count=-1):
    """Create the BERT evaluation dataset, padding with zero rows so the
    sample count divides evenly across devices and batches.

    Args:
        batchsize: evaluation batch size.
        device_num / rank: shard count and shard id for distributed eval.
        data_dir: a TFRecord file, or a directory of *tfrecord* files.
        schema_dir: optional schema path; empty string means no schema.
        count: number of samples to take; -1 takes all.
    """
    data_files = []
    if os.path.isdir(data_dir):
        files = os.listdir(data_dir)
        for file_name in files:
            if "tfrecord" in file_name:
                data_files.append(os.path.join(data_dir, file_name))
    else:
        data_files.append(data_dir)
    data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
                                  columns_list=["input_ids", "input_mask", "segment_ids",
                                                "next_sentence_labels", "masked_lm_positions",
                                                "masked_lm_ids", "masked_lm_weights"],
                                  shard_equal_rows=True).take(count)
    orig_size = data_set.get_dataset_size()
    print("origin size: ", orig_size)
    dtypes = data_set.output_types()
    shapes = data_set.output_shapes()
    # Pad up to a whole number of (device_num * batchsize) batches.
    output_batches = math.ceil(orig_size / device_num / batchsize)
    padded_num = output_batches * device_num * batchsize - orig_size
    print("padded num: ", padded_num)
    if padded_num > 0:
        item = {"input_ids": np.zeros(shapes[0], dtypes[0]),
                "input_mask": np.zeros(shapes[1], dtypes[1]),
                "segment_ids": np.zeros(shapes[2], dtypes[2]),
                "next_sentence_labels": np.zeros(shapes[3], dtypes[3]),
                "masked_lm_positions": np.zeros(shapes[4], dtypes[4]),
                "masked_lm_ids": np.zeros(shapes[5], dtypes[5]),
                "masked_lm_weights": np.zeros(shapes[6], dtypes[6])}
        # NOTE(review): every padded sample is the *same* dict object; fine as
        # long as the consumer never mutates rows in place — confirm.
        padded_samples = [item for x in range(padded_num)]
        padded_ds = ds.PaddedDataset(padded_samples)
        eval_ds = data_set + padded_ds
        sampler = ds.DistributedSampler(num_shards=device_num, shard_id=rank,
                                        shuffle=False)
        eval_ds.use_sampler(sampler)
    else:
        # No padding needed: re-read with native sharding instead.
        eval_ds = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
                                     columns_list=["input_ids", "input_mask", "segment_ids",
                                                   "next_sentence_labels", "masked_lm_positions",
                                                   "masked_lm_ids", "masked_lm_weights"],
                                     num_shards=device_num, shard_id=rank,
                                     shard_equal_rows=True).take(count)
    type_cast_op = C.TypeCast(mstype.int32)
    eval_ds = eval_ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="next_sentence_labels", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="segment_ids", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="input_mask", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="input_ids", operations=type_cast_op)
    eval_ds = eval_ds.batch(batchsize, drop_remainder=True)
    print("eval data size: {}".format(eval_ds.get_dataset_size()))
    print("eval repeat count: {}".format(eval_ds.get_repeat_count()))
    return eval_ds


def create_ner_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy",
                       data_file_path=None, dataset_format="mindrecord",
                       schema_file_path=None, do_shuffle=True, drop_remainder=True):
    """create finetune or evaluation dataset"""
    type_cast_op = C.TypeCast(mstype.int32)
    # NER data may come as MindRecord or TFRecord.
    if dataset_format == "mindrecord":
        dataset = ds.MindDataset([data_file_path],
                                 columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"],
                                 shuffle=do_shuffle)
    else:
        dataset = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None,
                                     columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"],
                                     shuffle=do_shuffle)
    # Spearman correlation is a regression-style metric: labels stay float.
    if assessment_method == "Spearman_correlation":
        type_cast_op_float = C.TypeCast(mstype.float32)
        dataset = dataset.map(operations=type_cast_op_float, input_columns="label_ids")
    else:
        dataset = dataset.map(operations=type_cast_op, input_columns="label_ids")
    dataset = dataset.map(operations=type_cast_op, input_columns="segment_ids")
    dataset = dataset.map(operations=type_cast_op, input_columns="input_mask")
    dataset = dataset.map(operations=type_cast_op, input_columns="input_ids")
    dataset = dataset.repeat(repeat_count)
    # apply batch operations
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    return dataset


def create_classification_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy",
                                  data_file_path=None, schema_file_path=None, do_shuffle=True):
    """create finetune or evaluation dataset"""
    type_cast_op = C.TypeCast(mstype.int32)
    data_set = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None,
                                  columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"],
                                  shuffle=do_shuffle)
    # Spearman correlation is a regression-style metric: labels stay float.
    if assessment_method == "Spearman_correlation":
        type_cast_op_float = C.TypeCast(mstype.float32)
        data_set = data_set.map(operations=type_cast_op_float, input_columns="label_ids")
    else:
        data_set = data_set.map(operations=type_cast_op, input_columns="label_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
    data_set = data_set.repeat(repeat_count)
    # apply batch operations
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set


def generator_squad(data_features):
    # Yield (input_ids, input_mask, segment_ids, unique_id) tuples for the
    # GeneratorDataset used at SQuAD evaluation time.
    for feature in data_features:
        yield (feature.input_ids, feature.input_mask, feature.segment_ids,
               feature.unique_id)


def create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None,
                         schema_file_path=None, is_training=True, do_shuffle=True):
    """create finetune or evaluation dataset"""
    type_cast_op = C.TypeCast(mstype.int32)
    if is_training:
        # Training reads TFRecords with answer positions.
        data_set = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None,
                                      columns_list=["input_ids", "input_mask", "segment_ids",
                                                    "start_positions", "end_positions",
                                                    "unique_ids", "is_impossible"],
                                      shuffle=do_shuffle)
        data_set = data_set.map(operations=type_cast_op, input_columns="start_positions")
        data_set = data_set.map(operations=type_cast_op, input_columns="end_positions")
    else:
        # Evaluation consumes pre-built feature objects via a generator;
        # here data_file_path is the feature list, not a file path.
        data_set = ds.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle,
                                       column_names=["input_ids", "input_mask",
                                                     "segment_ids", "unique_ids"])
    data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="unique_ids")
    data_set = data_set.repeat(repeat_count)
    # apply batch operations
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set
"""Plot a Zooniverse subject.

<NAME> <<EMAIL>>
Research School of Astronomy and Astrophysics
The Australian National University
2017
"""

# Fixed: matplotlib.pyplot and numpy were each imported twice; duplicates
# removed (one import per module, grouped and sorted).
import aplpy
import astropy.coordinates
import astropy.io.ascii
import astropy.io.fits
import matplotlib
import matplotlib.patches
import matplotlib.pyplot as plt
import numpy

# Figure sizing: convert the LaTeX column width (in points) into inches for
# matplotlib, and match the paper's 8 pt font.
INCHES_PER_PT = 1.0 / 72.27
COLUMN_WIDTH_PT = 240.0
FONT_SIZE_PT = 8.0

# Render all text through LaTeX (pdflatex via the pgf backend) so figure
# typography matches the surrounding document.
pgf_with_latex = {
    "pgf.texsystem": "pdflatex",
    "text.usetex": True,
    "font.family": "serif",
    "font.serif": [],
    "font.sans-serif": [],
    "font.monospace": [],
    "axes.labelsize": FONT_SIZE_PT,
    "font.size": FONT_SIZE_PT,
    "legend.fontsize": FONT_SIZE_PT,
    "xtick.labelsize": FONT_SIZE_PT,
    "ytick.labelsize": FONT_SIZE_PT,
    "figure.figsize": (COLUMN_WIDTH_PT * INCHES_PER_PT,
                       0.8 * COLUMN_WIDTH_PT * INCHES_PER_PT),
    "pgf.preamble": [
        r"\usepackage[utf8x]{inputenc}",
        r"\usepackage[T1]{fontenc}",
    ]
}
matplotlib.rcParams.update(pgf_with_latex)


def plot(radio_path, ir_path, plot_atlas_hosts=False, vmax=99.5,
         centrebox=False, centreboxwidth=None, width_in_px=True,
         stretch='arcsinh', fig=None, first=False):
    """Plot an IR image with radio contours overlaid.

    :param radio_path: path to the radio FITS file (contour source).
    :param ir_path: path to the infrared FITS file (background image).
    :param plot_atlas_hosts: if True, mark SWIRE host positions from the
        RGZ DR1 catalogue as red crosses.
    :param vmax: percentile of the IR data used as the colour-scale maximum.
    :param centrebox: if True, draw a red rectangle centred on the image.
    :param centreboxwidth: box width; pixels if width_in_px else degrees.
    :param width_in_px: interpret centreboxwidth in pixels (converted to
        degrees via the radio header CDELTs) rather than degrees.
    :param stretch: kept for API compatibility; only used by the commented
        grayscale code below.
    :param fig: optional matplotlib figure to draw into.
    :param first: True for FIRST survey cubes (4-D data: use slices=[2, 3]
        and CDELT3/CDELT4), False for ATLAS-style 2-D images.
    :return: the aplpy.FITSFigure.
    """
    fig = aplpy.FITSFigure(ir_path, slices=[0, 1], figure=fig)
    fig.set_theme('publication')
    # if not v:
    #     fig.show_grayscale(stretch=stretch, invert=True)
    # else:
    #     fig.show_grayscale(stretch=stretch, vmin=v[0], vmax=v[1], invert=True)
    with astropy.io.fits.open(ir_path) as f:
        # Clip the colour scale at the vmax percentile so a few bright pixels
        # do not wash out the background.
        fig.show_colorscale(cmap='cubehelix_r', vmin=f[0].data.min(),
                            vmax=numpy.percentile(f[0].data, vmax))

    if plot_atlas_hosts:
        # NOTE(review): hard-coded local catalogue path — confirm availability
        # before running on another machine.
        table = astropy.io.ascii.read(
            '/Users/alger/data/RGZ/dr1_weighted_old/static_rgz_host_full.csv')
        ras = table['SWIRE.ra']
        decs = table['SWIRE.dec']
        fig.show_markers(ras, decs, marker='x', s=50, c='red')

    if centrebox:
        with astropy.io.fits.open(radio_path) as f, \
                astropy.io.fits.open(ir_path) as g:
            if first:
                # FIRST contours: multiples of the survey RMS (0.14 mJy).
                contours = numpy.array([4, 8, 16, 32, 64, 128, 256]) * 0.14e-3
                fig.show_contour(f, levels=contours, colors='black',
                                 linewidths=0.75, zorder=2, slices=[2, 3])
            else:
                contours = [4, 8, 16, 32, 64, 128, 256]
                fig.show_contour(f, levels=contours, colors='black',
                                 linewidths=0.75, zorder=2)
            # Box is centred on the middle pixel of the IR image.
            centre = numpy.array(g[0].data.shape) // 2
            if not first:
                cdelt1 = f[0].header['CDELT1']
                cdelt2 = f[0].header['CDELT2']
                ra, dec = fig.pixel2world(centre[0], centre[1])
            else:
                # FIRST cubes keep the sky axes in CDELT3/CDELT4.
                cdelt1 = f[0].header['CDELT3']
                cdelt2 = f[0].header['CDELT4']
                ra, dec = fig.pixel2world(centre[0], centre[1])
            if width_in_px:
                # CDELT1 is negative (RA increases leftward); negate it so the
                # computed width is positive.
                width = -cdelt1 * centreboxwidth
                height = cdelt2 * centreboxwidth
            else:
                width = centreboxwidth
                height = centreboxwidth
            fig.show_rectangles([ra], [dec], width, height, color='r',
                                linewidth=1)
    else:
        with astropy.io.fits.open(radio_path) as f:
            if first:
                contours = numpy.array([4, 8, 16, 32, 64]) * 0.14e-3
                fig.show_contour(f, levels=contours, colors='black',
                                 linewidths=0.75, zorder=2, slices=[2, 3])
            else:
                contours = [4, 8, 16, 32, 64, 128, 256]
                fig.show_contour(f, levels=contours, colors='black',
                                 linewidths=0.75, zorder=2)

    fig.axis_labels.set_xtext('Right Ascension (J2000)')
    fig.axis_labels.set_ytext('Declination (J2000)')
    # fig.ticks.set_linewidth(2)
    # fig.ticks.set_color('black')
    # fig.tick_labels.set_font(size='xx-large', weight='medium', \
    #                          stretch='normal', family='sans-serif', \
    #                          style='normal', variant='normal')
    # fig.axis_labels.set_font(size='xx-large', weight='medium', \
    #                          stretch='normal', family='sans-serif', \
    #                          style='normal', variant='normal')
    fig.set_tick_labels_format(xformat='hh:mm:ss', yformat='dd:mm:ss')
    return fig


def plot_box_FIRST(fig, path):
    # NOTE(review): this function looks incomplete — show_rectangles() is
    # called without the required position/size arguments and will raise a
    # TypeError; the commented code below suggests a red centre box was
    # intended. Left unchanged pending confirmation of the intended geometry.
    fig.show_rectangles([])
    # rect = matplotlib.patches.Rectangle(
    #     (267 / 2 - 267 / 8 * 3 / 2, 267 / 2 - 267 / 8 * 3 / 2),
    #     267 / 8 * 3, 267 / 8 * 3,
    #     facecolor='None', edgecolor='red', linewidth=2)
    # plt.gca().add_patch(rect)


# def plot_box_ATLAS(fig, path):
#     rect = matplotlib.patches.Rectangle(
#         (100 - 35, 100 - 35), 70, 70,
#         facecolor='None', edgecolor='red', linewidth=2)
#     plt.gca().add_patch(rect)


if __name__ == '__main__':
    # Fixed: the suite below contained only comments, which is a syntax error
    # at import time; "pass" makes the module importable. Uncomment one of the
    # reference invocations to reproduce a figure from the paper.
    pass
    # radio_path = "/Users/alger/data/RGZ/cdfs/2x2/CI2363_radio.fits"
    # ir_path = "/Users/alger/data/RGZ/cdfs/2x2/CI2363_ir.fits"
    # fig = plt.figure()
    # fig = plot(radio_path, ir_path, plot_atlas_hosts=False, centrebox=True,
    #            centreboxwidth=48 / 60 / 60, width_in_px=False, fig=fig)
    # plt.subplots_adjust(top=1, right=0.95, left=0.3)
    # plt.show()
    # plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/CI2363_fig.pdf')

    # radio_path = "/Users/alger/data/RGZ/cdfs/2x2/CI0077C1_radio.fits"
    # ir_path = "/Users/alger/data/RGZ/cdfs/2x2/CI0077C1_ir.fits"
    # fig = plt.figure()
    # fig = plot(radio_path, ir_path, plot_atlas_hosts=True, centrebox=False,
    #            fig=fig, vmax=99.9)
    # plt.subplots_adjust(top=1, right=0.95, left=0.3)
    # plt.show()
    # plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/CI0077C1_fig.pdf')

    # radio_path = "/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/FIRSTJ151227.2+454026_8.fits"
    # ir_path = "/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/2279p454_ac51-w1-int-3_ra228.11333333333332_dec45.67388888888889_asec480.000.fits"
    # fig = plt.figure()
    # fig = plot(radio_path, ir_path, plot_atlas_hosts=False, centrebox=True,
    #            centreboxwidth=3 / 60, width_in_px=False, stretch='linear',
    #            fig=fig, first=True)
    # plt.subplots_adjust(top=1, right=0.95, left=0.3)
    # plt.show()
    # plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/FIRSTJ151227_fig.pdf')
"""
Get and set environment variables in deployed lambda functions using the SSM param store variable named
"environment".
"""
import os
import sys
import select
import json
import argparse
import logging
import typing

from botocore.exceptions import ClientError

from dss.operations import dispatch
from dss.operations.util import polite_print
from dss.operations.secrets import fix_secret_variable_prefix, fetch_secret_safely
from dss.util.aws.clients import secretsmanager as sm_client  # type: ignore
from dss.util.aws.clients import es as es_client  # type: ignore
from dss.util.aws.clients import ssm as ssm_client  # type: ignore
import dss.util.aws.clients

# "lambda" is a Python keyword, so the lambda client cannot be imported with a
# normal "from ... import lambda" statement; fetch it with getattr() instead.
lambda_client = getattr(dss.util.aws.clients, "lambda")


logger = logging.getLogger(__name__)


# ---
# Utility functions for SSM parameter store:
# ---
def get_ssm_variable_prefix() -> str:
    """
    Use info from the local environment to assemble the prefix needed for environment
    variables stored in the SSM param store under $DSS_DEPLOYMENT_STAGE/environment
    (e.g. "/<store>/<stage>").
    """
    store_name = os.environ["DSS_PARAMETER_STORE"]
    stage_name = os.environ["DSS_DEPLOYMENT_STAGE"]
    store_prefix = f"/{store_name}/{stage_name}"
    return store_prefix


def fix_ssm_variable_prefix(param_name: str) -> str:
    """Add (if necessary) the variable store and stage prefix to the front of the name
    of an SSM store parameter, so the result carries exactly one copy of the prefix."""
    prefix = get_ssm_variable_prefix()
    # Strip trailing slash
    param_name = param_name.rstrip("/")
    # Remove any copy of the prefix already present, then prepend exactly one.
    return f"{prefix}/" + param_name.replace(prefix, "", 1).lstrip("/")


def get_ssm_environment() -> dict:
    """Get the value of environment variables stored in the SSM param store under
    $DSS_DEPLOYMENT_STAGE/environment"""
    p = ssm_client.get_parameter(Name=fix_ssm_variable_prefix("environment"))
    parms = p["Parameter"]["Value"]
    # this is a string, so convert to dict
    return json.loads(parms)


def set_ssm_environment(env: dict) -> None:
    """
    Set the value of environment variables stored in the SSM param store under
    $DSS_DEPLOYMENT_STAGE/environment

    :param env: dict containing environment variables to set in SSM param store
    :returns: nothing
    """
    ssm_client.put_parameter(
        Name=fix_ssm_variable_prefix("environment"), Value=json.dumps(env), Type="String", Overwrite=True
    )


def set_ssm_parameter(env_var: str, value, quiet: bool = False) -> None:
    """
    Set a variable in the lambda environment stored in the SSM store under
    $DSS_DEPLOYMENT_STAGE/environment

    :param env_var: name of environment variable being set
    :param value: value of environment variable being set
    :param bool quiet: suppress all output if true
    """
    # Read-modify-write the whole JSON "environment" blob.
    environment = get_ssm_environment()
    environment[env_var] = value
    set_ssm_environment(environment)
    polite_print(
        quiet,
        f"Success! Set variable in SSM parameter store environment:\n"
        f"    Name: {env_var}\n"
        f"    Value: {value}\n"
    )


def unset_ssm_parameter(env_var: str, quiet: bool = False) -> None:
    """
    Unset a variable in the lambda environment stored in the SSM store under
    $DSS_DEPLOYMENT_STAGE/environment

    :param env_var: name of environment variable being unset
    :param bool quiet: suppress all output if true
    """
    environment = get_ssm_environment()
    try:
        prev_value = environment[env_var]
        del environment[env_var]
        set_ssm_environment(environment)
        polite_print(
            quiet,
            f"Success! Unset variable in SSM store under $DSS_DEPLOYMENT_STAGE/environment:\n"
            f"    Name: {env_var}\n"
            f"    Previous value: {prev_value}\n"
        )
    except KeyError:
        # Variable was not present; treat as a no-op rather than an error.
        polite_print(
            quiet,
            f"Nothing to unset for variable {env_var} in SSM store under $DSS_DEPLOYMENT_STAGE/environment"
        )


# ---
# Utility functions for lambda functions:
# ---
def get_elasticsearch_endpoint() -> str:
    """Look up the endpoint of the deployed Elasticsearch domain named by $DSS_ES_DOMAIN."""
    domain_name = os.environ["DSS_ES_DOMAIN"]
    domain_info = es_client.describe_elasticsearch_domain(DomainName=domain_name)
    return domain_info["DomainStatus"]["Endpoint"]


def get_admin_emails() -> str:
    """
    Assemble the comma-separated admin email list from the secrets manager,
    always including the GCP service account's client email.
    """
    gcp_var = os.environ["GOOGLE_APPLICATION_CREDENTIALS_SECRETS_NAME"]
    gcp_secret_id = fix_secret_variable_prefix(gcp_var)
    response = fetch_secret_safely(gcp_secret_id)['SecretString']
    gcp_service_account_email = json.loads(response)['client_email']

    admin_var = os.environ["ADMIN_USER_EMAILS_SECRETS_NAME"]
    admin_secret_id = fix_secret_variable_prefix(admin_var)
    response = fetch_secret_safely(admin_secret_id)['SecretString']
    # Drop blank entries produced by stray commas/whitespace.
    email_list = [email.strip() for email in response.split(',') if email.strip()]

    if gcp_service_account_email not in email_list:
        email_list.append(gcp_service_account_email)
    return ",".join(email_list)


def get_deployed_lambdas(quiet: bool = True):
    """
    Generator returning names of deployed lambda functions

    :param bool quiet: if true, don't print warnings about lambdas that can't be found
    """
    stage = os.environ["DSS_DEPLOYMENT_STAGE"]

    # Each directory under $DSS_HOME/daemons corresponds to one lambda, plus
    # the main "dss-<stage>" lambda for the API itself.
    dirs = []
    path = os.path.join(os.environ["DSS_HOME"], "daemons")
    for item in os.scandir(path):
        if not item.name.startswith('.') and item.is_dir():
            dirs.append(item.name)

    functions = [f"{name}-{stage}" for name in dirs]
    functions.append(f"dss-{stage}")
    for name in functions:
        try:
            # Probe for existence; only yield lambdas that are actually deployed.
            lambda_client.get_function(FunctionName=name)
            yield name
        except lambda_client.exceptions.ResourceNotFoundException:
            polite_print(quiet, f"{name} not deployed, or does not deploy a lambda function")


def get_deployed_lambda_environment(lambda_name: str, quiet: bool = True) -> dict:
    """Get environment variables in a deployed lambda function.

    Returns an empty dict when the lambda is not deployed.
    """
    try:
        lambda_client.get_function(FunctionName=lambda_name)
        c = lambda_client.get_function_configuration(FunctionName=lambda_name)
    except lambda_client.exceptions.ResourceNotFoundException:
        polite_print(quiet, f"{lambda_name} is not a deployed lambda function")
        return {}
    else:
        # get_function_configuration() above returns a dict, no need to convert
        return c["Environment"]["Variables"]


def set_deployed_lambda_environment(lambda_name: str, env: dict) -> None:
    """Set environment variables in a deployed lambda function"""
    lambda_client.update_function_configuration(
        FunctionName=lambda_name, Environment={"Variables": env}
    )


def get_local_lambda_environment(quiet: bool = True) -> dict:
    """
    For each environment variable being set in deployed lambda functions, get each
    environment variable and its value from the local environment, put them in a
    dict, and return it.

    :param bool quiet: if true, don't print warning messages
    :returns: dict containing local environment's value of each variable exported
        to deployed lambda functions
    """
    env = dict()
    # EXPORT_ENV_VARS_TO_LAMBDA is a whitespace-separated list of names.
    for name in os.environ["EXPORT_ENV_VARS_TO_LAMBDA"].split():
        try:
            env[name] = os.environ[name]
        except KeyError:
            polite_print(
                quiet,
                f"Warning: environment variable {name} is in the list of environment variables "
                "to export to lambda functions, EXPORT_ENV_VARS_TO_LAMBDA, but variable is not "
                "defined in the local environment, so there is no value to set."
            )
    return env


def set_lambda_var(env_var: str, value, lambda_name: str, quiet: bool = False) -> None:
    """Set a single variable in the environment of the specified lambda function"""
    environment = get_deployed_lambda_environment(lambda_name, quiet=False)
    environment[env_var] = value
    set_deployed_lambda_environment(lambda_name, environment)
    polite_print(quiet, f"Success! Set variable {env_var} in deployed lambda function {lambda_name}")


def unset_lambda_var(env_var: str, lambda_name: str, quiet: bool = False) -> None:
    """Unset a single variable in the environment of the specified lambda function"""
    environment = get_deployed_lambda_environment(lambda_name, quiet=False)
    try:
        del environment[env_var]
        set_deployed_lambda_environment(lambda_name, environment)
        polite_print(quiet, f"Success! Unset variable {env_var} in deployed lambda function {lambda_name}")
    except KeyError:
        polite_print(quiet, f"Nothing to unset for variable {env_var} in deployed lambda function {lambda_name}")


def print_lambda_env(lambda_name, lambda_env):
    """Print environment variables set in a specified lambda function"""
    print(f"\n{lambda_name}:")
    for name, val in lambda_env.items():
        print(f"{name}={val}")


# ---
# Command line utility functions
# ---
lambda_params = dispatch.target("lambda", arguments={}, help=__doc__)
ssm_params = dispatch.target("params", arguments={}, help=__doc__)

# Shared argparse option blocks reused by the actions below.
json_flag_options = dict(
    default=False, action="store_true", help="format the output as JSON if this flag is present"
)
dryrun_flag_options = dict(default=False, action="store_true", help="do a dry run of the actual operation")
quiet_flag_options = dict(default=False, action="store_true", help="suppress output")


@ssm_params.action(
    "environment",
    arguments={
        "--json": dict(
            default=False, action="store_true", help="format the output as JSON if this flag is present"
        )
    },
)
def ssm_environment(argv: typing.List[str], args: argparse.Namespace):
    """Print out all variables stored in the SSM store under $DSS_DEPLOYMENT_STAGE/environment"""
    ssm_env = get_ssm_environment()
    if args.json:
        print(json.dumps(ssm_env, indent=4))
    else:
        for name, val in ssm_env.items():
            print(f"{name}={val}")
        print("\n")


@lambda_params.action(
    "list",
    arguments={
        "--json": json_flag_options,
    }
)
def lambda_list(argv: typing.List[str], args: argparse.Namespace):
    """Print a list of names of each deployed lambda function"""
    # In JSON mode, suppress "not deployed" warnings so the output stays valid JSON.
    lambda_names = list(get_deployed_lambdas(quiet=args.json))
    if args.json:
        print(json.dumps(lambda_names, indent=4, default=str))
    else:
        for lambda_name in lambda_names:
            print(lambda_name)


@lambda_params.action(
    "environment",
    arguments={
        "--json": json_flag_options,
        "--lambda-name": dict(
            required=False, help="specify the name of a lambda function whose environment will be listed"
        )
    }
)
def lambda_environment(argv: typing.List[str], args: argparse.Namespace):
    """Print out the current environment of deployed lambda functions"""
    if args.lambda_name:
        lambda_names = [args.lambda_name]  # single lambda function
    else:
        lambda_names = list(get_deployed_lambdas())  # all lambda functions

    # Iterate over lambda functions and get their environments
    d = {}
    for lambda_name in lambda_names:
        lambda_env = get_deployed_lambda_environment(lambda_name, quiet=args.json)
        if lambda_env != {}:
            d[lambda_name] = lambda_env

    # Print environments
    if args.json:
        print(json.dumps(d, indent=4, default=str))
    else:
        for lambda_name, lambda_env in d.items():
            print_lambda_env(lambda_name, lambda_env)


@lambda_params.action(
    "set",
    arguments={
        "name": dict(help="name of variable to set in environment of deployed lambdas"),
        "--dry-run": dryrun_flag_options,
        "--quiet": quiet_flag_options,
    }
)
def lambda_set(argv: typing.List[str], args: argparse.Namespace):
    """
    Set a variable in the SSM store under $DSS_DEPLOYMENT_STAGE/environment,
    then set the variable in each deployed lambda.
    """
    name = args.name

    # Use stdin for value
    if not select.select([sys.stdin], [], [], 0.0)[0]:
        raise RuntimeError("Error: stdin was empty! A variable value must be provided via stdin")
    val = sys.stdin.read()

    if args.dry_run:
        polite_print(
            args.quiet,
            f"Dry-run setting variable {name} in lambda environment in SSM store under "
            "$DSS_DEPLOYMENT_STAGE/environment"
        )
        polite_print(args.quiet, f"    Name: {name}")
        polite_print(args.quiet, f"    Value: {val}")
        for lambda_name in get_deployed_lambdas():
            polite_print(args.quiet, f"Dry-run setting variable {name} in lambda {lambda_name}")
    else:
        # Set the variable in the SSM store first, then in each deployed lambda
        set_ssm_parameter(name, val, quiet=args.quiet)
        for lambda_name in get_deployed_lambdas():
            set_lambda_var(name, val, lambda_name, quiet=args.quiet)


@lambda_params.action(
    "unset",
    arguments={
        "name": dict(help="name of variable to unset in environment of deployed lambdas"),
        "--dry-run": dryrun_flag_options,
        "--quiet": quiet_flag_options
    }
)
def lambda_unset(argv: typing.List[str], args: argparse.Namespace):
    """
    Unset a variable in the SSM store under $DSS_DEPLOYMENT_STAGE/environment,
    then unset the variable in each deployed lambda.
    """
    name = args.name

    # Unset the variable from the SSM store first, then from each deployed lambda
    if args.dry_run:
        polite_print(args.quiet, f"Dry-run deleting variable {name} from SSM store")
        for lambda_name in get_deployed_lambdas():
            polite_print(args.quiet, f'Dry-run deleting variable {name} from lambda function "{lambda_name}"')
    else:
        unset_ssm_parameter(name, quiet=args.quiet)
        for lambda_name in get_deployed_lambdas():
            unset_lambda_var(name, lambda_name, quiet=args.quiet)


@lambda_params.action(
    "update",
    arguments={
        "--update-deployed": dict(
            default=False,
            action="store_true",
            help="update the environment variables of all deployed lambdas, in addition to "
            "updating the lambda environment stored in SSM store under $DSS_DEPLOYMENT_STAGE/environment",
        ),
        "--force": dict(
            default=False,
            action="store_true",
            help="force the action to happen (no interactive prompt)",
        ),
        "--dry-run": dryrun_flag_options,
        "--quiet": quiet_flag_options
    }
)
def lambda_update(argv: typing.List[str], args: argparse.Namespace):
    """
    Update the lambda environment stored in the SSM store under $DSS_DEPLOYMENT_STAGE/environment
    by taking values from the current (local) environment.
    If --update-deployed flag is provided, also update the environment of deployed lambda functions.
    """
    if not args.force and not args.dry_run:
        # Prompt the user to make sure they really want to do this.
        # Fixed: the prompt and the success message below previously referred
        # to the nonexistent variable $DSS_DEPLOY_STAGE; the store actually
        # lives under $DSS_DEPLOYMENT_STAGE (as used everywhere else here).
        confirm = f"""
        *** WARNING!!! ***

        Calling the lambda update function will overwrite the current values of the
        lambda function environment stored in the SSM store at
        $DSS_DEPLOYMENT_STAGE/environment with local values from environment
        variables on your machine.

        Note:
        - To do a dry run of this operation first, use the --dry-run flag.
        - To ignore this warning, use the --force flag.
        - To see the current environment stored in the SSM store, run:
            ./dss-ops.py lambda environment

        Are you really sure you want to update the environment in SSM store
        "{get_ssm_variable_prefix()}"?
        (Type 'y' or 'yes' to confirm):
        """
        response = input(confirm)
        if response.lower() not in ["y", "yes"]:
            raise RuntimeError("You safely aborted the lambda update operation!")

    # Only elasticsearch endpoint and admin emails are updated dynamically,
    # everything else comes from the local environment.
    local_env = get_local_lambda_environment()
    local_env["DSS_ES_ENDPOINT"] = get_elasticsearch_endpoint()
    local_env["ADMIN_USER_EMAILS"] = get_admin_emails()

    if args.dry_run:
        polite_print(
            args.quiet,
            f"Dry-run redeploying local environment to lambda function environment "
            "stored in SSM store under $DSS_DEPLOYMENT_STAGE/environment"
        )
    else:
        set_ssm_environment(local_env)
        polite_print(
            args.quiet,
            f"Finished redeploying local environment to lambda function environment "
            "stored in SSM store under $DSS_DEPLOYMENT_STAGE/environment"
        )

    # Optionally, update environment of each deployed lambda
    if args.update_deployed:
        for lambda_name in get_deployed_lambdas():
            # Add the new variable to each lambda's environment
            lambda_env = get_deployed_lambda_environment(lambda_name)
            lambda_env.update(local_env)
            if args.dry_run:
                polite_print(
                    args.quiet,
                    f"Dry-run redeploying lambda function environment from SSM store for {lambda_name}"
                )
            else:
                set_deployed_lambda_environment(lambda_name, lambda_env)
                polite_print(
                    args.quiet,
                    f"Finished redeploying lambda function environment from SSM store for {lambda_name}"
                )
#
# author: <NAME>
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): this module is Python 2 code (print statements,
# "except Exception, e" syntax, cPickle, and "async" used as an identifier);
# it will not parse under Python 3.
import traceback
from unittest import TestCase

from asyncrpc.client import Proxy, AsyncProxy, AsyncSingleInstanceProxy, SingleInstanceProxy
from asyncrpc.log import set_logger_level, LOGGER_NAME, DEBUG, setup_logger, uninstall_logger
from asyncrpc.manager import AsyncManager, SingleInstanceAsyncManager
from asyncrpc.tornadorpc import TornadoManager, TornadoHttpRpcProxy, TornadoAsyncHttpRpcProxy, asynchronous, async_call
from asyncrpc.messaging import select
from tornado import gen
from cPickle import dumps, loads

__author__ = 'basca'

# Configure logging for the test run, then immediately uninstall the logger so
# the tests run quietly.
setup_logger(name=LOGGER_NAME)
set_logger_level(DEBUG, name=LOGGER_NAME)
uninstall_logger()

# Use msgpack as the wire serialization format for all RPC traffic below.
select('msgpack')


# Simple counter object that the managers under test expose over RPC.
class MyClass(object):
    def __init__(self, counter=0):
        # _c holds the current counter value.
        self._c = counter

    def add(self, value=1):
        # Increment and return the new counter value.
        self._c += value
        return self._c

    def dec(self, value=1):
        # Decrement; intentionally returns nothing (exercised by the tests).
        self._c -= value

    def current_counter(self):
        return self._c


# Server-side object with an asynchronous (tornado-coroutine) method that fans
# out add() calls to other RPC servers and sums their replies.
class AsyncClass(object):
    def __init__(self):
        self._innerval = 10

    @asynchronous
    def sum(self, addr, val):
        # async_call(addr).add(val) issues add(val) against every address in
        # addr and yields the list of results.
        vals = yield async_call(addr).add(val)
        sv = sum(vals)
        raise gen.Return(sv)


# Sentinel that each test body returns to signal success to capture_exception.
SUCCESS = 100101102103


def capture_exception(func):
    # Decorator: run the wrapped test, print a pass/fail message, and convert
    # any failure or unexpected exception into an AssertionError (with traceback).
    def wrapper(*args, **kwargs):
        try:
            rv = func(*args, **kwargs)
            if rv == SUCCESS:
                print 'Function {0} completed successfully'.format(func.__name__)
            else:
                print 'Function {0} completed with failure!'.format(func.__name__)
                raise AssertionError
            return rv
        except Exception, e:
            print 'Got exception {0}, while running {1}'.format(e, func.__name__)
            print traceback.format_exc()
            raise AssertionError

    # Preserve the original test name so unittest reports it correctly.
    wrapper.__name__ = func.__name__
    return wrapper


class TestManager(TestCase):
    # End-to-end tests for the Tornado- and gevent-based RPC managers and their
    # blocking/async proxy flavours, including pickling of proxies.

    def setUp(self):
        # set_logger_level('critical')
        # set_level('debug')
        pass

    def _threaded_manager(self, async=False):
        # Build a fresh AsyncManager subclass exposing MyClass under the name
        # "MyClass"; `async` selects which proxy flavour the manager returns.
        class MyManager(AsyncManager):
            pass

        MyManager.register("MyClass", MyClass)
        return MyManager(async=async)

    @capture_exception
    def test_01_tornadomanager_blocking(self):
        # Blocking proxy round-trip: read, add, dec on a counter of 10.
        instance = MyClass(counter=10)
        manager = TornadoManager(instance, async=False)
        manager.start()
        self.assertIsInstance(manager, TornadoManager)
        my_instance = manager.proxy()
        self.assertIsInstance(my_instance, TornadoHttpRpcProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_02_tornadomanager_blocking_multiple(self):
        # Two managers started over the same instance; the second proxy sees a
        # fresh counter of 10 after the first is stopped — each manager
        # apparently serves its own copy of the instance (TODO confirm).
        instance = MyClass(counter=10)
        manager = TornadoManager(instance, async=False)
        manager.start()
        manager2 = TornadoManager(instance, async=False)
        manager2.start()
        self.assertIsInstance(manager, TornadoManager)
        my_instance = manager.proxy()
        self.assertIsInstance(my_instance, TornadoHttpRpcProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        manager.stop()

        my_instance = manager2.proxy()
        self.assertIsInstance(my_instance, TornadoHttpRpcProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        manager2.stop()
        return SUCCESS

    @capture_exception
    def test_03_tornadomanager_async(self):
        # Async manager accessed through an AsyncSingleInstanceProxy built
        # from the manager's bound address.
        instance = MyClass(counter=10)
        manager = TornadoManager(instance, async=True)
        manager.start()
        self.assertIsInstance(manager, TornadoManager)
        my_instance = AsyncSingleInstanceProxy(manager.bound_address)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 10)
        my_instance.add(20)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 30)
        my_instance.dec(30)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_04_tornadomanager_async_multi(self):
        # Fan-out: AsyncClass.sum calls add(1) on two remote counters
        # (1 -> 2 and 2 -> 3) and returns their sum, 5.
        instance = AsyncClass()
        manager = TornadoManager(instance, async=True)
        manager.start()

        i1 = MyClass(counter=1)
        m1 = TornadoManager(i1, async=True)
        m1.start()

        i2 = MyClass(counter=2)
        m2 = TornadoManager(i2, async=True)
        m2.start()

        cc = AsyncSingleInstanceProxy(manager.bound_address).sum([m1.bound_address, m2.bound_address], 1)
        self.assertEqual(cc, 5)

        del m1
        del m2
        del manager
        return SUCCESS

    @capture_exception
    def test_05_geventmanager_blocking(self):
        # Registered-class manager, blocking proxy flavour.
        manager = self._threaded_manager(async=False)
        manager.start()
        self.assertIsInstance(manager, AsyncManager)
        my_instance = manager.MyClass(counter=10)
        self.assertIsInstance(my_instance, Proxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_06_geventmanager_async(self):
        # Registered-class manager, async proxy flavour.
        manager = self._threaded_manager(async=True)
        manager.start()
        self.assertIsInstance(manager, AsyncManager)
        my_instance = manager.MyClass(counter=10)
        self.assertIsInstance(my_instance, AsyncProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_07_tornadomanager_blocking_pickle(self):
        # Blocking proxy must survive a cPickle round-trip and keep working.
        instance = MyClass(counter=10)
        manager = TornadoManager(instance, async=False)
        manager.start()
        self.assertIsInstance(manager, TornadoManager)
        my_instance = manager.proxy()
        _rep = dumps(my_instance)
        my_instance = loads(_rep)
        self.assertIsInstance(my_instance, TornadoHttpRpcProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_08_tornadomanager_async_pickle(self):
        # Async proxy must survive a cPickle round-trip and keep working.
        instance = MyClass(counter=10)
        manager = TornadoManager(instance, async=True)
        manager.start()
        self.assertIsInstance(manager, TornadoManager)
        my_instance = AsyncSingleInstanceProxy(manager.bound_address)
        _rep = dumps(my_instance)
        my_instance = loads(_rep)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 10)
        my_instance.add(20)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 30)
        my_instance.dec(30)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_09_geventmanager_blocking_pickle(self):
        # owner = False so pickling the proxy does not claim/release the
        # remote instance (presumably — confirm against Proxy semantics).
        manager = self._threaded_manager(async=False)
        manager.start()
        self.assertIsInstance(manager, AsyncManager)
        my_instance = manager.MyClass(counter=10)
        my_instance.owner = False
        _rep = dumps(my_instance)
        my_instance = loads(_rep)
        self.assertIsInstance(my_instance, Proxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_10_geventmanager_async_pickle(self):
        # Same as test_09 but for the async proxy flavour.
        manager = self._threaded_manager(async=True)
        manager.start()
        self.assertIsInstance(manager, AsyncManager)
        my_instance = manager.MyClass(counter=10)
        my_instance.owner = False
        _rep = dumps(my_instance)
        my_instance = loads(_rep)
        self.assertIsInstance(my_instance, AsyncProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_11_singleinstance_geventmanager_blocking(self):
        # Single-instance manager, blocking proxy.
        instance = MyClass(counter=10)
        manager = SingleInstanceAsyncManager(instance, async=False)
        manager.start()
        self.assertIsInstance(manager, SingleInstanceAsyncManager)
        my_instance = manager.proxy()
        self.assertIsInstance(my_instance, SingleInstanceProxy)
        self.assertEqual(my_instance.current_counter(), 10)
        my_instance.add(20)
        self.assertEqual(my_instance.current_counter(), 30)
        my_instance.dec(30)
        self.assertEqual(my_instance.current_counter(), 0)
        del manager
        return SUCCESS

    @capture_exception
    def test_12_singleinstance_geventmanager_async(self):
        # Single-instance manager, async proxy built from the bound address.
        instance = MyClass(counter=10)
        manager = SingleInstanceAsyncManager(instance, async=True)
        manager.start()
        self.assertIsInstance(manager, SingleInstanceAsyncManager)
        my_instance = AsyncSingleInstanceProxy(manager.bound_address)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 10)
        my_instance.add(20)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 30)
        my_instance.dec(30)
        cc = my_instance.current_counter()
        self.assertEqual(cc, 0)
        del manager
        return SUCCESS
from flask import Flask
from edmunds.foundation.concerns.config import Config as ConcernsConfig
from edmunds.foundation.concerns.runtimeenvironment import RuntimeEnvironment as ConcernsRuntimeEnvironment
from edmunds.foundation.concerns.serviceproviders import ServiceProviders as ConcernsServiceProviders
from edmunds.foundation.concerns.middleware import Middleware as ConcernsMiddleware
from edmunds.foundation.concerns.storage import Storage as ConcernsStorage
from edmunds.foundation.concerns.session import Session as ConcernsSession
from edmunds.foundation.concerns.database import Database as ConcernsDatabase
from edmunds.foundation.concerns.localization import Localization as ConcernsLocalization
from edmunds.foundation.concerns.cache import Cache as ConcernsCache
from edmunds.foundation.concerns.auth import Auth as ConcernsAuth
from edmunds.exceptions.exceptionsserviceprovider import ExceptionsServiceProvider
from edmunds.log.providers.logserviceprovider import LogServiceProvider
from edmunds.session.providers.sessionserviceprovider import SessionServiceProvider
from edmunds.storage.providers.storageserviceprovider import StorageServiceProvider
from edmunds.database.providers.databaseserviceprovider import DatabaseServiceProvider
from edmunds.localization.providers.localizationserviceprovider import LocalizationServiceProvider
from edmunds.cache.providers.cacheserviceprovider import CacheServiceProvider
from edmunds.http.providers.httpserviceprovider import HttpServiceProvider
from edmunds.auth.providers.authserviceprovider import AuthServiceProvider
from edmunds.foundation.providers.runtimeenvironmentprovider import RuntimeEnvironmentServiceProvider
from edmunds.profiler.providers.profilerserviceprovider import ProfilerServiceProvider
from edmunds.config.config import Config
from edmunds.http.request import Request
from edmunds.http.response import Response
from edmunds.http.route import Route


class Application(Flask, ConcernsConfig, ConcernsRuntimeEnvironment, ConcernsServiceProviders,
                  ConcernsMiddleware, ConcernsStorage, ConcernsSession, ConcernsDatabase,
                  ConcernsLocalization, ConcernsCache, ConcernsAuth):
    """
    The Edmunds Application

    A Flask application extended with the Edmunds "concern" mixins and wired
    up with the framework's default service providers at construction time.
    """

    # Replace Flask's request/response/config classes with Edmunds' flavours.
    request_class = Request
    response_class = Response
    config_class = Config

    def __init__(self, import_name, config_dirs=None, *args, **kwargs):
        """
        Initialize the application

        :param import_name: Import name
        :type import_name: str
        :param config_dirs: Configuration directories
        :type config_dirs: list
        :param args: Additional args
        :type args: list
        :param kwargs: Additional kwargs
        :type kwargs: dict
        """
        super(Application, self).__init__(import_name, *args, **kwargs)

        # Namespace the logger under "edmunds." so framework logs are
        # distinguishable from application logs.
        self.logger_name = 'edmunds.%s' % import_name

        # Config must be loaded before anything else; database setup reads it.
        self._init_config(config_dirs)
        self._init_database()

        # NOTE(review): provider registration order is preserved as-is; some
        # providers may depend on earlier ones — confirm before reordering.
        self.register(RuntimeEnvironmentServiceProvider)
        self.register(ProfilerServiceProvider)
        self.register(HttpServiceProvider)
        self.register(StorageServiceProvider)
        self.register(ExceptionsServiceProvider)
        self.register(LogServiceProvider)
        self.register(SessionServiceProvider)
        self.register(DatabaseServiceProvider)
        self.register(CacheServiceProvider)
        self.register(LocalizationServiceProvider)
        self.register(AuthServiceProvider)

    def route(self, rule, **options):
        """
        Register a route

        This can be done the old skool way (as a decorator), or with the
        'uses' option pointing at a controller class and method name.

        :param rule: The rule for routing the request
        :type rule: str
        :param options: List of options
        :type options: list
        :return: Route instance or decorator function
        :rtype: edmunds.http.route.Route
        """
        route = Route(self)

        # Register middleware that was given with the options
        if 'middleware' in options:
            for middleware in options.pop('middleware'):
                if isinstance(middleware, tuple):
                    # (middleware_class, *args) form
                    route.middleware(middleware[0], *middleware[1:])
                else:
                    route.middleware(middleware)

        if 'uses' in options:
            # Add controller and method
            controller_class, method_name = options.pop('uses')
            route.uses(controller_class, method_name)
            return_value = route
        else:
            # No 'uses': behave as a decorator, like Flask's own route().
            return_value = route.decorate

        # Define endpoint
        if 'endpoint' in options:
            endpoint = options.pop('endpoint')
        else:
            endpoint = 'edmunds.route.%s' % rule

        # Add route
        self.add_url_rule(rule, endpoint=endpoint, view_func=route.handle)

        return return_value
import os
import sys

from drake.tools.lint.formatter import IncludeFormatter


def _check_unguarded_openmp_uses(filename):
    """Return 0 if all OpenMP uses in @p filename are properly guarded by
    #if defined(_OPENMP), and 1 otherwise.
    """
    openmp_include = "#include <omp.h>"
    openmp_pragma = "#pragma omp"

    openmp_pre_guard = "#if defined(_OPENMP)"
    openmp_post_guard = "#endif"

    with open(filename, mode='r', encoding='utf-8') as file:
        lines = file.readlines()

    for index, current_line in enumerate(lines):
        if openmp_include in current_line or openmp_pragma in current_line:
            previous_line = lines[index - 1] if (index - 1) >= 0 else ""
            next_line = lines[index + 1] if (index + 1) < len(lines) else ""
            missing_pre_guard = previous_line.strip() != openmp_pre_guard
            missing_post_guard = next_line.strip() != openmp_post_guard
            if missing_pre_guard or missing_post_guard:
                # BUG FIX: report the actual file (was a literal "(unknown)"),
                # so the error message is actionable and matches the
                # "file:line:" convention used by lint tooling.
                print(f"ERROR: {filename}:{index + 1}: "
                      "OpenMP includes and directives must be guarded by "
                      f"{openmp_pre_guard} on the previous line and "
                      f"{openmp_post_guard} on the following line")
                return 1

    return 0


def _check_invalid_line_endings(filename):
    """Return 0 if all of the newlines in @p filename are Unix, and 1
    otherwise.
    """
    # Ask Python to read the file and determine the newlines convention.
    with open(filename, mode='r', encoding='utf-8') as file:
        file.read()
        if file.newlines is None:
            newlines = tuple()
        else:
            newlines = tuple(file.newlines)

    # Only allow Unix newlines.
    for newline in newlines:
        if newline != '\n':
            print("ERROR: non-Unix newline characters found")
            return 1

    return 0


def _check_includes(filename):
    """Return 0 if clang-format-includes is a no-op, and 1 otherwise."""
    try:
        tool = IncludeFormatter(filename)
    except Exception as e:
        print("ERROR: " + filename + ":0: " + str(e))
        return 1
    tool.format_includes()
    first_difference = tool.get_first_differing_original_index()
    if first_difference is not None:
        # BUG FIX: include the filename (was a literal "(unknown)").
        print(f"ERROR: {filename}:{first_difference + 1}: "
              "the #include ordering is incorrect")
        print("note: fix via bazel-bin/tools/lint/clang-format-includes "
              + filename)
        print("note: if that program does not exist, "
              "you might need to compile it first: "
              "bazel build //tools/lint/...")
        return 1

    return 0


def _check_shebang(filename, disallow_executable):
    """Return 0 if the filename's executable bit is consistent with the
    presence of a shebang line and the shebang line is in the whitelist of
    acceptable shebang lines, and 1 otherwise.

    If the string "# noqa: shebang" is present in the file, then this check
    will be ignored.
    """
    with open(filename, mode='r', encoding='utf8') as file:
        content = file.read()
    if "# noqa: shebang" in content:
        # Ignore.
        return 0

    is_executable = os.access(filename, os.X_OK)
    if is_executable and disallow_executable:
        print("ERROR: {} is executable, but should not be".format(filename))
        print("note: fix via chmod a-x '{}'".format(filename))
        return 1

    lines = content.splitlines()
    # BUG FIX: the assertion message previously contained a placeholder-less
    # f-string ("Empty file? (unknown)"); name the offending file.
    assert len(lines) > 0, f"Empty file? {filename}"
    shebang = lines[0]
    has_shebang = shebang.startswith("#!")
    if is_executable and not has_shebang:
        print("ERROR: {} is executable but lacks a shebang".format(filename))
        print("note: fix via chmod a-x '{}'".format(filename))
        return 1
    if has_shebang and not is_executable:
        print("ERROR: {} has a shebang but is not executable".format(
            filename))
        print("note: fix by removing the first line of the file")
        return 1
    shebang_whitelist = {
        "bash": "#!/bin/bash",
        "python": "#!/usr/bin/env python3",
    }
    if has_shebang and shebang not in list(shebang_whitelist.values()):
        print(("ERROR: shebang '{}' in the file '{}' is not in the shebang "
               "whitelist").format(shebang, filename))
        for hint, replacement_shebang in shebang_whitelist.items():
            if hint in shebang:
                print(("note: fix by replacing the shebang with "
                       "'{}'").format(replacement_shebang))
        return 1

    return 0


def main():
    """Run Drake lint checks on each path specified as a command-line
    argument.  Exit 1 if any of the paths are invalid or any lint checks
    fail.  Otherwise exit 0.
    """
    total_errors = 0
    if len(sys.argv) > 1 and sys.argv[1] == "--disallow_executable":
        disallow_executable = True
        filenames = sys.argv[2:]
    else:
        disallow_executable = False
        filenames = sys.argv[1:]
    for filename in filenames:
        print("drakelint.py: Linting " + filename)
        total_errors += _check_invalid_line_endings(filename)
        if not filename.endswith((".cc", ".cpp", ".h")):
            # TODO(jwnimmer-tri) We should enable this check for C++ files
            # also, but that runs into some struggle with genfiles.
            total_errors += _check_shebang(filename, disallow_executable)
        if not filename.endswith(".py"):
            total_errors += _check_includes(filename)
            total_errors += _check_unguarded_openmp_uses(filename)

    if total_errors == 0:
        sys.exit(0)
    else:
        sys.exit(1)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3

import subprocess
import logging
import collections
import time
import datetime
import threading

from prometheus_client.core import GaugeMetricFamily

import utils

logger = logging.getLogger(__name__)


class nv_host(object):
    """Context manager that starts nv-hostengine on entry and terminates it
    on exit, so that `dcgmi` has a host engine to talk to."""

    def __init__(self):
        pass

    def __enter__(self):
        p = subprocess.Popen(["nv-hostengine"],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        code = p.wait(timeout=60)
        logger.info("start nv-hostengine exit with %s, stdout %s, stderr %s",
                    code, p.stdout.read(), p.stderr.read())

    def __exit__(self, type, value, traceback):
        p = subprocess.Popen(["nv-hostengine", "--term"],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        code = p.wait(timeout=60)
        logger.info("stop nv-hostengine exit with %s, stdout %s, stderr %s",
                    code, p.stdout.read(), p.stderr.read())


# (dcgm field id, metric name, help text).  The first entry MUST stay `uuid`:
# it is consumed as a label, not exported as a gauge.
# Ref: https://github.com/NVIDIA/gpu-monitoring-tools/blob/9e2979804d297cf5a81640ba8a8e941365e58f14/dcgm-exporter/dcgm-exporter#L85
mapping = [
    (54, "uuid", "uuid of gpu"),  # do not remove uuid, this is very important label
    (100, "sm_clock", "SM clock frequency (in MHz)."),
    (101, "memory_clock", "Memory clock frequency (in MHz)."),
    (140, "memory_temp", "Memory temperature (in C)."),
    (150, "gpu_temp", "GPU temperature (in C)."),
    (155, "power_usage", "Power draw (in W)."),
    (156, "total_energy_consumption", "Total energy consumption since boot (in mJ)."),
    (200, "pcie_tx_throughput", "Total number of bytes transmitted through PCIe TX (in KB) via NVML."),
    (201, "pcie_rx_throughput", "Total number of bytes received through PCIe RX (in KB) via NVML."),
    (202, "pcie_replay_counter", "Total number of PCIe retries."),
    (203, "gpu_util", "GPU utilization (in %)."),
    (204, "mem_copy_util", "Memory utilization (in %)."),
    (206, "enc_util", "Encoder utilization (in %)."),
    (207, "dec_util", "Decoder utilization (in %)."),
    (230, "xid_errors", "Value of the last XID error encountered."),
    (240, "power_violation", "Throttling duration due to power constraints (in us)."),
    (241, "thermal_violation", "Throttling duration due to thermal constraints (in us)."),
    (242, "sync_boost_violation", "Throttling duration due to sync-boost constraints (in us)."),
    (243, "board_limit_violation", "Throttling duration due to board limit constraints (in us)."),
    (244, "low_util_violation", "Throttling duration due to low utilization (in us)."),
    (245, "reliability_violation", "Throttling duration due to reliability constraints (in us)."),
    (246, "app_clock_violation", "Total throttling duration (in us)."),
    (251, "fb_free", "Framebuffer memory free (in MiB)."),
    (252, "fb_used", "Framebuffer memory used (in MiB)."),
    (310, "ecc_sbe_volatile_total", "Total number of single-bit volatile ECC errors."),
    (311, "ecc_dbe_volatile_total", "Total number of double-bit volatile ECC errors."),
    (312, "ecc_sbe_aggregate_total", "Total number of single-bit persistent ECC errors."),
    (313, "ecc_dbe_aggregate_total", "Total number of double-bit persistent ECC errors."),
    (390, "retired_pages_sbe", "Total number of retired pages due to single-bit errors."),
    (391, "retired_pages_dbe", "Total number of retired pages due to double-bit errors."),
    (392, "retired_pages_pending", "Total number of pages pending retirement."),
    (409, "nvlink_flit_crc_error_count_total", "Total number of NVLink flow-control CRC errors."),
    (419, "nvlink_data_crc_error_count_total", "Total number of NVLink data CRC errors."),
    (429, "nvlink_replay_error_count_total", "Total number of NVLink retries."),
    (439, "nvlink_recovery_error_count_total", "Total number of NVLink recovery errors."),
    (449, "nvlink_bandwidth_total", "Total number of NVLink bandwidth counters for all lanes"),
]

DCGMMetrics = collections.namedtuple("DCGMMetrics",
                                     list(map(lambda x: x[1], mapping)))


class DCGMHandler(object):
    """Polls `dcgmi dmon` every `interval` seconds on a daemon thread and
    publishes results into the two AtomicRef containers."""

    def __init__(self, interval, gauge_ref, info_ref, dcgmi_histogram,
                 dcgmi_timeout):
        self.interval = interval
        self.gauge_ref = gauge_ref  # receives gauge_name -> GaugeMetricFamily
        self.info_ref = info_ref  # receives minor_number -> DCGMMetrics
        self.dcgmi_histogram = dcgmi_histogram
        self.dcgmi_timeout = dcgmi_timeout
        # Comma-separated dcgm field ids passed to `dcgmi dmon -e`.
        self.args = ",".join(map(lambda x: str(x[0]), mapping))
        self.thread = None

    def start(self):
        """Spawn the polling loop on a daemon thread and return immediately."""
        self.thread = threading.Thread(target=self.run,
                                       name="dcgm_handler",
                                       args=(),
                                       daemon=True)
        self.thread.start()

    def run(self):
        """Blocking poll loop; never returns.  Use start() to run it on a
        background thread."""
        with nv_host():
            while True:
                try:
                    metrics, gauges = self.get_dcgm_metric()
                    now = datetime.datetime.now()
                    self.info_ref.set(metrics, now)
                    self.gauge_ref.set(gauges, now)
                    time.sleep(self.interval)
                except Exception:
                    # Best-effort exporter: log and keep polling.
                    logger.exception("DCGMHandler.run got exception")

    def get_dcgm_metric(self):
        """Run `dcgmi dmon` once and parse its table output.

        Returns (metrics, gauges); both are empty dicts when dcgmi fails or
        times out, so callers can publish unconditionally.
        """
        metrics = {}  # minor_number -> DCGMMetrics
        gauges = {}  # gauge_name -> GaugeMetricFamily
        try:
            dcgmi_output = utils.exec_cmd(
                ["dcgmi", "dmon", "-c", "1", "-d", "1", "-e", self.args],
                histogram=self.dcgmi_histogram,
                timeout=self.dcgmi_timeout)
        except subprocess.CalledProcessError as e:
            logger.exception("command '%s' return with error (code %d): %s",
                             e.cmd, e.returncode, e.output)
            return metrics, gauges
        except subprocess.TimeoutExpired:
            logger.warning("dcgmi timeout")
            return metrics, gauges

        try:
            # mapping[0] is uuid, which is a label rather than a gauge.
            for _, name, desc in mapping[1:]:
                gauges[name] = GaugeMetricFamily(
                    "dcgm_" + name, desc, labels=["minor_number", "uuid"])

            # [2:] is to remove headers
            for line in dcgmi_output.split("\n")[2:]:
                if line == "":  # end of output
                    continue
                part = line.split()
                minor_number = part[0]
                args = {}
                for i, (_, name, _) in enumerate(mapping):
                    value = part[i + 1]
                    args[name] = value
                    if i == 0:  # do not generate uuid metric
                        continue
                    if value == "N/A":
                        continue
                    gauges[name].add_metric([minor_number, args["uuid"]],
                                            float(value))
                metrics[minor_number] = DCGMMetrics(**args)
            return metrics, gauges
        except Exception:
            logger.exception("calling dcgmi failed")
            return metrics, gauges


if __name__ == '__main__':
    logging.basicConfig(
        format=
        "%(asctime)s - %(levelname)s - %(threadName)s - %(filename)s:%(lineno)s - %(message)s",
        level="DEBUG")

    import collector
    from prometheus_client import Histogram

    cmd_histogram = Histogram("cmd_dcgmi_latency_seconds",
                              "Command call latency for nvidia-smi (seconds)",
                              buckets=(1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0,
                                       128.0, 256.0, 512.0, 1024.0,
                                       float("inf")))

    gauge_ref = collector.AtomicRef(datetime.timedelta(seconds=60))
    metric_ref = collector.AtomicRef(datetime.timedelta(seconds=60))

    # BUG FIX: previously this read `dcgm.DCGMHandler(1, self.gauge_ref, ...)`
    # followed by `.run()`.  `dcgm` is this very module (NameError), `self`
    # does not exist at module level (NameError), and `.run()` never returns,
    # so the polling loop below was unreachable.  Use local names and start
    # the handler on its background thread instead.
    dcgm_handler = DCGMHandler(1, gauge_ref, metric_ref, cmd_histogram, 600)
    dcgm_handler.start()

    for _ in range(10):
        now = datetime.datetime.now()
        gauge = gauge_ref.get(now)
        metric = metric_ref.get(now)
        logger.info("gauge is %s", gauge)
        logger.info("metric is %s", metric)
        time.sleep(2)
import random
import time  # NOTE(review): unused in this module — confirm before removing

from .room import Room, Tunnel, DeadEnd, Store
from .item import Trash, Stick, Gem, Hammer
from .constants.adjectives import adjectives
from .constants.places import places


class Map:
    """Procedurally generated square world map.

    A size x size grid of 0/1 cells is carved out by random Walkers
    (generate_grid), then each occupied cell is turned into a Room subclass
    (generate_rooms).  Coordinates: grid is indexed [row][col] == [y][x];
    the rooms dict is keyed by (x, y).
    """

    def __init__(self, size, room_limit):
        # Grid of 0/1 flags; 1 means "there is a room at this cell".
        self.grid = []
        row = [0] * size
        for i in range(size):
            # copy() gives each row its own list (no shared aliasing).
            row = row.copy()
            self.grid.append(row)
        # Pool of every (adjective, place) combination; names are drawn
        # without replacement by get_loc_name().
        self.locations = [''] * (len(adjectives) * len(places))
        i = 0
        for adjective in adjectives:
            for place in places:
                self.locations[i] = {"adjective": adjective, "place": place}
                i += 1
        self.center = size // 2
        self.room_count = 0
        self.size = size
        self.room_limit = room_limit
        self.rooms = dict()
        # Seed the map with a single room at the center cell.
        self.set_grid(self.center, self.center)

    def get_loc_name(self):
        """Draw a random room name from the pool, without replacement.

        Falls back to a fixed placeholder name once the pool is exhausted.
        """
        if not self.locations:
            return {"adjective": "invisible", "place": "nowhere"}
        idx = random.randint(0, len(self.locations)-1)
        loc_name = self.locations[idx]
        del self.locations[idx]
        return loc_name

    def create_room(self, x, y, room_type, world=None):
        """Build one Room/Tunnel/DeadEnd/Store instance for grid cell (x, y).

        room_type selects the subclass; plain rooms also get a drawn name
        and a random selection of items.
        """
        # NOTE(review): string-concatenated ids collide for multi-digit
        # coordinates (e.g. (1, 23) and (12, 3) both yield 123) — verify
        # callers never rely on id uniqueness across the full grid.
        id = int(f"{str(x)}{str(y)}")
        world_loc = (x,y)
        name = f"Room #{id}"
        description = f"The description for {name}."
        # Weighted item pool: mostly trash/sticks, few hammers, one gem.
        potential_items = [Trash(random.randint(0, 10**8)) for _ in range(10)]
        potential_items += [Stick(random.randint(0, 10**8)) for _ in range(10)]
        potential_items += [Hammer(random.randint(0, 10**8)) for _ in range(5)]
        potential_items += [Gem(random.randint(0, 10**8)) for _ in range(1)]
        # choices() samples with replacement, so duplicates share one dict key.
        items = {
            i.id:i for i in random.choices(potential_items, k=10)
        }
        if room_type == "dead-end":
            loc_name = {"place": "dead end", "adjective": None}
            return DeadEnd(world, world_loc, loc_name, id)
        elif room_type == "tunnel":
            loc_name = {"place": "tunnel", "adjective": None}
            return Tunnel(world, world_loc, loc_name, id)
        elif room_type == "store":
            loc_name = {"place": "ant store", "adjective": None}
            return Store(world, world_loc, loc_name, id)
        else:
            loc_name = self.get_loc_name()
            title_adj = loc_name["adjective"].title()
            title_place = loc_name["place"].title()
            name = f"The {title_adj} {title_place}"
            description = f"Wow, this place is so {loc_name['adjective']}!"
            return Room(world, name, description, world_loc, loc_name, id,
                        items)

    def set_grid(self, y, x):
        # Mark the cell occupied; count each cell only once.
        if self.grid[y][x] != 1:
            self.grid[y][x] = 1
            self.room_count += 1

    def generate_grid(self, map_seed = None):
        """Carve the 0/1 grid with four random walkers until room_limit
        cells are occupied.  Returns the RNG seed used (for reproducibility).
        """
        # Four walkers start around the center, facing different directions.
        walkers = [
            Walker(self, 1, 2, self.center, self.center + 1),
            Walker(self, 2, 3, self.center + 1, self.center),
            Walker(self, 3, 3, self.center - 1, self.center),
            Walker(self, 4, 2, self.center, self.center - 1)
        ]
        if map_seed is None:
            grid_seed = random.randint(0, 10**6)
        else:
            grid_seed = map_seed
        # Seeding makes the whole carve deterministic for a given seed.
        random.seed(grid_seed)
        while self.room_count < self.room_limit:
            for walker in walkers:
                if self.room_count == self.room_limit:
                    break
                walker.move(self)
        return grid_seed

    def get_neighbors(self, i, j):
        """Return compass directions ('n','s','e','w') of occupied cells
        adjacent to grid cell [i][j], respecting the grid borders."""
        neighbors = []
        if i > 0 and self.grid[i-1][j] == 1:
            neighbors.append('n')
        if i < self.size-1 and self.grid[i+1][j] == 1:
            neighbors.append('s')
        if j < self.size-1 and self.grid[i][j+1] == 1:
            neighbors.append('e')
        if j > 0 and self.grid[i][j-1] == 1:
            neighbors.append('w')
        return neighbors

    def get_corner_type(self, neighbors):
        """For a 2-neighbor cell, return its corner orientation (e.g. 'ne'),
        or None when the neighbors are in a straight line ('ns'/'ew')."""
        if len(neighbors) != 2:
            return None
        neighbor_str = ''.join(neighbors)
        if neighbor_str == 'ns' or neighbor_str == 'ew':
            return None
        else:
            return neighbor_str

    def has_inside_diag_neighbor(self, corner_type, i, j):
        """True when the cell diagonally inside the given corner is occupied
        (used to decide whether a corner is a room or just a bent tunnel)."""
        bound = self.size-1
        switcher = {
            "ne": i > 0 and j < bound and self.grid[i-1][j+1],
            "nw": i > 0 and j > 0 and self.grid[i-1][j-1],
            "se": i < bound and j < bound and self.grid[i+1][j+1],
            "sw": i < bound and j > 0 and self.grid[i+1][j-1],
        }
        return switcher.get(corner_type) == 1

    def get_indef_article(self, string):
        """Return 'a' or 'an' for the given noun (with a short list of
        'u'-words that sound consonant-initial)."""
        vowels = ['a', 'e', 'i', 'o', 'u']
        stupid_exceptions = ['union', 'university', 'united', 'uniform']
        string = string.lower()
        if string[0] in vowels and string not in stupid_exceptions:
            return 'an'
        else:
            return 'a'

    def get_descriptions(self):
        """Append a neighbor summary sentence to every plain Room's
        description (tunnels and dead ends are skipped)."""
        for coords, room in self.rooms.items():
            if isinstance(room, Tunnel) or isinstance(room, DeadEnd):
                continue
            # rooms is keyed (x, y); grid directions map onto +/-1 offsets.
            neighbors = {
                "north": self.rooms.get((coords[0], coords[1]-1)),
                "south": self.rooms.get((coords[0], coords[1]+1)),
                "east": self.rooms.get((coords[0]+1, coords[1])),
                "west": self.rooms.get((coords[0]-1, coords[1]))
            }
            desc_strings = []
            for direction, neighbor in neighbors.items():
                if neighbor:
                    article = self.get_indef_article(
                        neighbor.loc_name["place"])
                    desc_strings.append(
                        f"to the {direction} is {article} "
                        f"{neighbor.loc_name['place']}")
            # Punctuate: capitalize the first clause, comma-separate the
            # middle ones, and end the last with a period plus an 'and'
            # inserted before it.
            for i, string in enumerate(desc_strings):
                length = len(desc_strings)
                if i == 0:
                    desc_strings[i] = ' ' + string[:1].upper() + string[1:]
                if i < length-1 and length > 2:
                    desc_strings[i] += ','
                elif i == length-1:
                    desc_strings[i] += '.'
                    # NOTE(review): reconstructed as part of this branch; it
                    # also fires when there is only one clause, yielding
                    # "and ..." — confirm intended.
                    desc_strings.insert(-1, 'and')
            room.description += ' '.join(desc_strings)

    def generate_rooms(self, world=None):
        """Instantiate a Room subclass for every occupied grid cell and
        return the (x, y) -> room dict.  Every 50th room is a store;
        single-neighbor cells are dead ends; straight 2-neighbor cells are
        tunnels; everything else is a plain room."""
        room_count = 0
        for i in range(self.size):
            for j in range(self.size):
                if self.grid[i][j] == 1:
                    neighbors = self.get_neighbors(i, j)
                    if room_count % 50 == 0:
                        room_type = 'store'
                    elif len(neighbors) == 1:
                        room_type = 'dead-end'
                    elif len(neighbors) > 2:
                        room_type = 'room'
                    else:
                        # 2 neighbors: a corner with an occupied inside
                        # diagonal counts as a room, otherwise a tunnel.
                        corner = self.get_corner_type(neighbors)
                        if corner and self.has_inside_diag_neighbor(
                                corner, i, j):
                            room_type = 'room'
                        else:
                            room_type = 'tunnel'
                    self.rooms[(j,i)] = self.create_room(j, i, room_type,
                                                         world)
                    room_count += 1
        self.get_descriptions()
        return self.rooms

    def print_grid(self):
        """Debug helper: print the grid with ANSI colors per room type
        (green=tunnel, blue=store, magenta=dead end, yellow=room, grey=empty)."""
        for i, row in enumerate(self.grid):
            row_str = ''
            for j, item in enumerate(row):
                if item == 1:
                    room = self.rooms.get((j,i))
                    if isinstance(room, Tunnel):
                        item_str = "\x1b[1;32m1"
                    elif isinstance(room, Store):
                        item_str = "\x1b[1;34m1"
                    elif isinstance(room, DeadEnd):
                        item_str = "\x1b[1;35m1"
                    else:
                        item_str = "\x1b[1;33m1"
                else:
                    item_str = "\x1b[1;30m0"
                row_str += item_str
            print(row_str)


class Walker:
    """Random walker that carves cells into the map grid."""

    def __init__(self, map, mode, rand_factor, y=0, x=0):
        self.y = y
        self.x = x
        self.mode = mode % 4
        # Higher rand_factor => fewer random turns (turn when randint == 0).
        self.rand_factor = rand_factor
        map.set_grid(y, x)

    # modes map as follows:
    # 0 => up
    # 1 => right
    # 2 => down
    # 3 => left
    def move(self, map):
        """Maybe rotate clockwise, then step one cell in the current
        direction (if within bounds) and mark it occupied."""
        map_size = map.size - 1
        action = random.randint(0, self.rand_factor)
        if action == 0:
            self.mode = (self.mode + 1) % 4
        if self.mode == 0 and self.y > 0:
            self.y -= 1
            map.set_grid(self.y, self.x)
        elif self.mode == 1 and self.x < map_size:
            self.x += 1
            map.set_grid(self.y, self.x)
        elif self.mode == 2 and self.y < map_size:
            self.y += 1
            map.set_grid(self.y, self.x)
        elif self.mode == 3 and self.x > 0:
            self.x -= 1
            map.set_grid(self.y, self.x)
####################################################################################
# DEPRICATED OLD VERSION - WILL BE SPLITTED INTO DIFFERENT FILES, WORK IN PROGRESS #
# THIS FILE IS BROKEN !!! WORK IN PROGRESS                                         #
# --> fp_files_diff.py V2.0.0 (finished)                                           #
# --> fp_reg_diff.py V2.0.0 (TODO)                                                 #
####################################################################################

import argparse
from lib_diff_files import FileDiff
from lib_diff_registry import RegistryDiff
from lib_helper_functions import *
import lib_doctest
import logging
import sys

logger = logging.getLogger()
lib_doctest.setup_doctest_logger()


def get_commandline_parameters():
    """
    Parse the command line into an argparse Namespace.

    NOTE(review): the doctest below was stale (it used the removed
    ``--name`` option and the nonexistent ``reg_save_param`` attribute);
    it now exercises the options the parser actually defines.

    >>> sys.argv.append('--name1=test')
    >>> sys.argv.append('--drive=c:/')
    >>> sys.argv.append('--target=c:/test')
    >>> args = get_commandline_parameters()
    >>> args.target
    'c:/test'
    >>> args.name1
    'test'
    """
    parser = argparse.ArgumentParser(description='create diff files for two system fingerprints')
    # Positionals are accepted but deliberately ignored so stray arguments
    # do not abort the run.
    parser.add_argument('positional_ignored', type=str, nargs='*', help='Positional Arguments are ignored')
    parser.add_argument('--name1', type=str, required=False, default='', help='Fingerprint Name1, e.g. "before-install"')
    parser.add_argument('--name2', type=str, required=False, default='', help='Fingerprint Name2, e.g. "after-install"')
    parser.add_argument('--drive', type=str, required=False, default='c:/', help='Fingerprint Drive, e.g. "c:\\"')
    parser.add_argument('--target', type=str, required=False, default='c:/fingerprint',help='Fingerprint Target Directory, e.g. "c:\\fingerprint"')
    parser.add_argument('--field_length_limit', type=int, required=False, default=32767, help='data from registry, default set to maximum length of a cell in excel (32767) - we can support much longer fields')
    parser.add_argument('--check_modified', type=bool, required=False, default=False, help='check if only the modify date of a key changed - noisy ! we check also the value')
    args = parser.parse_args()
    return args


def get_logfile_fullpath(fingerprint_result_dir:str, fingerprint_name_1:str, fingerprint_name_2:str)->str:
    """
    Build the posix path of the diff logfile inside the result directory.

    >>> fingerprint_result_dir = 'c:/test'
    >>> fingerprint_name_1 = 'test'
    >>> fingerprint_name_2 = 'test2'
    >>> get_logfile_fullpath(fingerprint_result_dir, fingerprint_name_1, fingerprint_name_2)
    'c:/test/test_test2_diff.log'
    """
    # NOTE(review): the doctest previously called a nonexistent
    # ``set_logfile_fullpath`` — corrected to this function's actual name.
    logfile_fullpath = convert_path_to_posix(os.path.join(fingerprint_result_dir, f'{fingerprint_name_1}_{fingerprint_name_2}_diff.log'))
    return logfile_fullpath


def main(fingerprint_name_1: str,
         fingerprint_name_2: str,
         fingerprint_drive: str,
         fingerprint_result_dir: str,
         field_length_limit: int = 32767,
         check_modified: bool = False):
    """
    Create the file diff and the registry diff between two fingerprints.

    :param fingerprint_name_1: the name of the first fingerprint, e.g. 'before-install'
    :param fingerprint_name_2: the name of the second fingerprint, e.g. 'after-install'
    :param fingerprint_drive: Fingerprint Drive, for instance 'c:/'
    :param fingerprint_result_dir: the result dir, e.g. 'c:/test'
    :param field_length_limit: data from registry, default set to maximum length of a cell in excel (32767) - we can support much longer fields
    :param check_modified: check if only the modify date of a key changed - noisy ! we check also the value
    :return:

    >>> main(fingerprint_name_1='test', fingerprint_name_2='test2', fingerprint_drive='c:/', fingerprint_result_dir='c:/test')
    """
    # (doctest above previously passed the nonexistent keyword ``fp_dir``;
    # corrected to ``fingerprint_drive``.)
    setup_console_logger()
    logger.info('create difference between fingerprints:\n\ndrive: {}\nresults directory: {}\nregistry field_length_limit: {}\ncheck_modified_date: {}\n\n'
                .format(fingerprint_drive, fingerprint_result_dir, field_length_limit, check_modified))
    # Prompt interactively for any fingerprint name not given on the
    # command line.
    if not fingerprint_name_1:
        fingerprint_name_1 = input('name of the fingerprint_1 (e.g. test1): ')
    if not fingerprint_name_2:
        fingerprint_name_2 = input('name of the fingerprint_2 (e.g. test2): ')
    logfile_fullpath = get_logfile_fullpath(fingerprint_result_dir, fingerprint_name_1, fingerprint_name_2)
    setup_file_logging(logfile_fullpath)
    logger.info('creating diff between fingerprints {} and {}'.format(fingerprint_name_1, fingerprint_name_2))
    with FileDiff(fingerprint_name_1=fingerprint_name_1,
                  fingerprint_name_2=fingerprint_name_2,
                  fingerprint_result_dir=fingerprint_result_dir,
                  fingerprint_drive=fingerprint_drive) as file_diff:
        file_diff.create_diff_file()
    with RegistryDiff(fingerprint_name_1=fingerprint_name_1,
                      fingerprint_name_2=fingerprint_name_2,
                      fingerprint_result_dir=fingerprint_result_dir,
                      field_length_limit=field_length_limit,
                      check_modified=check_modified) as registry_diff:
        registry_diff.create_diff_file()
    logger.info('Finished\n\n')
    input('enter for exit, check the logfile')


if __name__ == '__main__':
    commandline_args = get_commandline_parameters()
    main(fingerprint_name_1=commandline_args.name1,
         fingerprint_name_2=commandline_args.name2,
         fingerprint_drive=commandline_args.drive,
         fingerprint_result_dir=commandline_args.target,
         field_length_limit=commandline_args.field_length_limit,
         check_modified=commandline_args.check_modified)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# bot
# created 01.10.2021
# <NAME>, <EMAIL>
# https://github.com/kaulketh
# -----------------------------------------------------------
import os
import signal
import time
from multiprocessing import Process

import telepot
from telepot.loop import MessageLoop

from bot import singleton
from config import RING_BOT_TOKEN, RING_BOT_NAME, RING_RING_GROUP, \
    THK  # no public deployment (secret.py)
from config import switch_state, DING_DONG, WELCOME, RUNNING, STOPPED, \
    UNKNOWN_CMD, UNKNOWN_TYPE, CMD_START, CMD_STOP, CMD_REBOOT, REBOOT, \
    START, STARTED, STOPPING
from logger import LOGGER


class RingBot(singleton.Singleton):
    """
    Doorbell Telegram bot using the telepot framework
    (https://telepot.readthedocs.io), Python >= 3.

    Commands from the admin chat start/stop a background process that polls
    the doorbell switch and posts a "ding dong" message to the group chat.
    """

    def __init__(self, token, admin):
        self.__log = LOGGER
        self.__log.debug(f"Initialize instance of {self.__class__.__name__}")
        self.__token = token
        self.__admin = admin  # only this chat id may issue commands
        self.__bot = telepot.Bot(self.__token)
        self.__ding_dong = DING_DONG.format(RING_BOT_NAME)
        self.__receiver = RING_RING_GROUP  # group that gets ring messages
        self.__checker = None  # background Process polling the bell switch

    def __check_bell(self, timeout=.25):
        """Poll the doorbell switch forever; post to the group on a ring.

        Runs in a separate multiprocessing.Process (see CMD_START handling).
        NOTE(review): switch_state() is called twice per hit (once for the
        check, once for logging) — confirm the second read cannot miss.
        """
        while True:
            if switch_state():
                self.__log.info(switch_state())
                self.__send(self.__receiver, self.__ding_dong)
            time.sleep(timeout)

    def __send(self, chat_id, text):
        # Log with newlines flattened so one message stays on one log line.
        self.__log.debug(
            f"Message posted: "
            f"{chat_id}|{text}".replace("\n", " "))
        self.__bot.sendMessage(chat_id, text)

    def __handle(self, msg):
        """telepot message callback: dispatch admin text commands."""
        content_type, chat_type, chat_id = telepot.glance(msg)
        self.__log.debug(msg)
        # check user — silently ignore anyone but the admin
        if chat_id != self.__admin:
            # TODO: wrong id
            pass
            return
        # check content
        if content_type == 'text':
            command = msg['text']
            self.__log.info(f"Got command '{command}'")
            # commands
            # start: spawn the bell-checker process (if not already running)
            if command == CMD_START:
                if self.__checker is None:
                    self.__checker = Process(target=self.__check_bell)
                    self.__checker.start()
                    self.__send(self.__admin, STARTED)
                self.__send(self.__admin, RUNNING)
            # stop: terminate the bell-checker process (if running)
            elif command == CMD_STOP:
                if isinstance(self.__checker, Process):
                    self.__checker.terminate()
                    self.__checker = None
                    self.__send(self.__admin, STOPPING)
                self.__send(self.__admin, STOPPED)
            # reboot: announce, then reboot the host (requires sudo rights)
            elif command == CMD_REBOOT:
                self.__send(self.__admin, REBOOT.format(RING_BOT_NAME))
                os.system("sudo reboot")
            else:
                self.__send(self.__admin, UNKNOWN_CMD)
        else:
            self.__send(self.__admin, UNKNOWN_TYPE)

    def start(self):
        """Run the telepot message loop and block until interrupted."""
        try:
            MessageLoop(self.__bot,
                        {'chat': self.__handle}).run_as_thread()
            self.__log.info(START)
            self.__send(self.__admin, WELCOME.format(RING_BOT_NAME))
            # Keep the main thread alive; signal.pause() sleeps until any
            # signal arrives (the message loop runs on its own thread).
            while True:
                try:
                    signal.pause()
                except KeyboardInterrupt:
                    self.__log.warning('Program interrupted')
                    exit()
        except Exception as e:
            self.__log.error(f"An error occurred: {e}")
            exit()


def run():
    # Entry point used by the deployment: build the singleton and block.
    RingBot(RING_BOT_TOKEN, THK).start()


if __name__ == '__main__':
    pass
# -*- coding: utf-8 -*- #Scrapped from #https://www.engineeringtoolbox.com/mineral-density-d_1555.html densities = \ {'Acanthite': 7200.0, #kg/m^3 'Acmite': 3520.0, 'Actinolite': 3040.0, 'Alabandite': 4000.0, 'Alamandine': 4090.0, 'Albite': 2620.0, 'Allanite': 3300.0, 'Allemontite': 6150.0, 'Allophane': 1900.0, 'Altaite': 8140.000000000001, 'Alunite': 2590.0, 'Amblygonite': 2980.0, 'Analcime': 2300.0, 'Anatese': 3900.0, 'Andalusite': 3150.0, 'Andesine': 2670.0, 'Andradite': 3700.0, 'Anglesite': 6300.0, 'Ankerite': 3050.0, 'Annebergite': 3050.0, 'Anorthite': 2730.0, 'Anorthoclase': 2580.0, 'Anthophyllite': 2850.0, 'Anthydrite': 2970.0, 'Antimony': 6660.0, 'Antlerite': 3900.0, 'Apatite': 3190.0, 'Apidote': 3300.0, 'Apophyllite': 2340.0, 'Aragonite': 2930.0, 'Arfvedsonite': 3440.0, 'Argenite': 7300.0, 'Arsenic': 5700.0, 'Arsenopyrite': 6070.0, 'Atacamite': 3760.0, 'Augite': 3400.0, 'Aurichalcite': 3640.0, 'Autonite': 3150.0, 'Awaruite': 8000.0, 'Axinite': 3280.0, 'Azurite': 3830.0, 'Barite': 4480.0, 'Bastnaesite': 4950.0, 'Beidellite': 2150.0, 'Beryl': 2630.0, 'Biotite': 2800.0, 'Bismite': 8500.0, 'Bismuth': 9750.0, 'Boehmite': 3030.0, 'Boracite': 2900.0, 'Borax': 1710.0, 'Bornite': 5090.0, 'Boulangerite': 5700.0, 'Brannerite': 4500.0, 'Braunite': 4760.0, 'Bravoite': 5010.0, 'Brochantite': 3970.0, 'Bromargyrite': 5800.0, 'Bronzite': 3200.0, 'Brookite': 4110.0, 'Brucite': 2390.0, 'Bytownite': 2710.0, 'Calcite': 2710.0, 'Calomel': 6450.0, 'Calvarite': 9040.0, 'Cancrinite': 2450.0, 'Carnallite': 1600.0, 'Carnotite': 3700.0, 'Cassiterite': 6900.0, 'Celestite': 3950.0, 'Celsian': 3250.0, 'Cerargyrite': 5550.0, 'Cerussite': 6580.0, 'Cervanite': 6500.0, 'Chabazite': 2090.0, 'Chalcanthite': 2210.0, 'Chalcocite': 5500.0, 'Chalcopyrite': 4190.0, 'Chlorite': 2420.0, 'Chloritoid': 3510.0, 'Chondrodite': 3150.0, 'Chromite': 4500.0, 'Chrysoberyl': 3500.0, 'Chrysocolla': 2000.0, 'Cinnibar': 8100.0, 'Clinochlore': 2650.0, 'Clinoclase': 4290.0, 'Clinoenstatite': 3400.0, 
'Clinoferrosilite': 4100.0, 'Clinohumite': 3260.0, 'Clinzoite': 3340.0, 'Cobaltite': 6330.0, 'Colemanite': 2420.0, 'Columbite': 5300.0, 'Copper': 8940.0, 'Cordierite': 2550.0, 'Corundum': 4050.0, 'Coveillite': 4680.0, 'Cristobalite': 2270.0, 'Crocoite': 6000.0, 'Cruolite': 2970.0, 'Cubanite': 4700.0, 'Cummingtonite': 3350.0, 'Cuprite': 6100.0, 'Danburite': 2990.0, 'Datolite': 2900.0, 'Diamond': 3510.0, 'Diaspore': 3400.0, 'Dickite': 2600.0, 'Digenite': 5600.0, 'Diopside': 3400.0, 'Dioptase': 3310.0, 'Dolmite': 2840.0, 'Dolomite': 2840.0, 'Dumortierite': 3340.0, 'Edenite': 3020.0, 'Embolite': 5600.0, 'Enargite': 4450.0, 'Enstatite': 3200.0, 'Erythrite': 3120.0, 'Espomite': 1670.0, 'Euclase': 3040.0, 'Euxenite': 4840.0, 'Fayalite': 4390.0, 'Feberite': 7450.0, 'Fergusonite': 4500.0, 'Ferrimolybdite': 4000.0, 'Ferrosilite': 3950.0, 'Fluorite': 3130.0, 'Forsterite': 3270.0, 'Franklinite': 5140.0, 'Gadolinite': 4000.0, 'Gahnite': 4000.0, 'Galaxite': 4230.0, 'Galena': 7200.0, 'Garnierite': 2410.0, 'Gaylussite': 1960.0, 'Geocronite': 6400.0, 'Gersdorffite': 6110.0, 'Gibbsite': 2340.0, 'Glauberite': 2770.0, 'Glauconite': 2400.0, 'Glaucophane': 3070.0, 'Gmelinite': 2090.0, 'Goethite': 3300.0, 'Gold': 19320.0, 'Goslarite': 2000.0, 'Graphite': 2160.0, 'Greenocktite': 3980.0, 'Grossularite': 3420.0, 'Gypsum': 2300.0, 'Halite': 2170.0, 'Halloysite': 2000.0, 'Harmotome': 2460.0, 'Hastingsite': 3170.0, 'Hausmanite': 4760.0, 'Hauynite': 2450.0, 'Hectorite': 2000.0, 'Hedenbergite': 3550.0, 'Helvite': 3260.0, 'Hemimorphite': 3450.0, 'Hercynite': 3950.0, 'Hermatite': 5300.0, 'Hessite': 7200.0, 'Heulandite': 2200.0, 'Hornblende': 3000.0, 'Huebnerite': 7150.0, 'Humite': 3150.0, 'Hyalophane': 2810.0, 'Hydrozincite': 3200.0, 'Hypersthene': 3200.0, 'Ice': 990.0, 'Iddingsite': 2500.0, 'Idocrase': 3400.0, 'Illemenite': 4720.0, 'Illite': 2600.0, 'Ilvaite': 4010.0, 'Iodobromite': 5700.0, 'Iodyrite': 5600.0, 'Irdosmine': 19300.0, 'Iridium': 22700.0, 'Jacobsite': 4750.0, 'Jadeite': 3300.0, 
'Jamesonite': 5560.0, 'Jarosite': 2900.0, 'Kainite': 2100.0, 'Kaliophilite': 2580.0, 'Kaolinite': 2600.0, 'Kernite': 1910.0, 'Krennerite': 8530.0, 'Kyanite': 3610.0, 'Langbeinite': 2830.0, 'Larsenite': 5900.0, 'Laumonite': 2290.0, 'Lawsonite': 3090.0, 'Lazulite': 3050.0, 'Lazurite': 2400.0, 'Lechatelierite': 2500.0, 'Lepidocrocite': 4000.0, 'Lepidolite': 2840.0, 'Leucite': 2470.0, 'Liebthenite': 3800.0, 'Limonite': 3300.0, 'Linarite': 5400.0, 'Linneaeite': 4800.0, 'Lithiophilite': 3340.0, 'Loellingite': 7100.0, 'Magnesite': 3000.0, 'Magnetite': 5150.0, 'Malachite': 3600.0, 'Manganite': 4340.0, 'Manganosite': 5180.0, 'Marcasite': 4890.0, 'Margarite': 3030.0, 'Marialite': 2560.0, 'Meionite': 2690.0, 'Melanterite': 1890.0, 'Melilite': 2950.0, 'Menaghinite': 6380.0, 'Miargyrite': 5190.0, 'Microcline': 2560.0, 'Microlite': 4200.0, 'Millerite': 5500.0, 'Mimetite': 7170.0, 'Minium': 8200.0, 'Molybdenite': 5500.0, 'Monazite': 4800.0, 'Monticellite': 3200.0, 'Montmotillonite': 2000.0, 'Mullite': 3050.0, 'Musscovite': 2820.0, 'Nacrite': 2600.0, 'Nagyagite': 7500.0, 'Natrolite': 2250.0, 'Nephheline': 2550.0, 'Ni skutterudite': 6500.0, 'Niccolite': 7790.0, 'Nitre': 2100.0, 'Nontronite': 2300.0, 'Norbergite': 3150.0, 'Noselite': 2340.0, 'Oligoclase': 2650.0, 'Opal': 2090.0, 'Orpiment': 3520.0, 'Orthoclase': 2560.0, 'Ottrelite': 3520.0, 'Palladium': 11550.0, 'Paragonite': 2780.0, 'Pargasite': 3120.0, 'Pearceite': 6130.0, 'Pectolite': 2860.0, 'Pentlandite': 4600.0, 'Perovskite': 4000.0, 'Petalite': 2420.0, 'Petzite': 8700.0, 'Phenaktite': 2980.0, 'Phillipsite': 2200.0, 'Phlogopite': 2700.0, 'Phosgenite': 6000.0, 'Phosphuranylite': 4100.0, 'Pigeonite': 3300.0, 'Plagionite': 5400.0, 'Platinum': 21450.0, 'Polucite': 2900.0, 'Polybasite': 4600.0, 'Polycrase': 5000.0, 'Polyhalite': 2770.0, 'Potash alum': 1750.0, 'Powellite': 4340.0, 'Prehnite': 2870.0, 'Proustite': 5550.0, 'Pyrargyrite': 5850.0, 'Pyrite': 5010.0, 'Pyrochlore': 4200.0, 'Pyrolusite': 4400.0, 'Pyromorphite': 6700.0, 
'Pyrope': 3650.0, 'Pyrophyllite': 2840.0, 'Pyrrhotite': 4610.0, 'Quartz': 2620.0, 'Rammelsbergite': 7100.0, 'Realgar': 3560.0, 'Rhodochrosite': 3690.0, 'Rhodonite': 3500.0, 'Riebeckite': 3400.0, 'Roscoelite': 2970.0, 'Rutile': 4250.0, 'Samarskite': 5600.0, 'Sanidine': 2520.0, 'Saponite': 2300.0, 'Scapolite': 2660.0, 'Scheelite': 6010.0, 'Scolecite': 2160.0, 'Scorodite': 3200.0, 'Scorzalite': 3270.0, 'Semseyite': 5800.0, 'Sepiolite': 2000.0, 'Serpentine': 2530.0, 'Siderite': 3960.0, 'Siegenite': 4900.0, 'Sillimanite': 3240.0, 'Silver': 10500.0, 'Sklodowskite': 3540.0, 'Skutterudite': 6100.0, 'Smithsonite': 4450.0, 'Soda nitre': 2260.0, 'Sodalite': 2290.0, 'Sperrylite': 10580.0, 'Spessartine': 4180.0, 'Sphalerite': 4050.0, 'Sphene': 3480.0, 'Spinel group': 3570.0, 'Spodumene': 3150.0, 'Stannite': 4300.0, 'Staurolite': 3710.0, 'Stephanite': 6250.0, 'Sternbergite': 4220.0, 'Stibnite': 4630.0, 'Stilbite': 2150.0, 'Stillwellite': 4610.0, 'Stolzite': 7900.0, 'Stromeyerite': 6000.0, 'Strontianite': 3780.0, 'Sulphur': 2060.0, 'Sylvanite': 7900.0, 'Sylvite': 1990.0, 'Talc': 2750.0, 'Tantalite': 6200.0, 'Tennantite': 4600.0, 'Tenorite': 6500.0, 'Tephroite': 4110.0, 'Tetrahedrite': 4600.0, 'Thenardite': 2680.0, 'Thomsonite': 2340.0, 'Thorianite': 10000.0, 'Thorite': 4000.0, 'Tin': 7280.0, 'Topaz': 3550.0, 'Torbernite': 3200.0, 'Tourmaline': 3000.0, 'Tremolite': 2900.0, 'Tridymite': 2280.0, 'Triphylite': 3400.0, 'Troilite': 4610.0, 'Trona': 2130.0, 'Tungstite': 5500.0, 'Turquoise': 2600.0, 'Tyuyamunite': 3300.0, 'Ulexite': 1950.0, 'Uraninite': 6500.0, 'Uranophane': 3900.0, 'Uvarovite': 3400.0, 'Vanadinite': 6800.0, 'Varicite': 2500.0, 'Vaterite': 2710.0, #Assume now same as calcite 'Vermiculite': 2300.0, 'Violarite': 4500.0, 'Vivianite': 2650.0, 'Wavellite': 2340.0, 'Willemite': 3900.0, 'Witherite': 4300.0, 'Wolframite': 7100.0, 'Wollastonite': 2840.0, 'Wulfenite': 6500.0, 'Wurtzite': 4030.0000000000005, 'Xenotime': 4400.0, 'Zincite': 5430.0, 'Zinkenite': 5120.0, 'Zircon': 
4650.0, 'Zoisite': 3300.0}
#!/usr/bin/env python3
"""Regression test for the wav2letter word-LM beam-search decoder.

Loads binary test fixtures (emissions, transitions, lexicon, token set and a
KenLM ARPA model) from the directory given on the command line, then checks
LM scores, trie smearing scores and final decoder hypotheses against
hard-coded reference values.
"""
import math
import os
import struct
import sys

import numpy as np

from wav2letter.common import Dictionary, createWordDict, loadWords, tkn2Idx
from wav2letter.decoder import (
    CriterionType,
    DecoderOptions,
    KenLM,
    SmearingMode,
    Trie,
    WordLMDecoder,
)

if len(sys.argv) != 2:
    print(f"usage: {sys.argv[0]} decoder_test_data_path", file=sys.stderr)
    print(" (usually: <wav2letter_root>/src/decoder/test)", file=sys.stderr)
    sys.exit(1)
data_path = sys.argv[1]


def read_struct(file, fmt):
    """Read and unpack one struct of format `fmt` from the open binary file."""
    return struct.unpack(fmt, file.read(struct.calcsize(fmt)))


def load_TN(path):
    """Load the (T, N) pair (frames, tokens) stored as two int32 values."""
    with open(path, "rb") as file:
        T = read_struct(file, "i")[0]
        N = read_struct(file, "i")[0]
        return T, N


def load_emissions(path):
    """Load the T x N float32 emission matrix (uses module-level T, N)."""
    with open(path, "rb") as file:
        return np.frombuffer(file.read(T * N * 4), dtype=np.float32)


def load_transitions(path):
    """Load the N x N float32 ASG transition matrix (uses module-level N)."""
    with open(path, "rb") as file:
        return np.frombuffer(file.read(N * N * 4), dtype=np.float32)


def assert_near(x, y, tol):
    """Assert |x - y| <= tol (absolute tolerance comparison)."""
    assert abs(x - y) <= tol


# def ptr_as_bytes(x):
#     return struct.pack("P", x)
#
#
# def get_numpy_ptr_as_bytes(arr):
#     if not arr.flags["C_CONTIGUOUS"]:
#         raise ValueError("numpy array is not contiguous")
#     return ptr_as_bytes(arr.ctypes.data)


# load test files
T, N = load_TN(os.path.join(data_path, "TN.bin"))
emissions = load_emissions(os.path.join(data_path, "emission.bin"))
transitions = load_transitions(os.path.join(data_path, "transition.bin"))
lexicon = loadWords(os.path.join(data_path, "words.lst"))
wordDict = createWordDict(lexicon)
tokenDict = Dictionary(os.path.join(data_path, "letters.lst"))
tokenDict.addEntry("1")
lm = KenLM(os.path.join(data_path, "lm.arpa"), wordDict)

# test LM: score a fixed sentence word by word and compare to references
sentence = ["the", "cat", "sat", "on", "the", "mat"]
lm_state = lm.start(False)
total_score = 0
lm_score_target = [-1.05971, -4.19448, -3.33383, -2.76726, -1.16237, -4.64589]
for i in range(len(sentence)):
    lm_state, lm_score = lm.score(lm_state, wordDict.getIndex(sentence[i]))
    assert_near(lm_score, lm_score_target[i], 1e-5)
    total_score += lm_score
lm_state, lm_score = lm.finish(lm_state)
total_score += lm_score
assert_near(total_score, -19.5123, 1e-5)

# build trie: insert every lexicon spelling with its unigram LM score
sil_idx = tokenDict.getIndex("|")
unk_idx = wordDict.getIndex("<unk>")
trie = Trie(tokenDict.indexSize(), sil_idx)
start_state = lm.start(False)
for word, spellings in lexicon.items():
    usr_idx = wordDict.getIndex(word)
    _, score = lm.score(start_state, usr_idx)
    for spelling in spellings:
        # maxReps should be 1; using 0 here to match DecoderTest bug
        spelling_idxs = tkn2Idx(spelling, tokenDict, 0)
        trie.insert(spelling_idxs, usr_idx, score)
trie.smear(SmearingMode.MAX)

# after MAX smearing, each node carries the best score reachable below it
trie_score_target = [-1.05971, -2.87742, -2.64553, -3.05081, -1.05971, -3.08968]
for i in range(len(sentence)):
    word = sentence[i]
    # maxReps should be 1; using 0 here to match DecoderTest bug
    word_tensor = tkn2Idx([c for c in word], tokenDict, 0)
    node = trie.search(word_tensor)
    assert_near(node.maxScore, trie_score_target[i], 1e-5)

# run the decoder: beam 2500, lm weight 100.0, word/unk scores 2.0/2.0
opts = DecoderOptions(2500, 100.0, 2.0, 2.0, -math.inf, False, -1, CriterionType.ASG)
decoder = WordLMDecoder(opts, trie, lm, sil_idx, -1, unk_idx, transitions)
results = decoder.decode(emissions.ctypes.data, T, N)

print(f"Decoding complete, obtained {len(results)} results")
print("Showing top 5 results:")
for i in range(5):
    prediction = []
    for idx in results[i].tokens:
        if idx < 0:
            # negative token index marks the end of the hypothesis
            break
        prediction.append(tokenDict.getEntry(idx))
    prediction = " ".join(prediction)
    print(f"score={results[i].score} prediction='{prediction}'")

assert len(results) == 1452
hyp_score_target = [-278.111, -278.652, -279.275, -279.847, -280.01]
for i in range(5):
    assert_near(results[i].score, hyp_score_target[i], 1e-3)
import scrapy
import re
from YFSpider.items import EventItem, EvtProfileItem, EvtSymbolItem


class SurpriseSpider(scrapy.Spider):
    """Scrape Yahoo Finance earnings-surprise calendars and company profiles.

    Starting from a dated calendar page, follows weekday links within a
    configured year range, yielding EventItem per qualifying table row and
    EvtProfileItem / EvtSymbolItem from the linked company profile pages.
    """

    name = "surprise-profiles"
    allowed_domains = ["biz.yahoo.com", "finance.yahoo.com"]
    start_urls = ["http://biz.yahoo.com/z/20110103.html"]

    def parse(self, response):
        """Parse one calendar page: follow date links and emit event rows."""
        # extract and follow weekday links, unless links lead beyond range.
        for href in response.xpath("//center[position()=4]/a/@href"):
            relurl = href.extract()
            # links start with YYYYMMDD, e.g. "20110104.html"
            year = int(relurl[0:4])
            # controls which range of dates the spider looks at
            if (2010 < year < 2016):
                fullurl = response.urljoin(relurl)
                yield scrapy.Request(url=fullurl, callback=self.parse)

        table = response.xpath("//table[position()=5]")
        # extracting non-header rows with 7 columns with non-zero surprises
        # (header rows carry bgcolor="dcdcdc"; column 3 is the surprise %)
        rows = []
        for node in table.xpath("tr"):
            if node.xpath("@bgcolor").extract_first() != "dcdcdc":
                if (len(node.xpath("td")) == 7 and
                        node.xpath("td[position()=3]//text()").extract_first() != "0.00"):
                    rows.append(node)

        # the calendar date is encoded as YYYYMMDD in the page URL
        date = re.search("[0-9]{8}", response.url).group(0)

        for row in rows:
            # dig out the link to the profile
            # ie, http://finance.yahoo.com/q/pr?s=chmp
            url = row.xpath(".//a[contains(., 'Pro" + "file')]/@href").extract_first()
            if url:
                yield scrapy.Request(url=url,
                                     meta={'dont_redirect': True},
                                     callback=self.parse_stock_profile)
                # if a redirect occurs, there is nothing to process

        for row in rows:
            event = EventItem()
            event['date'] = '{y}-{m}-{d}'.format(y=date[0:4], m=date[4:6], d=date[6:8])
            # in order to catch empty cells, we will need to iterate
            # each cell individually
            vals = []
            for cell in row.xpath("td"):
                val = cell.xpath(".//text()").extract_first()
                vals.append(val)
            # columns: name, symbol, surprise %, reported EPS, estimated EPS
            (event['name'], event['symbol'], event['surprise'],
             event['eps'], event['eeps']) = vals[0:5]
            yield event

    def parse_stock_profile(self, response):
        """Parse a company profile page into an EvtProfileItem.

        If the ticker has changed ("Invalid" title page), records the
        change as an EvtSymbolItem and follows the link to the new symbol.
        """
        profile = EvtProfileItem()
        symchg = EvtSymbolItem()
        # check for, record and follow
        # "Changed Ticker Symbol", ie
        # https://finance.yahoo.com/q/pr?s=bota
        titleText = response.xpath("//title/text()").extract_first()
        isInvalid = re.search("^Invalid", titleText)
        if isInvalid:
            # record the symbol change
            cell = response.xpath("//table[@id='yfncsumtab']" +
                                  "/tr[position()=2]/td")
            items = cell.xpath("./big//b//text()").extract()
            symchg['symbol'] = re.sub("\"", "", items[0])
            symchg['newSymbol'] = items[1]
            yield symchg
            # follow the link
            relurl = cell.xpath(".//a[(contains(@href, " +
                                "'/q/pr?s='))]/@href").extract_first()
            fullurl = response.urljoin(relurl)
            yield scrapy.Request(url=fullurl,
                                 meta={'dont_redirect': True},
                                 callback=self.parse_stock_profile)
        else:
            titleDiv = response.xpath("//div[@class='title']")
            # strip leading/trailing whitespace from the joined title text
            titleStr = re.sub("^\s*|\s*$", "",
                              "".join(titleDiv.xpath(".//text()").extract()))
            # extracting the bourse name
            (nameSymbol, bourse) = re.split("\) -", titleStr)
            # making a single right-hand regex split
            # prepare the string by reversing it:
            nameSymbol = nameSymbol[::-1]
            p = re.compile(r"\( ")  # reversed pattern
            (symbol, name) = p.split(nameSymbol, maxsplit=1)
            # reverse the substrings
            name = name[::-1]
            symbol = symbol[::-1]
            profile['name'] = name.encode('utf-8')
            profile['symbol'] = symbol
            profile['bourse'] = bourse
            # dig out the link to the map, extract last query parameter
            maplink = response.xpath("//a[(contains(@href, " +
                                     "'/maps_result?'))]/@href").extract_first()
            if maplink:
                profile['country'] = re.sub("%20", " ",
                                            re.sub(".*&country=", "", maplink))
            else:
                profile['country'] = ""
            profile['sector'] = response.xpath(".//td[text()='Sector:']" +
                                               "/../td[position()=2]" +
                                               "//text()").extract_first()
            profile['industry'] = response.xpath(".//td[text()='Industry:']" +
                                                 "/../td[position()=2]" +
                                                 "//text()").extract_first()
            yield profile
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import re

from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError

from backend.utils.exceptions import ResNotFoundError

from .. import models

# Show-version names: 1-45 chars of letters, digits, underscore, '-' or '.'
RE_SHOW_NAME = re.compile(r"^[a-zA-Z0-9-_.]{1,45}$")


class ShowVersionNameSLZ(serializers.Serializer):
    """Validates a user-visible show-version name and optional comment."""

    name = serializers.RegexField(
        RE_SHOW_NAME, max_length=45, required=True, error_messages={"invalid": "请填写1至45个字符(字母、数字、下划线以及 - 或 .)"}
    )
    comment = serializers.CharField(default="", allow_blank=True)


class ShowVersionCreateSLZ(ShowVersionNameSLZ):
    """Name/comment plus the owning project and template ids."""

    project_id = serializers.CharField(required=True)
    template_id = serializers.CharField(required=True)


class ShowVersionWithEntitySLZ(ShowVersionCreateSLZ):
    """Binds a show version to a concrete VersionedEntity (real version)."""

    real_version_id = serializers.IntegerField(required=True)
    show_version_id = serializers.IntegerField(required=True)

    def validate(self, data):
        """Check the real version exists and belongs to the given template.

        Also resolves and attaches the Template object as data["template"].
        Raises ValidationError on any inconsistency.
        """
        real_version_id = data["real_version_id"]
        # non-positive id means the template content was never saved
        if real_version_id <= 0:
            raise ValidationError(_("请先填写模板内容,再保存"))

        template_id = data["template_id"]
        try:
            models.VersionedEntity.objects.get(id=real_version_id, template_id=template_id)
        except models.VersionedEntity.DoesNotExist:
            raise ValidationError(_("模板集版本(id:{})不属于该模板(id:{})").format(real_version_id, template_id))

        template = models.get_template_by_project_and_id(data["project_id"], template_id)
        data["template"] = template
        return data


class GetShowVersionSLZ(serializers.Serializer):
    """Resolves a show version by id; id == -1 selects "no show version"."""

    show_version_id = serializers.CharField(required=True)
    template_id = serializers.CharField(required=True)
    project_id = serializers.CharField(required=True)

    def validate(self, data):
        """Coerce show_version_id to int and attach template/show_version.

        data["show_version"] is None when show_version_id is -1 (draft case);
        otherwise the ShowVersion row, validated to belong to the template.
        """
        try:
            data["show_version_id"] = int(data["show_version_id"])
        except Exception as e:
            raise ValidationError(e)

        template_id = data["template_id"]
        template = models.get_template_by_project_and_id(data["project_id"], template_id)
        data["template"] = template

        show_version_id = data["show_version_id"]
        if show_version_id == -1:
            data["show_version"] = None
            return data

        try:
            data["show_version"] = models.ShowVersion.objects.get(id=show_version_id, template_id=template_id)
        except models.ShowVersion.DoesNotExist:
            raise ValidationError(
                f"show version(id:{show_version_id}) does not exist or not belong to template(id:{template_id})"
            )
        else:
            return data


class GetLatestShowVersionSLZ(serializers.Serializer):
    """Resolves the most recently updated show version of a template."""

    template_id = serializers.CharField(required=True)
    project_id = serializers.CharField(required=True)

    def validate(self, data):
        template = models.get_template_by_project_and_id(data["project_id"], data["template_id"])
        data["template"] = template
        data["show_version"] = models.ShowVersion.objects.get_latest_by_template(template.id)
        data["show_version_id"] = data["show_version"].id
        return data


class ResourceConfigSLZ(serializers.Serializer):
    """Serializes the resource config behind a show version.

    Expects a dict with "show_version_id", "template" and (unless id is -1)
    "show_version" keys, as produced by the SLZs above.
    """

    show_version_id = serializers.IntegerField(required=True)
    config = serializers.SerializerMethodField()

    def get_config(self, obj):
        """Build the config payload for the selected version.

        Resolution order: show_version_id == -1 -> template draft;
        real_version_id is None -> no content; real_version_id == -1 -> draft;
        real_version_id == 0 -> latest VersionedEntity; otherwise the exact
        VersionedEntity (404-style error if missing).
        """
        show_version_id = obj["show_version_id"]
        template = obj["template"]
        config = {"show_version_id": show_version_id}
        if show_version_id == -1:
            config["version"] = template.draft_version
            config.update(template.get_draft())
            return config

        show_version = obj["show_version"]
        real_version_id = show_version.real_version_id
        # ugly! real_version_id may be integer(-1, 0, ...) or None
        if real_version_id is None:
            config["version"] = real_version_id
            return config

        # version_id 为 -1 则查看草稿
        if real_version_id == -1:
            config["version"] = real_version_id
            config.update(template.get_draft())
            return config

        # real_version_id 为 0 查看最新版本
        if real_version_id == 0:
            ventity = models.VersionedEntity.objects.get_latest_by_template(template.id)
        else:
            try:
                ventity = models.VersionedEntity.objects.get(id=real_version_id)
            except models.VersionedEntity.DoesNotExist:
                raise ResNotFoundError(_("模板集版本(id:{})不存在").format(real_version_id))

        if ventity:
            config["version"] = ventity.id
            config.update(ventity.get_resource_config())
        return config

    def to_representation(self, instance):
        """Flatten the nested "config" dict into the top-level payload."""
        instance = super().to_representation(instance)
        config = instance["config"]
        del instance["config"]
        instance.update(config)
        return instance


class ListShowVersionSLZ(serializers.ModelSerializer):
    """List row for show versions (exposes the pk as show_version_id)."""

    show_version_id = serializers.IntegerField(required=False, source="id")

    class Meta:
        model = models.ShowVersion
        fields = ("show_version_id", "real_version_id", "name", "updator", "updated", "comment")


class ListShowVersionISLZ(serializers.ModelSerializer):
    """Alternate list shape: id is the real version, name doubles as version."""

    id = serializers.IntegerField(source="real_version_id")
    show_version_id = serializers.IntegerField(source="id")
    show_version_name = serializers.CharField(source="name")
    version = serializers.CharField(source="name")

    class Meta:
        model = models.ShowVersion
        fields = ("id", "show_version_id", "show_version_name", "version", "comment")
import datetime
import smtplib
import uuid
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from flask import render_template, url_for
from itsdangerous import URLSafeTimedSerializer

# When app.config['TESTING'] is set, outgoing messages are captured here
# instead of being sent, so tests can inspect them.
TEST_MESSAGES = []


class EmailService:
    """Builds and sends the application's transactional emails.

    Each public method renders a txt + html template pair, sends it via
    send_email(), and returns a tracking code used for open-tracking
    (a 1x1 logo image served by the 'track.logo' endpoint).
    """

    def __init__(self, app):
        """Store the Flask app and cache frequently used config values."""
        self.app = app
        self.api_url = app.config['API_URL']
        self.site_url = app.config['SITE_URL']
        self.admin_email = app.config['ADMIN_EMAIL']
        self.principal_investigator_email = app.config['PRINCIPAL_INVESTIGATOR_EMAIL']

    def tracking_code(self):
        """Return a 16-character random tracking code."""
        return str(uuid.uuid4())[:16]

    def email_server(self):
        """Open, authenticate and return an SMTP connection per config."""
        server = smtplib.SMTP(host=self.app.config['MAIL_SERVER'],
                              port=self.app.config['MAIL_PORT'],
                              timeout=self.app.config['MAIL_TIMEOUT'])
        server.ehlo()
        if self.app.config['MAIL_USE_TLS']:
            server.starttls()
        if self.app.config['MAIL_USERNAME']:
            server.login(self.app.config['MAIL_USERNAME'],
                         self.app.config['MAIL_PASSWORD'])
        return server

    def send_email(self, subject, recipients, text_body, html_body, sender=None, ical=None):
        """Assemble a multipart (plain + html, optional ics) message and send it.

        In TESTING mode the message is appended to TEST_MESSAGES instead of
        being sent. `ical`, when given, is a bytes payload attached as
        event.ics.
        """
        msgRoot = MIMEMultipart('related')
        msgRoot.set_charset('utf8')
        if sender is None:
            sender = self.app.config['MAIL_DEFAULT_SENDER']
        msgRoot['Subject'] = Header(subject.encode('utf-8'), 'utf-8').encode()
        msgRoot['From'] = sender
        msgRoot['To'] = ', '.join(recipients)
        msgRoot.preamble = 'This is a multi-part message in MIME format.'
        msgAlternative = MIMEMultipart('alternative')
        msgRoot.attach(msgAlternative)
        part1 = MIMEText(text_body, 'plain', _charset='UTF-8')
        part2 = MIMEText(html_body, 'html', _charset='UTF-8')
        msgAlternative.attach(part1)
        msgAlternative.attach(part2)
        if ical:
            ical_atch = MIMEText(ical.decode("utf-8"), 'calendar')
            ical_atch.add_header('Filename', 'event.ics')
            ical_atch.add_header('Content-Disposition', 'attachment; filename=event.ics')
            msgRoot.attach(ical_atch)
        if 'TESTING' in self.app.config and self.app.config['TESTING']:
            print("TEST:  Recording Emails, not sending - %s - to:%s" % (subject, recipients))
            TEST_MESSAGES.append(msgRoot)
            return
        server = self.email_server()
        server.sendmail(sender, recipients, msgRoot.as_bytes())
        server.quit()

    def confirm_email(self, user, current_studies, tracking_code=None, logo_url=None, days='0days'):
        """Send the email-confirmation message with a signed reset token.

        tracking_code/logo_url may be pre-generated (async path); otherwise
        both are created here. Returns the tracking code.
        """
        if tracking_code is None and logo_url is None:
            tracking_code = self.tracking_code()
            logo_url = url_for('track.logo', user_id=user.id, code=tracking_code, _external=True)
        user.token_url = ''
        ts = URLSafeTimedSerializer(self.app.config["SECRET_KEY"])
        token = ts.dumps(user.email, salt='email-reset-key')
        role = '' + user.role.name + '/'
        ga_link = self.generate_google_analytics_link_content('reset_password', days)
        studies_ga_link = self.generate_google_analytics_link_content('reset_password_studies', days)
        subject = "Autism DRIVE: Confirm Email"
        confirm_url = self.app.config['FRONTEND_EMAIL_RESET'] + role + token + ga_link
        text_body = render_template("confirm_email.txt",
                                    user=user, confirm_url=confirm_url,
                                    forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'] + ga_link,
                                    tracking_code=tracking_code, current_studies=current_studies,
                                    studies_url=self.site_url + '/#/studies/currently_enrolling' + studies_ga_link)
        html_body = render_template("confirm_email.html",
                                    user=user, confirm_url=confirm_url,
                                    forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'] + ga_link,
                                    logo_url=logo_url, tracking_code=tracking_code,
                                    current_studies=current_studies,
                                    studies_url=self.site_url + '/#/studies/currently_enrolling' + studies_ga_link)
        self.send_email(subject, recipients=[user.email], text_body=text_body, html_body=html_body)
        # expose the URL to tests / dev tooling only
        if self.app.config.get('TESTING') or self.app.config.get('DEVELOPMENT'):
            user.token_url = confirm_url
        return tracking_code

    def async_confirm_email(self, user, current_studies, days):
        """Background-thread variant of confirm_email.

        url_for needs a request context, so the logo URL is built by hand
        from api_url before delegating.
        """
        with self.app.app_context(), self.app.test_request_context():
            tracking_code = self.tracking_code()
            logo_url = self.api_url + '/api/track/' + str(user.id) + '/' + tracking_code + '/UVA_STAR-logo.png'
            self.confirm_email(user, current_studies, tracking_code, logo_url, days)

    def reset_email(self, user):
        """Send the password-reset email; returns the tracking code."""
        user.token_url = ''
        ts = URLSafeTimedSerializer(self.app.config["SECRET_KEY"])
        token = ts.dumps(user.email, salt='email-reset-key')
        role = '' + user.role.name + '/'
        tracking_code = self.tracking_code()
        subject = "Autism DRIVE: Password Reset Email"
        reset_url = self.app.config['FRONTEND_EMAIL_RESET'] + role + token
        logo_url = url_for('track.logo', user_id=user.id, code=tracking_code, _external=True)
        text_body = render_template("reset_email.txt",
                                    user=user, reset_url=reset_url,
                                    forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'],
                                    tracking_code=tracking_code)
        html_body = render_template("reset_email.html",
                                    user=user, reset_url=reset_url,
                                    forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'],
                                    logo_url=logo_url, tracking_code=tracking_code)
        self.send_email(subject, recipients=[user.email], text_body=text_body, html_body=html_body)
        if self.app.config.get('TESTING') or self.app.config.get('DEVELOPMENT'):
            user.token_url = reset_url
        return tracking_code

    def study_inquiry_email(self, study, user):
        """Notify the study coordinator that a user inquired about a study."""
        tracking_code = self.tracking_code()
        subject = "Autism DRIVE: Study Inquiry Email"
        logo_url = url_for('track.logo', user_id=user.id, code=tracking_code, _external=True)
        text_body = render_template("study_inquiry_email.txt",
                                    user=user, study=study,
                                    user_detail_url=self.site_url + '/#/admin/user/' + str(user.id),
                                    tracking_code=tracking_code)
        html_body = render_template("study_inquiry_email.html",
                                    user=user, study=study,
                                    user_detail_url=self.site_url + '/#/admin/user/' + str(user.id),
                                    logo_url=logo_url, tracking_code=tracking_code)
        # TODO(review): '<EMAIL>' is a redacted placeholder from source
        # anonymization -- restore the real CC address (or drop it).
        self.send_email(subject, recipients=[study.coordinator_email, '<EMAIL>'],
                        text_body=text_body, html_body=html_body)
        return tracking_code

    def admin_alert_email(self, subject, message, alert_principal_investigator=False):
        """Send an alert to the admin (and optionally the PI)."""
        with self.app.app_context():
            context = {}
            text_body = render_template("admin_email.txt", msg=message, site_url=self.site_url, **context)
            html_body = render_template("admin_email.html", msg=message, site_url=self.site_url, **context)
            recipients = [self.admin_email]
            if alert_principal_investigator:
                recipients.append(self.principal_investigator_email)
            self.send_email(subject, recipients=recipients, text_body=text_body, html_body=html_body)

    def complete_registration_prompt_email(self, user, current_studies, days):
        """Remind a user (after `days`) to finish their registration."""
        with self.app.app_context(), self.app.test_request_context():
            tracking_code = self.tracking_code()
            ga_link = self.generate_google_analytics_link_content('create_yourprofile', days)
            studies_ga_link = self.generate_google_analytics_link_content('create_yourprofile_studies', days)
            subject = "Autism DRIVE: Complete Your Registration"
            logo_url = self.api_url + '/api/track/' + str(user.id) + '/' + tracking_code + '/UVA_STAR-logo.png'
            text_body = render_template("complete_registration_email.txt",
                                        profile_url=self.app.config['SITE_URL'] + '/#/profile' + ga_link,
                                        forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'] + ga_link,
                                        current_studies=current_studies,
                                        studies_url=self.site_url + '/#/studies/currently_enrolling' + studies_ga_link)
            html_body = render_template("complete_registration_email.html",
                                        profile_url=self.app.config['SITE_URL'] + '/#/profile' + ga_link,
                                        forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'] + ga_link,
                                        logo_url=logo_url, tracking_code=tracking_code,
                                        current_studies=current_studies,
                                        studies_url=self.site_url + '/#/studies/currently_enrolling' + studies_ga_link)
            self.send_email(subject, recipients=[user.email], text_body=text_body, html_body=html_body)
            return tracking_code

    def complete_dependent_profile_prompt_email(self, user, current_studies, days):
        """Remind a user (after `days`) to finish their dependent's profile."""
        with self.app.app_context(), self.app.test_request_context():
            tracking_code = self.tracking_code()
            ga_link = self.generate_google_analytics_link_content('create_dependentprofile', days)
            studies_ga_link = self.generate_google_analytics_link_content('create_dependentprofile_studies', days)
            subject = "Autism DRIVE: Complete Your Dependent's Profile"
            logo_url = self.api_url + '/api/track/' + str(user.id) + '/' + tracking_code + '/UVA_STAR-logo.png'
            text_body = render_template("complete_dependent_profile_email.txt",
                                        profile_url=self.app.config['SITE_URL'] + '/#/profile' + ga_link,
                                        forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'] + ga_link,
                                        current_studies=current_studies,
                                        studies_url=self.site_url + '/#/studies/currently_enrolling' + studies_ga_link)
            # BUGFIX: this config key was corrupted ('FRONT<PASSWORD>') and
            # would raise KeyError; the parallel text body above shows the
            # intended key is FRONTEND_FORGOT_PASSWORD.
            html_body = render_template("complete_dependent_profile_email.html",
                                        profile_url=self.app.config['SITE_URL'] + '/#/profile' + ga_link,
                                        forgot_pass_url=self.app.config['FRONTEND_FORGOT_PASSWORD'] + ga_link,
                                        logo_url=logo_url, tracking_code=tracking_code,
                                        current_studies=current_studies,
                                        studies_url=self.site_url + '/#/studies/currently_enrolling' + studies_ga_link)
            self.send_email(subject, recipients=[user.email], text_body=text_body, html_body=html_body)
            return tracking_code

    @staticmethod
    def generate_google_analytics_link_content(campaign, days):
        """Build the UTM query string appended to links in outgoing emails."""
        return '?utm_source=email&utm_medium=referral&utm_campaign=' + campaign + '&utm_content=' \
               + days + '&utm_term=' + str(datetime.date.today())
"""Print descriptive statistics for trajectory feature files (.npy).

Loads a feature matrix of shape (m, 4) with columns
[mean_v, mean_acc, mean_deac, std_jy] and reports, per column, the mean,
standard deviation, bounds and quartiles.
"""
import numpy as np
from typing import Any, Dict, List, Tuple, NoReturn
import argparse
import os


def parse_arguments() -> Any:
    """Parse command line arguments."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--data_dir",
        default="",
        type=str,
        help="Directory where the features (npy files) are saved",
    )
    ap.add_argument("--mode", required=True, type=str,
                    help="train/val/test/sample",
                    choices=['train', 'test', 'val', 'sample'])
    ap.add_argument("--obs_len", default=2, type=int,
                    help="Observed length of the trajectory in seconds",
                    choices=[1, 2, 3, 4, 5])
    ap.add_argument("--filter", default='ekf', type=str,
                    help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol",
                    choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
    return ap.parse_args()


def stats(traj: np.ndarray) -> NoReturn:
    """Print per-column summary statistics of the feature matrix.

    Covers central tendency (mean), dispersion (std), bounds (min/max)
    and the 0.25/0.5/0.75 quantiles, each rounded to two decimals.
    """
    labels = ['mean_v', 'mean_acc', 'mean_deac', 'std_jy']
    for col, label in enumerate(labels[:traj.shape[1]]):
        column = traj[:, col]
        summary = {
            'mean': round(np.mean(column), 2),
            'std': round(np.std(column), 2),
            'min': round(np.min(column), 2),
            'max': round(np.max(column), 2),
            'q25': round(np.quantile(column, 0.25), 2),
            'q50': round(np.quantile(column, 0.5), 2),
            'q75': round(np.quantile(column, 0.75), 2),
        }
        print(f'Feature: {label}')
        print('\tmean:{mean} | std:{std} | min:{min} | max:{max} | '
              'q25:{q25} | q50:{q50} | q75:{q75}'.format(**summary))


if __name__ == '__main__':
    args = parse_arguments()
    # the test split was generated with a fixed 2s observation window
    if args.mode == 'test':
        args.obs_len = 2

    assert os.path.exists(args.data_dir), \
        f'[Analysis][main][ERROR] data_dir not found!({args.data_dir})'
    data_file = f'features_{args.mode}_{args.obs_len}s_{args.filter}.npy'
    assert os.path.exists(os.path.join(args.data_dir, data_file)), \
        f'[Analysis][main][ERROR] data_file not found!({data_file})'

    print('[Analysis] loading dataset....')
    # (m, 4) matrix: [mean_v, mean_acc, mean_deac, std_jy]
    data = np.load(os.path.join(args.data_dir, data_file))
    print(f'[Analysis] mode:{args.mode} | filter:{args.filter} | obs_len:{args.obs_len}')
    print(f'[Analysis] data shape:{data.shape}')
    print('[Analysis] stats:')
    stats(data)
# Copyright 2008-2018 by <NAME>.  All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""AlignIO support module (not for general use).

Unless you are writing a new parser or writer for Bio.AlignIO, you
should not use this module.  It provides base classes to try and
simplify things.
"""

from __future__ import print_function

import sys  # for checking if Python 2

from Bio.Alphabet import single_letter_alphabet


class AlignmentIterator(object):
    """Base class for building MultipleSeqAlignment iterators.

    You should write a next() method to return Alignment objects.  You may
    wish to redefine the __init__ method as well.
    """

    # TODO - Should the default be Gapped(single_letter_alphabet) instead?
    def __init__(self, handle, seq_count=None, alphabet=single_letter_alphabet):
        """Create an AlignmentIterator object.

        Arguments:
         - handle   - input file
         - count    - optional, expected number of records per alignment
           Recommend for fasta file format.
         - alphabet - optional, e.g. Bio.Alphabet.generic_protein

        Note when subclassing:
         - there should be a single non-optional argument, the handle,
           and optional count and alphabet IN THAT ORDER.
         - you do not have to require an alphabet (?).
         - you can add additional optional arguments.
        """
        self.handle = handle
        # expected sequences per alignment (None means "not enforced")
        self.records_per_alignment = seq_count
        self.alphabet = alphabet
        #####################################################
        # You may want to subclass this, for example        #
        # to read through the file to find the first record,#
        # or if additional arguments are required.          #
        #####################################################

    def __next__(self):
        """Return the next alignment in the file.

        This method should be replaced by any derived class to do something
        useful.
        """
        raise NotImplementedError("This object should be subclassed")
        #####################################################
        # You SHOULD subclass this, to split the file up    #
        # into your individual alignments and convert these #
        # into MultipleSeqAlignment objects.                #
        #####################################################

    if sys.version_info[0] < 3:
        def next(self):
            """Python 2 style alias for Python 3 style __next__ method."""
            return self.__next__()

    def __iter__(self):
        """Iterate over the entries as MultipleSeqAlignment objects.

        Example usage for (concatenated) PHYLIP files::

            with open("many.phy","r") as myFile:
                for alignment in PhylipIterator(myFile):
                    print("New alignment:")
                    for record in alignment:
                        print(record.id)
                        print(record.seq)
        """
        # iter(callable, sentinel): keep calling __next__ until it returns None
        return iter(self.__next__, None)


class AlignmentWriter(object):
    """Base class for building MultipleSeqAlignment writers.

    You should write a write_alignment() method.  You may wish to redefine
    the __init__ method as well.
    """

    def __init__(self, handle):
        """Initialize the class."""
        self.handle = handle

    def write_file(self, alignments):
        """Use this to write an entire file containing the given alignments.

        Arguments:
         - alignments - A list or iterator returning MultipleSeqAlignment
           objects

        In general, this method can only be called once per file.

        This method should be replaced by any derived class to do something
        useful.  It should return the number of alignments.
        """
        raise NotImplementedError("This object should be subclassed")
        #####################################################
        # You SHOULD subclass this, to write the alignment  #
        # objects to the file handle                        #
        #####################################################

    def clean(self, text):
        """Use this to avoid getting newlines in the output."""
        return text.replace("\n", " ").replace("\r", " ")


class SequentialAlignmentWriter(AlignmentWriter):
    """Base class for building MultipleSeqAlignment writers.

    This assumes each alignment can be simply appended to the file.
    You should write a write_alignment() method.  You may wish to redefine
    the __init__ method as well.
    """

    def __init__(self, handle):
        """Initialize the class."""
        self.handle = handle

    def write_file(self, alignments):
        """Use this to write an entire file containing the given alignments.

        Arguments:
         - alignments - A list or iterator returning MultipleSeqAlignment
           objects

        In general, this method can only be called once per file.
        """
        # template method: header, then one write_alignment() call per
        # alignment, then footer; returns how many alignments were written
        self.write_header()
        count = 0
        for alignment in alignments:
            self.write_alignment(alignment)
            count += 1
        self.write_footer()
        return count

    def write_header(self):
        """Use this to write any header.

        This method should be replaced by any derived class to do something
        useful.
        """
        pass

    def write_footer(self):
        """Use this to write any footer.

        This method should be replaced by any derived class to do something
        useful.
        """
        pass

    def write_alignment(self, alignment):
        """Use this to write a single alignment.

        This method should be replaced by any derived class to do something
        useful.
        """
        raise NotImplementedError("This object should be subclassed")
        #####################################################
        # You SHOULD subclass this, to write the alignment  #
        # objects to the file handle                        #
        #####################################################
""" news page handlers: news_cnbc news_reuters news_cnn news_inquirer news_gma news_bworld process: derive data from news front page url as html and json files get document similarity between header and content get news summary, applying document similarity measures to skip first content line if similar to header convert news data to docx as close as possible to target format postprocess: filter and sort news blocks as required copy to final report """ import datetime def isoformat_date_to_datetime_obj(dt_str): return datetime.datetime.strptime(dt_str,"%Y-%m-%d") def get_before_date(dt: datetime): days_before = 2 if dt.strftime('%A') == "Sunday" else 1 return (NOW_DT - datetime.timedelta(days=days_before)) NOW_DT = datetime.datetime.now() NOW = NOW_DT.isoformat()[:10] NOW_DT = isoformat_date_to_datetime_obj(NOW) NOW_BEFORE = get_before_date(NOW_DT).isoformat()[:10] NOW_BEFORE_DT = isoformat_date_to_datetime_obj(NOW_BEFORE) # ROOT = r"D:\Shared\test\bmw" import os import tempfile ROOT = os.path.join(tempfile.gettempdir(), "bmw_gen") import argparse def cli(): ap = argparse.ArgumentParser( description="""BMW generator \t1. scrape news websites (GMA, CNBC, Reuters, CNN, Inquirer, BWorld) \t2. generate summary from found content, pruning first sentence if close to news title measured by TF-IDF and Soft Cosine Similarity \t3. classify between PHL and ROW with Multinomial Bayes model trained on 2018-2021 BMW documents \t4. 
output formatted docx files""", formatter_class=argparse.RawTextHelpFormatter
    )
    # CLI flags: inclusive date window plus output directory.
    ap.add_argument('--start_date', '-sd', type=str, help="YYYY-MM-DD start of news articles for output")
    ap.add_argument('--end_date', '-ed', type=str, help="YYYY-MM-DD end of news articles for output")
    ap.add_argument('--out_dir', '-o', type=str, help="output directory for output ")
    args = ap.parse_args()
    return (
        args.start_date,
        args.end_date,
        args.out_dir,
    )


def set_configuration(out, ed, sd):
    """Override the module-level ROOT/NOW/NOW_BEFORE globals from CLI values
    (a None argument keeps the existing default) and ensure ROOT exists."""
    global ROOT
    global NOW
    global NOW_DT
    global NOW_BEFORE
    global NOW_BEFORE_DT
    if not out is None:
        ROOT = out
    if not ed is None:
        NOW = ed
        NOW_DT = isoformat_date_to_datetime_obj(NOW)
    if not sd is None:
        NOW_BEFORE = sd
        NOW_BEFORE_DT = isoformat_date_to_datetime_obj(NOW_BEFORE)
    # else:
    #     NOW_BEFORE = get_before_date(NOW_DT).isoformat()[:10]
    #     NOW_BEFORE_DT = isoformat_date_to_datetime_obj(NOW_BEFORE)
    print(f"{NOW_BEFORE} {NOW_BEFORE_DT.strftime('%A')} to {NOW} {NOW_DT.strftime('%A')}")
    print(f"saving output files to {ROOT}")
    if not os.path.exists(ROOT):
        os.makedirs(ROOT)


if __name__ == "__main__":
    # Parse the CLI early so the slow library imports below only happen
    # after the user confirms the configuration.
    sd, ed, out = cli()
    set_configuration(out, ed, sd)
    input("enter to continue")

print("setting up libraries")
# Deferred imports: these pull in the heavy scraping/NLP/docx stack.
from typing import List, Union
from news import NewsItem, NewsMinimal, ensure_dirs_exist, html_dir, json_dir, NewsGroup
from news_nlp import process_ng__json, process_nlp_applied__json
from news_bmw_docx import convert_ng_nlp_to_docx, docx_out


def append_filter_date(ni: Union[dict, NewsItem, NewsMinimal]):
    """Keep only items dated within [NOW_BEFORE, NOW] (inclusive).

    Items whose date string cannot be parsed are filtered out."""
    global NOW_DT
    global NOW_BEFORE_DT
    ni_date = None
    try:
        if isinstance(ni, dict):
            ni_date = isoformat_date_to_datetime_obj(ni['date'][:10])
        elif isinstance(ni, NewsItem):
            ni_date = isoformat_date_to_datetime_obj(ni.date[:10])
        elif isinstance(ni, NewsMinimal):
            ni_date = isoformat_date_to_datetime_obj(ni.date[:10])
        if ni_date is None:
            raise RuntimeError(f"unhandled object {str(ni.__class__)}")
    except ValueError:
        # malformed date string -> exclude this item
        return False
    return NOW_BEFORE_DT <= ni_date and ni_date <= NOW_DT


def process_news_to_docx(
        ni_cls, front_url,
        html_dir_=html_dir, json_dir_=json_dir, docx_out_=docx_out,
        use_selenium_ng=False, append_filter=None, use_selenium_ni=False
):
    """Scrape one front-page URL of `ni_cls`, run the two NLP passes, and
    convert the result to a docx file."""
    ## OVERRIDING APPEND_FILTER DUE TO PROCESS MULTITHREADING failing to pickle the function
    if append_filter is None:
        append_filter = append_filter_date
    html_path, json_path, ng = NewsGroup.process(
        ni_cls, front_url,
        html_dir_=html_dir_, json_dir_=json_dir_,
        use_selenium_ng=use_selenium_ng, use_selenium_ni=use_selenium_ni
    )
    # Derive the intermediate/output file names from the scraped html name.
    fa, fb = os.path.splitext(os.path.basename(html_path))
    b = os.path.join(json_dir_, fa + "--nlp" + fb)
    b_applied = os.path.join(json_dir_, fa + "--nlp--applied" + fb)
    process_ng__json(json_path, b)
    process_nlp_applied__json(b, b_applied)
    docx_path = os.path.join(docx_out_, fa + ".docx")
    convert_ng_nlp_to_docx(b_applied, docx_path, append_filter=append_filter)


from multiprocessing import Process


def process_news_urls_to_docx__process(
        ni_cls,
        html_dir_=html_dir, json_dir_=json_dir, docx_out_=docx_out,
        use_selenium_ng=False, append_filter=None, use_selenium_ni=False
):
    """Yield one (unstarted) multiprocessing.Process per front-page URL of
    `ni_cls`.  `append_filter` is accepted but intentionally NOT forwarded
    (see inline comment about pickling)."""
    for front_url in ni_cls.URLS:
        # process_news_to_docx(
        #     ni_cls, front_url,
        #     html_dir_ = html_dir_,
        #     json_dir_ = json_dir_,
        #     docx_out_ = docx_out_,
        #     use_selenium_ng = use_selenium_ng,
        #     use_selenium_ni = use_selenium_ni,
        # )
        yield Process(
            target=process_news_to_docx,
            args=[ni_cls, front_url],
            kwargs={
                "html_dir_": html_dir_,
                "json_dir_": json_dir_,
                "docx_out_": docx_out_,
                "use_selenium_ng": use_selenium_ng,
                "use_selenium_ni": use_selenium_ni,
                # "append_filter": append_filter, #not passing append_filter due to process multithreading failing to pickle the function
            }
        )


from news_corpus_nlp import process_grouped_sorted

# Simple bounded worker pool built on multiprocessing.Process.
TASK_LIST: List[Process] = list()
TASK_LIST_MAX = 4


def handle_parallel_tasks_start(task: Process):
    """Start `task`; once more than TASK_LIST_MAX tasks are in flight,
    join them all and reset the pool."""
    global TASK_LIST
    global TASK_LIST_MAX
    task.start()
    TASK_LIST.append(task)
    if len(TASK_LIST) > TASK_LIST_MAX:
        handle_parallel_tasks_join()
        TASK_LIST = list()


def handle_parallel_tasks_join():
    """Block until every task currently in TASK_LIST has finished."""
    global TASK_LIST
    for t in TASK_LIST:
        t.join()


import pickle
from sklearn.naive_bayes import MultinomialNB

# Pre-trained PHL/rest-of-world classifier, loaded at import time.
# NOTE(review): unpickling is unsafe on untrusted files; mnb_bmw.pkl must
# come from a trusted source.
with open('mnb_bmw.pkl','rb') as fin:
    mnb_bmw: MultinomialNB = pickle.load(fin)


def predict_phl(s):
    # class 0 appears to mean "Philippines" -- confirm against training code
    return mnb_bmw.predict([s])[0] == 0


def predict_row(s):
    # class 1 appears to mean "rest of world" -- confirm against training code
    return mnb_bmw.predict([s])[0] == 1


def get_doc(ni: Union[dict, NewsItem, NewsMinimal]):
    """Concatenate header + summary into the classifier's input text."""
    ni_doc = None
    if isinstance(ni, dict):
        ni_doc = ni['header'] + ' ' + ni['summary']
    elif isinstance(ni, NewsItem):
        ni_doc = ni.header + ' ' + ni.summary
    elif isinstance(ni, NewsMinimal):
        ni_doc = ni.header + ' ' + ni.summary
    if ni_doc is None:
        raise RuntimeError(f"unhandled object {str(ni.__class__)}")
    return ni_doc


def append_filter_date__phl(ni: Union[dict, NewsItem, NewsMinimal]):
    """Date filter AND classified as PHL."""
    return append_filter_date(ni) and predict_phl(get_doc(ni))


def append_filter_date__row(ni: Union[dict, NewsItem, NewsMinimal]):
    """Date filter AND classified as rest-of-world."""
    return append_filter_date(ni) and predict_row(get_doc(ni))


from news_sources.news_cnbc import NewsItem_CNBC
from news_sources.news_reuters import NewsItem_Reuters
from news_sources.news_cnn import NewsItem_CNN
from news_sources.news_inquirer import NewsItem_Inquirer
from news_sources.news_gma import NewsItem_GMA
from news_sources.news_bworld import NewsItem_BWorld


def run(start_date=None, end_date=None, out_dir=None):
    """Full pipeline: scrape every configured source in parallel, then
    write the combined / PHL / rest-of-world compilation documents."""
    set_configuration(out_dir, end_date, start_date)
    start = datetime.datetime.now()
    hd = os.path.join(ROOT, NOW, "html")
    jd = os.path.join(ROOT, NOW, "json")
    dd = os.path.join(ROOT, NOW, "docx")
    dirs = [hd, jd, dd]
    ensure_dirs_exist(dirs)
    # (source class, selenium for front page?, selenium for items?)
    for ni_cls, use_selenium_ng, use_selenium_ni in [
        [NewsItem_GMA, True, False],
        [NewsItem_CNBC, False, False],
        [NewsItem_Reuters, False, False],
        [NewsItem_CNN, False, False],
        [NewsItem_Inquirer, False, False],
        [NewsItem_BWorld, False, True],
    ]:
        # process_news_urls_to_docx__process(
        #     ni_cls, html_dir_=hd, json_dir_=jd, docx_out_=dd,
        #     use_selenium_ng=use_selenium_ng,
        #     use_selenium_ni=use_selenium_ni,
        #     append_filter = append_filter_date
        # )
        for p in process_news_urls_to_docx__process(
                ni_cls, html_dir_=hd,
                json_dir_=jd, docx_out_=dd,
                use_selenium_ng=use_selenium_ng,
                use_selenium_ni=use_selenium_ni,
                append_filter = append_filter_date
        ):
            handle_parallel_tasks_start(p)
    # NOTE(review): indentation reconstructed from a collapsed source --
    # the join appears to run once after all sources are queued; confirm
    # against the original layout.
    handle_parallel_tasks_join()
    process_grouped_sorted(
        json_dir_=jd, docx_out_=dd,
        append_filter = append_filter_date
    )
    process_grouped_sorted(
        json_dir_=jd, docx_out_=dd, docx_fp="compiled_phl.docx",
        append_filter = append_filter_date__phl
    )
    process_grouped_sorted(
        json_dir_=jd, docx_out_=dd, docx_fp="compiled_row.docx",
        append_filter = append_filter_date__row
    )
    end = datetime.datetime.now()
    duration = (end-start).total_seconds()
    print(f"{duration}s ~{int(duration/60)}m ~{int(duration/60/60)}h")


if __name__ == "__main__":
    run()
<gh_stars>0 # -*- coding: utf-8 -*- """ This python script uses XGBoost classifier to detect guanine quadruplexes in DNA sequences. """ !pip install xgboost==1.5.1 from typing import List, Dict, Tuple from dataclasses import dataclass, asdict import requests import pandas as pd import numpy as np from time import sleep from xgboost import XGBClassifier class G4Conv(): _FIXED_WINDOW_SIZE: int = 30 _MERGING_INTERVAL_LENGTH: int = 15 def _sequence_convertor(self, *, sequence: str) -> np.array: """ Convert sequences NOTE: don't change cause tree is trained to use exactly this parameters :param sequence: input sequence for conversion :return: numpy array with converted windows """ converted_sequences = [] for i in range(0, len(sequence) - self._FIXED_WINDOW_SIZE): converted = [] for base in sequence[i:i+self._FIXED_WINDOW_SIZE]: if base == 'G': converted.append(-1) elif base == 'C': converted.append(1) else: converted.append(0) converted_sequences.append(converted) return np.array(converted_sequences) def _init_tree(self, *, model_path: str) -> XGBClassifier: """ Create model instance and load parameters from json model file :param model_path: path to file with model params in json :return: instance of gradient boosted tree """ xgb = XGBClassifier() xgb.load_model(model_path) return xgb def _predict(self, *, model: XGBClassifier, converted_sequences: np.array) -> List[int]: """ Return indexes with positive predictions :param model: :param converted_sequences: :return: """ results: List[int] = [] predictions = model.predict(converted_sequences) predictions = list(predictions) for index, prediction in enumerate(predictions): if bool(prediction): results.append(index) return results def _create_intervals(self, predicted_position: List[int]) -> List[int]: """ Create intervals used for merging :param predicted_position: :return: """ intervals = [(i, i+self._MERGING_INTERVAL_LENGTH) for i in predicted_position] return intervals def _merge_results(self, *, results: 
List[Tuple[int]]) -> List[Tuple[int]]: """ Return merged adjacent results from predict method :param results: :return: """ results = sorted(results, key=lambda x: x[0]) i = 0 for result in results: if result[0] > results[i][1]: i += 1 results[i] = result else: results[i] = (results[i][0], result[1]) return results[:i+1] def _filter_results(self, *, merged_results: List[Tuple[int]]) -> List[Tuple[int]]: """ Remove single occurance results from set :param merged_results: :return: """ filtered = [] for window in merged_results: if window[1] - window[0] > self._MERGING_INTERVAL_LENGTH: filtered.append(window) return filtered def _create_results(self, filtered_results, sequence) -> pd.DataFrame: """ Format filtered results into Pandas dataframe :param filtered_results: :return: """ df_data = {'Position': [position[0] for position in filtered_results], 'Sequence': [sequence[position[0]:position[1]+self._MERGING_INTERVAL_LENGTH] for position in filtered_results], 'Length': [position[1]+self._MERGING_INTERVAL_LENGTH-position[0] for position in filtered_results]} return pd.DataFrame(df_data) def analyse(self, sequence: str, model_path: str) -> pd.DataFrame: """ Analyse sequence for possible g4s :param sequence: :param model_path: :return: """ model = self._init_tree(model_path=model_path) converted_sequences = self._sequence_convertor(sequence=sequence) predicted_position = self._predict(model=model, converted_sequences=converted_sequences) merg_intervals = self._create_intervals(predicted_position=predicted_position) merged_results = self._merge_results(results=merg_intervals) filtered_results = self._filter_results(merged_results=merged_results) return self._create_results(filtered_results, sequence)
# <reponame>fColangelo/MORA-Multi-Objective-Routing-Algorithm
# -*- coding: utf-8 -*-
import sys
# BUG FIX: `sys.dont_write_bytecode` on its own is a no-op attribute read;
# it must be assigned True to actually suppress .pyc generation.
sys.dont_write_bytecode = True
import json
import os
import time

from geopy.geocoders import Nominatim  # https://github.com/geopy/geopy
from geopy.distance import great_circle

from service_flows.data_processor import get_mean_link_bw


def read_from_json(json_path):
    """
    Returns data read from json file found at 'json_path' location.

    Arguments:
        json_path {str} -- relative path of json file to be read.

    Returns:
        [dict] -- Dictionary with data read from json.
    """
    with open(json_path, 'r') as json_file:
        data = json.load(json_file)
    return data


def write_to_json(data, filename, json_path):
    """
    Write 'data' to json file named 'filename' at 'json_path' location.

    Arguments:
        data {dict} -- data to be written.
        filename {str} -- name of file to be created/overwritten
                          (the '.json' extension is appended here).
        json_path {str} -- relative path of the target directory.
    """
    # Get the complete path, then dump pretty-printed, key-sorted json.
    filepath = os.path.join(json_path, filename)
    with open(filepath + '.json', 'w+') as f:
        json.dump(data, f, sort_keys=True, indent=4)


def preprocess_metadata(topo_name):
    """
    Enrich topology metadata: add geo-coordinates (latitude/longitude) to
    network nodes, calculate length/latency of every link, attach average
    link usage, then persist the result.

    Args:
        topo_name (string): topology name (also the folder/file prefix).
    """
    # Get topology metadata
    current_dir = os.path.dirname(__file__)
    meta_path = os.path.join(current_dir, topo_name, topo_name + '_metadata.json')
    meta = read_from_json(meta_path)
    node_dict = meta['nodes']
    link_dict = meta['links']

    print(" *** PREPROCESSING METADATA *** ")
    print(" *** ADDING GEO-COORDINATES and LINK LATENCY ***")
    add_geo_coordinates(node_dict)
    calculate_latency(link_dict, node_dict)

    print(" *** SETTING AVERAGE LINK USAGE (ALU) *** ")
    set_average_link_usage(link_dict)

    # Save topology data in the proper folder
    save_topology_info(topo_name, node_dict, link_dict)


def set_average_link_usage(link_dict):
    """
    Attach an 'alu' (average link usage) value to every link.

    Links with a measured mean bandwidth get that value divided by 1e6 --
    presumably a bit/s to Mbit/s conversion; TODO confirm the units
    returned by get_mean_link_bw().  All other links default to 0.0.

    Args:
        link_dict (dict): link attributes keyed by link id.
    """
    mean_link_bw = get_mean_link_bw()
    for link in link_dict:
        link_dict[link]["alu"] = 0.0
        for link_bw in mean_link_bw:
            if link_dict[link]["_id"] == link_bw:
                link_dict[link]["alu"] = round(mean_link_bw[link_bw]/1e6, 0)


def add_geo_coordinates(node_dict):
    """
    Add geo-coordinates (latitude-longitude) to every network node by
    geocoding its PoP city/nation with Nominatim.

    Args:
        node_dict (dict): node attributes keyed by node id.

    Raises:
        ValueError: if a PoP location cannot be geocoded.
    """
    geolocator = Nominatim(user_agent='geant')
    for node in node_dict:
        # Extract city and nation values from node. This is where the PoP is.
        city = node_dict[node]['pop']['city']
        nation = node_dict[node]['pop']['nation']
        # Get geographical info on the PoP location.
        location = geolocator.geocode("{}, {}".format(city, nation))
        # ROBUSTNESS FIX: geocode() returns None on a miss; the original
        # crashed later with AttributeError.  Fail with a clear error instead.
        if location is None:
            raise ValueError("could not geocode PoP '{}, {}'".format(city, nation))
        # Set latitude and longitude info for the node.
        node_dict[node]['pop']['latitude'] = location.latitude
        node_dict[node]['pop']['longitude'] = location.longitude
        # Respect Nominatim's usage policy (max 1 request per second).
        time.sleep(1)


def calculate_latency(link_dict, node_dict):
    """
    Calculate length and latency of every network link.

    Args:
        link_dict (dict): link attributes keyed by link id.
        node_dict (dict): node attributes keyed by node id.
    """
    for link in link_dict:
        node1 = link_dict[link]['node1']
        node2 = link_dict[link]['node2']
        # NOTE(review): assumes both endpoints always exist in node_dict;
        # a missing endpoint would raise UnboundLocalError below.
        for node in node_dict:
            if node_dict[node]['_id'] == node1:
                node1_coordinates = (node_dict[node]['pop']['latitude'],
                                     node_dict[node]['pop']['longitude'])
            elif node_dict[node]['_id'] == node2:
                node2_coordinates = (node_dict[node]['pop']['latitude'],
                                     node_dict[node]['pop']['longitude'])
        # link length is calculated as the air-line distance between the two link endpoints
        distance = great_circle(node1_coordinates, node2_coordinates).kilometers  # Km
        # link latency (delay) is estimated taking into account the speed of
        # light in optical fiber medium (200000 Km/s)
        light_speed_in_fiber = 200000.0  # Km/s
        delay = 1000 * (distance/light_speed_in_fiber)  # ms
        delay = round(delay, 1)
        # OUTPUT
        link_dict[link]['delay'] = delay  # ms
        link_dict[link]['len'] = round(distance, 3)  # Km


def save_topology_info(topo_name, node_dict, link_dict):
    """
    Save topology info.

    'node_dict' and 'link_dict' are saved in folder <topo_name>/<topo_name>DB/
    respectively in the files nodes.json and links.json.
    """
    # Build up database_path
    current_dir = os.path.dirname(__file__)
    database_folder = topo_name + 'DB'
    database_path = os.path.join(current_dir, topo_name, database_folder)
    # If it doesn't exist, create it
    if not os.path.exists(database_path):
        os.mkdir(database_path)
    # Save nodes and links data
    write_to_json(node_dict, 'nodes', database_path)
    write_to_json(link_dict, 'links', database_path)
import logging
import os
import tkinter
import tkinter.filedialog

import yaml

from msquaredc import persistence
from msquaredc import utils
from msquaredc.ui.gui.widgets import ScaleWidget


class MainFrame(object):  # pragma no cover
    """Legacy top-level window: menu bar, fullscreen toggle, Q/A labels."""

    def __init__(self, widgets):
        self.widgets = widgets
        self.tk = tkinter.Tk()
        self.tk.title("msquaredc")
        self.q = self.a = None
        self.__is_fullscreen = False
        self.frame = tkinter.Frame(self.tk)
        self.frame.grid(row=0, column=0)
        self.init_keybindings()
        self.init_menubar()
        self.init_content()
        # Tracks the currently open file handles per category (None = none).
        self.open_files = {"save": None, "open": None}
        self.__input = None
        self.showResults("<No file loaded!>", "<Please open a file!>")
        self.act = None
        self.prev = []
        self.user = None

    def init(self):
        # Show NameDialog
        # validate output
        # draw gui
        pass

    def init_menubar(self):
        """Build the File menu (Open / Save / Exit)."""
        menubar = tkinter.Menu(self.tk)
        self.tk.config(menu=menubar)
        fileMenu = tkinter.Menu(menubar)
        fileMenu.add_command(label="Open", command=persistence.open_file)
        fileMenu.add_command(label="Save", command=self.save_file)
        fileMenu.add_separator()
        fileMenu.add_command(label="Exit", underline=0, command=self.onExit)
        menubar.add_cascade(label="File", underline=0, menu=fileMenu)

    def showResults(self, q, a):
        """Display question/answer text (creates fresh labels on each call)."""
        self.q = tkinter.Label(self.tk, text=q)
        self.q.grid(column=2, row=1, sticky=tkinter.NSEW, columnspan=1)
        self.a = tkinter.Label(self.tk, text=a)
        self.a.grid(column=2, row=2, sticky=tkinter.NSEW, columnspan=1)

    def init_content(self):
        """Draw the coding widgets and configure grid stretching."""
        for i, j in enumerate(self.widgets):
            j.draw(self.tk, i + 3)
        self.tk.grid_rowconfigure(0, weight=1)
        self.tk.grid_rowconfigure(len(self.widgets) + 3, weight=1)
        self.tk.grid_columnconfigure(0, weight=1)
        self.tk.grid_columnconfigure(len(self.widgets) + 3, weight=1)

    def init_keybindings(self):
        self.tk.bind("<F11>", self.toggle_fullscreen)
        self.tk.bind("<Escape>", self.end_fullscreen)

    def toggle_fullscreen(self, event=None):
        self.__is_fullscreen = not self.__is_fullscreen  # Just toggling the boolean
        self.tk.attributes('-fullscreen', self.__is_fullscreen)
        self.tk.overrideredirect(self.__is_fullscreen)
        return "break"

    def end_fullscreen(self, event=None):
        self.__is_fullscreen = False
        self.tk.attributes("-fullscreen", False)
        self.tk.overrideredirect(False)
        return "break"

    def save_file(self):
        """Ask for a target filename and keep the opened handle for writing."""
        filename = tkinter.filedialog.asksaveasfilename()
        try:
            file = open(filename, 'w')
            # BUG FIX: open_files["save"] is initialised to None, so the
            # original `.append(file)` always raised AttributeError.  Track
            # the handle (closing any previously opened one) instead.
            if self.open_files["save"] is not None:
                self.open_files["save"].close()
            self.open_files["save"] = file
        except FileNotFoundError:
            # dialog cancelled -> empty filename
            pass

    def onExit(self):
        """Close any tracked files and quit the main loop."""
        for category in self.open_files:
            # BUG FIX: entries start out as None; the original crashed on
            # `None.close()` when exiting before any file was opened.
            if self.open_files[category] is not None:
                self.open_files[category].close()
        self.tk.quit()

    def start(self):
        self.tk.mainloop()


class MainApplication(tkinter.Frame):  # pragma no cover
    """Root frame wiring status bar, toolbar, nav bar and the Main view."""

    def __init__(self, parent, *args, **kwargs):
        tkinter.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        self.callbacks = {}
        self.statusbar = StatusBar(self)
        self.toolbar = ToolBar(parent, self)
        self.navbar = NavBar(self)
        self.main = Main(self, "config.yml", "jerome.txt")
        self.statusbar.pack(side="bottom", fill="x")
        # self.toolbar.pack(side="top", fill="x")
        self.navbar.pack(side="bottom", anchor="se")
        self.main.pack(side="top", expand=True)  # fill removed

    def add_callback(self, name, function):
        """Register `function` to run when event `name` is triggered."""
        callbacks = self.get_callbacks(name)
        callbacks.append(function)
        self.callbacks[name] = callbacks

    def get_callbacks(self, name):
        return self.callbacks.get(name, [])

    def handle_callback(self, name):
        """Run every callback registered for `name`, or report it unhandled."""
        if self.get_callbacks(name):
            for i in self.get_callbacks(name):
                i()
        else:
            self.notice("".join(["The event ", name, " has been unhandled!"]))

    def notice(self, string):
        """Log `string` and mirror it in the status bar."""
        logging.log(logging.INFO, string)
        self.statusbar.variable.set(string)


class StatusBar(tkinter.Frame):  # pragma no cover
    """Sunken one-line status label driven by a StringVar."""

    def __init__(self, master):
        tkinter.Frame.__init__(self, master)
        self.variable = tkinter.StringVar()
        self.label = tkinter.Label(self, bd=1, relief=tkinter.SUNKEN, anchor=tkinter.W,
                                   textvariable=self.variable,
                                   font=('arial', 10, 'normal'))
        self.variable.set('Status Bar')
        self.label.pack(fill=tkinter.X)


class NavBar(tkinter.Frame):  # pragma no cover
    """Previous/Next buttons dispatching to the master's callback table."""

    def __init__(self, master):
        tkinter.Frame.__init__(self, master)
        self.next = tkinter.Button(text="Next >", command=lambda: master.handle_callback("next"))
        self.prev = tkinter.Button(text="< Previous", command=lambda: master.handle_callback("prev"))
        self.prev.grid(column=0, row=0, in_=self, pady=5)
        self.next.grid(column=1, row=0, in_=self, padx=5, pady=5)


class ToolBar(tkinter.Menu):  # pragma no cover
    """File menu dispatching open/save/exit to the handler's callbacks."""

    def __init__(self, master, handler):
        tkinter.Menu.__init__(self, master)
        master.config(menu=self)
        fileMenu = tkinter.Menu(self, tearoff=False)
        fileMenu.add_command(label="Open", command=lambda: handler.handle_callback("open"))
        fileMenu.add_command(label="Save", command=lambda: handler.handle_callback("save"))
        fileMenu.add_separator()
        fileMenu.add_command(label="Exit", underline=0, command=lambda: handler.handle_callback("exit"))
        self.add_cascade(label="File", underline=0, menu=fileMenu)


class Main(tkinter.Frame):  # pragma no cover
    """Coding view: shows one answer at a time plus its coding widgets."""

    def __init__(self, master, paper, data):
        tkinter.Frame.__init__(self, master)
        master.add_callback("next", lambda: Main.get_next(self))
        self.master = master
        # Get paper information
        ci = None
        with open(paper) as stream:
            # BUG FIX: yaml.load without an explicit Loader is unsafe and
            # raises on modern PyYAML; the config only needs plain data.
            ci = yaml.safe_load(stream)
        self.questions = ci["questions"]
        self.title = ci["title"]
        self.order = ci["order"]
        self.show = ci["show"]
        self.user = ci["user"]
        # Get Data
        self.data = persistence.obtain(data)
        self.infofield = InfoField(self)
        self.infofield.grid(row=0)
        self.infofield.title = self.title
        self.widgetfield = WidgetField(self, {})
        self.current_question_index = 0
        self.current_answerer_index = 0
        self.start()

    def run(self):
        questions = [i["text"] for i in self.questions]
        for i, question in enumerate(self.questions):
            # Collect answers to code
            # coded = []
            if "out{}.txt".format(i) in os.listdir(os.getcwd()):
                # coded = persistence.obtain("out{}.txt".format(i))
                pass
            for answerer in self.data:
                for column in answerer:
                    if column not in questions:
                        pass

    def start(self):
        """Pick the current question + answer and build the widget field."""
        self.infofield.question = self.questions[self.current_question_index]["text"]
        self.infofield.answer = self.data[self.current_answerer_index][self.infofield.question]
        self.widgetfield = WidgetField(self, self.questions[self.current_question_index]["coding"])
        self.widgetfield.show()
        self.widgetfield.grid(row=1)

    def get_next(self):
        """Persist the current coding, then advance to the next answer
        (and, when exhausted, the next question)."""
        # store previous
        used = [i["text"] for i in self.questions]
        sample = {i: self.data[self.current_answerer_index][i]
                  for i in self.data[self.current_answerer_index] if i not in used}
        sample["question"] = self.questions[self.current_question_index]["text"]
        sample.update(self.widgetfield.get_res_dict())
        print(sample)
        persistence.persist("out{}.txt".format(self.current_question_index), sample, "a+")
        self.current_answerer_index += 1
        if self.current_answerer_index >= len(self.data):
            self.current_answerer_index = 0
            self.current_question_index += 1
            # Check for resumables
        if self.current_question_index >= len(self.questions):
            self.infofield.question = "Finished"
            self.infofield.answer = "You may now leave"
        else:
            self.infofield.question = self.questions[self.current_question_index]["text"]
            if self.infofield.question in self.data[self.current_answerer_index]:
                self.infofield.answer = self.data[self.current_answerer_index][self.infofield.question]
            else:
                # Fall back to the column whose name best matches the
                # question text (longest common subsequence).
                best = -1
                element = None
                for i in self.data[self.current_answerer_index]:
                    res = utils.lcs(i, self.infofield.question)
                    if len(res) > best:
                        element = i
                        best = len(res)
                self.infofield.answer = self.data[self.current_answerer_index][element]
            self.widgetfield.grid_forget()
            self.widgetfield.destroy()
            self.widgetfield = WidgetField(self, self.questions[self.current_question_index]["coding"])
            self.widgetfield.show()
            self.widgetfield.grid(row=1)


class InfoField(tkinter.Frame):  # pragma no cover
    """Title / question / answer display with property-based accessors."""

    def __init__(self, master):
        font = ("serif", 16)
        tkinter.Frame.__init__(self, master)
        self.__titlevar = tkinter.StringVar(self, "Title")
        self.__title = tkinter.Label(master, textvariable=self.__titlevar,
                                     font=("Helvetica", 18), pady=10)
        self.__questionvar = tkinter.StringVar(self, "Question")
        self.__question = tkinter.Label(master, textvariable=self.__questionvar,
                                        anchor=tkinter.W, font=("serif", 16, "bold"), pady=5)
        self.__answervar = tkinter.StringVar(self, "Answer")
        self.__answer = tkinter.Label(master, textvariable=self.__answervar,
                                      anchor=tkinter.W, font=("Times", 16), pady=5,
                                      relief="groove")
        self.__lengthvar = tkinter.StringVar(self, "Length")
        self.__length = tkinter.Label(master, textvariable=self.__lengthvar,
                                      anchor=tkinter.W, font=font, pady=5)
        self.q = tkinter.Label(self, text="Question:", anchor=tkinter.E, font=font, pady=5)
        self.a = tkinter.Label(self, text="Answer:", anchor=tkinter.E, font=font, pady=10)
        self.length_label = tkinter.Label(self, text="Length:", anchor=tkinter.E, font=font, pady=5)
        self.__title.grid(in_=self, row=0, columnspan=2)
        self.q.grid(in_=self, column=0, row=1)
        self.__question.grid(in_=self, column=1, row=1)
        self.a.grid(in_=self, column=0, row=2)
        self.__answer.grid(in_=self, column=1, row=2)
        # self.l.grid(in_=self,column=0,row=3)
        # self.__length.grid(in_=self,column=1,row=3)

    @property
    def title(self):
        return self.__titlevar.get()

    @title.setter
    def title(self, value):
        self.__titlevar.set(value)

    @property
    def question(self):
        return self.__questionvar.get()

    @question.setter
    def question(self, value):
        self.__questionvar.set(value)

    @property
    def answer(self):
        return self.__answervar.get()

    @answer.setter
    def answer(self, value):
        # Setting the answer also refreshes the symbol/word counters.
        self.__answervar.set(value)
        self.__lengthvar.set(" ".join(["Symbols", str(len(self.answer)),
                                       "Words", str(len(self.answer.split(" ")))]))

    @property
    def length(self):
        return self.__lengthvar.get()

    @length.setter
    def length(self, value):
        self.__lengthvar.set(value)


class WidgetField(tkinter.Frame):  # pragma no cover
    """Grid of ScaleWidgets, one per coding criterion."""

    def __init__(self, master, criterias):
        tkinter.Frame.__init__(self, master)
        self.criterias = criterias
        self.widgets = []
        for i in criterias:
            self.widgets.append(ScaleWidget(master, i["criteria"], i["min"], i["max"]))

    def show(self):
        """Lay out each widget's value, label and remaining variables."""
        for i, element in enumerate(self.widgets):
            element.variables[0].grid(column=0, row=i, in_=self)
        for i, element in enumerate(self.widgets):
            element.label.grid(column=1, row=i, in_=self)
        for i, element in enumerate(self.widgets):
            index = 2
            for k, j in enumerate(element.variables[1:]):
                j.grid(column=index + k, row=i, in_=self)

    def get_res_dict(self):
        """Return {criterion label: selected value} for all widgets."""
        return {element.label.cget('text'): element.variables[0].get()
                for element in self.widgets}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): this module is Python 2 (print statements, dict.iteritems).
import web, config, json
import datetime
import time
import re
import base64
import sys
import os
import usbauth
import hashlib
import sqlite3
import json2db

# Global web.py session object; expected to be initialised by the app setup
# code outside this chunk.
session = None


def is_dict(d):
    """ additional template function, registered with web.template.render """
    return type(d) is dict


class status(web.HTTPError):
    """ custom http error handler """
    # Map of HTTP status code (as string) -> reason phrase.
    http_codes = {
        '100': 'Continue', '101': 'Switching Protocols',
        '200': 'OK', '201': 'Created', '202': 'Accepted',
        '203': 'Non-Authoritative Information', '204': 'No Content',
        '205': 'Reset Content', '206': 'Partial Content',
        '300': 'Multiple Choices', '301': 'Moved Permanently',
        '302': 'Found', '303': 'See Other', '304': 'Not Modified',
        '305': 'Use Proxy', '306': '(Unused)', '307': 'Temporary Redirect',
        '400': 'Bad Request', '401': 'Unauthorized', '402': 'Payment Required',
        '403': 'Forbidden', '404': 'Not Found', '405': 'Method Not Allowed',
        '406': 'Not Acceptable', '407': 'Proxy Authentication Required',
        '408': 'Request Timeout', '409': 'Conflict', '410': 'Gone',
        '411': 'Length Required', '412': 'Precondition Failed',
        '413': 'Request Entity Too Large', '414': 'Request-URI Too Long',
        '415': 'Unsupported Media Type', '416': 'Requested Range Not Satisfiable',
        '417': 'Expectation Failed',
        '500': 'Internal Server Error', '501': 'Not Implemented',
        '502': 'Bad Gateway', '503': 'Service Unavailable',
        '504': 'Gateway Timeout', '505': 'HTTP Version Not Supported'
    }

    def _http_code_lookup(self, code):
        """Translate a numeric code into 'NNN Reason'; unknown codes fall
        back to a catch-all 520."""
        out = None
        try:
            out = self.http_codes[str(code)]
            out = str(code) + " " + out
        except:  # NOTE(review): bare except also hides programming errors
            out = "520 Unknown Error"
        return out

    # class-level defaults (shadowed per instance in __init__)
    code = None
    status = None
    headers = None

    def __init__(self, code, data=None, headers=None):
        # Build default headers/body first, then let caller overrides win.
        self.code = code
        self.headers = {'Content-Type': 'text/html'}
        self.status = self._http_code_lookup(code)
        self.data = "<h1>" + str(self.status) + "</h1>"
        if headers != None:
            self.headers = headers
        if data != None:
            self.data = data

    def fail(self):
        """Log the response being raised, then delegate to web.HTTPError."""
        web.debug(self.status)
        web.debug(self.headers)
        web.debug(self.data)
        web.HTTPError.__init__(self, self.status, self.headers, self.data)


## page methods ################################################################

class webctx(object):
    """Base class for page handlers: shared auth check and rendering."""
    session = None
    no_auth = False  # subclasses set this True to skip authentication
    __authenticated = False

    def auth_check(self):
        """ check if user is authenticated """
        """
        try:
            session.uid
        except:
            web.debug("creating session")
            for e in session_default:
                session[e] = session_default[e]
        """
        #session = get_session()
        # check if we have a valid session
        if session != None and session.uid > 0:
            self.__authenticated = True
            return True
        # authentication for this request not required
        if self.no_auth == True:
            return True
        # check if the user has submitted credentials
        return None

    def render(self):
        return web.template.render('template', globals={
            'is_dict': is_dict
        })

    def error(self, code):
        st = status(code)
        content = self.render().error(code, st.status, "sss")
        st.fail()


class login(webctx):
    no_auth = True

    def GET(self):
        # Handle logout via ?logout=true, then redirect to the home page.
        # NOTE(review): indentation reconstructed from a collapsed source --
        # the redirect may belong inside or outside the `if`; confirm.
        #global session
        user_data = web.input(logout=False)
        web.debug(user_data.logout)
        if (user_data.logout == "true"):
            #session = session_default
            session.kill()
            raise web.seeother('/')

    """ authenticate user """
    def POST(self):
        # read posted json data
        data = web.data()
        credentials = json.loads(data)
        username = credentials["username"]
        password = credentials["password"]
        # check credentials against database
        # NOTE(review): unsalted MD5 is unsuitable for password storage --
        # a dedicated password hash (bcrypt/scrypt/argon2) should be used.
        pwhash = hashlib.md5(password).hexdigest()
        web.debug(pwhash)
        authdb = sqlite3.connect('../etc/fit.db')
        cur = authdb.cursor()
        # Parameterized query (safe against SQL injection).
        sql = 'SELECT id FROM user WHERE username=? AND password=?'
        web.debug(sql)
        check = cur.execute(sql, (username, pwhash))
        # NOTE(review): cursor objects are always truthy and rowcount is
        # unreliable for sqlite SELECTs; the real test is fetchone() below.
        web.debug(str(check) + " " + str(cur.rowcount))
        if check:
            row = cur.fetchone()
            if row:
                authdb.close()
                web.debug(row)
                #session = session_default
                session.uid = row[0]
                session.user = username
                # if we found one, exit
                return '{"success": true}'
        authdb.close()
        # if not found check against ldap
        # NOTE(review): hardcoded bind credentials ("<PASSWORD>" placeholder)
        # belong in configuration, not in source code.
        usbauth.init(
            authdn = "CN=MUANA,OU=GenericMove,OU=Users,OU=USB,DC=ms,DC=uhbs,DC=ch",
            authpw = "<PASSWORD>",
            baseDN = "ou=USB,dc=ms,dc=uhbs,dc=ch",
            host = "ms.uhbs.ch",
        )
        emp = usbauth.check(username, password)
        if (emp and emp["lockoutTime"] == None):
            #session = session_default
            session.uid = emp["employeeNumber"]
            session.user = username
            session.email = emp["email"]
            return '{"success": true}'
        return '{"success": false}'


class index(webctx):
    """ Serve index page """
    def GET(self):
        if not self.auth_check(): return self.render().login()
        #web.debug(auth_check)
        #web.debug(session)
        render = web.template.render('template')
        return render.index()
        #return out


class image(webctx):
    no_auth = True
    """ Serve image, this method requires not authentication """
    def GET(self):
        filename = "static/py.png"
        web.header('Content-Type', 'image/png')
        web.header('Content-Length', os.path.getsize(filename))
        import datetime
        t = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
        #strdate = t.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        web.http.lastmodified(t)
        # NOTE(review): PNG is binary data -- this should be opened "rb";
        # text mode only happens to work on POSIX Python 2.
        fp = open(filename, "r")
        yield fp.read()
        fp.close()
        return


class env(webctx):
    """ display environment variables """
    def GET(self):
        if not self.auth_check(): return self.render().login()
        out = {}
        for property, value in vars(web.ctx).iteritems():
            out[property] = value
        return self.render().env(out)


class json1(webctx):
    """ Serve json example page (using JQuery)"""
    def GET(self):
        if not self.auth_check(): return self.render().login()
        #render = web.template.render('template')
        return self.render().json1()
        #return out

    def POST(self):
        if not self.auth_check(): return self.render().login()
        post = web.input()
        web.header('Content-Type', 'application/json')
        try:
            i1 = int(post.int1)
            i2 = int(post.int2)
            res = i1 + i2
        except:  # NOTE(review): bare except; invalid input yields error json
            return '{"error": 1}'
        return '{"error": 0, "i1": '+str(i1)+', "i2": '+str(i2)+', "res": '+str(res)+'}'


class json2(webctx):
    """ Serve json example page (100% VanillaJS)"""
    def GET(self):
        if not self.auth_check(): return self.render().login()
        render = web.template.render('template')
        return render.json2()

    def POST(self):
        if not self.auth_check(): return self.render().login()
        web.header('Content-Type', 'application/json')
        try:
            post = json.loads(web.data())
            i1 = int(post["int1"])
            i2 = int(post["int2"])
            res = i1 + i2
            print str(res)
        except:  # NOTE(review): bare except; invalid input yields error json
            return '{"error": 1}'
        return '{"error": 0, "i1": '+str(i1)+', "i2": '+str(i2)+', "res": '+str(res)+'}'


class example(webctx):
    def GET(self):
        if not self.auth_check(): return self.render().login()
        render = web.template.render('template')
        return render.example()

    def POST(self):
        if not self.auth_check(): return self.render().login()
        web.header('Content-Type', 'application/json')
        try:
            pass  # NOTE(review): stray 'pass'
            post = json.loads(web.data())
            return json2db.insertcalories(session.uid, web.data())
        except:  # NOTE(review): bare except swallows all insert failures
            return '{"error": 1}'


class food(webctx):
    def GET(self):
        if not self.auth_check(): return self.render().login()
        render = web.template.render('template')
        return render.food()

    def POST(self):
        if not self.auth_check(): return self.render().login()
        web.header('Content-Type', 'application/json')
        try:
            pass  # NOTE(review): stray 'pass'
            post = json.loads(web.data())
            return json2db.insertcalories(session.uid, web.data())
        except:  # NOTE(review): bare except swallows all insert failures
            return '{"error": 1}'


class sport(webctx):
    def GET(self):
        if not self.auth_check(): return self.render().login()
        render = web.template.render('template')
        return render.sport()

    def POST(self):
        if not self.auth_check(): return self.render().login()
        web.header('Content-Type', 'application/json')
        try:
            pass  # NOTE(review): stray 'pass'
            post = json.loads(web.data())
            return json2db.insertcalories(session.uid, web.data())
        except:  # NOTE(review): bare except swallows all insert failures
            return '{"error": 1}'


class body(webctx):
    def GET(self):
        if not self.auth_check(): return self.render().login()
        render = web.template.render('template')
        return render.body()

    def POST(self):
        if not self.auth_check(): return self.render().login()
        web.header('Content-Type', 'application/json')
        try:
            pass  # NOTE(review): stray 'pass'
            post = json.loads(web.data())
            return json2db.insertbodymass(session.uid, web.data())
        except:  # NOTE(review): bare except swallows all insert failures
            return '{"error": 1}'
<filename>tools/replay/modeleval.py
# Replay a recorded drive log through the EKF and report log-likelihoods.
# NOTE(review): this module is Python 2 (print statements).
import numpy as np
import struct
import imgproc
import ekf

np.set_printoptions(suppress=True)


def replay_LL(fname, f):
    # EKF state x (first five entries: v, delta, ye, psie, kappa -- see the
    # unpack below) and covariance P.
    x, P = ekf.initial_state()
    t0 = None
    dt = 1.0 / 30  # nominal frame period until real timestamps are seen
    # gyrozs = []
    wheels_last = None
    frameno = 0
    last_throttle = 0
    last_steering = 0
    LLsum = np.zeros(3)  # accumulated (centerline, IMU, encoder) log-likelihoods
    steertrim = open("steertrim.txt", "w")
    likelihood = open("likelihood.txt", "w")
    trackcurv = open("trackcurv.txt", "w")
    s_coord = 0  # arc-length travelled along the track centerline
    while True:
        # Fixed-size frame: 55-byte header + packed camera bucket image.
        imgsiz = imgproc.bucketcount.shape[0] * imgproc.bucketcount.shape[1] * 3
        framesiz = 55 + imgsiz
        buf = f.read(framesiz)
        if len(buf) < framesiz:
            break
        header = struct.unpack("=IIIbbffffffBHHHHHHHH", buf[:55])
        if header[0] != framesiz:
            print "recording frame size doesn't match this parser"
            raise Exception("image should be %d bytes (%dx%d), is %d bytes" % (
                (framesiz,) + imgproc.bucketcount.shape + (header[0],)))
        # Header layout: size, secs, usecs, throttle, steering, accel[3],
        # gyro[3], servo, wheels[4], periods[4].
        tstamp = header[1] + header[2] / 1000000.
        throttle, steering = header[3:5]
        accel = np.float32(header[5:8])
        gyro = np.float32(header[8:11])
        servo = header[11]
        wheels = np.uint16(header[12:16])
        periods = np.uint16(header[16:20])
        frame = np.frombuffer(buf[55:], np.uint8).reshape(
            (imgproc.bucketcount.shape[0], imgproc.bucketcount.shape[1], 3))
        frame = np.int32(frame)
        frameno += 1
        if t0 is not None:
            dt = tstamp - t0
            print 'dt', dt
        t0 = tstamp
        # Control inputs are lagged/blended -- presumably to approximate
        # actuation delay; confirm against the on-car code.
        t = (last_throttle + 2*throttle) / 3.0
        s = last_steering
        #x, P = ekf.predict(x, P, dt, throttle / 127.0, steering / 127.0)
        x, P = ekf.predict(x, P, dt, t / 127.0, s / 127.0)
        last_throttle, last_steering = throttle, steering
        # print 'x_predict\n', x
        hv, th, B, yc, Rk = imgproc.detect_centerline(frame[:, :, 1])
        LL_center, LL_imu, LL_encoders = 0, 0, 0
        if B is not None:
            x, P, LL_center = ekf.update_centerline(x, P, B[0], B[1], B[2], yc, Rk)
            # print 'x_centerline\n', x
        # print 'accel', accel
        # print 'gyro', gyro[2]
        x, P, LL_imu = ekf.update_IMU(x, P, gyro[2])
        # print 'x_gyro\n', x
        # print 'wheels', wheels, 'periods', periods
        if wheels_last is not None:
            # wait WHAT? this isn't what i'm doing in the car..
            ds = np.sum(wheels - wheels_last) / 4.0  # mean tick delta over 4 wheels
            if ds != 0:
                x, P, LL_encoders = ekf.update_encoders(x, P, ds/dt, float(servo))
                # print 'x_encoders\n', x
            else:
                x, P, LL_encoders = ekf.update_encoders(x, P, 0, float(servo))
                # print 'x_encoders\n', x
        wheels_last = wheels
        print >>steertrim, x[0], x[1], last_steering, gyro
        print >>likelihood, LL_center, LL_imu, LL_encoders
        v, delta, ye, psie, kappa = x[:5]
        s_coord += dt * v / (1 - kappa * ye)
        # we should get our error estimate on s also..
        if B is not None:
            # measurement was taken ds meters in front of the car
            ds = yc * np.cos(x[2])
            print >>trackcurv, s_coord + ds, kappa, np.sqrt(P[4, 4])
        # gyrozs.append(gyro[2])
        # print 'gyro', gyro[2], 'mean', np.mean(gyrozs), 'std', np.std(gyrozs)
        print 'LL', LL_center, LL_imu, LL_encoders
        LLsum += [LL_center, LL_imu, LL_encoders]
    print 'final x\n', x
    print 'final P\n', np.diag(P)
    # NOTE(review): the three output files are never explicitly closed.
    return LLsum


if __name__ == '__main__':
    import sys
    # NOTE(review): sys.argv[1] is passed both as the name and as the
    # (re)opened file object; fname itself is unused inside replay_LL.
    print replay_LL(sys.argv[1], open(sys.argv[1]))
<filename>smartsheet/discussions.py # pylint: disable=C0111,R0902,R0913 # Smartsheet Python SDK. # # Copyright 2016 Smartsheet.com, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import import logging import os.path import six from . import fresh_operation class Discussions(object): """Class for handling Discussions operations.""" def __init__(self, smartsheet_obj): """Init Discussions with base Smartsheet object.""" self._base = smartsheet_obj self._log = logging.getLogger(__name__) def add_comment_to_discussion(self, sheet_id, discussion_id, comment_obj=None): """Add a Comment to the specified Discussion Args: sheet_id (int): Sheet ID discussion_id (int): Discussion ID comment_obj (Comment): Comment object. 
Returns: Result """ _op = fresh_operation('add_comment_to_discussion') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions/' + str( discussion_id) + '/comments' _op['json'] = comment_obj # filter before we go _op['json'].pre_request_filter = 'add_comment_to_discussion' expected = ['Result', 'Comment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response # pylint: disable=invalid-name def add_comment_to_discussion_with_attachment(self, sheet_id, discussion_id, comment, _file=None): """Add a Comment with an Attachment to the specified Discussion Args: sheet_id (int): Sheet ID discussion_id (int): Discussion ID comment (file): Comment object. _file (file): String or file stream object. Returns: Result """ if not all(val is not None for val in ['sheet_id', 'discussion_id', 'comment']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('add_comment_to_discussion_with_attachment') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions/' + str( discussion_id) + '/comments' _op['files'] = {} comment.pre_request_filter = 'add_comment_to_discussion_with_attachment' field_str = comment.to_json() _op['files']['comment'] = (None, six.StringIO(field_str), 'application/json') _op['files']['file'] = _file expected = ['Result', 'Comment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response # pylint: enable=invalid-name def create_discussion_on_row(self, sheet_id, row_id, discussion_obj=None): """Create a new Discussion on a Row. Args: sheet_id (int): Sheet ID row_id (int): Row ID discussion_obj (Discussion): Discussion object. 
Returns: Result """ _op = fresh_operation('create_discussion_on_row') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/rows/' + str( row_id) + '/discussions' _op['json'] = discussion_obj # filter before we go _op['json'].pre_request_filter = 'create_discussion_on_row' expected = ['Result', 'Discussion'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response # pylint: disable=invalid-name def create_discussion_on_row_with_attachment(self, sheet_id, row_id, discussion, _file=None): """Create a new Discussion on a Row with an attachment. Args: sheet_id (int): Sheet ID row_id (int): Row ID discussion (file): Discussion object. _file (file): String or file stream object. Returns: Result """ if not all(val is not None for val in ['sheet_id', 'row_id', 'discussion']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('create_discussion_on_row_with_attachment') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/rows/' + str( row_id) + '/discussions' _op['files'] = {} discussion.pre_request_filter = 'create_discussion_on_row_with_attachment' field_str = discussion.to_json() _op['files']['discussion'] = (None, six.StringIO(field_str), 'application/json') _op['files']['file'] = _file expected = ['Result', 'Discussion'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response # pylint: enable=invalid-name def create_discussion_on_sheet(self, sheet_id, discussion_obj=None): """Create a new Discussion on a Sheet. Args: sheet_id (int): Sheet ID discussion_obj (Discussion): Discussion object. 
Returns: Result """ _op = fresh_operation('create_discussion_on_sheet') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions' _op['json'] = discussion_obj # filter before we go _op['json'].pre_request_filter = 'create_discussion_on_sheet' expected = ['Result', 'Discussion'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response # pylint: disable=invalid-name def create_discussion_on_sheet_with_attachment(self, sheet_id, discussion, _file=None): """Create a new Discussion on a Sheet with an attachment. Args: sheet_id (int): Sheet ID discussion (file): Discussion object. _file (file): String or file stream object. Returns: Result """ _op = fresh_operation('create_discussion_on_sheet_with_attachment') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions' _op['files'] = {} discussion.pre_request_filter = 'create_discussion_on_sheet_with_attachment' field_str = discussion.to_json() _op['files']['discussion'] = (None, six.StringIO(field_str), 'application/json') _op['files']['file'] = _file expected = ['Result', 'Discussion'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response # pylint: enable=invalid-name def delete_discussion(self, sheet_id, discussion_id): """Delete the specified Discussion. Args: sheet_id (int): Sheet ID discussion_id (int): Discussion ID Returns: Result """ _op = fresh_operation('delete_discussion') _op['method'] = 'DELETE' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions/' + str( discussion_id) expected = 'Result' prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def delete_discussion_comment(self, sheet_id, comment_id): """Delete the specified Sheet Comment. Delete the specified Comment from the specified Sheet. 
Args: sheet_id (int): Sheet ID comment_id (int): Comment ID Returns: Result """ _op = fresh_operation('delete_discussion_comment') _op['method'] = 'DELETE' _op['path'] = '/sheets/' + str(sheet_id) + '/comments/' + str( comment_id) expected = 'Result' prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def get_all_discussions(self, sheet_id, include=None, page_size=100, page=1, include_all=False): """Get a list of all Discussions on the specified Sheet. Get a list of all Discussions associated with the specified Sheet (both sheet-level discussions and row-level discussions). Args: sheet_id (int): Sheet ID include (list[str]): A comma-separated list of optional elements to include in the response. Valid list values: comments, attachments page_size (int): The maximum number of items to return per page. Defaults to 100. page (int): Which page to return. Defaults to 1 if not specified. include_all (bool): If true, include all results (i.e. do not paginate). Returns: IndexResult """ _op = fresh_operation('get_all_discussions') _op['method'] = 'GET' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions' _op['query_params']['include'] = include _op['query_params']['pageSize'] = page_size _op['query_params']['page'] = page _op['query_params']['includeAll'] = include_all expected = ['IndexResult', 'Discussion'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def get_discussion(self, sheet_id, discussion_id): """Get the specified Discussion. 
Args: sheet_id (int): Sheet ID discussion_id (int): Discussion ID Returns: Discussion """ _op = fresh_operation('get_discussion') _op['method'] = 'GET' _op['path'] = '/sheets/' + str(sheet_id) + '/discussions/' + str( discussion_id) expected = 'Discussion' prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def get_discussion_comment(self, sheet_id, comment_id): """Get the specified Comment. Args: sheet_id (int): Sheet ID comment_id (int): Comment ID Returns: Comment """ _op = fresh_operation('get_discussion_comment') _op['method'] = 'GET' _op['path'] = '/sheets/' + str(sheet_id) + '/comments/' + str( comment_id) expected = 'Comment' prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def get_row_discussions(self, sheet_id, row_id, include=None, page_size=100, page=1, include_all=False): """Get a list of all Discussions associated with the specified Row. Args: sheet_id (int): Sheet ID row_id (int): Row ID include (list[str]): A comma-separated list of optional elements to include in the response. Valid list values: comments, attachments. (Attachments is effective only if comments is present, otherwise ignored.) page_size (int): The maximum number of items to return per page. Defaults to 100. page (int): Which page to return. Defaults to 1 if not specified. include_all (bool): If true, include all results (i.e. do not paginate). 
Returns: IndexResult """ _op = fresh_operation('get_row_discussions') _op['method'] = 'GET' _op['path'] = '/sheets/' + str(sheet_id) + '/rows/' + str( row_id) + '/discussions' _op['query_params']['include'] = include _op['query_params']['pageSize'] = page_size _op['query_params']['page'] = page _op['query_params']['includeAll'] = include_all expected = ['IndexResult', 'Discussion'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def update_comment(self, sheet_id, comment_id, comment_obj): """Update the specified Comment. Args: sheet_id (int): Sheet ID comment_id (int): Comment ID comment_obj (Comment): Comment object with the following attributes: * text (required) Returns: Result """ _op = fresh_operation('update_comment') _op['method'] = 'PUT' _op['path'] = '/sheets/' + str(sheet_id) + '/comments/' + str(comment_id) _op['json'] = comment_obj # filter before we go _op['json'].pre_request_filter = 'update_comment' expected = ['Result', 'Comment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_spectrogram_processor.ipynb (unless otherwise specified).

__all__ = ['parser', 'args', 'fpath', 'num_cpus', 'serial', 'n_fft', 'hop_length', 'n_mels', 'mel_n_fft',
           'mel_hop_length', 'frq', 'mel', 'compute_spectrogram', 'compute_mel_spectrogram', 'write_spectrogram',
           'load_compute_write', 'spgmtext', 'cputext', 't', 'path', 'path_spgm_frq', 'path_spgm_mel', 'audio_files']

# Cell
import argparse
from pathlib import Path
import json
import librosa
import numpy as np
from PIL import Image
import time
from fastprogress import progress_bar
from os import uname
from fastcore.parallel import parallel
from fastcore.parallel import num_cpus

# Cell
# ignore librosa pysoundfile load warning
import warnings
warnings.filterwarnings(
    action='ignore',
    category=UserWarning,
    module=r'librosa'
)

# Cell
parser = argparse.ArgumentParser()
parser.add_argument("--path", help="parameter filepath", nargs=1)
args = parser.parse_args()
# args = parser.parse_args(["--path", "sample parameters.json"])  # ← for testing

# Cell
fpath = Path(args.path[0])
assert fpath.exists(), (f"Filepath '{fpath}' not found.")
assert fpath.is_file(), (f"'{fpath}' is not a valid file.")

# Cell
with open(fpath, 'r') as file:
    parameters = json.load(file)
print(f"Parameters file \"{fpath}\" loaded.")

# Cell
num_cpus = num_cpus()  # func → int
# NOTE(review): os.uname does not exist on Windows, so this line raises there;
# the intent (per the comment) is serial-only off Linux — confirm platforms.
serial = True if uname().sysname.lower() != 'linux' else parameters['serial']  # no parallelization support on MacOS or Windows

n_fft = parameters['n_fft']
hop_length = parameters['hop_length']
n_mels = parameters['n_mels']

# Mel-spectrogram FFT settings fall back to the linear-frequency settings
# unless explicitly overridden.
mel_n_fft = parameters['mel_n_fft_override']
if mel_n_fft is None:  # FIX: identity comparison for None (was `== None`)
    mel_n_fft = n_fft
mel_hop_length = parameters['mel_hop_length_override']
if mel_hop_length is None:  # FIX: identity comparison for None (was `== None`)
    mel_hop_length = hop_length

if n_fft / n_mels < 2**3:
    print(f"Warning: N FFT ({n_fft}) is fewer than 3 powers of 2 greater than N Mels ({n_mels})."
          f" This may result in null values at lower mels in spectrograms.")

frq = parameters['frq']
mel = parameters['mel']

# Cell
def compute_spectrogram(wf, n_fft=1024, hop_length=512):
    """Linear-frequency power spectrogram of waveform `wf`, in dB."""
    return librosa.power_to_db(np.abs(librosa.stft(wf, n_fft=n_fft, hop_length=hop_length))**2)

def compute_mel_spectrogram(wf, sr=None, n_fft=1024, hop_length=512, n_mels=128):
    """Mel-scaled power spectrogram of waveform `wf`, in dB."""
    return librosa.power_to_db(librosa.feature.melspectrogram(wf, sr=sr, n_fft=n_fft,
                                                              hop_length=hop_length, n_mels=n_mels))

def write_spectrogram(spgm, filepath):
    """Rescale a float spectrogram to 0-255 uint8 and save as an image."""
    # convert fp32 array to 0-255 UInt8.
    shift = abs(spgm.min()) if spgm.min() < 0 else 0.0
    x = ((spgm + shift) * (255/(shift + spgm.max())))
    img = Image.fromarray(x.round().astype(np.uint8)[::-1])  # vertical flip
    img.save(filepath)

# Cell
def load_compute_write(audio_file, diagnostic_suffix="", frq=True, mel=True, **kwargs):
    """
    For parallelization: load one audio file, compute both spectrograms,
    and write whichever of FRQ/MEL is enabled.
    """
    wf, sr = librosa.load(audio_file, sr=None)
    spgm_frq = compute_spectrogram(wf, n_fft=n_fft, hop_length=hop_length)
    spgm_mel = compute_mel_spectrogram(wf, sr=sr, n_fft=mel_n_fft, hop_length=mel_hop_length, n_mels=n_mels)
    if frq:
        write_spectrogram(spgm_frq, path_spgm_frq/f"{audio_file.stem}{diagnostic_suffix}.png")
    if mel:
        write_spectrogram(spgm_mel, path_spgm_mel/f"{audio_file.stem}{diagnostic_suffix}.png")
    # return {'audio_file':audio_file.stem,'spgm_frq':spgm_frq,'spgm_mel':spgm_mel}

# Cell
spgmtext = f"Spectrograms: {['','FRQ'][frq]}{['',', '][frq and mel]}{['','MEL'][mel]}"
cputext = [f" with {num_cpus} core{['','s'][num_cpus>1]}", ""][serial]
print(f"Parallelization {['ON','OFF'][serial]}{cputext}. {spgmtext}")

# Cell
t = time.time()

# Cell
path = Path(parameters['path'])
path_spgm_frq = path/'spectrogram_frq'
path_spgm_frq.mkdir(parents=True, exist_ok=True)
path_spgm_mel = path/'spectrogram_mel'
path_spgm_mel.mkdir(parents=True, exist_ok=True)

# get list of audio files
audio_files = []
for audio_folder in parameters['audio_folders']:
    audio_path = path/audio_folder
    audio_files += [file for file in audio_path.iterdir() if file.suffix in parameters['codecs']]

# load waveform
if serial:
    pb = progress_bar(audio_files)
    for audio_file in pb:
        pb.comment = f"processing: {audio_file.parent.name}/{audio_file.name}"
        wf, sr = librosa.load(audio_file, sr=None)
        # compute freq & mel spectrograms
        spgm_frq = compute_spectrogram(wf, n_fft=n_fft, hop_length=hop_length)
        spgm_mel = compute_mel_spectrogram(wf, sr=sr, n_fft=mel_n_fft, hop_length=mel_hop_length, n_mels=n_mels)
        # write spectrograms
        if frq:
            write_spectrogram(spgm_frq, path_spgm_frq/f"{audio_file.stem}.png")
        if mel:
            write_spectrogram(spgm_mel, path_spgm_mel/f"{audio_file.stem}.png")
else:
    # BUG FIX: `frq` and `mel` were passed POSITIONALLY, so fastcore's
    # parallel() forwarded them to load_compute_write as `diagnostic_suffix`
    # and `frq` respectively, silently misrouting both flags. Pass them as
    # keywords so they reach the intended parameters.
    _ = parallel(load_compute_write, audio_files,
                 frq=frq, mel=mel,
                 n_fft=n_fft, hop_length=hop_length, n_mels=n_mels,
                 threadpool=True, n_workers=num_cpus)

# Cell
print(f"\n{len(audio_files)} files processed in {time.strftime('%H:%M:%S', time.gmtime(time.time() - t))}")
# from skimage.io import imread
import datetime
import os
import pickle
import sys
import math
from os import mkdir
# from torchsummary import summary
from os.path import join
from time import time

from memory_profiler import profile
import cv2
import matplotlib.pyplot as plt
import numpy as np
from src.data.utils.utils import get_mask
import src.data.utils.utils as utils
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torch.utils.data
from torchvision.utils import make_grid
import torchvision
from PIL import Image
from src.data.constants import (CV2_CONNECTED_ALGORITHM, DATA_DIR, IMG_DIR,
                                MASK_DIR, MEDIAN_FILTER_KERNEL,
                                NUMBER_CONNECTIVITY, SIMPLE_THRESHOLD)
from src.models.utils import transforms as T
from src.models.utils.model import get_instance_segmentation_model
from torch import optim
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from src.models.BetaCellDataset import BetaCellDataset, get_dataloaders

# import torch.optim as optim
# import torchvision


def train(model, device, opt, epochs, data_tr, data_val, time_str,
          hparam_dict, writer, save=False, write=False):
    '''Train `model` for `epochs` epochs with mixed precision.

    Args:
        model: Mask R-CNN-style model returning a dict of losses in train mode.
        device: torch device to run on.
        opt: optimizer over model.parameters().
        epochs (int): number of epochs.
        data_tr, data_val: training / validation dataloaders yielding
            (list_of_images, list_of_target_dicts) batches.
        time_str (str): unique run identifier used for checkpoints.
        hparam_dict (dict): hyperparameters; 'losses' is a ';'-joined list
            of loss names to optimize; 'image_size', 'batch_size' used here.
        writer: tensorboard SummaryWriter.
        save (bool): checkpoint the model every `save_every` epochs.
        write (bool): log validation image grids to tensorboard.

    Returns:
        (tot_train_losses, tot_val_losses) lists of per-epoch losses, or
        (train_loss, np.nan) if the training loss becomes NaN.
    '''
    torch.backends.cudnn.benchmark = True
    print(f'Training has begun for model: {time_str}')

    size = hparam_dict['image_size']
    batch_size = hparam_dict['batch_size']

    # TODO: send HPC support email
    # regarding why it runs out of memory but shows only 2 gb used
    scheduler = ReduceLROnPlateau(opt, threshold=0.01, verbose=True)
    log_every = 1    # How often to print out losses
    save_every = 10  # How often to save model
    scaler = GradScaler()
    loss_list = hparam_dict['losses'].split(';')
    # TODO: remove loss_classifier from loss_list and also objectness?

    # Transforms (built but applied in the dataloader / kept for reference)
    scale_jitter = T.ScaleJitter((size / 2, size / 2), scale_range=[0.7, 1.5])
    transforms_list = [T.RandomIoUCrop(), scale_jitter]
    transforms = T.Compose(transforms_list)

    tot_train_losses = []
    tot_val_losses = []

    for i, epoch in enumerate(range(epochs)):
        model.train()  # train mode
        tic = time()
        print(f'\n* Epoch {epoch+1}/{epochs}')

        x_val, y_val = next(iter(data_val))
        train_loss = 0
        for j, (x_batch, y_batch) in enumerate(data_tr):
            with autocast():
                x_batch = [x.to(device) for x in x_batch]
                y_batch = [{k: v.to(device) for k, v in t.items()}
                           for t in y_batch]
                x_batch = torch.stack(x_batch)
                x_batch.to(device)

                # set parameter gradients to zero
                opt.zero_grad(set_to_none=True)

                # forward pass; model returns a dict of named losses
                Y_pred = model(x_batch, y_batch)

                # Select only losses of interest
                losses = [value for loss, value in Y_pred.items()
                          if loss in loss_list]
                losses = sum(losses)

            # End of training loop for mini-batch
            scaler.scale(losses).backward()
            scaler.step(opt)
            scaler.update()

            # running mean of the summed loss over the epoch
            train_loss += float(losses / len(data_tr))

        # End training loop for epoch
        tot_train_losses.append(train_loss)
        writer.add_scalar('Training Loss', train_loss, epoch)

        # Validation
        val_losses = 0
        for x_val, y_val in data_val:
            with torch.no_grad(), autocast():
                model.train()  # train mode so the model reports losses
                x_val = [x.to(device) for x in x_val]
                y_val = [{k: v.to(device) for k, v in t.items()}
                         for t in y_val]
                val_losses += get_loss(model, loss_list, x_val, y_val)

                model.eval()
                y_hat = model(x_val)

                # Strip everything except masks
                y_hat, y_val = y_to_mask([y_hat, y_val])
                # Consolidate masks in batch
                y_hat = [get_mask(y) for y in y_hat]
                y_val = [get_mask(y) for y in y_val]
                x_val = [x.squeeze() for x in x_val]
        # end validation loop

        val_loss = float(val_losses) / len(data_val)
        tot_val_losses.append(val_loss)
        writer.add_scalar('Validation Loss', val_loss, epoch)

        if write:
            # last validation batch is used for the image grid
            image_grid = create_grid(x_val, y_val, y_hat, batch_size)
            writer.add_image(f'epoch_{epoch}', image_grid, epoch,
                             dataformats='NCHW')

        # NOTE(review): steps on the summed (not averaged) validation loss;
        # equivalent for plateau detection since len(data_val) is constant.
        scheduler.step(val_losses)

        if i % log_every == 0:
            # loss is nan; cancel training
            if math.isnan(float(train_loss)):
                print('training loss is nan\n')
                return train_loss, np.nan
            print(f'Training loss: {train_loss:.3f}')
            print(f'Validation loss: {val_loss:.3f}')

        elapsed = utils.time_report(tic, time())
        print('Time:', elapsed)

        # Save progress every `save_every` epochs
        # (dump_model writes into the module-level `save` directory)
        if (i + 1) % save_every == 0 and save:
            dump_model(model, time_str)

        # early stopping
        patience = 15  # number of epochs to wait for validation loss to improve
        if i > patience:
            early_thresh = 0.95  # ratio threshold at which to stop at
            val_prev = tot_val_losses[i - patience:i]
            val_now = val_loss
            if val_now / np.mean(val_prev) > early_thresh:
                print('Early stopping activated; stopping training.')
                break
        # end epoch

    return tot_train_losses, tot_val_losses


def create_grid(x_val, y_val, y_hat, batch_size):
    '''Stack inputs, predictions and targets into one tensorboard image grid.'''
    image_grid = make_grid(
        [*x_val, *y_hat, *y_val],
        nrow=batch_size, pad_value=220, padding=30)
    image_grid = image_grid.squeeze().unsqueeze(1)
    image_grid = (image_grid * 255).type(torch.uint8)
    return image_grid


def to_device(tensor_list, device):
    '''Moves data onto device.'''
    main_list = []
    for batch in tensor_list:
        if type(batch) == dict:
            batch = [{k: v.to(device) for k, v in t.items()}
                     for t in batch]
        elif type(batch) == torch.Tensor:
            batch = [x.to(device) for x in batch]
        main_list.append(batch)
    return main_list


def y_to_mask(ys):
    '''Strip target/prediction dicts down to their mask tensors.

    Accepts either a list of target dicts, or a list of such lists
    (e.g. [y_hat, y_val]); returns the same structure with each dict
    replaced by its squeezed 'masks' tensor.
    '''
    if type(ys) == dict:
        ys = [item['masks'] for item in ys]
        ys = [item.squeeze(1) for item in ys]
        return ys
    # BUG FIX: the original loop rebound a local `y` and returned `ys`
    # unchanged, so the dicts were never converted to masks. Build the
    # converted structure explicitly.
    out = []
    for y in ys:
        y = [item['masks'] for item in y]
        y = [item.squeeze(1) for item in y]
        out.append(y)
    return out


def get_loss(model, loss_list, x_val, y_val):
    '''Sum the model's named losses restricted to `loss_list`.'''
    output = model(x_val, y_val)
    losses = [value for loss, value in output.items()
              if loss in loss_list]
    losses = sum(losses)
    return losses


def debug_opencv_mask():
    # Visualize the masks generated by opencv for debugging purposes.
    # NOTE: relies on the module-level `save` path set in __main__.
    dataset = BetaCellDataset(DATA_DIR)
    img, target = dataset[500]
    plt.subplot(1, 2, 1)
    plt.imshow(img, cmap='viridis')
    plt.subplot(1, 2, 2)
    # Values in target['masks'] are either 0 or 1
    # so multiply by 255 for image pixel values
    plotted = torch.sum(target['masks'], dim=0) * 255
    plt.imshow(plotted, cmap='gray')
    plt.savefig(join(save, 'opencv_mask.jpg'))


def dump_model(model, time_str):
    # Make folder unique to this run in order to save model and loss.
    # NOTE: relies on the module-level `save` path set in __main__.
    utils.make_dir(save)
    # FIX: close the file handle deterministically (was a bare open()).
    with open(join(save, f'model_{time_str}.pkl'), 'wb') as fh:
        pickle.dump(model, fh)


def predict(model, data):
    '''Predict sigmoid outputs for every batch in `data`.

    NOTE: relies on the module-level `device` set in __main__.
    '''
    model.eval()  # testing mode
    Y_pred = [F.sigmoid(model(X_batch.to(device))) for X_batch, _ in data]
    return np.array(Y_pred)


# helper function
def select_n_random(n=100):
    '''
    Selects n random datapoints and their corresponding labels from a dataset
    source: https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html
    '''
    # BUG FIX: the original asserted len(data) == len(labels) before `data`
    # existed and `labels` was never defined — a guaranteed NameError.
    data = BetaCellDataset()
    perm = torch.randperm(len(data))
    return data[perm][:n]


def bce_loss(y_real, y_pred):
    '''Numerically direct binary cross-entropy on raw logits.'''
    return torch.mean(y_pred - y_real * y_pred
                      + torch.log(1 + torch.exp(-y_pred)))


if __name__ == '__main__':
    tic = time()

    # Environment variable for memory management
    alloc_conf = 'PYTORCH_CUDA_ALLOC_CONF'
    try:
        print(alloc_conf, os.environ[alloc_conf])
    except KeyError:
        print(alloc_conf, 'not found')

    conn = NUMBER_CONNECTIVITY
    algo = CV2_CONNECTED_ALGORITHM
    kernel = MEDIAN_FILTER_KERNEL
    threshold = SIMPLE_THRESHOLD

    device = utils.set_device()
    print(f'Running on {device}.')
    utils.setcwd(__file__)

    # our dataset has two classes only - background and person
    num_classes = 2

    # hyperparameters
    size = 1024
    batch_size = 8  # 2
    pretrained = True
    num_epochs = 30  # 500
    lr = 3.418507038460298e-06
    wd = 1.2957404400334042e-08
    beta1 = 0.2438598958001344
    beta2 = 0.9849760264270886
    n_img_select = 1101
    manual_select = 1
    img_filter = 'bilateral'

    data_tr, data_val = get_dataloaders(
        batch_size=batch_size, num_workers=4, resize=size,
        n_img_select=(n_img_select, 1), manual_select=(manual_select, 1),
        img_filter=img_filter)

    # get the model using our helper function
    model = get_instance_segmentation_model(pretrained=pretrained)
    model.to(device)

    # Unique identifier for newly saved objects
    now = datetime.datetime.now()
    time_str = f'{now.day:02d}_{now.month:02d}_{now.hour}H_{now.minute}M_{now.second}S'
    save = f'interim/run_{time_str}'

    params = [p for p in model.parameters() if p.requires_grad]
    opt = optim.Adam(params, lr=lr, weight_decay=wd, betas=[beta1, beta2])

    loss_list = ['loss_mask', 'loss_rpn_box_reg', 'loss_box_reg',
                 'loss_classifier', 'loss_objectness']
    # loss_list = ['loss_mask', 'loss_rpn_box_reg']

    hparam_dict = {
        'learning_rate': lr,
        'weight_decay': wd,
        'num_epochs': num_epochs,
        'optimizer': f'{opt}',
        'losses': ';'.join(loss_list),
        'image_size': size if size else 1024,
        'batch_size': batch_size,
        'pretrained': pretrained
    }
    # TODO: add "how many weakly annotated"
    # TODO: add /pred/ folder in addition to /runs/
    # so make a writer in predict_model which saves images
    # add_video í SummaryWriter

    description = f'''{time_str}
    Learning rate: {lr}
    Weight decay: {wd}
    Optimizer: {opt}
    Losses: {loss_list}
    '''

    with SummaryWriter(f'runs/{time_str}') as w:
        losses = train(model, device, opt, num_epochs, data_tr,
                       data_val, time_str, hparam_dict, w,
                       save=True, write=False)
        w.add_text('description', description)
        losses = np.array(losses).T
        # FIX: close the checkpoint file deterministically (was a bare open()).
        with open(join('interim', f'run_{time_str}',
                       f'model_{time_str}.pkl'), 'wb') as fh:
            pickle.dump(model, fh)

    elapsed = utils.time_report(tic, time())
    print('train_model finished after', elapsed)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

# ---------------------------------------------------------------------------
# API-version tables.  Membership in one of these sets is exactly what the
# generated per-version ``if/elif`` import chains used to encode: the set of
# API versions whose ``.v<ver>.operations`` module implements the operation.
# ---------------------------------------------------------------------------

# Bastion-related operations exist from 2019-09-01 onwards.
_BASTION_API_VERSIONS = frozenset([
    '2019-09-01', '2019-11-01', '2019-12-01', '2020-03-01', '2020-04-01',
    '2020-05-01', '2020-06-01', '2020-07-01', '2020-08-01', '2020-11-01',
    '2021-02-01', '2021-03-01',
])

# The VirtualWan VPN-profile operation additionally exists in 2019-08-01.
_VPN_PROFILE_API_VERSIONS = _BASTION_API_VERSIONS | frozenset(['2019-08-01'])

# supported_security_providers goes back to 2018-08-01.
_SECURITY_PROVIDERS_API_VERSIONS = _BASTION_API_VERSIONS | frozenset([
    '2018-08-01', '2018-10-01', '2018-11-01', '2018-12-01', '2019-02-01',
    '2019-04-01', '2019-06-01', '2019-07-01', '2019-08-01',
])

# check_dns_name_availability is the oldest operation, back to 2015-06-15.
_DNS_AVAILABILITY_API_VERSIONS = _SECURITY_PROVIDERS_API_VERSIONS | frozenset([
    '2015-06-15', '2016-09-01', '2016-12-01', '2017-03-01', '2017-06-01',
    '2017-09-01', '2017-10-01', '2017-11-01', '2018-01-01', '2018-02-01',
    '2018-04-01', '2018-06-01', '2018-07-01',
])


def _load_operation_class(api_version, supported_versions, operation_name):
    """Resolve the versioned ``NetworkManagementClientOperationsMixin`` class.

    Table-driven replacement for the generated per-version ``if/elif``
    chains: it is equivalent to
    ``from .v<api_version with dashes as underscores>.operations import
    NetworkManagementClientOperationsMixin`` for a supported version.
    The import stays lazy, exactly like the original branches.

    :param str api_version: The API version selected by the client profile.
    :param supported_versions: The set of API versions that implement
     *operation_name*.
    :param str operation_name: Name of the operation, used only for the
     error message.
    :raises ValueError: if *api_version* does not implement the operation;
     the message format matches the generated code byte-for-byte.
    """
    if api_version not in supported_versions:
        raise ValueError("API version {} does not have operation '{}'".format(
            api_version, operation_name))
    # Local import keeps module import side effects identical to the
    # generated code (no version module is loaded until it is needed).
    import importlib
    module_name = '.v{}.operations'.format(api_version.replace('-', '_'))
    module = importlib.import_module(module_name, package=__package__)
    return module.NetworkManagementClientOperationsMixin


class NetworkManagementClientOperationsMixin(object):
    """Multi-API mixin that forwards each operation to the implementation
    matching the client's negotiated API version."""

    def _build_mixin_instance(self, operation_class, api_version):
        """Instantiate *operation_class* wired to this client's pipeline,
        configuration and (de)serializers for *api_version*.

        This is the boilerplate that every generated operation repeated.
        """
        mixin_instance = operation_class()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        # Validation is delegated to the service, as in the generated code.
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance

    def begin_delete_bastion_shareable_link(
        self,
        resource_group_name,  # type: str
        bastion_host_name,  # type: str
        bsl_request,  # type: "_models.BastionShareableLinkListRequest"
        **kwargs  # type: Any
    ):
        """Deletes the Bastion Shareable Links for all the VMs specified in the request.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param bsl_request: Post request for all the Bastion Shareable Link endpoints.
        :type bsl_request: ~azure.mgmt.network.v2021_03_01.models.BastionShareableLinkListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        api_version = self._get_api_version('begin_delete_bastion_shareable_link')
        OperationClass = _load_operation_class(
            api_version, _BASTION_API_VERSIONS, 'begin_delete_bastion_shareable_link')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.begin_delete_bastion_shareable_link(
            resource_group_name, bastion_host_name, bsl_request, **kwargs)

    def begin_generatevirtualwanvpnserverconfigurationvpnprofile(
        self,
        resource_group_name,  # type: str
        virtual_wan_name,  # type: str
        vpn_client_params,  # type: "_models.VirtualWanVpnProfileParameters"
        **kwargs  # type: Any
    ):
        """Generates a unique VPN profile for P2S clients for VirtualWan and associated
        VpnServerConfiguration combination in the specified resource group.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN whose associated VpnServerConfigurations is
         needed.
        :type virtual_wan_name: str
        :param vpn_client_params: Parameters supplied to the generate VirtualWan VPN profile generation
         operation.
        :type vpn_client_params: ~azure.mgmt.network.v2021_03_01.models.VirtualWanVpnProfileParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnProfileResponse or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_03_01.models.VpnProfileResponse]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        api_version = self._get_api_version('begin_generatevirtualwanvpnserverconfigurationvpnprofile')
        OperationClass = _load_operation_class(
            api_version, _VPN_PROFILE_API_VERSIONS,
            'begin_generatevirtualwanvpnserverconfigurationvpnprofile')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.begin_generatevirtualwanvpnserverconfigurationvpnprofile(
            resource_group_name, virtual_wan_name, vpn_client_params, **kwargs)

    def begin_get_active_sessions(
        self,
        resource_group_name,  # type: str
        bastion_host_name,  # type: str
        **kwargs  # type: Any
    ):
        """Returns the list of currently active sessions on the Bastion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns an iterator like instance of either
         BastionActiveSessionListResult or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_03_01.models.BastionActiveSessionListResult]]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        api_version = self._get_api_version('begin_get_active_sessions')
        OperationClass = _load_operation_class(
            api_version, _BASTION_API_VERSIONS, 'begin_get_active_sessions')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.begin_get_active_sessions(
            resource_group_name, bastion_host_name, **kwargs)

    def begin_put_bastion_shareable_link(
        self,
        resource_group_name,  # type: str
        bastion_host_name,  # type: str
        bsl_request,  # type: "_models.BastionShareableLinkListRequest"
        **kwargs  # type: Any
    ):
        """Creates a Bastion Shareable Links for all the VMs specified in the request.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param bsl_request: Post request for all the Bastion Shareable Link endpoints.
        :type bsl_request: ~azure.mgmt.network.v2021_03_01.models.BastionShareableLinkListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns an iterator like instance of either
         BastionShareableLinkListResult or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_03_01.models.BastionShareableLinkListResult]]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        api_version = self._get_api_version('begin_put_bastion_shareable_link')
        OperationClass = _load_operation_class(
            api_version, _BASTION_API_VERSIONS, 'begin_put_bastion_shareable_link')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.begin_put_bastion_shareable_link(
            resource_group_name, bastion_host_name, bsl_request, **kwargs)

    def check_dns_name_availability(
        self,
        location,  # type: str
        domain_name_label,  # type: str
        **kwargs  # type: Any
    ):
        """Checks whether a domain name in the cloudapp.azure.com zone is available for use.

        :param location: The location of the domain name.
        :type location: str
        :param domain_name_label: The domain name to be verified. It must conform to the following
         regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
        :type domain_name_label: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DnsNameAvailabilityResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_03_01.models.DnsNameAvailabilityResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('check_dns_name_availability')
        OperationClass = _load_operation_class(
            api_version, _DNS_AVAILABILITY_API_VERSIONS, 'check_dns_name_availability')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.check_dns_name_availability(
            location, domain_name_label, **kwargs)

    def disconnect_active_sessions(
        self,
        resource_group_name,  # type: str
        bastion_host_name,  # type: str
        session_ids,  # type: "_models.SessionIds"
        **kwargs  # type: Any
    ):
        """Returns the list of currently active sessions on the Bastion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param session_ids: The list of sessionids to disconnect.
        :type session_ids: ~azure.mgmt.network.v2021_03_01.models.SessionIds
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BastionSessionDeleteResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_03_01.models.BastionSessionDeleteResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('disconnect_active_sessions')
        OperationClass = _load_operation_class(
            api_version, _BASTION_API_VERSIONS, 'disconnect_active_sessions')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.disconnect_active_sessions(
            resource_group_name, bastion_host_name, session_ids, **kwargs)

    def get_bastion_shareable_link(
        self,
        resource_group_name,  # type: str
        bastion_host_name,  # type: str
        bsl_request,  # type: "_models.BastionShareableLinkListRequest"
        **kwargs  # type: Any
    ):
        """Return the Bastion Shareable Links for all the VMs specified in the request.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param bsl_request: Post request for all the Bastion Shareable Link endpoints.
        :type bsl_request: ~azure.mgmt.network.v2021_03_01.models.BastionShareableLinkListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BastionShareableLinkListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_03_01.models.BastionShareableLinkListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('get_bastion_shareable_link')
        OperationClass = _load_operation_class(
            api_version, _BASTION_API_VERSIONS, 'get_bastion_shareable_link')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.get_bastion_shareable_link(
            resource_group_name, bastion_host_name, bsl_request, **kwargs)

    def supported_security_providers(
        self,
        resource_group_name,  # type: str
        virtual_wan_name,  # type: str
        **kwargs  # type: Any
    ):
        """Gives the supported security providers for the virtual wan.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN for which supported security providers are
         needed.
        :type virtual_wan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualWanSecurityProviders, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_03_01.models.VirtualWanSecurityProviders
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('supported_security_providers')
        OperationClass = _load_operation_class(
            api_version, _SECURITY_PROVIDERS_API_VERSIONS, 'supported_security_providers')
        mixin_instance = self._build_mixin_instance(OperationClass, api_version)
        return mixin_instance.supported_security_providers(
            resource_group_name, virtual_wan_name, **kwargs)
""" =============================== 05. Simulate beta modulated ERP =============================== This example demonstrates how event related potentials (ERP) are modulated by prestimulus beta events. Specifically, this example reproduces Figure 5 from Law et al. 2021 [1]_. To be consistent with the publication, the default network connectivity is altered. These modfications demonstrate a potential mechanism by which transient beta activity in the neocortex can suppress the perceptibility of sensory input. This suppression depends on the timing of the beta event, and the incoming sensory information. """ # Authors: <NAME> <<EMAIL>> from hnn_core import simulate_dipole, law_2021_model, jones_2009_model from hnn_core.viz import plot_dipole ############################################################################### # We begin by instantiating the network model from Law et al. 2021 [1]_. net = law_2021_model() ############################################################################### # The Law 2021 model is based on the network model described in # Jones et al. 2009 [2]_ with several important modifications. One of the most # significant changes is substantially increasing the rise and fall time # constants of GABAb-conductances on L2 and L5 pyramidal. Another important # change is the removal of calcium channels from basal dendrites and soma of # L5 pyramidal cells specifically. # We can inspect these properties with the ``net.cell_types`` attribute which # contains information on the biophysics and geometry of each cell. 
net_jones = jones_2009_model() jones_rise = net_jones.cell_types['L5_pyramidal'].synapses['gabab']['tau1'] law_rise = net.cell_types['L5_pyramidal'].synapses['gabab']['tau1'] print(f'GABAb Rise (ms): {jones_rise} -> {law_rise}') jones_fall = net_jones.cell_types['L5_pyramidal'].synapses['gabab']['tau2'] law_fall = net.cell_types['L5_pyramidal'].synapses['gabab']['tau2'] print(f'GABAb Fall (ms): {jones_fall} -> {law_fall}\n') print('Apical Dendrite Channels:') print(net.cell_types['L5_pyramidal'].sections['apical_1'].mechs.keys()) print("\nBasal Dendrite Channels ('ca' missing):") print(net.cell_types['L5_pyramidal'].sections['basal_1'].mechs.keys()) ############################################################################### # A major change to the Jones 2009 model is the addition of a # Martinotti-like recurrent tuft connection [3]_. This new connection # originates from L5 basket cells, and provides GABAa inhibition on # the distal dendrites of L5 pyramidal cells. print('Recurrent Tuft Connection') print(net.connectivity[16]) ############################################################################### # The remaining changes to the connectivity was the removal of an # L2_basket -> L5_pyramidal GABAa connection, and replacing it with GABAb. print('New GABAb connection') print(net.connectivity[15]) print('\nConnection Removed from Law Model') print(net_jones.connectivity[10]) ############################################################################### # To demonstrate sensory depression, we will add the drives necessary to # generate and ERP similar to # :ref:`evoked example <sphx_glr_auto_examples_plot_simulate_evoked.py>`, # but modified to reflect the parameters used in Law et al. 2021. # Specifically, we are considering the case where a tactile stimulus is # delivered at 150 ms. 25 ms later, the first input to sensory cortex arrives # as a proximal drive to the cortical column. 
# Proximal drive corresponds to
# projections from the direct thalamic nuclei. This is followed by one
# :term:`distal` drive representing projections from indirect thalamic nuclei,
# and a final late proximal drive. It is important to note that the parameter
# values for each are different from previous examples of the evoked response.
# This reflects the altered network dynamics due to the changes described
# above.


def add_erp_drives(net, stimulus_start):
    """Attach the three evoked drives that generate the ERP.

    Parameters
    ----------
    net : Network
        Network to which the drives are added (modified in place).
    stimulus_start : float
        Time (ms) of the tactile stimulus; drive onsets are offsets from it.

    Returns
    -------
    net : Network
        The same network instance, returned for convenience.
    """
    # Distal evoked drive (indirect thalamic projections), 70 ms post-stimulus
    weights_ampa_d1 = {'L2_basket': 0.0005, 'L2_pyramidal': 0.004,
                       'L5_pyramidal': 0.0005}
    weights_nmda_d1 = {'L2_basket': 0.0005, 'L2_pyramidal': 0.004,
                       'L5_pyramidal': 0.0005}
    syn_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
                     'L5_pyramidal': 0.1}
    net.add_evoked_drive(
        'evdist1', mu=70.0 + stimulus_start, sigma=0.0, numspikes=1,
        weights_ampa=weights_ampa_d1, weights_nmda=weights_nmda_d1,
        location='distal', synaptic_delays=syn_delays_d1, event_seed=4)

    # Two proximal drives (direct thalamic projections)
    weights_ampa_p1 = {'L2_basket': 0.002, 'L2_pyramidal': 0.0011,
                       'L5_basket': 0.001, 'L5_pyramidal': 0.001}
    syn_delays_prox = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
                       'L5_basket': 1., 'L5_pyramidal': 1.}

    # all NMDA weights are zero; pass None explicitly
    net.add_evoked_drive(
        'evprox1', mu=25.0 + stimulus_start, sigma=0.0, numspikes=1,
        weights_ampa=weights_ampa_p1, weights_nmda=None,
        location='proximal', synaptic_delays=syn_delays_prox, event_seed=4)

    # Second proximal evoked drive. NB: only AMPA weights differ from first
    weights_ampa_p2 = {'L2_basket': 0.005, 'L2_pyramidal': 0.005,
                       'L5_basket': 0.01, 'L5_pyramidal': 0.01}
    # all NMDA weights are zero; pass None explicitly for consistency with
    # evprox1 (previously this relied on the keyword's default of None)
    net.add_evoked_drive(
        'evprox2', mu=135.0 + stimulus_start, sigma=0.0, numspikes=1,
        weights_ampa=weights_ampa_p2, weights_nmda=None,
        location='proximal', synaptic_delays=syn_delays_prox, event_seed=4)

    return net


###############################################################################
# A beta event is created by inducing simultaneous proximal and distal
# drives. The input is just strong enough to evoke spiking in the
# L2 basket cells. This spiking causes GABAb mediated inhibition
# of the network, and ultimately suppressed sensory detection.


def add_beta_drives(net, beta_start):
    """Attach the bursty proximal and distal drives that generate a beta event.

    Parameters
    ----------
    net : Network
        Network to which the drives are added (modified in place).
    beta_start : float
        Onset time (ms) of the beta event; both drives stop 50 ms later.

    Returns
    -------
    net : Network
        The same network instance, returned for convenience.
    """
    # Distal Drive
    weights_ampa_d1 = {'L2_basket': 0.00032, 'L2_pyramidal': 0.00008,
                       'L5_pyramidal': 0.00004}
    syn_delays_d1 = {'L2_basket': 0.5, 'L2_pyramidal': 0.5,
                     'L5_pyramidal': 0.5}
    net.add_bursty_drive(
        'beta_dist', tstart=beta_start, tstart_std=0., tstop=beta_start + 50.,
        burst_rate=1., burst_std=10., numspikes=2, spike_isi=10,
        n_drive_cells=10, location='distal', weights_ampa=weights_ampa_d1,
        synaptic_delays=syn_delays_d1, event_seed=20)

    # Proximal Drive
    weights_ampa_p1 = {'L2_basket': 0.00004, 'L2_pyramidal': 0.00002,
                       'L5_basket': 0.00002, 'L5_pyramidal': 0.00002}
    syn_delays_p1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
                     'L5_basket': 1.0, 'L5_pyramidal': 1.0}
    net.add_bursty_drive(
        'beta_prox', tstart=beta_start, tstart_std=0., tstop=beta_start + 50.,
        burst_rate=1., burst_std=20., numspikes=2, spike_isi=10,
        n_drive_cells=10, location='proximal', weights_ampa=weights_ampa_p1,
        synaptic_delays=syn_delays_p1, event_seed=20)

    return net


###############################################################################
# We can now use our functions to create three distinct simulations:
# 1) beta event only, 2) ERP only, and 3) beta event + ERP.
# Beta event at 50 ms; tactile stimulus at 125 ms (drive onsets are offsets
# from these, see add_erp_drives / add_beta_drives).
beta_start, stimulus_start = 50.0, 125.0

# Three independent copies of the Law-model network, one per condition.
net_beta = net.copy()
net_beta = add_beta_drives(net_beta, beta_start)

net_erp = net.copy()
net_erp = add_erp_drives(net_erp, stimulus_start)

net_beta_erp = net_beta.copy()
net_beta_erp = add_erp_drives(net_beta_erp, stimulus_start)

###############################################################################
# And finally we simulate. Note that the default simulation time has been
# increased to 400 ms to observe the long time course over which beta events
# can influence sensory input to the cortical column.
dpls_beta = simulate_dipole(net_beta, tstop=400)
dpls_erp = simulate_dipole(net_erp, tstop=400)
dpls_beta_erp = simulate_dipole(net_beta_erp, tstop=400)

###############################################################################
# By inspecting the activity during the beta event, we can see that spiking
# occurs exclusively at 50 ms, the peak of the gaussian distributed proximal
# and distal inputs. This spiking activity leads to sustained GABAb mediated
# inhibition of the L2 and L5 pyramidal cells. One effect of this inhibition
# is an asymmetric beta event with a long positive tail.
import matplotlib.pyplot as plt
import numpy as np

fig, axes = plt.subplots(4, 1, sharex=True, figsize=(7, 7),
                         constrained_layout=True)
net_beta.cell_response.plot_spikes_hist(ax=axes[0], show=False)
axes[0].set_title('Beta Event Generation')
plot_dipole(dpls_beta, ax=axes[1], layer='agg', tmin=1.0, show=False)
net_beta.cell_response.plot_spikes_raster(ax=axes[2], show=False)
axes[2].set_title('Spike Raster')

# Frequencies of interest for the time-frequency plot: 10-59 Hz in 1 Hz steps
# (the comment previously claimed 1-40 Hz, which did not match the code).
freqs = np.arange(10., 60., 1.)
dpls_beta[0].plot_tfr_morlet(freqs, n_cycles=7, ax=axes[3])

###############################################################################
# Next we will inspect what happens when a sensory stimulus is delivered 75 ms
# after a beta event.
# Note that the delay time for a tactile stimulus at the
# hand to arrive at the cortex is roughly 25 ms, which means the first proximal
# input to the cortical column occurs ~100 ms after the beta event.
dpls_beta_erp[0].smooth(45)  # smooth the dipole for display (window arg per hnn-core Dipole.smooth — assumed ms; confirm)

fig, axes = plt.subplots(3, 1, sharex=True, figsize=(7, 7),
                         constrained_layout=True)
plot_dipole(dpls_beta_erp, ax=axes[0], layer='agg', tmin=1.0, show=False)
axes[0].set_title('Beta Event + ERP')
net_beta_erp.cell_response.plot_spikes_hist(ax=axes[1], show=False)
axes[1].set_title('Input Drives Histogram')
net_beta_erp.cell_response.plot_spikes_raster(ax=axes[2], show=False)
axes[2].set_title('Spike Raster')

###############################################################################
# To help understand the effect of beta mediated inhibition on the response to
# incoming sensory stimuli, we can compare the ERP and spiking activity due to
# sensory input with and without a beta event.
# The sustained inhibition of the network ultimately depresses
# the sensory response which is associated with a reduced ERP amplitude.
dpls_erp[0].smooth(45)  # same smoothing as the beta+ERP dipole so traces are comparable

fig, axes = plt.subplots(3, 1, sharex=True, figsize=(7, 7),
                         constrained_layout=True)
# Overlay both conditions on the top axis.
plot_dipole(dpls_beta_erp, ax=axes[0], layer='agg', tmin=1.0, show=False)
plot_dipole(dpls_erp, ax=axes[0], layer='agg', tmin=1.0, show=False)
axes[0].set_title('Beta ERP Comparison')
axes[0].legend(['ERP + Beta', 'ERP'])
net_beta_erp.cell_response.plot_spikes_raster(ax=axes[1], show=False)
axes[1].set_title('Beta + ERP Spike Raster')
net_erp.cell_response.plot_spikes_raster(ax=axes[2], show=False)
axes[2].set_title('ERP Spike Raster')
plt.show()

###############################################################################
# References
# ----------
# .. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
#        <NAME>., <NAME>., & <NAME>. (2021). Thalamocortical
#        mechanisms regulating the relationship between transient beta events
#        and human tactile perception. BioRxiv, 2021.04.16.440210.
# https://doi.org/10.1101/2021.04.16.440210 # .. [2] <NAME>., <NAME>., <NAME>., <NAME>., # <NAME>., & <NAME>. (2009). Quantitative Analysis and # Biophysically Realistic Neural Modeling of the MEG Mu Rhythm: # Rhythmogenesis and Modulation of Sensory-Evoked Responses. Journal of # Neurophysiology, 102(6), 3554–3572. # https://doi.org/10.1152/jn.00535.2009 # .. [3] <NAME>., & <NAME>. (2007). Disynaptic Inhibition between # Neocortical Pyramidal Cells Mediated by Martinotti Cells. Neuron, # 53(5), 735–746. https://doi.org/10.1016/j.neuron.2007.02.012
# https://www.nayuki.io/res/number-theoretic-transform-integer-dft/numbertheoretictransform.py
#
# Number-theoretic transform library (Python 2, 3)
#
# Copyright (c) 2017 Project Nayuki
# All rights reserved. Contact Nayuki for licensing.
# https://www.nayuki.io/page/number-theoretic-transform-integer-dft
#

import itertools
import numbers


# ---- High-level NTT functions ----

def find_params_and_transform(invec, minmod):
    """Find suitable NTT parameters for invec and apply the forward transform.

    All input values must be integers in the range [0, minmod). Returns a
    tuple (outvec, root, mod) where mod is a prime >= minmod and root is a
    primitive len(invec)-th root of unity modulo mod.
    """
    check_int(minmod)
    mod = find_modulus(len(invec), minmod)
    root = find_primitive_root(len(invec), mod - 1, mod)
    return (transform(invec, root, mod), root, mod)


def transform(invec, root, mod):
    """Return the forward number-theoretic transform of invec.

    root must be a primitive len(invec)-th root of unity under mod. This is
    the naive O(n^2) summation; see transform_radix_2 for the O(n log n)
    in-place variant restricted to power-of-2 lengths.
    """
    check_int(root)
    check_int(mod)
    # Error messages added: the original raised bare ValueError() with no
    # diagnostic, making failures hard to attribute.
    if len(invec) >= mod:
        raise ValueError("Vector length must be less than the modulus")
    if not all((0 <= val < mod) for val in invec):
        raise ValueError("All vector elements must be in the range [0, mod)")
    if not (1 <= root < mod):
        raise ValueError("Root must be in the range [1, mod)")
    outvec = []
    for i in range(len(invec)):
        temp = 0
        for (j, val) in enumerate(invec):
            # Three-argument pow performs fast modular exponentiation.
            temp += val * pow(root, i * j, mod)
            temp %= mod
        outvec.append(temp)
    return outvec


def inverse_transform(invec, root, mod):
    """Return the inverse NTT: transform with root^-1, then scale by n^-1."""
    outvec = transform(invec, reciprocal(root, mod), mod)
    scaler = reciprocal(len(invec), mod)
    return [(val * scaler % mod) for val in outvec]


def transform_radix_2(vector, root, mod):
    """Compute the forward NTT of vector in place (Cooley-Tukey radix-2).

    root must be a primitive len(vector)-th root of unity under mod, and
    len(vector) must be a power of 2.
    """
    n = len(vector)
    levels = n.bit_length() - 1
    if 1 << levels != n:
        raise ValueError("Length is not a power of 2")

    # Precompute twiddle factors root^0 .. root^(n/2 - 1).
    powtable = []
    temp = 1
    for i in range(n // 2):
        powtable.append(temp)
        temp = temp * root % mod

    def reverse(x, bits):
        # Reverse the low `bits` bits of x (bit-reversal permutation index).
        y = 0
        for i in range(bits):
            y = (y << 1) | (x & 1)
            x >>= 1
        return y

    # Bit-reversal permutation so butterflies can run in natural order.
    for i in range(n):
        j = reverse(i, levels)
        if j > i:
            vector[i], vector[j] = vector[j], vector[i]

    # Iterative butterfly passes, doubling the sub-transform size each time.
    size = 2
    while size <= n:
        halfsize = size // 2
        tablestep = n // size
        for i in range(0, n, size):
            k = 0
            for j in range(i, i + halfsize):
                l = j + halfsize
                left = vector[j]
                right = vector[l] * powtable[k]
                vector[j] = (left + right) % mod
                vector[l] = (left - right) % mod
                k += tablestep
        size *= 2


def circular_convolve(vec0, vec1):
    """Return the circular convolution of two equal-length vectors of
    non-negative integers, computed exactly via the NTT.

    The working modulus is chosen so the true convolution values (each at
    most maxval^2 * n) are representable without wrap-around.
    """
    if not (0 < len(vec0) == len(vec1)):
        raise ValueError("Vectors must be non-empty and of equal length")
    if any((val < 0) for val in itertools.chain(vec0, vec1)):
        raise ValueError("All values must be non-negative")
    maxval = max(val for val in itertools.chain(vec0, vec1))
    minmod = maxval ** 2 * len(vec0) + 1
    temp0, root, mod = find_params_and_transform(vec0, minmod)
    temp1 = transform(vec1, root, mod)
    # Pointwise product in the transform domain == convolution in time domain.
    temp2 = [(x * y % mod) for (x, y) in zip(temp0, temp1)]
    return inverse_transform(temp2, root, mod)


# ---- Mid-level number theory functions for NTT ----

def find_modulus(veclen, minimum):
    """Return the smallest prime mod = i * veclen + 1 with mod >= minimum.

    Dirichlet's theorem guarantees such a prime exists, although the search
    may examine arbitrarily large candidates.
    """
    check_int(veclen)
    check_int(minimum)
    if veclen < 1 or minimum < 1:
        raise ValueError("veclen and minimum must be positive")
    start = (minimum - 1 + veclen - 1) // veclen  # ceil((minimum - 1) / veclen)
    for i in itertools.count(max(start, 1)):
        n = i * veclen + 1
        assert n >= minimum
        if is_prime(n):
            return n


def find_generator(totient, mod):
    """Return an arbitrary generator of the multiplicative group modulo mod.

    totient must equal the Euler phi function of mod. If mod is prime, an
    answer must exist.
    """
    check_int(totient)
    check_int(mod)
    if not (1 <= totient < mod):
        raise ValueError("totient must be in the range [1, mod)")
    for i in range(1, mod):
        if is_generator(i, totient, mod):
            return i
    raise ValueError("No generator exists")


def find_primitive_root(degree, totient, mod):
    """Return an arbitrary primitive degree-th root of unity modulo mod.

    totient must be a multiple of degree. If mod is prime, an answer exists.
    """
    check_int(degree)
    check_int(totient)
    check_int(mod)
    if not (1 <= degree <= totient < mod):
        raise ValueError("Expected 1 <= degree <= totient < mod")
    if totient % degree != 0:
        raise ValueError("totient must be a multiple of degree")
    gen = find_generator(totient, mod)
    root = pow(gen, totient // degree, mod)
    assert 0 <= root < mod
    return root


def is_generator(val, totient, mod):
    """Test whether val generates the multiplicative group modulo mod.

    totient must equal phi(mod). val is a generator iff val^totient = 1 and
    val^(totient/p) != 1 for every prime p dividing totient.
    """
    check_int(val)
    check_int(totient)
    check_int(mod)
    if not (0 <= val < mod):
        raise ValueError("val must be in the range [0, mod)")
    if not (1 <= totient < mod):
        raise ValueError("totient must be in the range [1, mod)")
    pf = unique_prime_factors(totient)
    return pow(val, totient, mod) == 1 and all((pow(val, totient // p, mod) != 1) for p in pf)


def is_primitive_root(val, degree, mod):
    """Test whether val is a primitive degree-th root of unity modulo mod.

    In other words: val^degree = 1 (mod mod), and val^k != 1 for
    each 1 <= k < degree (checked via the prime factors of degree).
    """
    check_int(val)
    check_int(degree)
    check_int(mod)
    if not (0 <= val < mod):
        raise ValueError("val must be in the range [0, mod)")
    if not (1 <= degree < mod):
        raise ValueError("degree must be in the range [1, mod)")
    pf = unique_prime_factors(degree)
    return pow(val, degree, mod) == 1 and all((pow(val, degree // p, mod) != 1) for p in pf)


# ---- Low-level common number theory functions ----

def reciprocal(n, mod):
    """Return the multiplicative inverse of n modulo mod.

    The inverse x satisfies 0 <= x < mod and (x * n) % mod = 1; it exists
    iff gcd(n, mod) = 1 (extended Euclidean algorithm).
    """
    check_int(n)
    check_int(mod)
    if not (0 <= n < mod):
        raise ValueError("n must be in the range [0, mod)")
    x, y = mod, n
    a, b = 0, 1
    while y != 0:
        a, b = b, a - x // y * b
        x, y = y, x % y
    if x == 1:
        return a % mod
    else:
        raise ValueError("Reciprocal does not exist")


def unique_prime_factors(n):
    """Return the distinct prime factors of n >= 1 in ascending order.

    For example, unique_prime_factors(60) = [2, 3, 5].
    """
    check_int(n)
    if n < 1:
        raise ValueError("n must be at least 1")
    result = []
    i = 2
    end = sqrt(n)
    while i <= end:
        if n % i == 0:
            n //= i
            result.append(i)
            while n % i == 0:
                n //= i
            end = sqrt(n)  # n shrank, so the trial-division bound shrinks too
        i += 1
    if n > 1:
        result.append(n)  # leftover n is itself prime
    return result


def is_prime(n):
    """Test primality of n >= 2 by trial division up to floor(sqrt(n))."""
    check_int(n)
    if n <= 1:
        raise ValueError("n must be at least 2")
    return all((n % i != 0) for i in range(2, sqrt(n) + 1))


def sqrt(n):
    """Return floor(sqrt(n)) for n >= 0, via bitwise binary search."""
    check_int(n)
    if n < 0:
        raise ValueError("n must be non-negative")
    i = 1
    while i * i <= n:
        i *= 2
    result = 0
    while i > 0:
        if (result + i) ** 2 <= n:
            result += i
        i //= 2
    return result


def check_int(n):
    """Return silently if n is an integer; otherwise raise TypeError."""
    if not isinstance(n, numbers.Integral):
        raise TypeError("Expected an integer")
<gh_stars>1-10
# OPERA electromagnetic-shower preprocessing: reads basetrack data from a
# ROOT file, regroups tracks into "bricks", builds k-NN / radius graphs with
# a custom (Cython) distance metric, and saves torch_geometric Data objects.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
import networkx as nx
import uproot
from collections import deque

# !cd tools/ && python setup_opera_distance_metric.py build_ext --inplace
from tools.opera_distance_metric import generate_k_nearest_graph, opera_distance_metric_py, generate_radius_graph
import torch
import torch_scatter
import torch_geometric
from torch_geometric.data import Data
from sklearn.neighbors import NearestNeighbors
from tqdm import tqdm
from collections import defaultdict


def load_mc(filename="./EM_data/mcdata_taue2.root", step=1):
    """Load Monte-Carlo basetracks from a ROOT file into a DataFrame.

    Applies quality cuts (ele_P > 0.1, ele_z < 0, more than 3 basetracks)
    and adds per-shower mean-position columns.
    NOTE(review): the `step` parameter is unused in this body — confirm intent.
    """
    f = uproot.open(filename)
    # Each row is one shower; BT_* columns hold per-basetrack arrays.
    mc = f['Data'].pandas.df(["Event_id", "ele_P", "BT_X", "BT_Y", "BT_Z", "BT_SX", "BT_SY",
                              "ele_x", "ele_y", "ele_z", "ele_sx", "ele_sy", "chisquare", ],
                             flatten=False)
    pmc = pd.DataFrame(mc)
    pmc['numtracks'] = pmc.BT_X.apply(lambda x: len(x))

    # Quality cuts; record the row count after each cut for the log line below.
    shapechange = [pmc.shape[0]]
    pmc = pmc[pmc.ele_P > 0.1]
    shapechange.append(pmc.shape[0])
    pmc = pmc[pmc.ele_z < 0]
    shapechange.append(pmc.shape[0])
    pmc = pmc[pmc.numtracks > 3]
    shapechange.append(pmc.shape[0])
    print("numtracks reduction by cuts: ", shapechange)

    # Mean basetrack position per shower.
    pmc['m_BT_X'] = pmc.BT_X.apply(lambda x: x.mean())
    pmc['m_BT_Y'] = pmc.BT_Y.apply(lambda x: x.mean())
    pmc['m_BT_Z'] = pmc.BT_Z.apply(lambda x: x.mean())
    print("len(pmc): {len}".format(len=len(pmc)))
    return pmc


def pmc_to_ship_format(pmc):
    """Flatten the per-shower arrays into one long column-per-field dict.

    Showers are packed into bricks, NUM_SHOWERS_IN_BRICK at a time; `signal`
    is the shower index within its brick, `brick_id` the brick index.
    NOTE(review): NUM_SHOWERS_IN_BRICK is not defined anywhere in this file —
    presumably a module-level constant expected from elsewhere; confirm
    before running.
    """
    showers = defaultdict(list)
    for i, idx in enumerate(pmc.index):
        shower = pmc.loc[idx]
        n = len(shower['BT_X'])  # number of basetracks in this shower
        # Per-basetrack columns.
        showers['SX'].extend(shower['BT_X'])
        showers['SY'].extend(shower['BT_Y'])
        showers['SZ'].extend(shower['BT_Z'])
        showers['TX'].extend(shower['BT_SX'])
        showers['TY'].extend(shower['BT_SY'])
        # Per-shower scalars, replicated once per basetrack.
        showers['ele_P'].extend(n * [shower['ele_P']])
        showers['ele_SX'].extend(n * [shower['ele_x']])
        showers['ele_SY'].extend(n * [shower['ele_y']])
        showers['ele_SZ'].extend(n * [shower['ele_z']])
        showers['ele_TX'].extend(n * [shower['ele_sx']])
        showers['ele_TY'].extend(n * [shower['ele_sy']])
        showers['signal'].extend(n * [i % NUM_SHOWERS_IN_BRICK])
        showers['brick_id'].extend(n * [i // NUM_SHOWERS_IN_BRICK])
    return showers


from joblib import Parallel, delayed


def gen_one_shower(df_brick, knn=False, r=250, k=5, directed=False, e=0.00005, scale=1e4):
    """Build one torch_geometric Data graph for a single brick.

    knn selects k-nearest-neighbour edges (k neighbours) vs. radius edges
    (radius r); e is passed to the metric, scale normalises positions.
    NOTE(review): reads a 'numtracks' column, which pmc_to_ship_format does
    not emit — verify how df_brick is constructed upstream.
    """
    # Local import so each joblib worker loads the compiled extension itself.
    from tools.opera_distance_metric import generate_k_nearest_graph, opera_distance_metric_py, generate_radius_graph
    if knn:
        edges_from, edge_to, dist = generate_k_nearest_graph(
            df_brick[["brick_id", "SX", "SY", "SZ", "TX", "TY"]].values,
            k, e=e, symmetric=directed);
        edges = np.vstack([edges_from, edge_to])
        dist = np.array(dist)
        edge_index = torch.LongTensor(edges)
    else:
        edges_from, edge_to, dist = generate_radius_graph(
            df_brick[["brick_id", "SX", "SY", "SZ", "TX", "TY"]].values,
            r, e=e, symmetric=directed);
        edges = np.vstack([edges_from, edge_to])
        dist = np.array(dist)
        edge_index = torch.LongTensor(edges)
    # Node features: positions scaled down, slopes left as-is.
    x = torch.FloatTensor(df_brick[["SX", "SY", "SZ", "TX", "TY"]].values /
                          np.array([scale, scale, scale, 1., 1.]))
    shower_data = torch.FloatTensor(
        df_brick[["ele_P", "ele_SX", "ele_SY", "ele_SZ", "ele_TX", "ele_TY", "numtracks", "signal"]].values /
        np.array([1., scale, scale, scale, 1., 1., 1., 1.]))
    # Log of the pairwise metric as the edge attribute.
    edge_attr = torch.log(torch.FloatTensor(dist).view(-1, 1))
    y = torch.LongTensor(df_brick.signal.values)
    shower = torch_geometric.data.Data(
        x=x,
        edge_index=edge_index,
        shower_data=shower_data,
        pos=x,
        edge_attr=edge_attr,
        y=y
    )
    return shower


def gen_torch_showers(df, knn=False, r=250, k=5, directed=False, e=0.00005, scale=1e4):
    """Build graphs for the first 3 bricks in df, in parallel (10 workers)."""
    # NOTE(review): the [:3] slice limits output to three bricks — presumably
    # a debugging restriction; confirm before production use.
    df_bricks = [df[df.brick_id == brick_id] for brick_id in list(df.brick_id.unique())[:3]]
    showers = Parallel(n_jobs=10)(
        delayed(gen_one_shower)(df_brick, knn=knn, r=r, k=k, directed=directed, e=e, scale=scale)
        for df_brick in df_bricks)
    return showers


def main(root_file='./data/mcdata_taue2.root', output_file='./data/train_.pt', knn=True, k=10, directed=False, e=10):
    """End-to-end pipeline: load MC, cut on track count, build graphs, save."""
    pmc = load_mc(filename=root_file, step=1)
    # Keep showers with a moderate number of basetracks (70, 3000).
    pmc = pmc.loc[(pmc["BT_X"].apply(lambda x: len(x)) > 70) &
                  (pmc["BT_X"].apply(lambda x: len(x)) < 3000), :]
    showers = pmc_to_ship_format(pmc)
    df = pd.DataFrame(showers)
    showers = gen_torch_showers(df=df, knn=knn, k=k, directed=directed, e=e)
    torch.save(showers, output_file)


if __name__ == "__main__":
    main()
<reponame>ghjan/vnpy # encoding: UTF-8 # 定义Tick数据的格式 # 默认空值 EMPTY_STRING = '' EMPTY_UNICODE = u'' EMPTY_INT = 0 EMPTY_FLOAT = 0.0 class CtaTickData(object): """Tick数据""" # ---------------------------------------------------------------------- def __init__(self): """Constructor""" self.vtSymbol = EMPTY_STRING # vt系统代码 CF705 self.symbol = EMPTY_STRING # 合约代码 CF1705 self.exchange = EMPTY_STRING # 交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.volume = EMPTY_INT # 最新成交量 self.preOpenInterest = EMPTY_INT # 昨持仓量 self.openInterest = EMPTY_INT # 持仓量 self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价 # tick的时间 self.tradingDay = EMPTY_STRING # 交易日期 self.date = EMPTY_STRING # 日期 self.time = EMPTY_STRING # 时间 self.datetime = None # python的datetime时间对象 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT from pymongo import MongoClient mongodb_host = '192.168.0.202' mongodb_port = 27017 mongodb_user = 'vnpy' mongodb_pwd = '<PASSWORD>' class mongodb_client(object): def __init__(self): self.dbClient = None # ---------------------------------------------------------------------- def writeLog(self, content): """日志""" print( content) # ---------------------------------------------------------------------- def dbConnect(self): """连接MongoDB数据库""" if not self.dbClient: try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(mongodb_host, mongodb_port, serverSelectionTimeoutMS=500) # 这里使用了ticks这个库来验证用户账号和密码 
self.dbClient.ticks.authenticate(mongodb_user, mongodb_pwd, mechanism='SCRAM-SHA-1') # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'MongoDB连接成功') except Exception as ex: self.writeLog(u'MongoDB连接失败{0}'.format(ex)) # ---------------------------------------------------------------------- def dbInsert(self, dbName, collectionName, d): """向MongoDB中插入数据,d是具体数据""" if self.dbClient: db = self.dbClient[dbName] collection = db[collectionName] collection.insert(d) # ---------------------------------------------------------------------- def dbInsertMany(self, dbName, collectionName, dataList): """向MongoDB中插入Multi数据,dataList是具体数据List""" if self.dbClient: db = self.dbClient[dbName] collection = db.getCollection(collectionName) collection.insertMany(dataList)\ # ---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d): """从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针""" if self.dbClient: db = self.dbClient[dbName] collection = db[collectionName] cursor = collection.find(d) return cursor else: return None from datetime import datetime from collections import OrderedDict import os import pandas as pd def load_ticks_from_csv_file(file_name, symbol, trading_day): """从csv tick文件中UnicodeDictReader读取tick file_name,文件全路径 symbol,合约代码,RB01, RBMI 等 trading_day,交易日字符串 """ # 先读取数据到Dict,以日期时间为key ticks = OrderedDict() if not os.path.isfile(file_name): print( u'{0}文件不存在'.format(file_name)) return ticks dt = None start_time = datetime.now() df = pd.read_csv(file_name, encoding='gbk', parse_dates=False) df.columns = ['date', 'time', 'lastPrice', 'lastVolume', 'totalInterest', 'position', 'bidPrice1', 'bidVolume1', 'bidPrice2', 'bidVolume2', 'bidPrice3', 'bidVolume3', 'askPrice1', 'askVolume1', 'askPrice2', 'askVolume2', 'askPrice3', 'askVolume3', 'BS'] readed_ticks = len(df) position = 0 for i in range(0, len(df)): # 日期, 时间, 成交价, 成交量, 总量, 属性(持仓增减), B1价, B1量, B2价, B2量, B3价, B3量, S1价, S1量, S2价, S2量, S3价, S3量, BS # 0 1 
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 row = df.iloc[i].to_dict() tick = CtaTickData() tick.vtSymbol = symbol tick.symbol = symbol tick.date = row['date'] tick.tradingDay = trading_day tick.time = row['time'] try: tick.datetime = datetime.strptime(tick.date + ' ' + tick.time, '%Y-%m-%d %H:%M:%S') except Exception as ex: print( u'日期转换错误:{0},{1}:{2}'.format(tick.date + ' ' + tick.time, Exception, ex)) continue tick.date = tick.datetime.strftime('%Y%m%d') # 修正毫秒 if tick.datetime.replace(microsecond=0) == dt: # 与上一个tick的时间(去除毫秒后)相同,修改为500毫秒 tick.datetime = tick.datetime.replace(microsecond=500) tick.time = tick.datetime.strftime('%H:%M:%S.%f') else: tick.datetime = tick.datetime.replace(microsecond=0) tick.time = tick.datetime.strftime('%H:%M:%S.%f') dt = tick.datetime tick.lastPrice = float(row['lastPrice']) tick.volume = int(float(row['lastVolume'])) tick.bidPrice1 = float(row['bidPrice1']) # 叫买价(价格低) tick.bidVolume1 = int(float(row['bidVolume1'])) tick.askPrice1 = float(row['askPrice1']) # 叫卖价(价格高) tick.askVolume1 = int(float(row['askVolume1'])) tick.dayVolume = int(float(row['totalInterest'])) # 当日累计成交量 position += int(float(row['position'])) # 持仓量 tick.openInterest = position # 持仓量 dtStr = tick.date + ' ' + tick.time if dtStr not in ticks: ticks[dtStr] = tick if len(ticks) != readed_ticks: print( u'分析tick对象数量{0}与读取数据数量{1}不一致'.format(len(ticks), readed_ticks)) print( u'读取{0},共加载{1}条数据,耗时:{2}seconds'.format(file_name, readed_ticks, str(datetime.now() - start_time))) return ticks import csv def load_ticks_from_txt_file(file_name, symbol, trading_day): """ 读取中金所txt 格式的tick文件 file_name,文件全路径 symbol,合约代码,IC1601 等 trading_day,交易日字符串 返回ctatick对象dict,无内容返回空的dict """ # 先读取数据到Dict,以日期时间为key ticks = OrderedDict() if not os.path.isfile(file_name): print( u'{0}文件不存在'.format(file_name)) return [] last_tick_datetime = None # 文件句柄 csv_read_file = open(file_name, 'r',encoding='utf8') # 通过csv模块的DictReader,一次性读取所有数据 reader = csv.DictReader(csv_read_file, delimiter=",") start_time 
= datetime.now() # 逐行数据处理 for row in reader: tick = CtaTickData() # vtSymbol:CF1705, symbol:CF1705 tick.vtSymbol = symbol tick.symbol = symbol # 日期格式为 '20170120',交易日期,在夜盘时,交易日期为下一交易日 tick.date = trading_day tick.tradingDay = tick.date tick.time = row['Time'] # 转换为datetime格式 try: tick.datetime = datetime.strptime(tick.date + ' ' + tick.time, '%Y%m%d %H:%M:%S.%f') except Exception as ex: # 抛弃本tick print( u'日期转换错误:{0},{1}:{2}'.format(tick.date + ' ' + tick.time, Exception, ex)) continue # 修正毫秒 if tick.datetime.replace(microsecond=0) == last_tick_datetime: # 与上一个tick的时间(去除毫秒后)相同,修改为500毫秒 tick.datetime = tick.datetime.replace(microsecond=500) tick.time = tick.datetime.strftime('%H:%M:%S.%f') else: tick.datetime = tick.datetime.replace(microsecond=0) tick.time = tick.datetime.strftime('%H:%M:%S.%f') # 记录最新tick的时间 last_tick_datetime = tick.datetime tick.lastPrice = float(row['LastPrice']) # 最新价 tick.volume = int(float(row['LVolume'])) # 成交量 tick.bidPrice1 = float(row['BidPrice']) # 叫买价(价格低) tick.bidVolume1 = int(float(row['BidVolume'])) # 叫买量 tick.askPrice1 = float(row['AskPrice']) # 叫卖价(价格高) tick.askVolume1 = int(float(row['AskVolume'])) # 叫卖量 tick.openInterest = int(float(row['OpenInterest'])) # 持仓量 tick.dayVolume = int(float(row['TradeVolume'])) # 当日累计成交量 dtStr = tick.date + ' ' + tick.time if dtStr not in ticks: ticks[dtStr] = tick print( u'读取{0},共加载{1}条数据,耗时:{2}seconds'.format(file_name, len(ticks), str(datetime.now() - start_time))) return ticks import re def import_ticks_from_folder(folder_path): branch_time = datetime.now() for dirpath, folder_names, file_names in os.walk(folder_path): for file_name in file_names: file_path = os.path.join(dirpath, file_name) start_time = datetime.now() # 处理csb格式 if file_name.lower().find('.csv') != -1: s = file_name.replace('.csv', '').split('_') if len(s) != 2: print( u'{0} not match format'.format(file_path)) continue file_symbol = s[0] file_trading_day = s[1] # print('{0} {1}'.format(file_symbol,file_trading_day)) if 
len(file_trading_day) != 8: print( u'{0} trading_day not match format'.format(file_path)) continue try: ticks = load_ticks_from_csv_file(file_name=file_path, symbol=file_symbol, trading_day=file_trading_day) insert_list = [x.__dict__ for x in ticks.values()] mc.dbInsert(dbName='ticks', collectionName=file_symbol, d=insert_list) print( u'写入完成,共{0}条,耗时:{1}seconds'.format(len(insert_list), str(datetime.now() - start_time))) except Exception as ex: print( u'{0} load ticks and insert exception'.format(file_path)) continue elif file_name.lower().find('.txt') != -1: symbol_name = file_name.replace('.txt', '') if symbol_name.lower().find('survey') != -1: print( '{0} not import'.format(file_name)) continue p = re.compile(r"([A-Z]+)[0-9]+", re.I) short_symbol = p.match(symbol_name) if short_symbol is None: print( '{0} not import'.format(file_name)) continue short_symbol = short_symbol.group(1) collection_name = short_symbol + symbol_name[-2:] path_list = dirpath.split('/') if path_list[-2] != short_symbol: print( '{0} not import'.format(file_name)) continue trading_day = path_list[-4] + path_list[-1] #print collection_name, trading_day try: ticks = load_ticks_from_txt_file(file_name=file_path, symbol=symbol_name, trading_day=trading_day) insert_list = [x.__dict__ for x in ticks.values()] mc.dbInsert(dbName='ticks', collectionName=collection_name, d=insert_list) print( u'写入完成,共{0}条,耗时:{1}seconds'.format(len(insert_list), str(datetime.now() - start_time))) except Exception as ex: print( u'{0} load ticks and insert exception'.format(file_path)) continue print( '完成 {0} ticks ,耗时:{1}seconds'.format(folder_path, str(datetime.now() - branch_time))) mc=mongodb_client() mc.dbConnect() #import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201612') #import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201611') #import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201610') #import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201609') 
# Historical back-fill: one call per month/year folder. Earlier ranges are
# commented out because they have already been imported; uncomment to re-run.
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201608')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201607')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201606')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201605')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201604')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201603')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201602')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2016/201601')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2015')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2014')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2013')
#import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2012')
import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2011')
import_ticks_from_folder('/home/ubuntu/Ticks/ZZ/2010')
<filename>code/train_qc_baseline.py<gh_stars>0
# Baseline image-QC classifier: fine-tunes a ResNet-50 on single-channel
# microscopy images, validating every 5 epochs and checkpointing the best model.
# header files
import torch
import torch.nn as nn
import torchvision
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
import numpy as np
import skimage
from skimage import io, transform
import glob
import csv
from PIL import Image
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from PIL import ImageFile
from dataset import *
from utils import *
from metrics import *
torch.backends.cudnn.benchmark = True

# ensure same result is produced
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)

# dataset paths (only change these according to the required paths)
train_path = "/dgx1nas1/cellpainting-datasets/2019_07_11_JUMP_CP_pilots/train"
val_path = "/dgx1nas1/cellpainting-datasets/2019_07_11_JUMP_CP_pilots/validation"
results_dir = "/home/jupyter-arpit@broadinstitu-ef612/"
gpu_on_dgx = "cuda:4"

# hyperparameters
image_size = 1024  # 512 or 1024
is_pretrained = True  # True or False
lr = 0.001
batch_size = 8
num_epochs = 51  # loop below runs epochs 1..50 (range is exclusive at the top)
output_classes = 4

# create PyTorch dataset class and create train data and val data
# NOTE(review): ImageQC_1channel_Dataset comes from the project's `dataset`
# star-import — presumably yields (image, label, extra) triples; confirm there.
train_data = ImageQC_1channel_Dataset(train_path, img_size=image_size, is_train=True)
val_data = ImageQC_1channel_Dataset(val_path, img_size=image_size, is_train=False)
print(len(train_data))
print(len(val_data))

# load the data
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=10, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False, num_workers=10)

# model: ResNet-50 backbone; when pretrained, the backbone is frozen and only
# the replaced stem (conv1) and head (fc) below remain trainable.
model = torchvision.models.resnet50(pretrained=is_pretrained)
if is_pretrained:
    for param in model.parameters():
        param.requires_grad = False
# Replace the stem so the network accepts 1-channel input: a small 1->3
# adapter conv feeds the usual 3->64 7x7 stem conv.
model.conv1 = torch.nn.Sequential(
    torch.nn.Conv2d(1, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),
    torch.nn.BatchNorm2d(3),
    torch.nn.ReLU(inplace=True),
    torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
)
# Replace the classifier head with a small MLP ending in `output_classes` logits.
model.fc = torch.nn.Sequential(
    torch.nn.Linear(2048, 512),
    torch.nn.ReLU(),
    torch.nn.Dropout(),
    torch.nn.Linear(512, output_classes)
)

#load model to gpu
device = torch.device(gpu_on_dgx if torch.cuda.is_available() else "cpu")
model.to(device)

# define optimizer (steps over ALL parameters; frozen ones have no grads)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# define loss (smoothing=0 is equivalent to standard Cross-Entropy loss)
criterion = torch.nn.CrossEntropyLoss()

# training and validation loop
metrics = StreamMetrics(output_classes)
best_metric = -1
best_metric_epoch = -1
train_loss = []
val_loss = []
train_acc = []
val_acc = []
# NOTE(review): this rebinding shadows the `confusion_matrix` function imported
# from sklearn.metrics above (the function is never called, so it is harmless
# here, but the name clash is confusing).
confusion_matrix = None
best_confusion_matrix = -1

# train and validate
for epoch in range(1, num_epochs):
    print("Epoch: " + str(epoch))
    print()

    # train
    model.train()
    training_loss = 0.0
    total = 0
    correct = 0
    # NOTE(review): `input` shadows the builtin of the same name inside the loop.
    for i, (input, target, _) in enumerate(train_loader):
        input = input.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        output = model(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        training_loss = training_loss + loss.item()
        _, predicted = output.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
    training_loss = training_loss/float(len(train_loader))
    # accuracy is stored as a string here and converted back to float later
    training_accuracy = str(100.0*(float(correct)/float(total)))
    train_acc.append(training_accuracy)
    train_loss.append(training_loss)

    # validate every 5th epoch (first validation happens at epoch 5)
    if epoch%5 == 0:
        metrics.reset()
        model.eval()
        valid_loss = 0.0
        total = 0
        correct = 0
        with torch.no_grad():
            for i, (input, target, _) in enumerate(val_loader):
                input = input.to(device)
                target = target.to(device)
                output = model(input)
                loss = criterion(output, target)
                valid_loss = valid_loss + loss.item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()

                # get confusion matrix (accumulated batch-by-batch in `metrics`)
                targets = target.cpu().numpy()
                predicted = predicted.cpu().numpy()
                metrics.update(targets, predicted)
        valid_loss = valid_loss/float(len(val_loader))
        valid_accuracy = str(100.0*(float(correct)/float(total)))
        results = metrics.get_results()
        confusion_matrix = results["Confusion Matrix"]
        val_loss.append(valid_loss)
        val_acc.append(valid_accuracy)

        # store best model (best = highest validation accuracy so far)
        if(float(valid_accuracy)>best_metric):
            best_metric = float(valid_accuracy)
            best_metric_epoch = epoch
            best_confusion_matrix = confusion_matrix
            #torch.save(model.state_dict(), "/home/jupyter-arpit@broadinstitu-ef612/qc_bestmodel_baseline_size2.pth")
            torch.save(model.state_dict(), results_dir + "qc_bestmodel_baseline_size2_pretrained.pth")
        print()
        print("Epoch" + str(epoch) + ":")
        print("Training Accuracy: " + str(training_accuracy) + " Validation Accuracy: " + str(valid_accuracy))
        print("Training Loss: " + str(training_loss) + " Validation Loss: " + str(valid_loss))
        print("Best metric: " + str(best_metric))
        print(confusion_matrix)
        print(best_confusion_matrix)
        print()

    # lr scheduler (decays LR by 10x every 10 epochs)
    lr_scheduler.step()

# val_loss vs epoch
# NOTE(review): `epoch` is reused here as the x-axis list after the loop;
# x values are 0, 5, 10, ... matching the every-5-epochs validation cadence
# (off by 5 versus the actual validation epochs, which start at 5).
epoch = []
for i in range(0, len(val_acc)):
    epoch.append(i*5)
    val_acc[i] = float(val_acc[i])
    val_loss[i] = float(val_loss[i])
plt.xlabel("Epochs")
plt.ylabel("Validation Loss")
plt.plot(epoch, val_loss)
#plt.savefig("/home/jupyter-arpit@broadinstitu-ef612/val_loss_qc_baseline_size2.png")
plt.savefig(results_dir + "val_loss_qc_baseline_size2_pretrained.png")

# val_acc vs epoch
plt.cla()
epoch = []
for i in range(0, len(val_acc)):
    epoch.append(i*5)
    # second conversion is a no-op: values were already floats above
    val_acc[i] = float(val_acc[i])
    val_loss[i] = float(val_loss[i])
plt.xlabel("Epochs")
plt.ylabel("Validation Acc")
plt.plot(epoch, val_acc)
#plt.savefig("/home/jupyter-arpit@broadinstitu-ef612/val_acc_qc_baseline_size2.png")
plt.savefig(results_dir + "val_acc_qc_baseline_size2_pretrained.png")
from tkinter import *
from tkinter import font
import string
import pygments


class PygmentsText(Text):
    """Class that uses the pygments syntax-based highlighter to color-code
    text displayed in a Tk Text widget.

    Note that this isn't the same as a code pretty-printer. It just
    color-codes. It is also heuristic. It's not easy, in general, to
    determine how much of a text has to be reformatted given a change in it.
    We make a guess (1-2 lines) that should be good for a wide variety of
    highlighted langauges and use cases.

    Fixed for Python 3: the original used the removed module-level
    ``string.replace``/``string.index`` functions, the removed
    ``dict.iteritems`` method, and referenced an unimported ``tkFont``
    name; all are replaced with their Python-3 equivalents (str methods,
    ``dict.items``, and the ``tkinter.font`` module imported above).
    """

    def __init__(self, root, lexer, formatter, parent = None, **kwargs):
        """Create the widget.

        root      -- the Tk root window
        lexer     -- a pygments lexer instance (from pygments.lexers)
        formatter -- a Tk-aware pygments formatter producing tagName:payload lines
        parent    -- optional parent widget (defaults to root)
        """
        self.root = root
        self.parent = parent if parent else root
        Text.__init__(self, self.parent, **kwargs)

        # Install a Tcl-level proxy around this widget so that every
        # content/view-changing widget command also fires a <<Change>> event.
        self.tk.eval('''
            proc widget_proxy {widget widget_command args} {

                # call the real tk widget command with the real args
                set result [uplevel [linsert $args 0 $widget_command]]

                # generate the event for certain types of commands
                if {([lindex $args 0] in {insert replace delete}) ||
                    ([lrange $args 0 2] == {mark set insert}) ||
                    ([lrange $args 0 1] == {xview moveto}) ||
                    ([lrange $args 0 1] == {xview scroll}) ||
                    ([lrange $args 0 1] == {yview moveto}) ||
                    ([lrange $args 0 1] == {yview scroll})} {

                    event generate $widget <<Change>> -when tail
                }

                # return the result from the real widget command
                return $result
            }
        ''')
        self.tk.eval('''
            rename {widget} _{widget}
            interp alias {{}} ::{widget} {{}} widget_proxy {widget} _{widget}
        '''.format(widget=str(self)))

        self.lexer = lexer              # from pygments.lexers
        self.formatter = formatter      # a TkFormatter
        self.config_tags()
        self.bind('<KeyRelease>', self.key_press)
        self.bind('<Control-l>', lambda *args: self.reformatEverything())

    def insertFormatted(self, location, text, add_sep=False):
        """Similar to self.insert(), but instead of plain text, uses pygments
        to provide a set of formatting tags. The formatter should return a
        stream of tagged lines of the format tagName:payload_string\\n, which
        this class then inserts in tagged way.

        Note that if the given text is smaller than a "complete syntactic
        unit" of the language being syntax-highlighted, insertFormatted()
        probably won't result in correct syntax highlighting. Use
        self.reformatRange() or self.reformatEverything() to reformat a
        larger enclosing range if you're making micro-inserts.
        """
        # RPC: Added this to stop the formatter from replacing literal '\n'.
        if add_sep:
            self.edit_separator()
        # Temporarily swap literal backslash-n sequences for a sentinel char
        # so the highlighter doesn't confuse them with real line breaks.
        text = text.replace(r'\n', chr(1))

        textTagged = pygments.highlight(text, self.lexer, self.formatter)

        insertList = []
        for chunk in textTagged.splitlines():
            # split tagged lines into component parts: "tagName:payload"
            tagEnd = chunk.index(':')
            tagName, stringPart = chunk[:tagEnd], chunk[tagEnd+1:]

            # clean up / unquote / reformat data as necessary
            stringPart = stringPart.replace(r'\n', "\n")  # Convert literal '\n' back.
            stringPart = stringPart.replace(chr(1), r'\n')

            # add to the insert list as (text, tag) pairs for Text.insert()
            insertList.append(stringPart)
            insertList.append(tagName)

        # pygments.highlight() can send back extra linefeed markers at the end.
        # So if we didn't mean to end with a return, check for these, and if
        # they're just linefeeds, truncate them
        if not text.endswith("\n"):
            penultimate = insertList[-2]
            if penultimate.endswith("\n"):
                if penultimate == "\n":
                    insertList = insertList[:-2]
                else:
                    insertList[-2] = insertList[-2][:-1]

        # if something to insert, do it (actual typed returns are the missing
        # else case here; net-net they don't get formatted through pygments)
        if insertList:
            self.insert(location, *insertList)

    def config_tags(self):
        """Get style defintions from the friendly local pygments formatter,
        and instantiate them as Tk.Text tag definitions."""
        # Discover what 'basefont' is currently in use
        curFontName = self.cget('font')
        curFont = font.nametofont(curFontName)
        curFontSpecs = curFont.actual()
        basefont = ' '.join([str(curFontSpecs[k]) for k in 'family size'.split()])

        # Get tag definitions from our pygments formatter
        styledict = self.formatter.get_style_defs()

        # Define our tags accordingly
        for tagName, attTupleList in styledict.items():
            for attTuple in attTupleList:
                (attName, attValue) = attTuple
                if attName == 'font':
                    # Build a (family, size, style) tuple from the base font
                    f = basefont.rsplit(' ', 1)
                    f = (f[0], f[1], attValue)
                    self.tag_configure(tagName, font=f)
                else:
                    attSetter = dict([attTuple])
                    self.tag_configure(tagName, **attSetter)

    def key_press(self, key):
        """On key press (key release, acctually, so the character has already
        been inserted), reformat the effected area.

        The laziest, lowest-performance, and yet most correct approach would
        be to reformat the entire text contents. But that might be a bit slow
        / brute force. So we localize the reformatting, usually to a single
        line, with a fall back to several lines. If this doesn't catch it,
        the user can always type a key (default Control-L) bound to
        self.reformatEverything().
        """
        # TODO (RPC): Fix the way separators are added for a consistent and
        # smooth undo/redo operation.
        self.edit_separator()
        savePosn = self.index(INSERT)
        linenum = int(savePosn.split('.')[0])
        startline = linenum
        extraline = None
        # RPC: Had to change this to make newlines work correctly when
        # entered at the beginning of the line.
        if key.char in {"\n", "\r"}:
            # breaking a line, so reformat this line and the one before
            extraline = linenum - 1 if linenum > 1 else 1
        self.reformatRange("{0}.0".format(startline), "{0}.end".format(linenum))
        self.see(savePosn)
        self.mark_set(INSERT, savePosn)
        if extraline:
            self.reformatRange("{0}.0".format(extraline), "{0}.end".format(extraline))

    def reformatRange(self, start, end):
        """Reformat the given range of text."""
        buffer = str(self.get(start, end))
        self.delete(start, end)
        self.insertFormatted(start, buffer)

    def reformatEverything(self):
        """Reformat the works!"""
        self.reformatRange("1.0", "end")

    def setLexer(self, lexer):
        """Change the Lexer (if the user decides they want a different one)."""
        self.lexer = lexer
"""Minimal Flask blog: posts, comments, categories, and token-cookie auth."""
from functools import wraps
from flask import Flask
from flask import render_template, request, redirect, url_for, jsonify
import json

from post import Post
from comment import Comment
from category import Category
from user import User

app = Flask(__name__)


def require_login(func):
    """Decorator: redirect to /login unless the 'token' cookie verifies."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        token = request.cookies.get('token')
        if not token or not User.verify_token(token):
            return redirect('/login')
        return func(*args, **kwargs)
    return wrapper


@app.route('/')
def hello_world():
    # Landing page simply forwards to the category listing.
    return redirect("/categories")


@app.route('/posts')
def list_posts():
    return render_template('posts.html', posts=Post.all())


@app.route('/posts/<int:id>')
def show_post(id):
    post = Post.find(id)
    return render_template('post.html', post=post)


# NOTE(review): unlike new_post below, this mutating route is NOT protected
# by @require_login — anyone can edit a post. Confirm whether intentional.
@app.route('/posts/<int:id>/edit', methods=['GET', 'POST'])
def edit_post(id):
    post = Post.find(id)
    if request.method == 'GET':
        return render_template(
            'edit_post.html',
            post=post,
            categories=Category.all()
        )
    elif request.method == 'POST':
        post.name = request.form['name']
        post.author = request.form['author']
        post.content = request.form['content']
        post.category = Category.find(request.form['category_id'])
        post.save()
        return redirect(url_for('show_post', id=post.id))


@app.route('/posts/new', methods=['GET', 'POST'])
@require_login
def new_post():
    """Create a post; the only route guarded by require_login."""
    if request.method == 'GET':
        return render_template('new_post.html', categories=Category.all())
    elif request.method == 'POST':
        categ = Category.find(request.form['category_id'])
        # Positional values match Post's constructor; None = auto-assigned id.
        values = (
            None,
            request.form['name'],
            request.form['author'],
            request.form['content'],
            categ
        )
        Post(*values).create()
        return redirect('/')


# NOTE(review): also unauthenticated — see edit_post above.
@app.route('/posts/<int:id>/delete', methods=['POST'])
def delete_post(id):
    post = Post.find(id)
    post.delete()
    return redirect('/')


@app.route('/comments/new', methods=['POST'])
def new_comment():
    # The method check is redundant (route only accepts POST) but harmless.
    if request.method == 'POST':
        post = Post.find(request.form['post_id'])
        values = (None, post, request.form['message'])
        Comment(*values).create()
        return redirect(url_for('show_post', id=post.id))


@app.route('/categories')
def get_categories():
    return render_template("categories.html", categories=Category.all())


@app.route('/categories/new', methods=["GET", "POST"])
def new_category():
    if request.method == "GET":
        return render_template("new_category.html")
    elif request.method == "POST":
        category = Category(None, request.form["name"])
        category.create()
        return redirect("/categories")


@app.route('/categories/<int:id>')
def get_category(id):
    return render_template("category.html", category=Category.find(id))


# NOTE(review): destructive action via GET (CSRF-prone), and it redirects to
# "/" while new_category redirects to "/categories" — inconsistent.
@app.route('/categories/<int:id>/delete')
def delete_category(id):
    Category.find(id).delete()
    return redirect("/")


@app.route('/register', methods=['GET', 'POST'])
def register():
    if request.method == 'GET':
        return render_template('register.html')
    elif request.method == 'POST':
        values = (
            None,
            request.form['username'],
            User.hash_password(request.form['password'])
        )
        User(*values).create()
        return redirect('/')


@app.route('/login', methods=["GET", "POST"])
def login():
    """GET renders the form; POST expects a JSON body and returns a token.

    Returns {'token': None} on bad credentials, {'token': <str>} on success.
    """
    if request.method == 'GET':
        return render_template('login.html')
    elif request.method == 'POST':
        data = json.loads(request.data.decode('ascii'))
        username = data['username']
        password = data['password']
        user = User.find_by_username(username)
        if not user or not user.verify_password(password):
            return jsonify({'token': None})
        token = user.generate_token()
        # generate_token() apparently returns bytes — decoded for JSON.
        return jsonify({'token': token.decode('ascii')})


if __name__ == '__main__':
    app.run()
<gh_stars>0
#!/usr/bin/env python

"""
Generalised class: Displays data sources for a class
"""

import os
import sys
import lib_util
import lib_common
try:
    import lib_wbem
    wbemOk = True
except ImportError:
    # WBEM support is optional; wbemOk gates every WBEM code path below.
    wbemOk = False
import lib_wmi
from lib_properties import pc

# NOTE(review): DEBUG() is used below but not imported here — presumably
# provided by one of the lib_* modules or injected globally; verify.


# Now, adds the base classes of this one, at least one one level.
# Returns (superclass node, superclass name), or (None, None) when the class
# has no resolvable superclass.
def WbemAddBaseClass(grph,connWbem,wbemNode,entity_host, wbemNamespace, entity_type):
    wbemKlass = lib_wbem.WbemGetClassObj(connWbem,entity_type,wbemNamespace)
    if not wbemKlass:
        return ( None, None )

    superKlassName = wbemKlass.superclass

    # sys.stderr.write("WBEM superKlassName=%s\n" % superKlassName)
    # An empty string or None.
    if not superKlassName:
        return ( None, None )

    # TODO: Should be changed, this is slow and inconvenient.
    wbemSuperUrlsList = lib_wbem.GetWbemUrls( entity_host, wbemNamespace, superKlassName, "" )
    if not wbemSuperUrlsList:
        return ( None, None )

    # TODO: Which one should we take, http or https ???
    wbemSuperUrl = wbemSuperUrlsList[0][0]
    DEBUG("WBEM wbemSuperUrl=%s", wbemSuperUrl)
    wbemSuperNode = lib_common.NodeUrl(wbemSuperUrl)

    grph.add( ( wbemSuperNode, pc.property_cim_subclass, wbemNode ) )
    klaDescrip = lib_wbem.WbemClassDescription(connWbem,superKlassName,wbemNamespace)
    if not klaDescrip:
        klaDescrip = "Undefined class %s %s" % ( wbemNamespace, superKlassName )
    grph.add( ( wbemSuperNode, pc.property_information, lib_common.NodeLiteral(klaDescrip ) ) )
    return ( wbemSuperNode, superKlassName )


# Adds the list of base classes. Returns the list of pairs (name node),
# so it can be matched againt another inheritance tree.
def WbemAddBaseClasses(grph,connWbem,wbemNode,entity_host, wbemNamespace, entity_type):
    # Maps each class name on the inheritance chain to its graph node.
    pairNameNode = dict()
    # Walk up the chain until WbemAddBaseClass reports no superclass.
    while wbemNode:
        ( wbemSuperNode, superClass ) = WbemAddBaseClass(grph,connWbem,wbemNode,entity_host, wbemNamespace, entity_type)
        pairNameNode[entity_type] = wbemNode
        wbemNode = wbemSuperNode
        entity_type = superClass
    return pairNameNode


def CreateWbemNode(grph,rootNode,entity_host, nameSpace, className, entity_id):
    """Add WBEM nodes (server, class description, base classes) for the class.

    Returns the name->node map of the inheritance chain, or None.
    """
    wbemNamespace = nameSpace.replace("\\","/")
    wbem_servers_desc_list = lib_wbem.GetWbemUrls( entity_host, wbemNamespace, className, entity_id )

    # If there are no servers.
    pairNameNode = None

    for url_server in wbem_servers_desc_list:
        wbemNode = lib_common.NodeUrl(url_server[0])
        grph.add( ( rootNode, pc.property_wbem_data, wbemNode ) )

        wbemHostNode = lib_common.gUriGen.HostnameUri( url_server[1] )
        grph.add( ( wbemNode, pc.property_host, wbemHostNode ) )

        # TODO: Add a Yawn server ??
        grph.add( ( wbemNode, pc.property_wbem_server, lib_common.NodeLiteral( url_server[1] ) ) )

        # Now adds the description of the class.
        connWbem = lib_wbem.WbemConnection(entity_host)
        klaDescrip = lib_wbem.WbemClassDescription(connWbem,className,wbemNamespace)
        okWbemClass = True
        if not klaDescrip:
            okWbemClass = False
            klaDescrip = "Undefined class %s %s" % ( wbemNamespace, className )
        grph.add( ( wbemNode, pc.property_information, lib_common.NodeLiteral(klaDescrip) ) )

        # Maybe this class is not Known in WBEM.
        # NOTE(review): bare `except:` swallows everything, including
        # KeyboardInterrupt/SystemExit — should be `except Exception:`.
        try:
            pairNameNode = WbemAddBaseClasses(grph,connWbem,wbemNode,entity_host, nameSpace, className)
        except:
            pairNameNode = None

        if okWbemClass and wbemOk and nameSpace != "" and entity_host != "":
            namespaceUrl = lib_wbem.NamespaceUrl(nameSpace,entity_host,className)
            namespaceNode = lib_common.NodeUrl( namespaceUrl )
            grph.add( ( wbemNode, pc.property_information, namespaceNode ) )

    # TODO: This is a bit absurd because we return just one list.
    return pairNameNode


# Adds a WMI node and other stuff, for the class name.
def CreateWmiNode(grph,rootNode,entity_host, nameSpace, className, entity_id):
    wmiurl = lib_wmi.GetWmiUrl( entity_host, nameSpace, className, entity_id )
    if wmiurl is None:
        return

    # There might be "http:" or the port number around the host.
    # hostOnly = lib_util.EntHostToIp(entity_host)
    # sys.stderr.write("entity_host=%s nameSpace=%s entity_type=%s className=%s wmiurl=%s\n" % ( entity_host, nameSpace, entity_type, className, str(wmiurl) ) )
    wmiNode = lib_common.NodeUrl(wmiurl)
    grph.add( ( rootNode, pc.property_wmi_data, wmiNode ) )

    # TODO: Shame, we just did it in GetWmiUrl.
    ipOnly = lib_util.EntHostToIp(entity_host)
    try:
        # It simply returns if it cannot connect.
        connWmi = lib_wmi.WmiConnect(ipOnly,nameSpace,False)
        if not connWmi:
            raise Exception("Cannot connect")
        lib_wmi.WmiAddClassQualifiers( grph, connWmi, wmiNode, className, False )

        # Now displays the base classes, to the top of the inheritance tree.
        pairNameNode = lib_wmi.WmiAddBaseClasses(grph,connWmi,wmiNode,ipOnly, nameSpace, className)
    except Exception:
        pairNameNode = None

        # TODO: If the class is not defined, maybe do not display it.
        # sys.exc_info()[1] is the Python-2-compatible way to fetch the
        # active exception (equivalent to `except Exception as exc`).
        exc = sys.exc_info()[1]
        errMsg = "WMI connection %s: %s" % (ipOnly,str(exc))
        grph.add( ( wmiNode, lib_common.MakeProp("WMI Error"), lib_common.NodeLiteral(errMsg) ) )

    urlNameSpace = lib_wmi.NamespaceUrl(nameSpace,ipOnly,className)
    # sys.stderr.write("entity_host=%s urlNameSpace=%s\n"%(entity_host,urlNameSpace))
    grph.add( ( wmiNode, pc.property_information, lib_common.NodeUrl(urlNameSpace) ) )
    return pairNameNode


# entity_type = "CIM_Process", "Win32_Service" etc...
# This might happen at an intermediary level, with inheritance (To be implemented).
def AddCIMClasses(grph,rootNode,entity_host, nameSpace, className, entity_id):
    # Maybe some of these servers are not able to display anything about this object.
    pairNameNodeWbem = None
    if wbemOk:
        if lib_wbem.ValidClassWbem(className):
            pairNameNodeWbem = CreateWbemNode(grph,rootNode,entity_host, nameSpace, className, entity_id)

    pairNameNodeWmi = None
    if lib_wmi.ValidClassWmi(className):
        pairNameNodeWmi = CreateWmiNode(grph,rootNode,entity_host, nameSpace, className, entity_id)

    # Match the two inheritance trees: link classes that appear in both
    # the WBEM and the WMI chains under a common node.
    if pairNameNodeWbem and pairNameNodeWmi:
        for ( baseClsNam, nodeWbem ) in lib_util.six_iteritems( pairNameNodeWbem ):
            try:
                nodeWmi = pairNameNodeWmi[baseClsNam]
            except KeyError:
                continue

            nodeClsAll = lib_util.EntityClassNode( baseClsNam, nameSpace, entity_host, "WBEM ou WMI" )
            grph.add( ( nodeClsAll, pc.property_wbem_data, nodeWbem))
            grph.add( ( nodeClsAll, pc.property_wmi_data, nodeWmi))


def CreateOurNode(grph,rootNode,entity_host, nameSpace, className, entity_id):
    # This try to find a correct url for an entity type, without an entity id.
    # At the moment, we just expect a file called "enumerate_<entity>.py"
    enumerateScript = "enumerate_" + className + ".py"
    # sys.stderr.write("enumerateScript=%s\n" % enumerateScript)

    baseDir = lib_util.gblTopScripts + "/sources_types"

    # TODO: This is absurd !!! Why looping, because the filename is already known !?!?
    for dirpath, dirnames, filenames in os.walk( baseDir ):
        # sys.stderr.write("dirpath=%s\n" % dirpath)
        for filename in [f for f in filenames if f == enumerateScript ]:
            # Strip the scripts root so the path is relative to the web root.
            shortDir = dirpath[ len(lib_util.gblTopScripts) : ]
            fullScriptNam = os.path.join(shortDir, filename).replace('\\','/')
            DEBUG("fullScriptNam=%s", fullScriptNam)

            # TODO: Maybe remove the beginning of the file.
            localClassUrl = lib_util.ScriptizeCimom( fullScriptNam, className, entity_host )
            localClassNode = lib_common.NodeUrl( localClassUrl )
            grph.add( ( rootNode, pc.property_directory, localClassNode ) )


def Main():
    # This should be able to process remote hosts because it calls scripts which can access remote data.
    cgiEnv = lib_common.CgiEnv(can_process_remote = True)

    ( nameSpace, className, entity_type ) = cgiEnv.get_namespace_type()

    # If nameSpace is not provided, it is set to "root/CIMV2" by default.
    if not className:
        lib_common.ErrorMessageHtml("Class name should not be empty")

    # Just in case ...
    if nameSpace == "/":
        nameSpace = ""

    entity_host = cgiEnv.GetHost()
    entity_id = cgiEnv.m_entity_id

    # QUERY_STRING=xid=http%3A%2F%2F192.168.1.88%3A5988%2Froot%2FPG_Internal%3APG_WBEMSLPTemplate
    DEBUG("class_type_all entity_host=%s entity_id=%s", entity_host, entity_id )

    grph = cgiEnv.GetGraph()

    rootNode = lib_util.RootUri()

    objtypeNode = lib_common.NodeUrl( lib_util.uriRoot + '/objtypes.py' )
    grph.add( ( rootNode, pc.property_rdf_data_nolist2, objtypeNode ) )

    # This displays the documentation of the Python module of this entity class.
    entity_module = lib_util.GetEntityModule(className)
    entDoc = entity_module.__doc__
    if entDoc:
        entDoc = entDoc.strip()
        grph.add( ( rootNode, pc.property_information, lib_common.NodeLiteral(entDoc) ) )

    CreateOurNode(grph,rootNode,entity_host, nameSpace, className, entity_id)

    # Do this for each intermediary entity type (Between slashes).
    AddCIMClasses(grph,rootNode,entity_host, nameSpace, className, entity_id)

    cgiEnv.OutCgiRdf("LAYOUT_RECT_TB")


if __name__ == '__main__':
    Main()
<reponame>Pibben/sim74
# Unit tests for the 74161 4-bit counter and 74181 ALU simulations.
from unittest import TestCase

from core import Net
from p74xx import P74161, P74181
from system import System
from util import BinaryBus, SystemClock, Injector, BusInjector


class TestP74161(TestCase):
    def test_single(self):
        """A single 74161 counts 0..15 and raises RCO at 15, then wraps."""
        part = P74161("test")

        outbus = BinaryBus(("QD", "QC", "QB", "QA"))
        outbus.connect_part(part)

        clk_inj = Injector([part.get_pin("CLK")])
        clk_inj.set_value(0)

        rconet = Net("rco")
        rconet.add_pin(part.get_pin("RCO"))

        # ENP high enables counting.
        enp_inj = Injector([part.get_pin("ENP")])
        enp_inj.set_value(1)

        sys = System({"msb": part})
        sc = SystemClock(clk_inj, sys)

        sys.run()
        self.assertEqual(outbus.get_value(), 0)
        sc.step()
        self.assertEqual(outbus.get_value(), 1)
        sc.run(14)
        # At terminal count (15) the ripple-carry output goes high.
        self.assertEqual(outbus.get_value(), 15)
        self.assertEqual(rconet.get_value(), 1)
        sc.step()
        self.assertEqual(outbus.get_value(), 0)
        self.assertEqual(rconet.get_value(), 0)

    def test_cascade(self):
        """Two 74161s chained via RCO->ENP behave as an 8-bit counter."""
        lsb = P74161("lsb")
        msb = P74161("msb")

        outbus = BinaryBus(("QH", "QG", "QF", "QE", "QD", "QC", "QB", "QA"))
        outbus.connect_pins(msb.get_pins(["QD", "QC", "QB", "QA"]) + lsb.get_pins(["QD", "QC", "QB", "QA"]))

        # Both counters share the same clock.
        clk_inj = Injector([msb.get_pin("CLK"), lsb.get_pin("CLK")])
        clk_inj.set_value(0)

        # The LSB's ripple carry enables the MSB counter.
        rcopin = lsb.get_pin("RCO")
        rcopin.connect(msb.get_pin("ENP"))

        enp_inj = Injector([lsb.get_pin("ENP")])
        enp_inj.set_value(1)

        sys = System({"lsb": lsb, "msb": msb})
        sc = SystemClock(clk_inj, sys)

        sys.run()
        self.assertEqual(outbus.get_value(), 0)
        sc.step()
        self.assertEqual(outbus.get_value(), 1)
        sc.run(14)
        self.assertEqual(outbus.get_value(), 15)
        sc.step()
        # The carry into the MSB nibble makes the count continue past 15.
        self.assertEqual(outbus.get_value(), 16)


# https://github.com/fdecaire/LogicLibrary/blob/master/TTLLibrary.Tests/TTL74181Tests.cs
class TestP74181(TestCase):
    def test_74181(self):
        """Exercise a single 74181 in several select modes (S, M, CN)."""
        part = P74181("test")

        abus = BinaryBus(("A3", "A2", "A1", "A0"))
        abus.connect_part(part)
        bbus = BinaryBus(("B3", "B2", "B1", "B0"))
        bbus.connect_part(part)
        fbus = BinaryBus(("F3", "F2", "F1", "F0"))
        fbus.connect_part(part)
        sbus = BinaryBus(("S3", "S2", "S1", "S0"))
        sbus.connect_part(part)

        m_inj = Injector([part.get_pin("M")])
        cn_inj = Injector([part.get_pin("CN")])

        a_inj = BusInjector(abus)
        b_inj = BusInjector(bbus)
        s_inj = BusInjector(sbus)

        # F equals B (logic mode M=1, S=1010)
        s_inj.set_value(0b1010)
        m_inj.set_value(1)
        a_inj.set_value(0)
        b_inj.set_value(5)
        cn_inj.set_value(1)

        part.update_impl("foo")

        self.assertEqual(fbus.get_value(), 5)

        b_inj.set_value(4)
        part.update_impl("foo")
        self.assertEqual(fbus.get_value(), 4)

        # F equals A (logic mode M=1, S=1111)
        s_inj.set_value(0b1111)
        m_inj.set_value(1)
        a_inj.set_value(5)
        b_inj.set_value(0)

        part.update_impl("foo")
        self.assertEqual(fbus.get_value(), 5)

        a_inj.set_value(4)
        part.update_impl("foo")
        self.assertEqual(fbus.get_value(), 4)

        # F = A + B (arithmetic mode M=0, S=1001)
        s_inj.set_value(0b1001)
        m_inj.set_value(0)
        a_inj.set_value(5)
        b_inj.set_value(2)

        part.update_impl("foo")
        self.assertEqual(fbus.get_value(), 7)

        a_inj.set_value(4)
        b_inj.set_value(1)
        part.update_impl("foo")
        self.assertEqual(fbus.get_value(), 5)

    def test_cascaded_74181(self):
        """Two 74181s chained via CN+4 -> CN form an 8-bit adder: 113+11=124."""
        msb_alu = P74181("msb")
        lsb_alu = P74181("lsb")

        abus = BinaryBus('A' + str(i) for i in range(8))
        abus.connect_pins(msb_alu.get_pins(["A3", "A2", "A1", "A0"]) + lsb_alu.get_pins(["A3", "A2", "A1", "A0"]))
        bbus = BinaryBus('B' + str(i) for i in range(8))
        bbus.connect_pins(msb_alu.get_pins(["B3", "B2", "B1", "B0"]) + lsb_alu.get_pins(["B3", "B2", "B1", "B0"]))
        fbus = BinaryBus('F' + str(i) for i in range(8))
        fbus.connect_pins(msb_alu.get_pins(["F3", "F2", "F1", "F0"]) + lsb_alu.get_pins(["F3", "F2", "F1", "F0"]))
        # Both ALUs receive the same 4-bit function select.
        sbus = BinaryBus('S' + str(i) for i in range(4))
        sbus.connect_pins(msb_alu.get_pins(["S3", "S2", "S1", "S0"]))
        sbus.connect_pins(lsb_alu.get_pins(["S3", "S2", "S1", "S0"]))

        # Carry out of the low nibble feeds the high nibble's carry in.
        lsb_alu.get_pin("CN+4").connect(msb_alu.get_pin("CN"))

        a_inj = BusInjector(abus)
        b_inj = BusInjector(bbus)
        s_inj = BusInjector(sbus)
        m_inj = Injector([msb_alu.get_pin("M"), lsb_alu.get_pin("M")])
        cn_inj = Injector([lsb_alu.get_pin("CN")])

        s_inj.set_value(0b1001)
        m_inj.set_value(0)
        a_inj.set_value(113)
        b_inj.set_value(11)
        cn_inj.set_value(1)
        # NOTE(review): this second set_value(0) duplicates the one above
        # and is redundant.
        m_inj.set_value(0)

        sys = System({"lsb": lsb_alu, "msb": msb_alu})
        sys.run()

        self.assertEqual(fbus.get_value(), 124)
# Copyright (c) 2015-2020 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

from . import syntaxes


def process(bot, chains, update):
    """Process an inline update.

    Each hook in the "inline" chain is offered the update in turn; the first
    hook whose call reports success stops the chain.
    """
    for hook in chains["inline"]:
        bot.logger.debug("Processing update #%s with the hook %s..." %
                         (update.update_id, hook.name))

        result = hook.call(bot, update)
        # BUGFIX: the original compared with `is` against a dict literal,
        # which is always False (identity check against a fresh object),
        # so the early return below was dead code. Use equality instead.
        if result == {'ok': True, 'result': True}:
            bot.logger.debug("Update #%s was just processed by the %s hook." %
                             (update.update_id, hook.name))
            return

    bot.logger.debug("No hook actually processed the #%s update." %
                     update.update_id)


def inline_feedback_process(bot, chains, update):
    """Process a chosen inline result update.

    Same dispatch strategy as process(), but for the "inline_feedback" chain.
    """
    for hook in chains["inline_feedback"]:
        bot.logger.debug("Processing update #%s with the hook %s..." %
                         (update.update_id, hook.name))

        result = hook.call(bot, update)
        # BUGFIX: `is` -> `==` (see process() above); identity comparison
        # against a dict literal can never be true.
        if result == {'ok': True}:
            bot.logger.debug("Update #%s was just processed by the %s hook."
                             % (update.update_id, hook.name))
            return

    bot.logger.debug("No hook actually processed the #%s update." %
                     update.update_id)


class InlineInputMessage:
    """A factory for InputMessageContent Telegram objects"""

    def __init__(self, text, syntax=None, preview=True):
        self.text = text
        self.syntax = syntax
        self.preview = preview

    def _serialize(self):
        """Build the dict the Telegram Bot API expects."""
        args = {
            "message_text": self.text,
            "disable_web_page_preview": not self.preview,
        }
        # Auto-detect the parse mode when a syntax is not given explicitly.
        syntax = syntaxes.guess_syntax(self.text, self.syntax)
        if syntax:
            args["parse_mode"] = syntax
        return args


class InlineInputLocation:
    """A factory for InputLocationMessageContent Telegram objects"""

    def __init__(self, latitude, longitude, live_period=None):
        self.latitude = latitude
        self.longitude = longitude
        self.live_period = live_period

    def _serialize(self):
        """Build the dict the Telegram Bot API expects."""
        args = {
            "latitude": self.latitude,
            "longitude": self.longitude,
        }
        if self.live_period is not None:
            args["live_period"] = self.live_period
        return args


class InlineInputVenue:
    """A factory for InputVenueMessageContent Telegram objects"""

    def __init__(self, latitude, longitude, title, address,
                 foursquare_id=None, foursquare_type=None):
        self.latitude = latitude
        self.longitude = longitude
        self.title = title
        self.address = address
        self.foursquare_id = foursquare_id
        self.foursquare_type = foursquare_type

    def _serialize(self):
        """Build the dict the Telegram Bot API expects."""
        args = {
            "latitude": self.latitude,
            "longitude": self.longitude,
            "title": self.title,
            "address": self.address,
        }
        # Optional Foursquare metadata is only sent when provided.
        if self.foursquare_id is not None:
            args["foursquare_id"] = self.foursquare_id
        if self.foursquare_type is not None:
            args["foursquare_type"] = self.foursquare_type
        return args


class InlineInputContact:
    """A factory for InputContactMessageContent Telegram objects"""

    def __init__(self, phone, first_name, last_name=None, vcard=None):
        self.phone_number = phone
        self.first_name = first_name
        self.last_name = last_name
        self.vcard = vcard

    def _serialize(self):
        """Build the dict the Telegram Bot API expects."""
        args = {
            "phone_number": self.phone_number,
            "first_name": self.first_name,
        }
        if self.last_name is not None:
            args["last_name"] = self.last_name
        if self.vcard is not None:
            args["vcard"] = self.vcard
        return args
# day 6 Numpy Array Function
import numpy as np
import time
from timeit import timeit
from scipy import special  # moved to the top with the other imports

np.random.seed(0)


def compute_reciprocals(values):
    """Return an array holding 1/values[i] for every element.

    Uses a deliberate element-by-element Python loop so its timing can be
    contrasted with NumPy's vectorised ufuncs (see the commented timing
    experiment below).
    """
    output = np.empty(len(values))
    for i in range(len(values)):
        output[i] = 1.0 / values[i]
    return output


# time loop
# value1 = np.random.randint(1, 10, size=5)
# t1 = timeit('compute_reciprocals(value1)', 'from __main__ import compute_reciprocals, value1', number=1)
# print('timeit', t1)
#
# value2 = np.random.randint(1, 100, size=1000000)
# t2 = timeit('compute_reciprocals(value2)', 'from __main__ import compute_reciprocals, value2', number=1)
# print('timeit', t2)
#
# t3 = timeit('1.0 / value2', 'from __main__ import value2', number=1)
# print('timeit', t3)


def array_arithmetic():
    """Demonstrate elementwise arithmetic ufuncs and absolute value."""
    x = np.arange(4)
    print("x =", x)
    print("x + 5 =", x + 5)
    print("x - 5 =", x - 5)
    print("x * 2 =", x * 2)
    print("x / 2 =", x / 2)
    print("x // 2 =", x // 2)
    print("-x = ", -x)
    print("x ** 2 = ", x ** 2)
    print("x % 2 = ", x % 2)
    print('-(0.5*x + 1) ** 2', -(0.5*x + 1) ** 2)
    print('add', np.add(x, 2))

    x = np.array([-2, -1, 0, 1, 2])
    print('x=', x)
    print('abs(x)=', abs(x))
    print('np.absolute(x) = ', np.absolute(x))
    print('np.abs(x) = ', np.abs(x))

    # for complex values np.abs returns the magnitude
    x = np.array([3 - 4j, 4 - 3j, 2 + 0j, 0 + 1j])
    print('x=', x)
    print('np.abs(x) = ', np.abs(x))


def trigonometric():
    """Demonstrate trigonometric ufuncs and their inverses."""
    theta = np.linspace(0, np.pi, 3)
    print("theta = ", theta)
    print("sin(theta) = ", np.sin(theta))
    print("cos(theta) = ", np.cos(theta))
    print("tan(theta) = ", np.tan(theta))

    x = [-1, 0, 1]
    print("x = ", x)
    print("arcsin(x) = ", np.arcsin(x))
    print("arccos(x) = ", np.arccos(x))
    print("arctan(x) = ", np.arctan(x))


def logarithms():
    """Demonstrate exponential and logarithmic ufuncs."""
    x = [1, 2, 3]
    print("x =", x)
    print("e^x =", np.exp(x))
    print("2^x =", np.exp2(x))
    print("3^x =", np.power(3, x))

    x = [1, 2, 4, 10]
    print("x =", x)
    print("ln(x) =", np.log(x))
    print("log2(x) =", np.log2(x))
    print("log10(x) =", np.log10(x))

    # expm1/log1p keep precision for very small x
    x = [0, 0.001, 0.01, 0.1]
    print("exp(x) - 1 =", np.expm1(x))
    print("log(1 + x) =", np.log1p(x))


def advanced_feature():
    """Demonstrate out= targets and reduce/accumulate/outer aggregations."""
    # write computation results directly to the memory location
    x = np.arange(5)
    y = np.empty(5)
    np.multiply(x, 10, out=y)
    print(y)

    # out= also accepts a strided view, writing to every other slot of y
    y = np.zeros(10)
    np.power(2, x, out=y[::2])
    print(y)

    x = np.arange(1, 6)
    print('x=', x)
    # renamed local from `sum` to `total`: the original shadowed the
    # built-in sum() inside this function
    total = np.add.reduce(x)
    print('sum=', total)
    mul = np.multiply.reduce(x)
    print('multiply reduce=', mul)
    sum2 = np.add.accumulate(x)
    mul2 = np.multiply.accumulate(x)
    out = np.multiply.outer(x, x)
    print('add.accumulate=', sum2)
    print('multiply.accumulate=', mul2)
    print('multiply.outer=', out)


def scipy_special():
    """Demonstrate a few special functions from scipy.special."""
    # Gamma functions
    x = [1, 5, 10]
    print("gamma(x) =", special.gamma(x))
    print("ln|gamma(x)| =", special.gammaln(x))
    print("beta(x, 2) =", special.beta(x, 2))

    # Error function (integral of Gaussian)
    x = np.array([0, 0.3, 0.7, 1.0])
    print("erf(x) =", special.erf(x))
    print("erfc(x) =", special.erfc(x))
    print("erfinv(x) =", special.erfinv(x))


if __name__ == '__main__':
    print('Numpy Version', np.__version__)
    array_arithmetic()
    trigonometric()
    logarithms()
    advanced_feature()
    scipy_special()
# This example is designed to check the likelihood calculation under most models # supported by Phycas. A data set is simulated under the most complex model, and # analyzed under a spectrum of simpler models. The data set is saved as a nexus file # complete with PAUP blocks that allow verification of Phycas's likelihood # calculations by PAUP. A second sweep of models is done for a real data set # (nyldna4.nex) and a paup command file is written to allow verification of Phycas' # likelihood calculations. from phycas import * def tryAllModels(fn): # Create string containing PAUP commands that will be added to the end of the # file fn to check results - we will add more commands to this string as we go paup_commands = [] #paup_commands.append('\n[!\n***** HKY+G+I (estimate everything) *****]') paup_commands.append('\n[!\n***** GTR+G+I (estimate everything) *****]') #paup_commands.append('lset nst=2 variant=hky basefreq=estimate tratio=estimate rates=gamma shape=estimate pinvar=estimate;') paup_commands.append('lset nst=6 basefreq=estimate rmatrix=estimate rates=gamma shape=estimate pinvar=estimate;') paup_commands.append('lscores 1 / userbrlen;') print print '************* Testing GTRModel *******************' # Compute likelihood using the GTR+G+I model print '\nGTR+G+I model' model.type = 'gtr' model.relrates = [1.8, 4.0, 1.5, 1.2, 5.0, 1.0] model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() ref_lnL = lnL print 'lnL = %.5f (this is the reference lnL)' % (lnL) paup_commands.append('\n[!\n***** GTR+G+I (using GTRModel) *****]') paup_commands.append('lset nst=6 basefreq=(0.1 0.2 0.3) rmatrix=(1.8 4.0 1.5 1.2 5.0) rates=gamma shape=1.2 pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas GTR+G+I lnL = %.5f]' % lnL) # Compute likelihood using the GTR+I model print '\nGTR+I model' model.type = 'gtr' model.relrates = [1.8, 4.0, 1.5, 1.2, 5.0, 
1.0] model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 1 model.pinvar_model = True model.pinvar = 0.3 lnL = like() #ref_lnL = lnL print 'lnL = %.5f (%.5f worse than the reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** GTR+I (using GTRModel) *****]') paup_commands.append('lset nst=6 basefreq=(0.1 0.2 0.3) rmatrix=(1.8 4.0 1.5 1.2 5.0) rates=equal pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas GTR+I lnL = %.5f]' % lnL) # Compute likelihood using the GTR+G model print '\nGTR+G model' model.type = 'gtr' model.relrates = [1.8, 4.0, 1.5, 1.2, 5.0, 1.0] model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = False lnL = like() print 'lnL = %.5f (%.5f worse than the reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** GTR+G (using GTRModel) *****]') paup_commands.append('lset nst=6 basefreq=(0.1 0.2 0.3) rmatrix=(1.8 4.0 1.5 1.2 5.0) rates=gamma shape=1.2 pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas GTR+G lnL = %.5f]' % lnL) # ************* temporary below here ************** #print '\nGTR+psr model' #phycas.model = Likelihood.GTRModel() #phycas.model.setRelRates([1.8, 4.0, 1.5, 1.2, 5.0, 1.0]) #phycas.model.setNucleotideFreqs(0.1, 0.2, 0.3, 0.4) #phycas.model.setNGammaRates(4) #phycas.model.setShape(1.2) #phycas.model.setNotPinvarModel() #phycas.likelihood.usePatternSpecificRates() #phycas.likelihood.replaceModel(phycas.model) #phycas.likelihood.prepareForLikelihood(phycas.tree) #lnL = phycas.likelihood.calcLnL(phycas.tree) #print 'lnL = %.5f (%.5f worse than the reference lnL)' % (lnL, ref_lnL - lnL) #paup_commands.append('\n[!\n***** GTR+psr (actually, using GTR+G since no way to do psr in PAUP*) *****]') #paup_commands.append('lset nst=6 basefreq=(0.1 0.2 0.3) rmatrix=(1.8 4.0 1.5 1.2 5.0) rates=gamma shape=1.2 pinvar=0.0;') #paup_commands.append('lscores 1 / userbrlen;') 
#paup_commands.append('[!Phycas GTR+G lnL = %.5f]' % lnL) #phycas.likelihood.doNotUsePatternSpecificRates() # ************* temporary above here ************** # Compute likelihood using the GTR model print '\nGTR model' model.type = 'gtr' model.relrates = [1.8, 4.0, 1.5, 1.2, 5.0, 1.0] model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 1 model.pinvar_model = False model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** GTR (using GTRModel) *****]') paup_commands.append('lset nst=6 basefreq=(0.1 0.2 0.3) rmatrix=(1.8 4.0 1.5 1.2 5.0) rates=equal pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas GTR lnL = %.5f]' % lnL) print print '************* Testing HKYModel *******************' # Compute likelihood using the HKY+G+I model print '\nHKY+G+I model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() ref_lnL = lnL print 'lnL = %.5f (%.5f worse than the reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** HKY+G+I (using HKYModel) *****]') paup_commands.append('lset nst=2 variant=hky basefreq=(0.1 0.2 0.3) tratio=1.8333333 rates=gamma shape=1.2 pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas HKY+G+I lnL = %.5f]' % lnL) # Compute likelihood using the HKY+I model print '\nHKY+I model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() ref_lnL = lnL print 'lnL = %.5f (%.5f worse than the reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** HKY+I (using HKYModel) *****]') paup_commands.append('lset nst=2 variant=hky basefreq=(0.1 0.2 0.3) tratio=1.8333333 rates=equal pinvar=0.3;') 
paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas HKY+I lnL = %.5f]' % lnL) # Compute likelihood using the HKY+G model print '\nHKY+G model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than the reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** HKY+G (using HKYModel) *****]') paup_commands.append('lset nst=2 variant=hky basefreq=(0.1 0.2 0.3) tratio=1.8333333 rates=gamma shape=1.2 pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas HKY+G lnL = %.5f]' % lnL) # Compute likelihood using the HKY model print '\nHKY model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('\n[!\n***** HKY (using HKYModel) *****]') paup_commands.append('lset nst=2 variant=hky basefreq=(0.1 0.2 0.3) tratio=1.8333333 rates=equal pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas HKY lnL = %.5f]' % lnL) # Compute likelihood using the F81+G+I model print '\nF81+G+I model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** F81+G+I (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=(0.1 0.2 0.3) rates=gamma shape=1.2 pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas F81+G+I lnL = %.5f]' % lnL) # Compute likelihood using the F81+I model print '\nF81+I model' model.type = 'hky' model.kappa = 
1.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** F81+I (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=(0.1 0.2 0.3) rates=equal pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas F81+I lnL = %.5f]' % lnL) # Compute likelihood using the F81+G model print '\nF81+G model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** F81+G (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=(0.1 0.2 0.3) rates=gamma shape=1.2 pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas F81+G lnL = %.5f]' % lnL) # Compute likelihood using the F81 model print '\nF81 model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.1, 0.2, 0.3, 0.4] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** F81 (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=(0.1 0.2 0.3) rates=equal pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas F81 lnL = %.5f]' % lnL) # Compute likelihood using the K80+G+I model print '\nK80+G+I model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** K80+G+I (using 
HKYModel) *****]') paup_commands.append('lset nst=2 basefreq=equal tratio=2.0 rates=gamma shape=1.2 pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas K80+G+I lnL = %.5f]' % lnL) # Compute likelihood using the K80+I model print '\nK80+I model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** K80+I (using HKYModel) *****]') paup_commands.append('lset nst=2 basefreq=equal tratio=2.0 rates=equal pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas K80+I lnL = %.5f]' % lnL) # Compute likelihood using the K80+G model print '\nK80+G model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** K80+G (using HKYModel) *****]') paup_commands.append('lset nst=2 basefreq=equal tratio=2.0 rates=gamma shape=1.2 pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas K80+G lnL = %.5f]' % lnL) # Compute likelihood using the K80 model print '\nK80 model' model.type = 'hky' model.kappa = 4.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** K80 (using HKYModel) *****]') paup_commands.append('lset nst=2 basefreq=equal tratio=2.0 rates=equal pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas K80 lnL = %.5f]' % lnL) # Compute likelihood using the JC+G+I model 
print '\nJC+G+I model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC+G+I (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=gamma shape=1.2 pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC+G+I lnL = %.5f]' % lnL) # Compute likelihood using the JC+I model print '\nJC+I model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC+I (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=equal pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC+I lnL = %.5f]' % lnL) # Compute likelihood using the JC+G model print '\nJC+G model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC+G (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=gamma shape=1.2 pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC+G lnL = %.5f]' % lnL) # Compute likelihood using the JC model print '\nJC model' model.type = 'hky' model.kappa = 1.0 model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) 
paup_commands.append('[!\n***** JC (using HKYModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=equal pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC lnL = %.5f]' % lnL) print print '************** Testing JCModel *******************' # Compute likelihood using the JC+G+I model print '\nJC+G+I model' model.type = 'jc' #model.kappa = 1.0 #model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC+G+I (using JCModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=gamma shape=1.2 pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC+G+I lnL = %.5f]' % lnL) # Compute likelihood using the JC+I model print '\nJC+I model' model.type = 'jc' #model.kappa = 1.0 #model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = True model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC+I (using JCModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=equal pinvar=0.3;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC+I lnL = %.5f]' % lnL) # Compute likelihood using the JC+G model print '\nJC+G model' model.type = 'jc' #model.kappa = 1.0 #model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 4 model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC+G (using JCModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=gamma shape=1.2 pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC+G lnL 
= %.5f]' % lnL) # Compute likelihood using the JC model print '\nJC model' model.type = 'jc' #model.kappa = 1.0 #model.state_freqs = [0.25, 0.25, 0.25, 0.25] model.num_rates = 1 #model.gamma_shape = 1.2 model.pinvar_model = False #model.pinvar = 0.3 lnL = like() print 'lnL = %.5f (%.5f worse than reference lnL)' % (lnL, ref_lnL - lnL) paup_commands.append('[!\n***** JC (using JCModel) *****]') paup_commands.append('lset nst=1 basefreq=equal rates=equal pinvar=0.0;') paup_commands.append('lscores 1 / userbrlen;') paup_commands.append('[!Phycas JC lnL = %.5f]' % lnL) # Add a PAUP block to the file named fn to make it easy to check the results f = file(fn, 'a') f.write('\n') f.write('\nbegin paup;') f.write('\n set criterion=likelihood storebrlen;') f.write('\nend;') f.write('\n') f.write('\nbegin trees;') f.write('\n translate') for i,nm in enumerate(blob.taxon_labels): if nm.count(' ') > 0: f.write("\n %d '%s'" % (i+1, nm)) else: f.write("\n %d %s" % (i+1, nm)) if i < len(blob.taxon_labels) - 1: f.write(',') else: f.write(';') f.write('\n utree t = %s' % model_tree_str) f.write('\nend;') f.write('\n') f.write('\nbegin paup;') f.write('\nlog file=paup.log start replace;\n') f.write('\n'.join(paup_commands)) f.write('\n\nlog stop;') f.write('\nend;') f.write('\n') f.close() def simulateData(fn): # NOT YET READY FOR PARTITIONED VERSION # Define the names of the taxa to use when the simulated data set is saved to a file phycas.taxon_names = ['P. parksii', 'P. articulata', 'P._gracilis', 'P. 
macrophylla'] # Create a simulation model #phycas.model = Likelihood.HKYModel() phycas.model = Likelihood.GTRModel() #phycas.model.setKappa(4.0) phycas.model.setRelRates([1.8, 4.0, 1.5, 1.2, 5.0, 1.0]) phycas.model.setNGammaRates(4) phycas.model.setShape(1.2) phycas.model.setNucleotideFreqs(0.1, 0.2, 0.3, 0.4) phycas.model.setPinvarModel() phycas.model.setPinvar(0.3) # Create a likelihood object to orchestrate both simulations and likelihood calculations phycas.likelihood = Likelihood.TreeLikelihood(phycas.model) # Prepare the tree for simulation (i.e. equip nodes with transition matrices) phycas.likelihood.prepareForSimulation(phycas.tree) # Simulation settings phycas.r.setSeed(13579) phycas.sim_nreps = 1 # ignored at present phycas.sim_outfile = 'simout.nex' #num_sites = 5000 num_sites = 100000 # Create a SimData object to hold the simulated data sim_data = Likelihood.SimData() # Simulate num_sites of data and store in sim_data # Use the function simulateFirst (rather than just simulate) in order # to force calculation of transition probabilities phycas.likelihood.simulateFirst(sim_data, phycas.tree, phycas.r, num_sites) # Save simulated data to a NEXUS file using taxon_names, datatype=dna and # using the symbols a, c, g, and t for state codes 0, 1, 2, and 3, respectively sim_data.saveToNexusFile('simulated.nex', phycas.taxon_names, 'dna', ('a','c','g','t')) # Copy the simulated data from sim_data to phycas.likelihood so that # we can compute the likelihood for the simulated data phycas.likelihood.copyDataFromSimData(sim_data) def createCommandFile(fn, dataf): outf = file(fn, 'w') outf.write('#nexus\n\n') outf.write('begin paup;\n') outf.write(" set nowarnroot;\n") outf.write(" exe '%s';\n" % dataf) outf.write('end;\n') outf.close() if __name__ == '__main__': print print '+------------------------------------------------+' print '| Analyzing nyldna4.nex |' print '+------------------------------------------------+' dataf = getPhycasTestData('nyldna4.nex') blob = 
readFile(dataf) nchar = blob.characters.getMatrix().getNChar() partition.validate(nchar) # Create a model tree model_tree_str = '(1:0.1,2:0.15,(3:0.025,4:0.15):0.05);' model_tree = TreeCollection(newick=model_tree_str) like.data_source = blob.characters like.tree_source = model_tree like.starting_edgelen_dist = None like.store_site_likes = False createCommandFile('check.nex', dataf) tryAllModels('check.nex') #doingSimTest = False #if doingSimTest: # print # print '+------------------------------------------------+' # print '| Analyzing Simulated Data |' # print '+------------------------------------------------+' # # simulateData('simulated.nex') # tryAllModels('simulated.nex') #else: # d = os.path.dirname(__file__) # o = open(os.path.join(d, 'reference_output','simulated.nex'), "rU") # t = open("simulated.nex", "w") # t.write(o.read()) # t.close() # o.close()
#!/usr/bin/env python
'''
estimate % tumour from allele frequencies
'''

import argparse
import logging
import sys

import numpy as np
import scipy.optimize  # BUG FIX: basinhopping lives here; it was never imported
import scipy.stats

# NOTE(review): cyvcf2 and matplotlib are now imported lazily (inside
# read_vcf / the plotting branch) so the statistical functions in this module
# are usable without those optional dependencies installed.


def estimate_percentile(values):
    ''' ultra simple approach of just taking the 99th percentile
    of the allele frequencies above a 0.1 noise floor '''
    return {'tumour': np.percentile(np.array([v for v in values if v > 0.1]), 99)}


def generate_estimate(params):
    '''Return per-bin probabilities (99 bins over [0, 1]) for the mixture
    gaussian-signal + gamma-noise.

    params: [gaussian_mu, gaussian_sd, gamma_loc, gamma_alpha, signal_proportion]
    '''
    l = np.linspace(0, 1, 100)
    cdf_signal = scipy.stats.norm.cdf(l, loc=params[0], scale=params[1]) * params[4]
    cdf_noise = scipy.stats.gamma.cdf(l, loc=params[2], a=params[3]) * (1 - params[4])
    cdf = cdf_signal + cdf_noise
    # difference of adjacent cdf values -> probability mass per bin
    predictions = [cdf[i + 1] - cdf[i] for i in range(0, 99)]
    return predictions


def measure_error(values):
    '''Return an error function params -> RMSE between the observed histogram
    of *values* and the mixture predicted by *params* (for scipy.optimize).'''
    # The observed histogram does not depend on the parameters being tried,
    # so build it once here instead of on every optimizer evaluation.
    targets, xs = np.histogram(values, bins=99, range=(0, 1), density=True)
    targets /= sum(targets)
    # floor at 0.001 to avoid degenerate zero-mass bins
    targets = np.array([max(target, 0.001) for target in targets])

    def error_fn(params):
        predictions = generate_estimate(params)
        # rmse
        error = np.sqrt(np.mean((predictions - targets) ** 2))
        # TODO log might be better
        #error = np.sqrt(np.mean((np.log(predictions)-np.log(targets))**2))
        logging.debug('trying %s: error %.2f', params, error)
        return error

    return error_fn


def estimate_model(values):
    ''' model the observed allele frequencies as a sum of distributions
    (gaussian signal + gamma noise) and derive tumour purity from the
    fitted gaussian mean '''
    # params: [gaussian_mu, gaussian_sd, gamma_loc, gamma_alpha, signal_proportion]
    params = [0.4, 0.1, 0.1, 1.0, 0.5]
    bounds = [(0.0, np.inf), (0.0, np.inf), (0.01, 0.6), (0.001, np.inf), (0.0, 1.0)]
    minimizer_kwargs = dict(method="L-BFGS-B", bounds=bounds)
    result = scipy.optimize.basinhopping(measure_error(values), params,
                                         minimizer_kwargs=minimizer_kwargs)
    logging.info('%s has error %.2f', result.x, measure_error(values)(result.x))
    # heterozygous variants sit at half the purity, hence the factor of 2
    return {'tumour': result.x[2] * 2, 'distribution': generate_estimate(result.x)}


ESTIMATE = {
    'percentile': estimate_percentile,
    'model': estimate_model
}


def read_vcf(fn, pass_only, dp_threshold, info_af, sample_id=0):
    '''Read allele frequencies from a VCF.

    sample_id: index of the sample whose AD FORMAT field is used when
    info_af is False.  BUG FIX: this name was previously used without ever
    being defined, causing a NameError on the AD code path.
    '''
    import cyvcf2  # imported lazily: only needed when actually reading a VCF

    logging.info('reading vcf from stdin...')
    skipped_dp = skipped_pass = 0
    vcf_in = cyvcf2.VCF(fn)
    values = []
    for variant_count, variant in enumerate(vcf_in):
        # calculate vaf
        if len(variant.ALT) > 1:
            # logging.warn is deprecated; use logging.warning
            logging.warning('variant %i is multi-allelic', variant_count + 1)

        is_pass = variant.FILTER is None or variant.FILTER == 'alleleBias'
        if pass_only and not is_pass:
            skipped_pass += 1
            continue
        if variant.INFO["DP"] < dp_threshold:  # somatic + germline
            skipped_dp += 1
            continue

        if info_af:
            value = variant.INFO["AF"]
        else:
            ad = variant.format("AD")[sample_id]
            ref = ad[0]
            alt = ad[1]
            if ref + alt > 0:
                value = alt / (ref + alt)
            else:
                value = 0
        values.append(value)

    logging.info('skipped %i variants below DP threshold, %i non-pass', skipped_dp, skipped_pass)
    return values


def estimate(method, values, pass_only, dp_threshold, info_af):
    '''Dispatch to the chosen estimation strategy ('percentile' or 'model').'''
    est = ESTIMATE[method](values)
    logging.info('done')
    return est


def main(pass_only, dp_threshold, info_af, plot, trials):
    values = read_vcf('-', pass_only, dp_threshold, info_af)

    result = estimate('percentile', values, pass_only, dp_threshold, info_af)
    sys.stdout.write('Estimated tumour percentage (percentile):\t{:.2f}\n'.format(result['tumour']))

    results = []
    for _ in range(trials):
        result = estimate('model', values, pass_only, dp_threshold, info_af)
        results.append(result)
    sys.stdout.write('Estimated tumour percentage (model):\t{}\n'.format(
        ' '.join(['{:.2f}'.format(result['tumour']) for result in results])))

    if plot:
        # matplotlib is only needed (and therefore only imported) when plotting
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt

        # draw histogram of normalised values and fitted distribution
        targets, xs = np.histogram(values, bins=99, range=(0, 1), density=True)
        targets /= sum(targets)
        fig = plt.figure(figsize=(8, 6))
        ax = fig.add_subplot(111)
        l = np.linspace(0, 1, 99)
        ax.plot(l, targets, label='Observed')
        if trials > 0:
            ax.plot(l, results[0]['distribution'], label='Predicted')
        plt.tight_layout()
        plt.savefig(plot)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='estimate tumour %')
    # BUG FIX: type=int (a command-line value arrived as str and broke the
    # DP comparison); help text corrected (was copy-pasted from --info_af)
    parser.add_argument('--dp_threshold', required=False, type=int, default=50,
                        help='minimum DP (read depth) for a variant to be included')
    parser.add_argument('--pass_only', action='store_true', help='only pass variants')
    parser.add_argument('--info_af', action='store_true', help='use af in info field')
    parser.add_argument('--trials', required=False, type=int, default=1,
                        help='how many runs of model')
    parser.add_argument('--plot', required=False,
                        help='filename to write the histogram plot to')
    parser.add_argument('--verbose', action='store_true', help='more logging')
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
    main(args.pass_only, args.dp_threshold, args.info_af, args.plot, args.trials)
#!/usr/bin/python # -*- coding: utf-8 -*- ########################################################################## # # AutoTST - Automated Transition State Theory # # Copyright (c) 2015-2020 <NAME> (<EMAIL>) # and the AutoTST Team # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
#
##########################################################################

import unittest

from .geometry import Bond, Angle, Torsion, CisTrans, ChiralCenter


class TestBond(unittest.TestCase):
    """Type-level sanity checks for the attributes of geometry.Bond."""

    def setUp(self):
        params = {
            'index': 1,
            'atom_indices': [1, 2],
            'length': 1.8,
            'reaction_center': True,
            'mask': [True, True, True],
        }
        self.bond = Bond(**params)

    def test_index(self):
        self.assertIsInstance(self.bond.index, int)

    def test_atom_indices(self):
        self.assertIsInstance(self.bond.atom_indices, list)
        self.assertEqual(len(self.bond.atom_indices), 2)

    def test_length(self):
        self.assertIsInstance(self.bond.length, float)

    def test_reaction_center(self):
        self.assertIsInstance(self.bond.reaction_center, bool)

    def test_mask(self):
        self.assertIsInstance(self.bond.mask, list)


class TestAngle(unittest.TestCase):
    """Type-level sanity checks for the attributes of geometry.Angle."""

    def setUp(self):
        params = {
            'index': 1,
            'atom_indices': [1, 2],
            'degree': 90.0,
            'reaction_center': True,
            'mask': [True, True, True],
        }
        self.angle = Angle(**params)

    def test_index(self):
        self.assertIsInstance(self.angle.index, int)

    def test_atom_indices(self):
        self.assertIsInstance(self.angle.atom_indices, list)
        self.assertEqual(len(self.angle.atom_indices), 2)

    def test_length(self):
        self.assertIsInstance(self.angle.degree, float)

    def test_reaction_center(self):
        self.assertIsInstance(self.angle.reaction_center, bool)

    def test_mask(self):
        self.assertIsInstance(self.angle.mask, list)


class TestTorsion(unittest.TestCase):
    """Type-level sanity checks for the attributes of geometry.Torsion."""

    def setUp(self):
        params = {
            'index': 1,
            'atom_indices': [1, 2, 3],
            'dihedral': 60.0,
            'reaction_center': True,
            'mask': [True, True, True],
        }
        self.torsion = Torsion(**params)

    def test_index(self):
        self.assertIsInstance(self.torsion.index, int)

    def test_atom_indices(self):
        self.assertIsInstance(self.torsion.atom_indices, list)
        self.assertEqual(len(self.torsion.atom_indices), 3)

    def test_length(self):
        self.assertIsInstance(self.torsion.dihedral, float)

    def test_reaction_center(self):
        self.assertIsInstance(self.torsion.reaction_center, bool)

    def test_mask(self):
        self.assertIsInstance(self.torsion.mask, list)


class TestCisTrans(unittest.TestCase):
    """Type-level sanity checks for the attributes of geometry.CisTrans."""

    def setUp(self):
        params = {
            'index': 1,
            'atom_indices': [1, 2, 3],
            'dihedral': 60.0,
            'reaction_center': True,
            'stero': 'str',
            'mask': [True, True, True],
        }
        self.cistrans = CisTrans(**params)

    def test_index(self):
        self.assertIsInstance(self.cistrans.index, int)

    def test_atom_indices(self):
        self.assertIsInstance(self.cistrans.atom_indices, list)
        self.assertEqual(len(self.cistrans.atom_indices), 3)

    def test_length(self):
        self.assertIsInstance(self.cistrans.dihedral, float)

    def test_reaction_center(self):
        self.assertIsInstance(self.cistrans.reaction_center, bool)

    def test_mask(self):
        self.assertIsInstance(self.cistrans.mask, list)

    def test_stereo(self):
        # note: the attribute is spelled "stero" in the geometry module
        self.assertIsInstance(self.cistrans.stero, str)


class TestChiralCenter(unittest.TestCase):
    """Type-level sanity checks for the attributes of geometry.ChiralCenter."""

    def setUp(self):
        self.chiralcenter = ChiralCenter(index=1, atom_index=1, chirality='chirality')

    def test_index(self):
        self.assertIsInstance(self.chiralcenter.index, int)

    def test_atom_index(self):
        self.assertIsInstance(self.chiralcenter.atom_index, int)

    def test_chirality(self):
        self.assertIsInstance(self.chiralcenter.chirality, str)


if __name__ == "__main__":
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
""" PRONTO Captura frame da tela, processa imagem, salva imagem em disco e inicia thread para escutar teclado """ import numpy as np import cv2 import mss.tools import time from threading import Thread import os import shutil from captura_teclado import CapturaTeclado class CapturaTela: """Inicializa thread para escutar teclado, captura tela e salva em disco""" def __init__(self): """ :param capturar_mais: Se deseja capturar a tela inteira do jogo ou somente a parte central (sem placar e chao) """ self.contador = 0 self.path = '/' # Cria thread para escutar teclado self.cap_teclado = CapturaTeclado() self.threads = [Thread(target=self.cap_teclado.escuta_teclado)] for thread in self.threads: thread.start() # Captura tela do jogo rodando na maquina # Tela parcial do jogo self.monitor = {"top": 85, "left": 100, "width": 590, "height": 90} # Toda tela do jogo # self.monitor = {"top": 45, "left": 100, "width": 600, "height": 150} def is_salvar_model(self): """ Verifica se foi solicitado para salvar model :return: Boolean se deve salvar model """ if self.cap_teclado.salvar_model: self.cap_teclado.salvar_model = False return True return False def capturar_frame_tela(self, mostra_tela=False, colorido=False): """ Pegar frame da tela, processa e retorna imagem :return: Frame processado e tecla manual (humano) """ # Pega tecla manual, caso tenha sido pressionado (padrao = 0) tecla = self.cap_teclado.tecla # Limpa tecla caso ela tenha sido liberado (isso eh usado para nao correr risco de nao coletar alguma tecla) self.cap_teclado.limpar_tecla() # Inicia sistema para capturar frame with mss.mss() as sct: # Captura frame img = np.array(sct.grab(self.monitor)) if not colorido: img = self._processar_imagem(img) if mostra_tela: cv2.imshow("OpenCV/Numpy normal", img) return img, tecla def preparar_salvar_frame(self, rodada, path='Frame'): """ Cria pasta para armazenar frames, deve ser chamado antes de "salvar_frame" :param rodada: Rodada atual, sera usado como nome da pasta para 
armazenar os frames :param path: Caminho a pasta principal (as novas pastas sera criadas dentro dela) """ self.path = path + '/' + str(rodada) + '/' # Se pasta existe, exclui if os.path.isdir(self.path): shutil.rmtree(self.path, ignore_errors=True) # Demora para excluir, dependendo do tamanho da pasta, entao espera, caso contrario ira dar erro no mkdir time.sleep(1) os.mkdir(self.path) self.contador = 0 def salvar_frame(self, frame, tecla, game_over): """ Salva frame, nome do arquivo possui contador, tecla informada e se foi frame de game over. Método "preparar_salvar_frame" deve ser chamado primeiro :param frame: Imagem a ser salva :param tecla: Tecla pressionado no frame :param game_over: Se o frame representa game over """ cv2.imwrite('{}frame{}-{}-{}.png'.format(self.path, self.contador, tecla, game_over), frame) self.contador += 1 def _processar_imagem(self, image): """ Processa imagem para ficar com 1 channel (grayscale) e destaca as bordas :return: Imagem processada """ processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) processed_img = cv2.Canny(processed_img, threshold1=250, threshold2=255) return processed_img def encerrar(self): cv2.destroyAllWindows()
import json
import requests
from bs4 import BeautifulSoup

#Copyright @Huseyin <NAME>, @Deniz <NAME>
#edit term name to accsess
#Put '{}' in json file before running!

# Scrapes a locally saved course-catalog page (<term>.html) plus each course's
# detail page (fetched over HTTP) and merges the result into data_<term>.json.
#
# Record layout, per course code:
#   data[term][code] = [faculty, name, total_capacity, total_actual, credit,
#                       {primary_instructor: [capacity, actual]},
#                       co_requisites, pre_requisites]
#
# NOTE(review): all parsing below is positional (td/span indexes, sibling and
# previous_element chains) and therefore tied to the exact HTML layout of the
# source pages; verify against a saved page before reusing for another term.
term = "201601"
filename = term + ".html"
with open(filename, "r", encoding="utf8") as html_file:
    soup = BeautifulSoup(html_file, 'lxml')
    html_file.close()
data = {}
json_name = 'data_' + term + '.json'
# Load previously collected terms so this run only adds/overwrites `term`
with open(json_name, "r", encoding='utf-8') as json_file:
    data = json.load(json_file)
# data = {"2019-2020 Fall":{"AL 102":["SL","Academic Literaties","Total","Actual", "3.0 SU",{"Ekrem Sabit Şimşek": [123,124], "Ali Nihat Eken": [163,156]},"CoRequisites","PreRequisities" ]}}
data[term] = {}
#lesson_details = soup.find_all("td", class_="dddefault")
lesson_name_details = soup.find_all("th", class_="ddlabel")
#table_details = soup.find_all("table", class_="datadisplaytable")
length = len(lesson_name_details)
for i in range(length):
    lesson = lesson_name_details[i]
    current_details = lesson.parent.findNext('td')
    current_table = lesson.parent.findNext("table")
    #current_table = table_details[i + 1]
    # Header text looks like "<name> - ... - <code> - <section>"; a trailing
    # "X" marks a cancelled section and is skipped.
    course_details = (lesson.a.text).split(" - ")
    if course_details[-1] == "X":
        continue
    #course code
    course_code = course_details[-2]
    # Skip recitation/lab/discussion/practice sections (codes ending R/L/D/P);
    # only the main lecture rows are aggregated.
    if course_code[-1] != "R" and course_code[-1] != "L" and course_code[-1] != "D" and course_code[-1] != "P":
        if course_code not in data[term]:
            data[term][course_code] = ["", "", 0, 0, 0, {}, [], []]
        #course name
        course_name = course_details[0]
        data[term][course_code][1] = course_name
        #course address
        course_address = lesson.a["href"]
        details = current_details
        fieldlabeltext = details.find_all("span", class_="fieldlabeltext")
        #course faculty
        course_faculty = (fieldlabeltext[3].next_sibling)[1:-1].split(" ")[-1]
        data[term][course_code][0] = course_faculty
        #course credit
        # NOTE(review): extremely brittle — walks 10 previous_element steps
        # back from the meeting-times table to reach the "N.M Credits" text
        # node, then strips the surrounding whitespace and takes the number.
        course_credit = str(((current_table.previous_element.previous_element.previous_element.previous_element.previous_element.previous_element.previous_element.previous_element.previous_element.previous_element)[1:-1].strip(" ").split(" "))[0])
        if course_credit != "":
            course_credit = float(course_credit)
            data[term][course_code][4] = course_credit
        else:
            data[term][course_code][4] = "ERROR_EMPTYCREDIT"
        #course instructors
        # Text immediately before the <abbr title="Primary"> marker is the
        # primary instructor's name (trailing separator char dropped).
        primary_instructor_raw = (current_table.find("abbr", {"title": "Primary"})).previous_sibling[:-1]
        instructor_dict = data[term][course_code][5]
        primary_instructor = ""
        names_of_instructor = primary_instructor_raw.split(" ")
        # Collapse runs of whitespace in the raw name into single spaces
        for x in range(len(names_of_instructor)):
            name = names_of_instructor[x]
            if name == "" or name == " ":
                continue
            else:
                primary_instructor = primary_instructor + name + " "
        primary_instructor = primary_instructor[:-1]
        if primary_instructor not in instructor_dict:
            data[term][course_code][5][primary_instructor] = [0, 0]
        # Fetch the per-section detail page for capacity / enrollment numbers
        r = requests.get(course_address)
        source = BeautifulSoup(r.content, "lxml")
        match = source.find_all("td", class_="dddefault")
        #course capacity
        course_capacity = int(match[1].text)
        data[term][course_code][2] = data[term][course_code][2] + course_capacity
        data[term][course_code][5][primary_instructor][0] = data[term][course_code][5][primary_instructor][0] + course_capacity
        #course actual
        course_actual = int(match[2].text)
        data[term][course_code][3] = data[term][course_code][3] + course_actual
        data[term][course_code][5][primary_instructor][1] = data[term][course_code][5][primary_instructor][1] + course_actual
        co_reqs = []
        pre_reqs = []
        a_s = source.find_all('a', href=True)
        # Links 4..len-3 on the detail page list requisites: codes ending in a
        # digit are prerequisites, codes ending in R/L/D are corequisites.
        for a in range(4, len(a_s) - 2):
            a_text = a_s[a].text
            if len(a_text) > 0 and '0' <= a_text[-1] <= '9':
                pre_reqs.append(a_text)
            elif len(a_text) > 0 and (a_text[-1] == 'R' or a_text[-1] == 'L' or a_text[-1] == 'D'):
                co_reqs.append(a_text)
        #course corequisities and prerequisities
        data[term][course_code][6] = co_reqs
        data[term][course_code][7] = pre_reqs
        # Progress indicator
        print(str(i + 1) + "/" + str(length))
    else:
        continue
# Write the merged data back (ensure_ascii=False keeps Turkish characters readable)
with open(json_name, 'w', encoding='utf-8') as json_file:
    json.dump(data, json_file, ensure_ascii=False)
    json_file.close()
<reponame>IPSW1/bytecode #!/usr/bin/env python3 import io import sys import unittest import contextlib from bytecode import ( Label, Compare, SetLineno, Instr, Bytecode, ConcreteBytecode, BasicBlock, ControlFlowGraph, ) from bytecode.tests import disassemble as _disassemble, TestCase, WORDCODE def disassemble( source, *, filename="<string>", function=False, remove_last_return_none=False ): code = _disassemble(source, filename=filename, function=function) blocks = ControlFlowGraph.from_bytecode(code) if remove_last_return_none: # drop LOAD_CONST+RETURN_VALUE to only keep 2 instructions, # to make unit tests shorter block = blocks[-1] test = ( block[-2].name == "LOAD_CONST" and block[-2].arg is None and block[-1].name == "RETURN_VALUE" ) if not test: raise ValueError( "unable to find implicit RETURN_VALUE <None>: %s" % block[-2:] ) del block[-2:] return blocks class BlockTests(unittest.TestCase): def test_iter_invalid_types(self): # Labels are not allowed in basic blocks block = BasicBlock() block.append(Label()) with self.assertRaises(ValueError): list(block) with self.assertRaises(ValueError): block.legalize(1) # Only one jump allowed and only at the end block = BasicBlock() block2 = BasicBlock() block.extend([Instr("JUMP_ABSOLUTE", block2), Instr("NOP")]) with self.assertRaises(ValueError): list(block) with self.assertRaises(ValueError): block.legalize(1) # jump target must be a BasicBlock block = BasicBlock() label = Label() block.extend([Instr("JUMP_ABSOLUTE", label)]) with self.assertRaises(ValueError): list(block) with self.assertRaises(ValueError): block.legalize(1) def test_slice(self): block = BasicBlock([Instr("NOP")]) next_block = BasicBlock() block.next_block = next_block self.assertEqual(block, block[:]) self.assertIs(next_block, block[:].next_block) def test_copy(self): block = BasicBlock([Instr("NOP")]) next_block = BasicBlock() block.next_block = next_block self.assertEqual(block, block.copy()) self.assertIs(next_block, block.copy().next_block) class 
BytecodeBlocksTests(TestCase): maxDiff = 80 * 100 def test_constructor(self): code = ControlFlowGraph() self.assertEqual(code.name, "<module>") self.assertEqual(code.filename, "<string>") self.assertEqual(code.flags, 0) self.assertBlocksEqual(code, []) def test_attr(self): source = """ first_line = 1 def func(arg1, arg2, *, arg3): x = 1 y = 2 return arg1 """ code = disassemble(source, filename="hello.py", function=True) self.assertEqual(code.argcount, 2) self.assertEqual(code.filename, "hello.py") self.assertEqual(code.first_lineno, 3) if sys.version_info > (3, 8): self.assertEqual(code.posonlyargcount, 0) self.assertEqual(code.kwonlyargcount, 1) self.assertEqual(code.name, "func") self.assertEqual(code.cellvars, []) code.name = "name" code.filename = "filename" code.flags = 123 self.assertEqual(code.name, "name") self.assertEqual(code.filename, "filename") self.assertEqual(code.flags, 123) # FIXME: test non-empty cellvars def test_add_del_block(self): code = ControlFlowGraph() code[0].append(Instr("LOAD_CONST", 0)) block = code.add_block() self.assertEqual(len(code), 2) self.assertIs(block, code[1]) code[1].append(Instr("LOAD_CONST", 2)) self.assertBlocksEqual(code, [Instr("LOAD_CONST", 0)], [Instr("LOAD_CONST", 2)]) del code[0] self.assertBlocksEqual(code, [Instr("LOAD_CONST", 2)]) del code[0] self.assertEqual(len(code), 0) def test_setlineno(self): # x = 7 # y = 8 # z = 9 code = Bytecode() code.first_lineno = 3 code.extend( [ Instr("LOAD_CONST", 7), Instr("STORE_NAME", "x"), SetLineno(4), Instr("LOAD_CONST", 8), Instr("STORE_NAME", "y"), SetLineno(5), Instr("LOAD_CONST", 9), Instr("STORE_NAME", "z"), ] ) blocks = ControlFlowGraph.from_bytecode(code) self.assertBlocksEqual( blocks, [ Instr("LOAD_CONST", 7), Instr("STORE_NAME", "x"), SetLineno(4), Instr("LOAD_CONST", 8), Instr("STORE_NAME", "y"), SetLineno(5), Instr("LOAD_CONST", 9), Instr("STORE_NAME", "z"), ], ) def test_legalize(self): code = Bytecode() code.first_lineno = 3 code.extend( [ Instr("LOAD_CONST", 
7), Instr("STORE_NAME", "x"), Instr("LOAD_CONST", 8, lineno=4), Instr("STORE_NAME", "y"), SetLineno(5), Instr("LOAD_CONST", 9, lineno=6), Instr("STORE_NAME", "z"), ] ) blocks = ControlFlowGraph.from_bytecode(code) blocks.legalize() self.assertBlocksEqual( blocks, [ Instr("LOAD_CONST", 7, lineno=3), Instr("STORE_NAME", "x", lineno=3), Instr("LOAD_CONST", 8, lineno=4), Instr("STORE_NAME", "y", lineno=4), Instr("LOAD_CONST", 9, lineno=5), Instr("STORE_NAME", "z", lineno=5), ], ) def test_repr(self): r = repr(ControlFlowGraph()) self.assertIn("ControlFlowGraph", r) self.assertIn("1", r) def test_to_bytecode(self): # if test: # x = 2 # x = 5 blocks = ControlFlowGraph() blocks.add_block() blocks.add_block() blocks[0].extend( [ Instr("LOAD_NAME", "test", lineno=1), Instr("POP_JUMP_IF_FALSE", blocks[2], lineno=1), ] ) blocks[1].extend( [ Instr("LOAD_CONST", 5, lineno=2), Instr("STORE_NAME", "x", lineno=2), Instr("JUMP_FORWARD", blocks[2], lineno=2), ] ) blocks[2].extend( [ Instr("LOAD_CONST", 7, lineno=3), Instr("STORE_NAME", "x", lineno=3), Instr("LOAD_CONST", None, lineno=3), Instr("RETURN_VALUE", lineno=3), ] ) bytecode = blocks.to_bytecode() label = Label() self.assertEqual( bytecode, [ Instr("LOAD_NAME", "test", lineno=1), Instr("POP_JUMP_IF_FALSE", label, lineno=1), Instr("LOAD_CONST", 5, lineno=2), Instr("STORE_NAME", "x", lineno=2), Instr("JUMP_FORWARD", label, lineno=2), label, Instr("LOAD_CONST", 7, lineno=3), Instr("STORE_NAME", "x", lineno=3), Instr("LOAD_CONST", None, lineno=3), Instr("RETURN_VALUE", lineno=3), ], ) # FIXME: test other attributes def test_label_at_the_end(self): label = Label() code = Bytecode( [ Instr("LOAD_NAME", "x"), Instr("UNARY_NOT"), Instr("POP_JUMP_IF_FALSE", label), Instr("LOAD_CONST", 9), Instr("STORE_NAME", "y"), label, ] ) cfg = ControlFlowGraph.from_bytecode(code) self.assertBlocksEqual( cfg, [ Instr("LOAD_NAME", "x"), Instr("UNARY_NOT"), Instr("POP_JUMP_IF_FALSE", cfg[2]), ], [Instr("LOAD_CONST", 9), Instr("STORE_NAME", "y")], 
[], ) def test_from_bytecode(self): bytecode = Bytecode() label = Label() bytecode.extend( [ Instr("LOAD_NAME", "test", lineno=1), Instr("POP_JUMP_IF_FALSE", label, lineno=1), Instr("LOAD_CONST", 5, lineno=2), Instr("STORE_NAME", "x", lineno=2), Instr("JUMP_FORWARD", label, lineno=2), # dead code! Instr("LOAD_CONST", 7, lineno=4), Instr("STORE_NAME", "x", lineno=4), Label(), # unused label label, Label(), # unused label Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4), ] ) blocks = ControlFlowGraph.from_bytecode(bytecode) label2 = blocks[3] self.assertBlocksEqual( blocks, [ Instr("LOAD_NAME", "test", lineno=1), Instr("POP_JUMP_IF_FALSE", label2, lineno=1), ], [ Instr("LOAD_CONST", 5, lineno=2), Instr("STORE_NAME", "x", lineno=2), Instr("JUMP_FORWARD", label2, lineno=2), ], [Instr("LOAD_CONST", 7, lineno=4), Instr("STORE_NAME", "x", lineno=4)], [Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4)], ) # FIXME: test other attributes def test_from_bytecode_loop(self): # for x in (1, 2, 3): # if x == 2: # break # continue if sys.version_info < (3, 8): label_loop_start = Label() label_loop_exit = Label() label_loop_end = Label() code = Bytecode() code.extend( ( Instr("SETUP_LOOP", label_loop_end, lineno=1), Instr("LOAD_CONST", (1, 2, 3), lineno=1), Instr("GET_ITER", lineno=1), label_loop_start, Instr("FOR_ITER", label_loop_exit, lineno=1), Instr("STORE_NAME", "x", lineno=1), Instr("LOAD_NAME", "x", lineno=2), Instr("LOAD_CONST", 2, lineno=2), Instr("COMPARE_OP", Compare.EQ, lineno=2), Instr("POP_JUMP_IF_FALSE", label_loop_start, lineno=2), Instr("BREAK_LOOP", lineno=3), Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4), Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4), label_loop_exit, Instr("POP_BLOCK", lineno=4), label_loop_end, Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4), ) ) blocks = ControlFlowGraph.from_bytecode(code) expected = [ [Instr("SETUP_LOOP", blocks[8], lineno=1)], [Instr("LOAD_CONST", (1, 2, 
3), lineno=1), Instr("GET_ITER", lineno=1)], [Instr("FOR_ITER", blocks[7], lineno=1)], [ Instr("STORE_NAME", "x", lineno=1), Instr("LOAD_NAME", "x", lineno=2), Instr("LOAD_CONST", 2, lineno=2), Instr("COMPARE_OP", Compare.EQ, lineno=2), Instr("POP_JUMP_IF_FALSE", blocks[2], lineno=2), ], [Instr("BREAK_LOOP", lineno=3)], [Instr("JUMP_ABSOLUTE", blocks[2], lineno=4)], [Instr("JUMP_ABSOLUTE", blocks[2], lineno=4)], [Instr("POP_BLOCK", lineno=4)], [Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4)], ] self.assertBlocksEqual(blocks, *expected) else: label_loop_start = Label() label_loop_exit = Label() code = Bytecode() code.extend( ( Instr("LOAD_CONST", (1, 2, 3), lineno=1), Instr("GET_ITER", lineno=1), label_loop_start, Instr("FOR_ITER", label_loop_exit, lineno=1), Instr("STORE_NAME", "x", lineno=1), Instr("LOAD_NAME", "x", lineno=2), Instr("LOAD_CONST", 2, lineno=2), Instr("COMPARE_OP", Compare.EQ, lineno=2), Instr("POP_JUMP_IF_FALSE", label_loop_start, lineno=2), Instr("JUMP_ABSOLUTE", label_loop_exit, lineno=3), Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4), Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4), label_loop_exit, Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4), ) ) blocks = ControlFlowGraph.from_bytecode(code) expected = [ [Instr("LOAD_CONST", (1, 2, 3), lineno=1), Instr("GET_ITER", lineno=1)], [Instr("FOR_ITER", blocks[6], lineno=1)], [ Instr("STORE_NAME", "x", lineno=1), Instr("LOAD_NAME", "x", lineno=2), Instr("LOAD_CONST", 2, lineno=2), Instr("COMPARE_OP", Compare.EQ, lineno=2), Instr("POP_JUMP_IF_FALSE", blocks[1], lineno=2), ], [Instr("JUMP_ABSOLUTE", blocks[6], lineno=3)], [Instr("JUMP_ABSOLUTE", blocks[1], lineno=4)], [Instr("JUMP_ABSOLUTE", blocks[1], lineno=4)], [Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4)], ] self.assertBlocksEqual(blocks, *expected) class BytecodeBlocksFunctionalTests(TestCase): def test_eq(self): # compare codes with multiple blocks and labels, # 
Code.__eq__() renumbers labels to get equal labels source = "x = 1 if test else 2" code1 = disassemble(source) code2 = disassemble(source) self.assertEqual(code1, code2) # Type mismatch self.assertFalse(code1 == 1) # argnames mismatch cfg = ControlFlowGraph() cfg.argnames = 10 self.assertFalse(code1 == cfg) # instr mismatch cfg = ControlFlowGraph() cfg.argnames = code1.argnames self.assertFalse(code1 == cfg) def check_getitem(self, code): # check internal Code block indexes (index by index, index by label) for block_index, block in enumerate(code): self.assertIs(code[block_index], block) self.assertIs(code[block], block) self.assertEqual(code.get_block_index(block), block_index) def test_delitem(self): cfg = ControlFlowGraph() b = cfg.add_block() del cfg[b] self.assertEqual(len(cfg.get_instructions()), 0) def sample_code(self): code = disassemble("x = 1", remove_last_return_none=True) self.assertBlocksEqual( code, [Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)] ) return code def test_split_block(self): code = self.sample_code() code[0].append(Instr("NOP", lineno=1)) label = code.split_block(code[0], 2) self.assertIs(label, code[1]) self.assertBlocksEqual( code, [Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)], [Instr("NOP", lineno=1)], ) self.check_getitem(code) label2 = code.split_block(code[0], 1) self.assertIs(label2, code[1]) self.assertBlocksEqual( code, [Instr("LOAD_CONST", 1, lineno=1)], [Instr("STORE_NAME", "x", lineno=1)], [Instr("NOP", lineno=1)], ) self.check_getitem(code) with self.assertRaises(TypeError): code.split_block(1, 1) with self.assertRaises(ValueError) as e: code.split_block(code[0], -2) self.assertIn("positive", e.exception.args[0]) def test_split_block_end(self): code = self.sample_code() # split at the end of the last block requires to add a new empty block label = code.split_block(code[0], 2) self.assertIs(label, code[1]) self.assertBlocksEqual( code, [Instr("LOAD_CONST", 1, lineno=1), 
Instr("STORE_NAME", "x", lineno=1)], [], ) self.check_getitem(code) # split at the end of a block which is not the end doesn't require to # add a new block label = code.split_block(code[0], 2) self.assertIs(label, code[1]) self.assertBlocksEqual( code, [Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)], [], ) def test_split_block_dont_split(self): code = self.sample_code() # FIXME: is it really useful to support that? block = code.split_block(code[0], 0) self.assertIs(block, code[0]) self.assertBlocksEqual( code, [Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)] ) def test_split_block_error(self): code = self.sample_code() with self.assertRaises(ValueError): # invalid index code.split_block(code[0], 3) def test_to_code(self): # test resolution of jump labels bytecode = ControlFlowGraph() bytecode.first_lineno = 3 bytecode.argcount = 3 if sys.version_info > (3, 8): bytecode.posonlyargcount = 0 bytecode.kwonlyargcount = 2 bytecode.name = "func" bytecode.filename = "hello.py" bytecode.flags = 0x43 bytecode.argnames = ("arg", "arg2", "arg3", "kwonly", "kwonly2") bytecode.docstring = None block0 = bytecode[0] block1 = bytecode.add_block() block2 = bytecode.add_block() block0.extend( [ Instr("LOAD_FAST", "x", lineno=4), Instr("POP_JUMP_IF_FALSE", block2, lineno=4), ] ) block1.extend( [Instr("LOAD_FAST", "arg", lineno=5), Instr("STORE_FAST", "x", lineno=5)] ) block2.extend( [ Instr("LOAD_CONST", 3, lineno=6), Instr("STORE_FAST", "x", lineno=6), Instr("LOAD_FAST", "x", lineno=7), Instr("RETURN_VALUE", lineno=7), ] ) if WORDCODE: expected = ( b"|\x05" b"r\x08" b"|\x00" b"}\x05" b"d\x01" b"}\x05" b"|\x05" b"S\x00" ) else: expected = ( b"|\x05\x00" b"r\x0c\x00" b"|\x00\x00" b"}\x05\x00" b"d\x01\x00" b"}\x05\x00" b"|\x05\x00" b"S" ) code = bytecode.to_code() self.assertEqual(code.co_consts, (None, 3)) self.assertEqual(code.co_argcount, 3) if sys.version_info > (3, 8): self.assertEqual(code.co_posonlyargcount, 0) 
self.assertEqual(code.co_kwonlyargcount, 2) self.assertEqual(code.co_nlocals, 6) self.assertEqual(code.co_stacksize, 1) # FIXME: don't use hardcoded constants self.assertEqual(code.co_flags, 0x43) self.assertEqual(code.co_code, expected) self.assertEqual(code.co_names, ()) self.assertEqual( code.co_varnames, ("arg", "arg2", "arg3", "kwonly", "kwonly2", "x") ) self.assertEqual(code.co_filename, "hello.py") self.assertEqual(code.co_name, "func") self.assertEqual(code.co_firstlineno, 3) # verify stacksize argument is honored explicit_stacksize = code.co_stacksize + 42 code = bytecode.to_code(stacksize=explicit_stacksize) self.assertEqual(code.co_stacksize, explicit_stacksize) def test_get_block_index(self): blocks = ControlFlowGraph() block0 = blocks[0] block1 = blocks.add_block() block2 = blocks.add_block() self.assertEqual(blocks.get_block_index(block0), 0) self.assertEqual(blocks.get_block_index(block1), 1) self.assertEqual(blocks.get_block_index(block2), 2) other_block = BasicBlock() self.assertRaises(ValueError, blocks.get_block_index, other_block) class CFGStacksizeComputationTests(TestCase): def check_stack_size(self, func): code = func.__code__ bytecode = Bytecode.from_code(code) cfg = ControlFlowGraph.from_bytecode(bytecode) self.assertEqual(code.co_stacksize, cfg.compute_stacksize()) def test_empty_code(self): cfg = ControlFlowGraph() del cfg[0] self.assertEqual(cfg.compute_stacksize(), 0) def test_handling_of_set_lineno(self): code = Bytecode() code.first_lineno = 3 code.extend( [ Instr("LOAD_CONST", 7), Instr("STORE_NAME", "x"), SetLineno(4), Instr("LOAD_CONST", 8), Instr("STORE_NAME", "y"), SetLineno(5), Instr("LOAD_CONST", 9), Instr("STORE_NAME", "z"), ] ) self.assertEqual(code.compute_stacksize(), 1) def test_handling_of_extended_arg(self): code = Bytecode() code.first_lineno = 3 code.extend( [ Instr("LOAD_CONST", 7), Instr("STORE_NAME", "x"), Instr("EXTENDED_ARG", 1), Instr("LOAD_CONST", 8), Instr("STORE_NAME", "y"), ] ) 
self.assertEqual(code.compute_stacksize(), 1) def test_invalid_stacksize(self): code = Bytecode() code.extend([Instr("STORE_NAME", "x")]) with self.assertRaises(RuntimeError): code.compute_stacksize() def test_stack_size_computation_and(self): def test(arg1, *args, **kwargs): # pragma: no cover return arg1 and args # Test JUMP_IF_FALSE_OR_POP self.check_stack_size(test) def test_stack_size_computation_or(self): def test(arg1, *args, **kwargs): # pragma: no cover return arg1 or args # Test JUMP_IF_TRUE_OR_POP self.check_stack_size(test) def test_stack_size_computation_if_else(self): def test(arg1, *args, **kwargs): # pragma: no cover if args: return 0 elif kwargs: return 1 else: return 2 self.check_stack_size(test) def test_stack_size_computation_for_loop_continue(self): def test(arg1, *args, **kwargs): # pragma: no cover for k in kwargs: if k in args: continue else: return 1 self.check_stack_size(test) def test_stack_size_computation_while_loop_break(self): def test(arg1, *args, **kwargs): # pragma: no cover while True: if arg1: break self.check_stack_size(test) def test_stack_size_computation_with(self): def test(arg1, *args, **kwargs): # pragma: no cover with open(arg1) as f: return f.read() self.check_stack_size(test) def test_stack_size_computation_try_except(self): def test(arg1, *args, **kwargs): # pragma: no cover try: return args[0] except Exception: return 2 self.check_stack_size(test) def test_stack_size_computation_try_finally(self): def test(arg1, *args, **kwargs): # pragma: no cover try: return args[0] finally: return 2 self.check_stack_size(test) def test_stack_size_computation_try_except_finally(self): def test(arg1, *args, **kwargs): # pragma: no cover try: return args[0] except Exception: return 2 finally: print("Interrupt") self.check_stack_size(test) def test_stack_size_computation_try_except_else_finally(self): def test(arg1, *args, **kwargs): # pragma: no cover try: return args[0] except Exception: return 2 else: return arg1 finally: 
print("Interrupt") self.check_stack_size(test) def test_stack_size_computation_nested_try_except_finally(self): def test(arg1, *args, **kwargs): # pragma: no cover k = 1 try: getattr(arg1, k) except AttributeError: pass except Exception: try: assert False except Exception: return 2 finally: print("unexpected") finally: print("attempted to get {}".format(k)) self.check_stack_size(test) def test_stack_size_computation_nested_try_except_else_finally(self): def test(*args, **kwargs): try: v = args[1] except IndexError: try: w = kwargs["value"] except KeyError: return -1 else: return w finally: print("second finally") else: return v finally: print("first finally") # A direct comparison of the stack depth fails because CPython # generate dead code that is used in stack computation. cpython_stacksize = test.__code__.co_stacksize test.__code__ = Bytecode.from_code(test.__code__).to_code() self.assertLessEqual(test.__code__.co_stacksize, cpython_stacksize) with contextlib.redirect_stdout(io.StringIO()) as stdout: self.assertEqual(test(1, 4), 4) self.assertEqual(stdout.getvalue(), "first finally\n") with contextlib.redirect_stdout(io.StringIO()) as stdout: self.assertEqual(test([], value=3), 3) self.assertEqual(stdout.getvalue(), "second finally\nfirst finally\n") with contextlib.redirect_stdout(io.StringIO()) as stdout: self.assertEqual(test([], name=None), -1) self.assertEqual(stdout.getvalue(), "second finally\nfirst finally\n") def test_stack_size_with_dead_code(self): # Simply demonstrate more directly the previously mentioned issue. 
def test(*args): # pragma: no cover return 0 try: a = args[0] except IndexError: return -1 else: return a test.__code__ = Bytecode.from_code(test.__code__).to_code() self.assertEqual(test.__code__.co_stacksize, 1) self.assertEqual(test(1), 0) def test_huge_code_with_numerous_blocks(self): def base_func(x): pass def mk_if_then_else(depth): instructions = [] for i in range(depth): label_else = Label() instructions.extend( [ Instr("LOAD_FAST", "x"), Instr("POP_JUMP_IF_FALSE", label_else), Instr("LOAD_GLOBAL", "f{}".format(i)), Instr("RETURN_VALUE"), label_else, ] ) instructions.extend([Instr("LOAD_CONST", None), Instr("RETURN_VALUE")]) return instructions bytecode = Bytecode(mk_if_then_else(5000)) bytecode.compute_stacksize() def test_extended_arg_unpack_ex(self): def test(): p = [1, 2, 3, 4, 5, 6] q, r, *s, t = p return q, r, s, t test.__code__ = ConcreteBytecode.from_code(test.__code__, extended_arg=True).to_code() self.assertEqual(test.__code__.co_stacksize, 6) self.assertEqual(test(), (1, 2, [3, 4, 5], 6)) def test_expected_arg_with_many_consts(self): def test(): var = 0 var = 1 var = 2 var = 3 var = 4 var = 5 var = 6 var = 7 var = 8 var = 9 var = 10 var = 11 var = 12 var = 13 var = 14 var = 15 var = 16 var = 17 var = 18 var = 19 var = 20 var = 21 var = 22 var = 23 var = 24 var = 25 var = 26 var = 27 var = 28 var = 29 var = 30 var = 31 var = 32 var = 33 var = 34 var = 35 var = 36 var = 37 var = 38 var = 39 var = 40 var = 41 var = 42 var = 43 var = 44 var = 45 var = 46 var = 47 var = 48 var = 49 var = 50 var = 51 var = 52 var = 53 var = 54 var = 55 var = 56 var = 57 var = 58 var = 59 var = 60 var = 61 var = 62 var = 63 var = 64 var = 65 var = 66 var = 67 var = 68 var = 69 var = 70 var = 71 var = 72 var = 73 var = 74 var = 75 var = 76 var = 77 var = 78 var = 79 var = 80 var = 81 var = 82 var = 83 var = 84 var = 85 var = 86 var = 87 var = 88 var = 89 var = 90 var = 91 var = 92 var = 93 var = 94 var = 95 var = 96 var = 97 var = 98 var = 99 var = 100 var = 101 var = 
102 var = 103 var = 104 var = 105 var = 106 var = 107 var = 108 var = 109 var = 110 var = 111 var = 112 var = 113 var = 114 var = 115 var = 116 var = 117 var = 118 var = 119 var = 120 var = 121 var = 122 var = 123 var = 124 var = 125 var = 126 var = 127 var = 128 var = 129 var = 130 var = 131 var = 132 var = 133 var = 134 var = 135 var = 136 var = 137 var = 138 var = 139 var = 140 var = 141 var = 142 var = 143 var = 144 var = 145 var = 146 var = 147 var = 148 var = 149 var = 150 var = 151 var = 152 var = 153 var = 154 var = 155 var = 156 var = 157 var = 158 var = 159 var = 160 var = 161 var = 162 var = 163 var = 164 var = 165 var = 166 var = 167 var = 168 var = 169 var = 170 var = 171 var = 172 var = 173 var = 174 var = 175 var = 176 var = 177 var = 178 var = 179 var = 180 var = 181 var = 182 var = 183 var = 184 var = 185 var = 186 var = 187 var = 188 var = 189 var = 190 var = 191 var = 192 var = 193 var = 194 var = 195 var = 196 var = 197 var = 198 var = 199 var = 200 var = 201 var = 202 var = 203 var = 204 var = 205 var = 206 var = 207 var = 208 var = 209 var = 210 var = 211 var = 212 var = 213 var = 214 var = 215 var = 216 var = 217 var = 218 var = 219 var = 220 var = 221 var = 222 var = 223 var = 224 var = 225 var = 226 var = 227 var = 228 var = 229 var = 230 var = 231 var = 232 var = 233 var = 234 var = 235 var = 236 var = 237 var = 238 var = 239 var = 240 var = 241 var = 242 var = 243 var = 244 var = 245 var = 246 var = 247 var = 248 var = 249 var = 250 var = 251 var = 252 var = 253 var = 254 var = 255 var = 256 var = 257 var = 258 var = 259 return var test.__code__ = ConcreteBytecode.from_code(test.__code__, extended_arg=True).to_code() self.assertEqual(test.__code__.co_stacksize, 1) self.assertEqual(test(), 259) if sys.version_info >= (3, 6): @unittest.expectedFailure def test_fail_extended_arg_jump(self): def test(): var = None for _ in range(0, 1): var = 0 var = 1 var = 2 var = 3 var = 4 var = 5 var = 6 var = 7 var = 8 var = 9 var = 10 var = 11 var = 12 
var = 13 var = 14 var = 15 var = 16 var = 17 var = 18 var = 19 var = 20 var = 21 var = 22 var = 23 var = 24 var = 25 var = 26 var = 27 var = 28 var = 29 var = 30 var = 31 var = 32 var = 33 var = 34 var = 35 var = 36 var = 37 var = 38 var = 39 var = 40 var = 41 var = 42 var = 43 var = 44 var = 45 var = 46 var = 47 var = 48 var = 49 var = 50 var = 51 var = 52 var = 53 var = 54 var = 55 var = 56 var = 57 var = 58 var = 59 var = 60 var = 61 var = 62 var = 63 var = 64 var = 65 var = 66 var = 67 var = 68 var = 69 var = 70 return var # Generate the bytecode with extended arguments bytecode = ConcreteBytecode.from_code(test.__code__, extended_arg=True) # This is where computation fails # It seems like it is caused by the split of blocks and a wrong start size # for one block. bytecode.to_code() if __name__ == "__main__": unittest.main() # pragma: no cover
import xml.etree.ElementTree as ET
import collections


class Node_struct:
    """In-memory representation of one UANodeSet node.

    Holds the identifying attributes read from the XML (node id, browse /
    display names, abstractness, data type, description) plus the list of
    Reference objects attached to the node.
    """

    def __init__(self):
        self.nodeId = None
        self.browseName = None
        self.isAbstract = True
        self.parentNodeId = None
        self.dataType = None
        self.displayName = None
        self.description = None
        self.references = []

    def __hash__(self):
        # Fixed: hash() takes exactly one argument, so fold all identifying
        # fields into a single tuple.  self.references is a list (unhashable),
        # so it is converted to a tuple of (hashable) Reference objects.
        return hash((self.nodeId, self.browseName, self.isAbstract,
                     self.parentNodeId, self.dataType, self.displayName,
                     self.description, tuple(self.references)))

    def __eq__(self, other):
        return (self.nodeId, self.browseName, self.isAbstract,
                self.parentNodeId, self.dataType, self.displayName,
                self.description, self.references) == \
               (other.nodeId, other.browseName, other.isAbstract,
                other.parentNodeId, other.dataType, other.displayName,
                other.description, other.references)

    def __ne__(self, other):
        return not (self == other)


class Reference:
    """A single <Reference> entry (reference type + target node id)."""

    def __init__(self):
        self.referenceType = None
        self.refId = None

    def __hash__(self):
        # Fixed: hash() takes exactly one argument — pack the fields into a tuple.
        return hash((self.referenceType, self.refId))

    def __eq__(self, other):
        # Fixed: the right-hand side previously read ``other.refValue``, an
        # attribute that does not exist (AttributeError on any comparison);
        # compare refId on both sides.
        return (self.referenceType, self.refId) == (other.referenceType, other.refId)

    def __ne__(self, other):
        return not (self == other)


class Model_Event:
    """Container for parsed event-type structs with lookup by node id."""

    def __init__(self):
        self.structs = []

    def get_struct(self, nodeId):
        """Return the struct whose nodeId matches, or raise if absent."""
        for struct in self.structs:
            if struct.nodeId == nodeId:
                return struct
        raise Exception("No struct with the Id: " + str(nodeId))


class Parser(object):
    """Parses a UANodeSet XML file and extracts *EventType nodes."""

    # Default XML namespace of the OPC UA NodeSet schema.
    nameSpace = "{http://opcfoundation.org/UA/2011/03/UANodeSet.xsd}"

    def __init__(self, path):
        self.path = path
        self.model = None

    def findNodeWithNodeId(self, root, nodeId):
        """Locate the child of *root* with the given NodeId attribute.

        Returns a populated Node_struct; if no child matches, the returned
        struct keeps its default (None) fields — callers should be aware a
        miss is not signalled by an exception.
        """
        node = Node_struct()
        for child in root:
            if nodeId == child.attrib.get('NodeId'):
                node.browseName = str(child.attrib.get('BrowseName'))
                node.nodeId = child.attrib.get('NodeId')
                node.isAbstract = child.attrib.get('IsAbstract')
                node.dataType = child.attrib.get('DataType')
                if node.dataType is None:
                    # Nodes without an explicit DataType default to Variant.
                    node.dataType = 'Variant'
                node.displayName = child.find(self.nameSpace + 'DisplayName').text
                if child.find(self.nameSpace + 'Description') is not None:
                    node.description = child.find(self.nameSpace + 'Description').text
                for ref in child.find(self.nameSpace + 'References').findall(self.nameSpace + 'Reference'):
                    reference = Reference()
                    reference.referenceType = ref.attrib.get('ReferenceType')
                    reference.refId = ref.text
                    # NOTE(review): any explicit IsForward attribute is taken
                    # as the parent pointer — confirm this matches the schema
                    # intent (IsForward="false" marks inverse references).
                    if ref.attrib.get('IsForward') is not None:
                        node.parentNodeId = reference.refId
                    node.references.append(reference)
        return node

    def checkNodeType(self, node):
        """Return True for the UA node kinds this parser cares about."""
        return (node.tag == self.nameSpace + "UAObjectType") or \
               (node.tag == self.nameSpace + "UAVariable") or \
               (node.tag == self.nameSpace + "UAObject") or \
               (node.tag == self.nameSpace + "UAMethod") or \
               (node.tag == self.nameSpace + "UAVariableType")

    def parse(self):
        """Parse the file and return an ordered dict of *EventType nodes.

        Keys are node ids; the result is sorted first lexicographically and
        then by key length, matching the original ordering.
        """
        print("Parsing: " + self.path)
        tree = ET.parse(self.path)
        root = tree.getroot()
        listEventType = {}
        for child in root:
            browseName = str(child.attrib.get('BrowseName'))
            if browseName.endswith("EventType"):
                if browseName == "EventType":
                    # The abstract base EventType itself is skipped.
                    continue
                node = Node_struct()
                node.browseName = browseName.replace("Type", "")
                node.nodeId = child.attrib.get('NodeId')
                node.isAbstract = child.attrib.get('IsAbstract')
                node.displayName = child.find(self.nameSpace + 'DisplayName').text
                if child.find(self.nameSpace + 'Description') is not None:
                    node.description = child.find(self.nameSpace + 'Description').text
                for ref in child.find(self.nameSpace + 'References').findall(self.nameSpace + 'Reference'):
                    reference = Reference()
                    reference.referenceType = ref.attrib.get('ReferenceType')
                    reference.refId = ref.text
                    # Hoisted: the original resolved the referenced node three
                    # times (one full scan of the document each); do it once.
                    ref_node = self.findNodeWithNodeId(root, reference.refId)
                    self.refNode = ref_node.browseName
                    reference.refBrowseName = ref_node.browseName
                    reference.refDataType = ref_node.dataType
                    if ref.attrib.get('IsForward') is not None:
                        node.parentNodeId = reference.refId
                    node.references.append(reference)
                listEventType.update({node.nodeId: node})
        return collections.OrderedDict(
            sorted(sorted(listEventType.items(), key=lambda t: t[0]),
                   key=lambda u: len(u[0])))
# workbench/executor.py
"""Helpers to run Jupyter notebooks via the Google Cloud Notebooks
Executions API, either from a local .ipynb (uploaded to GCS first) or
from a notebook already stored in GCS."""
import urllib.request
import urllib.error
import json
import google.auth.transport.requests
import google.oauth2.id_token
import uuid
import time

from google.cloud import storage

# States the Executions API reports when an execution has finished.
_TERMINAL_STATES = {"DONE", "FAILED", "COMPLETED", "FINISHED", "SUCCEEDED"}


def execute_local_notebook(gcp_project: str, location: str, input_notebook_file_path: str,
                           gcs_notebook_folder_path: str, execution_id="",
                           env_uri="gcr.io/deeplearning-platform-release/base-cu110:latest",
                           kernel="python3", master_type="n1-standard-4", wait=True,
                           output_notebook_gcs_path=None):
    """Upload a local notebook to GCS, then execute it remotely.

    gcs_notebook_folder_path is a full gs:// URI (with or without a
    trailing slash); the notebook file keeps its base name.
    Returns whatever execute_notebook returns (dict of URIs, or None on
    an API error).
    """
    folder = gcs_notebook_folder_path.rstrip("/")  # normalize trailing slash
    gcs_bucket_name = _get_gcs_bucket_name_from_gcs_uri(folder)
    file_name = input_notebook_file_path.split("/")[-1]
    # Object path within the bucket (scheme and bucket name stripped).
    gcs_out_path = "/".join(folder.replace("gs://", "").split("/")[1:]) + "/" + file_name
    # BUG FIX: the folder already carries the gs:// scheme; the previous code
    # prefixed "gs://" again, producing "gs://gs://bucket/..." which the
    # Executions API rejects.
    input_gcs_notebook_path = f"{folder}/{file_name}"
    _upload_blob(gcp_project, gcs_bucket_name, input_notebook_file_path, gcs_out_path)
    return execute_notebook(gcp_project, location, input_gcs_notebook_path,
                            gcs_notebook_folder_path, execution_id, env_uri, kernel,
                            master_type, wait, output_notebook_gcs_path)


def execute_notebook(gcp_project: str, location: str, gcs_input_notebook_file_path: str,
                     gcs_output_notebook_folder_path: str, execution_id="",
                     env_uri="gcr.io/deeplearning-platform-release/base-cu110:latest",
                     kernel="python3", master_type="n1-standard-4", wait=True,
                     output_notebook_gcs_path=None):
    """Start a Notebooks API execution for a notebook stored in GCS.

    If wait is True, blocks (polling every 10s) until the execution reaches a
    terminal state. Returns a dict with operation/execution URIs and the
    output-notebook viewer URL, or None if the create call failed.
    """
    if not execution_id:
        execution_id = str(uuid.uuid1())
    service_url = (f"https://notebooks.googleapis.com/v1/projects/{gcp_project}"
                   f"/locations/{location}/executions?execution_id={execution_id}")
    values = {
        "description": f"Execution for {gcs_input_notebook_file_path}",
        "executionTemplate": {
            "scaleTier": "CUSTOM",
            "masterType": master_type,
            "inputNotebookFile": gcs_input_notebook_file_path,
            "outputNotebookFolder": gcs_output_notebook_folder_path,
            "containerImageUri": env_uri,
            "kernelSpec": kernel
        }
    }
    if output_notebook_gcs_path:
        values["outputNotebookFile"] = output_notebook_gcs_path
    data = json.dumps(values).encode('utf-8')

    data_from_gcp = _send_generic_request(service_url, data)
    if not data_from_gcp:
        return None
    operation_uri = data_from_gcp["name"]
    execution_uri = data_from_gcp["metadata"]["target"]
    notebook_gcs_url = get_output_notebook_path(execution_uri)
    notebook_gcs_url_without_scheme = notebook_gcs_url.replace("gs://", "")
    viewer_url = f"https://notebooks.cloud.google.com/view/{notebook_gcs_url_without_scheme}"
    result = {
        "operation_uri": operation_uri,
        "execution_uri": execution_uri,
        "notebook_gcs_url": notebook_gcs_url,
        "viewer_url": viewer_url
    }
    if wait:
        result["execution_status"] = _wait_execution_to_complete(execution_uri)
    return result


def get_output_notebook_path(execution_uri: str) -> str:
    """Return the gs:// path of the execution's output notebook."""
    request_url = f"https://notebooks.googleapis.com/v1/{execution_uri}"
    response = _send_generic_request(request_url)
    return response["outputNotebookFile"]


def _get_notebook_execution_operation_status(execution_uri: str):
    """Return the execution state, or None if the response carries none.

    The API reports the state either at the top level ("state") or, for
    finished long-running operations, inside "response".
    """
    service_url = f"https://notebooks.googleapis.com/v1/{execution_uri}"
    data_from_gcp = _send_generic_request(service_url)
    if "state" in data_from_gcp:
        return data_from_gcp["state"]
    elif "response" in data_from_gcp:
        return data_from_gcp["response"]["state"]
    else:
        return None


def _upload_blob(gcp_project, bucket_name, source_file_name, destination_blob_name):
    """Upload a local file to the given GCS bucket/object path."""
    storage_client = storage.Client(project=gcp_project)
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_filename(source_file_name)
    print(
        "File {} uploaded to {}.".format(
            source_file_name, destination_blob_name
        )
    )


def _wait_execution_to_complete(execution_uri):
    """Poll the execution every 10 seconds until it reaches a terminal state."""
    execution_status = "IN_PROGRESS"
    while execution_status not in _TERMINAL_STATES:
        execution_status = _get_notebook_execution_operation_status(execution_uri)
        print(f"Execution status: {execution_status}")
        time.sleep(10)  # Sleep for 10 seconds
    return execution_status


def _get_gcs_bucket_name_from_gcs_uri(gcs_uri):
    """Extract the bucket name from a gs://bucket/... URI."""
    return gcs_uri.split("/")[2]


def _send_generic_request(url, data=None):
    """Authenticated JSON request to a Google API; POST when data is given.

    Returns the decoded JSON body, or None on an HTTP error (the error is
    printed). The previous post-call status check was dead code: urlopen
    raises HTTPError for non-2xx responses, so the handling now lives in the
    except branch where it actually runs.
    """
    creds, _ = google.auth.default()
    auth_req = google.auth.transport.requests.Request()
    creds.refresh(auth_req)
    req = urllib.request.Request(url, data=data)
    req.add_header('Content-Type', 'application/json')
    req.add_header("Authorization", f"Bearer {creds.token}")
    try:
        response = urllib.request.urlopen(req)
    except urllib.error.HTTPError as err:
        print(f"Error: {err.code}")
        print(f"Error: {err.read()}")
        return None
    encoding = response.info().get_content_charset('utf-8')
    return json.loads(response.read().decode(encoding))
<reponame>donlo/geopandas
# ASV-style benchmarks for geopandas spatial indexing (GeoSeries.sindex).
from shapely.geometry import Point

from geopandas import read_file, datasets, GeoSeries

# Derive list of valid query predicates based on underlying index backend;
# we have to create a non-empty instance of the index to get these
index = GeoSeries([Point(0, 0)]).sindex
predicates = sorted(p for p in index.valid_query_predicates if p is not None)

geom_types = ("mixed", "points", "polygons")


def generate_test_df():
    """Build the benchmark fixture: a dict of GeoDataFrames keyed by
    geometry kind ("mixed", "points", "polygons"), reprojected to a common
    CRS, with invalid geometries filtered out and the spatial index
    pre-built so index construction is not counted in query benchmarks."""
    world = read_file(datasets.get_path("naturalearth_lowres"))
    capitals = read_file(datasets.get_path("naturalearth_cities"))
    countries = world.to_crs("epsg:3395")[["geometry"]]
    capitals = capitals.to_crs("epsg:3395")[["geometry"]]
    mixed = capitals.append(countries)  # get a mix of geometries
    points = capitals
    polygons = countries
    # filter out invalid geometries
    data = {
        "mixed": mixed[mixed.is_valid],
        "points": points[points.is_valid],
        "polygons": polygons[polygons.is_valid],
    }
    # ensure index is pre-generated
    for data_type in data.keys():
        data[data_type].sindex.query(data[data_type].geometry.values.data[0])
    return data


class BenchIntersection:
    # Benchmark of raw bounds-based intersection lookups against a
    # pre-built tree, parameterized over input/tree geometry kinds.
    param_names = ["input_geom_type", "tree_geom_type"]
    params = [
        geom_types,
        geom_types,
    ]

    def setup(self, *args):
        self.data = generate_test_df()
        # cache bounds so that bound creation is not counted in benchmarks
        self.bounds = {
            data_type: [g.bounds for g in self.data[data_type].geometry]
            for data_type in self.data.keys()
        }

    def time_intersects(self, input_geom_type, tree_geom_type):
        tree = self.data[tree_geom_type].sindex
        for bounds in self.bounds[input_geom_type]:
            tree.intersection(bounds)


class BenchIndexCreation:
    # Benchmark of building the spatial index from scratch.
    param_names = ["tree_geom_type"]
    params = [
        geom_types,
    ]

    def setup(self, *args):
        self.data = generate_test_df()

    def time_index_creation(self, tree_geom_type):
        """Time creation of spatial index.

        Note: requires running a single query to ensure that
        lazy-building indexes are actually built.
        """
        # Note: the GeoDataFram._sindex_generated attribute will
        # be removed by GH#1444 but is kept here (in the benchmarks
        # so that we can compare pre GH#1444 to post GH#1444 if needed
        self.data[tree_geom_type]._sindex_generated = None
        self.data[tree_geom_type].geometry.values._sindex = None
        tree = self.data[tree_geom_type].sindex
        # also do a single query to ensure the index is actually
        # generated and used
        tree.query(
            self.data[tree_geom_type].geometry.values.data[0]
        )


class BenchQuery:
    # Benchmark of predicate queries (single and bulk) against the tree.
    param_names = ["predicate", "input_geom_type", "tree_geom_type"]
    params = [
        predicates,
        geom_types,
        geom_types,
    ]

    def setup(self, *args):
        self.data = generate_test_df()

    def time_query_bulk(self, predicate, input_geom_type, tree_geom_type):
        # One vectorized query over all input geometries.
        self.data[tree_geom_type].sindex.query_bulk(
            self.data[input_geom_type].geometry.values.data,
            predicate=predicate,
        )

    def time_query(self, predicate, input_geom_type, tree_geom_type):
        # One query call per input geometry (Python-loop overhead included).
        tree = self.data[tree_geom_type].sindex
        for geom in self.data[input_geom_type].geometry.values.data:
            tree.query(
                geom, predicate=predicate
            )
<gh_stars>1-10
# Face-database preprocessing: extracts frames from zipped training videos,
# downsamples the frame rate, and writes shuffled frames/labels into an HDF5
# database.  NOTE(review): the octal literal 0755 below is Python-2 syntax
# (Python 3 requires 0o755) — this script appears to target Python 2; confirm
# the intended interpreter before modernizing.
import os
import sys
import getopt
import csv
import numpy as np
import zipfile, glob
import cv2,shutil
import h5py as hf
import matplotlib.pyplot as plt

from faceDetector import FaceDetection
from imageExtractor import ImageExtractor

# Configuration / module-level state.
tmp_path = "tmp/";          # scratch dir for extracted videos
fpsNew = 2;                 # target frame rate after downsampling
cols = 64;                  # frame width stored in the database
rows = 64;                  # frame height stored in the database
database = 0;               # h5py File handle (set in initHDF5Database)
# Running write offsets into the HDF5 datasets (advanced per batch).
size_v_old = 0;
size_n_old = 0;
size_f_old = 0;
size_l_old = 0;

def runOnVideo():
    """Debug helper: run the image extractor on one hard-coded video file."""
    input_path = "XXXXX/FaceDBGenerator_V2/Facedetector/";
    filename = "CFK8ib0aWe8.000.mp4";
    print("create image extractor...");
    imageExtractor = ImageExtractor(input_path);
    # NOTE(review): ``file`` shadows the (Python 2) builtin and the handle is
    # never closed; it also appears unused.
    file = open(filename, 'r');
    print("-------------------------");
    print(filename);
    ret = imageExtractor.loadVideo(input_path + filename);
    if(ret == True):
        imageExtractor.reduceFrameRate(filename, 4);
        frames = imageExtractor.getFrames();
        labels = imageExtractor.getLabels();
        videonames = imageExtractor.getVideonames();
        vids = imageExtractor.getVids();
    print("-------------------------");

def run():
    """Main pipeline: unzip each training archive, extract and downsample
    frames from every contained .mp4, then append the batch to the HDF5
    database via saveDataAsHDF5."""
    print("run...");
    print("create image extractor...");
    imageExtractor = ImageExtractor(input_path);

    # init hdf5 database
    dataset_size = 100 * 75 * 80;
    hdf5file = output_path + "database_TEST";
    initHDF5Database(hdf5file, dataset_size);

    print("run image extractor...");
    # extract videofiles to tmp folder
    cnt = 0;
    # NOTE(review): range(15, 16, 1) processes only archive #15 — presumably a
    # leftover from debugging a single archive; confirm the intended range.
    for i in range(15, 16, 1):
        cnt = cnt + 1;
        # load training data
        print("process training80_" + str(i).zfill(2) + ".zip ...");
        filename = "training80_" + str(i).zfill(2) + ".zip";
        try:
            zipArchive = zipfile.ZipFile(input_path + filename, "r");
        except:
            # NOTE(review): on failure zipArchive is undefined and the code
            # below will raise NameError rather than skip this archive.
            print("ERROR: file not found!");
            ret = False;
        if not os.path.exists(tmp_path):
            os.mkdir( "tmp", 0755 );
        zipArchive.extractall(tmp_path);
        zipArchive.close();

        # videofiles in tmp folder
        frames = [];
        labels = [];
        videonames = [];
        vids = [];
        #filenames = glob.glob(tmp_path + "/*.mp4");
        filenames = os.listdir(tmp_path);
        #print(filenames);
        for filename in filenames:
            if filename.endswith(".mp4"):
                #print(file)
                # NOTE(review): handle is unused and never closed (shadows
                # the Python-2 ``file`` builtin).
                file = open(tmp_path + filename, 'r');
                print("-------------------------");
                print(filename);
                ret = imageExtractor.loadVideo(tmp_path + filename);
                if(ret == True):
                    imageExtractor.reduceFrameRate(filename, fpsNew);
                    # The extractor accumulates across videos; these getters
                    # return the totals so far, not just this video's frames.
                    frames = imageExtractor.getFrames();
                    labels = imageExtractor.getLabels();
                    videonames = imageExtractor.getVideonames();
                    vids = imageExtractor.getVids();
                print("-------------------------");
                #imageExtractor.printVideoDetails();
                #imageExtractor.playExtractedFrames();

        # delete tmp folder
        files = glob.glob(tmp_path + "/*");
        for name in files:
            os.remove(name);
        files = glob.glob(tmp_path + "/.*");
        for name in files:
            os.remove(name);
        #shutil.rmtree("/tmp", ignore_errors=False, onerror=None);

        print("number of frames: " + str(len(frames)));
        print("number of labels: " + str(len(labels)));
        print("number of vids: " + str(len(vids)));
        print("number of videonames: " + str(len(videonames)));

        #save to numpy array
        saveDataAsHDF5(output_path + "database_part1", videonames, vids, frames, labels);

        # Reset the extractor's accumulators before the next archive.
        imageExtractor.frames = [];
        imageExtractor.labels = [];
        imageExtractor.vids = [];
        imageExtractor.videonames = [];
        frames = [];
        labels = [];
        vids = [];
        videonames = [];

def initHDF5Database(filename, dataset_size):
    """Create the fixed-size HDF5 datasets (ids, names, frames, labels)."""
    global database;
    database = hf.File(filename + ".h5", 'a');
    database.create_dataset('id', (dataset_size,), dtype='int32');
    database.create_dataset('name', (dataset_size,), dtype='S30');
    database.create_dataset('data', (dataset_size,rows,cols,3), dtype='uint8');
    database.create_dataset('labels', (dataset_size,5), dtype='float32');
    database.close();

def saveDataAsHDF5(filename, n, v, f, l):
    """Append one shuffled batch (names n, video ids v, frames f, labels l)
    to the HDF5 datasets, advancing the module-level write offsets."""
    global size_v_old;
    global size_n_old;
    global size_f_old;
    global size_l_old;
    print("save to hdf5");
    database = hf.File(filename + ".h5", 'a');
    v_numpy = np.array(v);
    v_numpy = v_numpy.astype('int32');
    n_numpy = np.array(n);
    # NOTE(review): dtype 'string' is a Python-2/NumPy-1.x alias; modern NumPy
    # rejects it (use 'S30' / bytes) — confirm target NumPy version.
    n_numpy = n_numpy.astype('string');
    f_numpy = np.array(f);
    f_numpy = f_numpy.astype('uint8');
    l_numpy = np.array(l);
    l_numpy = l_numpy.astype('float32');
    size_v = size_v_old + v_numpy.shape[0];
    size_n = size_n_old + n_numpy.shape[0];
    size_f = size_f_old + f_numpy.shape[0];
    size_l = size_l_old + l_numpy.shape[0];
    print("shuffle");
    shuffled_name, shuffled_vids, shuffled_f, shuffled_l = shuffleDataset(n_numpy, v_numpy, f_numpy, l_numpy);
    print("save to database");
    database["id"][int(size_v_old):int(size_v)] = shuffled_vids;
    database["name"][int(size_n_old):int(size_n)] = shuffled_name;
    database["data"][int(size_f_old):int(size_f)] = shuffled_f;
    # NOTE(review): the labels slice reuses the frame offsets (size_f_old/
    # size_f) rather than size_l_old/size_l — harmless only if frame and
    # label counts always match; presumably they do, but verify.
    database["labels"][int(size_f_old):int(size_f)] = shuffled_l;
    size_v_old = size_v;
    size_n_old = size_n;
    size_f_old = size_f;
    size_l_old = size_l;
    database.close();

def shuffleDataset(name, vid, f, l):
    """Apply one shared random permutation to all four parallel arrays."""
    datasize = f.shape[0];
    print(datasize);
    randomize = np.arange(datasize);
    np.random.shuffle(randomize);
    shuffled_name = name[randomize];
    shuffled_vid = vid[randomize];
    shuffled_f = f[randomize];
    shuffled_l = l[randomize];
    return shuffled_name, shuffled_vid, shuffled_f, shuffled_l;

def getCmdParameter(argv):
    """Parse -i/-o command-line options; returns (input_path, output_path)."""
    ipath = "";
    opath = "";
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="]);
    except getopt.GetoptError:
        print("ERROR: false input!");
        print("Example: preProcessing_v1.py -i <inputfile> -o <outputfile> ");
        sys.exit();
    for opt, arg in opts:
        if opt == '-h':
            print("Example: preProcessing_v1.py -i <inputfile> -o <outputfile> ");
            sys.exit();
        elif opt in ("-i", "--ifile"):
            ipath = arg;
        elif opt in ("-o", "--ofile"):
            opath = arg;
    print ("Input path: " + str(ipath));
    print ("Output path: " + str(opath));
    return ipath, opath;

def main(argv):
    """Entry point: read CLI paths, then run the (debug) single-video path."""
    global input_path;
    global output_path;
    # read input parameters
    input_path, output_path = getCmdParameter(argv);
    if (input_path == "" or output_path == ""):
        print("ERROR: false input!");
        print("Example: main.py -i <inputfile> -o <outputfile>");
        sys.exit();
    else:
        #run();
        runOnVideo();

if __name__ == "__main__":
    main(sys.argv[1:]);
"""Window utilities and related functions.

A window is an instance of Window

    Window(column_offset, row_offset, width, height)

or a 2D N-D array indexer in the form of a tuple.

    ((row_start, row_stop), (col_start, col_stop))

The latter can be evaluated within the context of a given height and
width and a boolean flag specifying whether the evaluation is boundless
or not. If boundless=True, negative index values do not mean index from
the end of the array dimension as they do in the boundless=False case.

The newer float precision read-write window capabilities of Rasterio
require instances of Window to be used.
"""

from __future__ import division
import collections
import functools
import math
from operator import itemgetter  # NOTE(review): appears unused in this module
import warnings

import attr
from affine import Affine
import numpy as np

from rasterio.errors import RasterioDeprecationWarning, WindowError
from rasterio.transform import rowcol

# Decimal places used when rounding pixel coordinates.
PIXEL_PRECISION = 6


def iter_args(function):
    """Decorator to allow function to take either *args or
    a single iterable which gets expanded to *args.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        # NOTE(review): collections.Iterable is deprecated since Python 3.3
        # and removed in 3.10 (use collections.abc.Iterable); kept as-is here
        # because the __future__ import suggests Python 2 is still supported —
        # confirm the minimum supported version before changing.
        if len(args) == 1 and isinstance(args[0], collections.Iterable):
            return function(*args[0])
        else:
            return function(*args)
    return wrapper


def toranges(window):
    """Normalize Windows to range tuples"""
    if isinstance(window, Window):
        return window.toranges()
    else:
        # Already a ((row_start, row_stop), (col_start, col_stop)) tuple.
        return window


def get_data_window(arr, nodata=None):
    """Window covering the input array's valid data pixels.

    Parameters
    ----------
    arr: numpy ndarray, <= 3 dimensions
    nodata: number
        If None, will either return a full window if arr is not a masked
        array, or will use the mask to determine non-nodata pixels.
        If provided, it must be a number within the valid range of the
        dtype of the input array.

    Returns
    -------
    Window
    """
    num_dims = len(arr.shape)
    if num_dims > 3:
        raise WindowError(
            "get_data_window input array must have no more than "
            "3 dimensions")

    if nodata is None:
        # No nodata and no mask: every pixel counts -> full window.
        if not hasattr(arr, 'mask'):
            return Window.from_slices((0, arr.shape[-2]), (0, arr.shape[-1]))
    else:
        # Build a mask marking nodata pixels.
        arr = np.ma.masked_array(arr, arr == nodata)

    if num_dims == 2:
        data_rows, data_cols = np.where(np.equal(arr.mask, False))
    else:
        # 3-D: a pixel is valid if it is unmasked in any band.
        data_rows, data_cols = np.where(
            np.any(np.equal(np.rollaxis(arr.mask, 0, 3), False), axis=2))

    if data_rows.size:
        row_range = (data_rows.min(), data_rows.max() + 1)
    else:
        row_range = (0, 0)

    if data_cols.size:
        col_range = (data_cols.min(), data_cols.max() + 1)
    else:
        col_range = (0, 0)

    return Window.from_slices(row_range, col_range)


@iter_args
def union(*windows):
    """
    Union windows and return the outermost extent they cover.

    Parameters
    ----------
    windows: sequence
        One or more Windows.

    Returns
    -------
    Window
    """
    # Stack all range tuples, then take min of starts and max of stops.
    stacked = np.dstack([toranges(w) for w in windows])
    return Window.from_slices(
        (stacked[0, 0].min(), stacked[0, 1].max()),
        (stacked[1, 0].min(), stacked[1, 1]. max()))


@iter_args
def intersection(*windows):
    """Innermost extent of window intersections.

    Will raise WindowError if windows do not intersect.

    Parameters
    ----------
    windows: sequence
        One or more Windows.

    Returns
    -------
    Window
    """
    if not intersect(windows):
        raise WindowError("windows do not intersect")

    # Max of starts and min of stops yields the common extent.
    stacked = np.dstack([toranges(w) for w in windows])
    return Window.from_slices(
        (stacked[0, 0].max(), stacked[0, 1].min()),
        (stacked[1, 0].max(), stacked[1, 1]. min()))


@iter_args
def intersect(*windows):
    """Test if all given windows intersect.

    Parameters
    ----------
    windows: sequence
        One or more Windows.

    Returns
    -------
    bool
        True if all windows intersect.
    """
    from itertools import combinations

    def intersects(range1, range2):
        # Half-open ranges overlap unless one ends at/before the other starts.
        return not (
            range1[0] >= range2[1] or range1[1] <= range2[0])

    windows = np.array([toranges(w) for w in windows])

    # Check every pair along both axes (0=rows, 1=cols).
    for i in (0, 1):
        for c in combinations(windows[:, i], 2):
            if not intersects(*c):
                return False

    return True


def from_bounds(left, bottom, right, top, transform=None,
                height=None, width=None, precision=6, **kwargs):
    """Get the window corresponding to the bounding coordinates.

    Parameters
    ----------
    left, bottom, right, top: float
        Left (west), bottom (south), right (east), and top (north)
        bounding coordinates.
    transform: Affine
        Affine transform matrix.
    height, width: int
        Number of rows and columns of the window.
    precision: int, optional
        Number of decimal points of precision when computing inverse
        transform.
    kwargs: mapping
        Absorbs deprecated keyword args

    Returns
    -------
    Window
        A new Window
    """
    if 'boundless' in kwargs:
        warnings.warn("boundless keyword should not be used",
                      RasterioDeprecationWarning)

    # Invert the transform to get fractional row/col of each corner.
    row_start, col_start = rowcol(
        transform, left, top, op=float, precision=precision)
    row_stop, col_stop = rowcol(
        transform, right, bottom, op=float, precision=precision)
    return Window.from_slices(
        (row_start, row_stop), (col_start, col_stop), height=height,
        width=width, boundless=True)


def transform(window, transform):
    """Construct an affine transform matrix relative to a window.

    Parameters
    ----------
    window: Window
        The input window.
    transform: Affine
        an affine transform matrix.

    Returns
    -------
    Affine
        The affine transform matrix for the given window
    """
    window = evaluate(window, height=0, width=0)
    x, y = transform * (window.col_off or 0.0, window.row_off or 0.0)
    # Shift the transform's origin to the window's upper-left corner.
    return Affine.translation(
        x - transform.c, y - transform.f) * transform


def bounds(window, transform, height=0, width=0):
    """Get the spatial bounds of a window.

    Parameters
    ----------
    window: Window
        The input window.
    transform: Affine
        an affine transform matrix.

    Returns
    -------
    left, bottom, right, top: float
        A tuple of spatial coordinate bounding values.
    """
    window = evaluate(window, height=height, width=width)

    row_min = window.row_off
    row_max = row_min + window.height
    col_min = window.col_off
    col_max = col_min + window.width

    left, bottom = transform * (col_min, row_max)
    right, top = transform * (col_max, row_min)
    return left, bottom, right, top


def crop(window, height, width):
    """Crops a window to given height and width.

    Parameters
    ----------
    window : Window.
        The input window.
    height, width : int
        The number of rows and cols in the cropped window.

    Returns
    -------
    Window
        A new Window object.
    """
    window = evaluate(window, height=height, width=width)

    # Clamp all edges into the [0, height] x [0, width] extent.
    row_start = min(max(window.row_off, 0), height)
    col_start = min(max(window.col_off, 0), width)
    row_stop = max(0, min(window.row_off + window.height, height))
    col_stop = max(0, min(window.col_off + window.width, width))

    return Window(col_start, row_start, col_stop - col_start,
                  row_stop - row_start)


def evaluate(window, height, width, boundless=False):
    """Evaluates a window tuple that may contain relative index values.

    The height and width of the array the window targets is the context
    for evaluation.

    Parameters
    ----------
    window: Window.
        The input window.
    height, width: int
        The number of rows or columns in the array that the window
        targets.

    Returns
    -------
    Window
        A new Window object with absolute index values.
    """
    if isinstance(window, Window):
        return window
    else:
        return Window.from_slices(window[0], window[1], height=height,
                                  width=width, boundless=boundless)


def shape(window, height=-1, width=-1):
    """The shape of a window.

    height and width arguments are optional if there are no negative
    values in the window.

    Parameters
    ----------
    window: Window
        The input window.
    height, width : int, optional
        The number of rows or columns in the array that the window
        targets.

    Returns
    -------
    num_rows, num_cols
        The number of rows and columns of the window.
    """
    evaluated = evaluate(window, height, width)
    return evaluated.height, evaluated.width


def window_index(window, height=0, width=0):
    """Construct a pair of slice objects for ndarray indexing

    Starting indexes are rounded down, Stopping indexes are rounded up.

    Parameters
    ----------
    window: Window
        The input window.

    Returns
    -------
    row_slice, col_slice: slice
        A pair of slices in row, column order
    """
    window = evaluate(window, height=height, width=width)
    (row_start, row_stop), (col_start, col_stop) = window.toranges()
    # Outward rounding: never shrink the covered pixel area.
    return (
        slice(int(math.floor(row_start)), int(math.ceil(row_stop))),
        slice(int(math.floor(col_start)), int(math.ceil(col_stop))))


def round_window_to_full_blocks(window, block_shapes, height=0, width=0):
    """Round window to include full expanse of intersecting tiles.

    Parameters
    ----------
    window: Window
        The input window.

    block_shapes : tuple of block shapes
        The input raster's block shape. All bands must have the same
        block/stripe structure

    Returns
    -------
    Window
    """
    if len(set(block_shapes)) != 1:  # pragma: no cover
        raise WindowError(
            "All bands must have the same block/stripe structure")

    window = evaluate(window, height=height, width=width)

    height_shape = block_shapes[0][0]
    width_shape = block_shapes[0][1]

    (row_start, row_stop), (col_start, col_stop) = window.toranges()

    # Snap start down to a block boundary; extend stop up to the next one
    # unless it already falls exactly on a boundary.
    row_min = int(row_start // height_shape) * height_shape
    row_max = int(row_stop // height_shape) * height_shape + \
        (height_shape if row_stop % height_shape != 0 else 0)

    col_min = int(col_start // width_shape) * width_shape
    col_max = int(col_stop // width_shape) * width_shape + \
        (width_shape if col_stop % width_shape != 0 else 0)

    return Window(col_min, row_min, col_max - col_min, row_max - row_min)


def validate_length_value(instance, attribute, value):
    # attrs validator: window lengths must not be negative.
    if value and value < 0:
        raise ValueError("Number of columns or rows must be non-negative")


# attrs default: coerce None to 0.0 and everything else to float.
_default = attr.Factory(lambda x: 0.0 if x is None else float(x))


@attr.s(slots=True)
class Window(object):
    """Windows are rectangular subsets of rasters.

    This class abstracts the 2-tuples mentioned in the module docstring
    and adds methods and new constructors.

    Attributes
    ----------
    col_off, row_off: float
        The offset for the window.
    width, height: float
        Lengths of the window.

    Previously the lengths were called 'num_cols' and 'num_rows' but
    this is a bit confusing in the new float precision world and the
    attributes have been changed. The originals are deprecated.
    """
    col_off = attr.ib(default=_default)
    row_off = attr.ib(default=_default)
    width = attr.ib(default=_default, validator=validate_length_value)
    height = attr.ib(default=_default, validator=validate_length_value)

    def __repr__(self):
        """Return a nicely formatted representation string"""
        return (
            "Window(col_off={self.col_off}, row_off={self.row_off}, "
            "width={self.width}, height={self.height})").format(
                self=self)

    def flatten(self):
        """A flattened form of the window.

        Returns
        -------
        col_off, row_off, width, height: float
            Window offsets and lengths.
        """
        return (self.col_off, self.row_off, self.width, self.height)

    def todict(self):
        """A mapping of attribute names and values.

        Returns
        -------
        dict
        """
        return collections.OrderedDict(
            col_off=self.col_off, row_off=self.row_off, width=self.width,
            height=self.height)

    def toranges(self):
        """Makes an equivalent pair of range tuples"""
        return (
            (self.row_off, self.row_off + self.height),
            (self.col_off, self.col_off + self.width))

    def toslices(self):
        """Slice objects for use as an ndarray indexer.

        Returns
        -------
        row_slice, col_slice: slice
            A pair of slices in row, column order
        """
        return tuple(slice(*rng) for rng in self.toranges())

    @property
    def num_cols(self):
        # Deprecated alias for width.
        warnings.warn("use 'width' attribute instead",
                      RasterioDeprecationWarning)
        return self.width

    @property
    def num_rows(self):
        # Deprecated alias for height.
        warnings.warn("use 'height' attribute instead",
                      RasterioDeprecationWarning, stacklevel=2)
        return self.height

    def __getitem__(self, index):
        """Provides backwards compatibility for clients using tuples"""
        warnings.warn("This usage is deprecated",
                      RasterioDeprecationWarning)
        return self.toranges()[index]

    @classmethod
    def from_offlen(cls, col_off, row_off, num_cols, num_rows):
        """For backwards compatibility only"""
        warnings.warn("Use the class constructor instead of this method",
                      RasterioDeprecationWarning)
        return cls(col_off, row_off, num_cols, num_rows)

    @classmethod
    def from_slices(cls, rows, cols, height=-1, width=-1, boundless=False):
        """Construct a Window from row and column slices or tuples.

        Parameters
        ----------
        rows, cols: slice or tuple
            Slices or 2-tuples containing start, stop indexes.
        height, width: float
            A shape to resolve relative values against.
        boundless: bool, optional
            Whether the inputs are bounded or bot.

        Returns
        -------
        Window
        """
        # Convert the rows indexing obj to offset and height.
        # Normalize to slices
        if not isinstance(rows, (tuple, slice)):
            raise WindowError("rows must be a tuple or slice")
        else:
            rows = slice(*rows) if isinstance(rows, tuple) else rows

        # Resolve the window height.
        # Fail if the stop value is relative or implicit and there
        # is no height context.
        if not boundless and (
                (rows.start is not None and rows.start < 0) or
                rows.stop is None or rows.stop < 0) and height < 0:
            raise WindowError(
                "A non-negative height is required")

        row_off = rows.start or 0.0
        if not boundless and row_off < 0:
            # Negative start indexes from the end of the array.
            row_off += height

        row_stop = height if rows.stop is None else rows.stop
        if not boundless and row_stop < 0:
            row_stop += height

        num_rows = row_stop - row_off

        # Number of rows is never less than 0.
        num_rows = max(num_rows, 0.0)

        # Do the same for the cols indexing object.
        if not isinstance(cols, (tuple, slice)):
            raise WindowError("cols must be a tuple or slice")
        else:
            cols = slice(*cols) if isinstance(cols, tuple) else cols

        if not boundless and (
                (cols.start is not None and cols.start < 0) or
                cols.stop is None or cols.stop < 0) and width < 0:
            raise WindowError("A non-negative width is required")

        col_off = cols.start or 0.0
        if not boundless and col_off < 0:
            col_off += width

        col_stop = width if cols.stop is None else cols.stop
        if not boundless and col_stop < 0:
            col_stop += width

        num_cols = col_stop - col_off
        num_cols = max(num_cols, 0.0)

        return cls(col_off, row_off, num_cols, num_rows)

    @classmethod
    def from_ranges(cls, rows, cols):
        """For backwards compatibility only"""
        warnings.warn("Use the from_slices class method instead",
                      RasterioDeprecationWarning)
        return cls.from_slices(rows, cols)

    def round_lengths(self, op='floor', pixel_precision=3):
        """Return a copy with width and height rounded.

        Lengths are rounded to the nearest whole number. The offsets
        are not changed.

        Parameters
        ----------
        op: str
            'ceil' or 'floor'
        pixel_precision: int
            Number of places of rounding precision.

        Returns
        -------
        Window
        """
        operator = getattr(math, op, None)
        if not operator:
            raise WindowError("operator must be 'ceil' or 'floor'")
        else:
            # Pre-round to pixel_precision to absorb float noise before
            # applying ceil/floor.
            return Window(self.col_off, self.row_off,
                          operator(round(self.width, pixel_precision)),
                          operator(round(self.height, pixel_precision)))

    round_shape = round_lengths

    def round_offsets(self, op='floor', pixel_precision=3):
        """Return a copy with column and row offsets rounded.

        Offsets are rounded to the nearest whole number. The lengths
        are not changed.

        Parameters
        ----------
        op: str
            'ceil' or 'floor'
        pixel_precision: int
            Number of places of rounding precision.

        Returns
        -------
        Window
        """
        operator = getattr(math, op, None)
        if not operator:
            raise WindowError("operator must be 'ceil' or 'floor'")
        else:
            return Window(operator(round(self.col_off, pixel_precision)),
                          operator(round(self.row_off, pixel_precision)),
                          self.width, self.height)

    def crop(self, height, width):
        """Return a copy cropped to height and width"""
        return crop(self, height, width)

    def intersection(self, other):
        """Return the intersection of this window and another

        Parameters
        ----------
        other: Window
            Another window

        Returns
        -------
        Window
        """
        return intersection([self, other])
# Copyright (c) 2021 The University of Texas at Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>
#

"""
This script tests forking gem5 with the KVM cores and switching cores in
the child process. First, the test boots linux with KVM and tests
fast-forwarding with instruction exit events. Then the test forks the
simulation, waits for the child to simulate until completion, and then
simulates to completion in the parent process.
"""

import argparse
import os
import sys
from textwrap import dedent

import m5
from m5.objects import Root

from gem5.components.boards.x86_board import X86Board
from gem5.coherence_protocol import CoherenceProtocol
from gem5.isas import ISA
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.processors.cpu_types import CPUTypes
from gem5.components.processors.simple_switchable_processor import (
    SimpleSwitchableProcessor,
)
from gem5.resources.resource import Resource
from gem5.runtime import (
    get_runtime_coherence_protocol,
    get_runtime_isa,
)
from gem5.utils.requires import requires

parser = argparse.ArgumentParser(
    description="A script to test forking gem5 and switching cpus."
)

parser.add_argument(
    "-m",
    "--mem-system",
    type=str,
    choices=("classic", "mi_example", "mesi_two_level"),
    required=True,
    help="The memory system.",
)

parser.add_argument(
    "-n",
    "--num-cpus",
    type=int,
    choices=(1, 2, 4, 8),
    default=4,
    help="The number of CPUs.",
)

parser.add_argument(
    "-c",
    "--cpu",
    type=str,
    choices=("kvm", "atomic", "timing", "o3"),
    required=True,
    help="The CPU type.",
)

parser.add_argument(
    "-r",
    "--resource-directory",
    type=str,
    required=False,
    help="The directory in which resources will be downloaded or exist.",
)

parser.add_argument(
    "-o",
    "--override-download",
    action="store_true",
    help="Override a local resource if the hashes do not match.",
)

parser.add_argument(
    "-k",
    "--kernel-args",
    type=str,
    default="init=/root/gem5_init.sh",
    help="Additional kernel boot arguments.",
)

parser.add_argument(
    "-f",
    "--num-forks",
    type=int,
    default=4,
    help="The number of times to fork gem5.",
)

args = parser.parse_args()

# Translate the requested memory system into the coherence protocol the
# gem5 binary must have been compiled with (None for the classic system,
# which has no Ruby protocol requirement).
coherence_protocol_required = None
if args.mem_system == "mi_example":
    coherence_protocol_required = CoherenceProtocol.MI_EXAMPLE
elif args.mem_system == "mesi_two_level":
    coherence_protocol_required = CoherenceProtocol.MESI_TWO_LEVEL

requires(
    isa_required=ISA.X86,
    coherence_protocol_required=coherence_protocol_required,
    kvm_required=(args.cpu == "kvm"),
)

# Build the cache hierarchy matching the requested memory system. Imports
# are deferred so only the selected (and compiled-in) hierarchy is loaded.
cache_hierarchy = None
if args.mem_system == "mi_example":
    from gem5.components.cachehierarchies.ruby.\
        mi_example_cache_hierarchy import (
        MIExampleCacheHierarchy,
    )

    cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8)
elif args.mem_system == "mesi_two_level":
    from gem5.components.cachehierarchies.ruby.\
        mesi_two_level_cache_hierarchy import (
        MESITwoLevelCacheHierarchy,
    )

    cache_hierarchy = MESITwoLevelCacheHierarchy(
        l1d_size="16kB",
        l1d_assoc=8,
        l1i_size="16kB",
        l1i_assoc=8,
        l2_size="256kB",
        l2_assoc=16,
        num_l2_banks=1,
    )
elif args.mem_system == "classic":
    from gem5.components.cachehierarchies.classic.\
        private_l1_cache_hierarchy import (
        PrivateL1CacheHierarchy,
    )

    cache_hierarchy = PrivateL1CacheHierarchy(l1d_size="16kB",
                                              l1i_size="16kB")
else:
    raise NotImplementedError(
        "Memory system '{}' is not supported in the boot tests.".format(
            args.mem_system
        )
    )

# Singleton comparison uses 'is not', per PEP 8 (was '!= None').
assert cache_hierarchy is not None

# Setup the system memory.
memory = SingleChannelDDR3_1600(size="3GB")

# Setup a Processor.
cpu_type = None
if args.cpu == "kvm":
    cpu_type = CPUTypes.KVM
elif args.cpu == "atomic":
    cpu_type = CPUTypes.ATOMIC
elif args.cpu == "timing":
    cpu_type = CPUTypes.TIMING
elif args.cpu == "o3":
    cpu_type = CPUTypes.O3
else:
    raise NotImplementedError(
        "CPU type '{}' is not supported in the boot tests.".format(args.cpu)
    )

assert cpu_type is not None

# Boot with KVM for speed, then switch to the requested core type in each
# forked child.
processor = SimpleSwitchableProcessor(
    starting_core_type=CPUTypes.KVM,
    switch_core_type=cpu_type,
    num_cores=args.num_cpus,
)

# Setup the motherboard.
motherboard = X86Board(
    clk_freq="3GHz",
    processor=processor,
    memory=memory,
    cache_hierarchy=cache_hierarchy,
    exit_on_work_items=True,
)

motherboard.connect_things()

# Set the Full System workload.
motherboard.set_workload(
    kernel=Resource(
        "x86-linux-kernel-5.4.49",
        override=args.override_download,
        resource_directory=args.resource_directory,
    ),
    disk_image=Resource(
        "x86-ubuntu-img",
        override=args.override_download,
        resource_directory=args.resource_directory,
    ),
    command=dedent(
        """
        m5 exit # signal end of boot
        m5 exit # exit in children and parent
        """
    ),
    kernel_args=[args.kernel_args],
)

# Begin running of the simulation. This will exit once the Linux system boot
# is complete.
print("Running with ISA: " + get_runtime_isa().name)
print("Running with protocol: " + get_runtime_coherence_protocol().name)
print()

root = Root(full_system=True, system=motherboard)

# TODO: This is annoying. Is there a way to fix this to happen
# automatically when running KVM?
root.sim_quantum = int(1e9)

# Disable the gdb ports. Required for forking.
m5.disableAllListeners()

m5.instantiate()

# Simulate the initial boot with the starting KVM cpu.
exit_event = m5.simulate()
print("Boot finished", exit_event.getCause())

print("Starting fork and switch processors test")

pids = []
for i in range(args.num_forks):
    # Each child gets its own output directory under the parent's,
    # named by the current tick.
    pid = m5.fork("%(parent)s/" + str(m5.curTick()))
    if pid == 0:  # in child
        print(f"Switching processors in child {i}.")
        processor.switch()
        exit_event = m5.simulate()
        if exit_event.getCause() != "m5_exit instruction encountered":
            raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
        print("Child finished, exiting: ", exit_event.getCause())
        sys.exit(0)
    else:
        pids.append(pid)

print("Waiting for children...")
for pid in pids:
    print(os.waitpid(pid, 0))

print("Children finished! Running to completion in parent.")
exit_event = m5.simulate()
if exit_event.getCause() != "m5_exit instruction encountered":
    raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")