416291
|
import torch
import os
from utils import geo_utils, ba_functions
import numpy as np
def prepare_predictions(data, pred_cam, conf, bundle_adjustment):
# Convert the inputs from pred_cam into numpy arrays
outputs = {}
outputs['scan_name'] = data.scan_name
calibrated = conf.get_bool('dataset.calibrated')
Ns = data.Ns.cpu().numpy()
Ns_inv = data.Ns_invT.transpose(1, 2).cpu().numpy() # Ks for calibrated, a normalization matrix for uncalibrated
M = data.M.cpu().numpy()
xs = geo_utils.M_to_xs(M)
Ps_norm = pred_cam["Ps_norm"].cpu().numpy() # Normalized camera!!
Ps = Ns_inv @ Ps_norm # unnormalized cameras
pts3D_pred = geo_utils.pflat(pred_cam["pts3D"]).cpu().numpy()
pts3D_triangulated = geo_utils.n_view_triangulation(Ps, M=M, Ns=Ns)
outputs['xs'] = xs # to compute reprojection error later
outputs['Ps'] = Ps
outputs['Ps_norm'] = Ps_norm
outputs['pts3D_pred'] = pts3D_pred # 4,m
outputs['pts3D_triangulated'] = pts3D_triangulated # 4,n
if calibrated:
Ks = Ns_inv # data.Ns.inverse().cpu().numpy()
outputs['Ks'] = Ks
Rs_gt, ts_gt = geo_utils.decompose_camera_matrix(data.y.cpu().numpy(), Ks) # For alignment and R,t errors
outputs['Rs_gt'] = Rs_gt
outputs['ts_gt'] = ts_gt
Rs_pred, ts_pred = geo_utils.decompose_camera_matrix(Ps_norm)
outputs['Rs'] = Rs_pred
outputs['ts'] = ts_pred
Rs_fixed, ts_fixed, similarity_mat = geo_utils.align_cameras(Rs_pred, Rs_gt, ts_pred, ts_gt, return_alignment=True) # Align Rs_fixed, tx_fixed
outputs['Rs_fixed'] = Rs_fixed
outputs['ts_fixed'] = ts_fixed
outputs['pts3D_pred_fixed'] = (similarity_mat @ pts3D_pred) # 4,n
outputs['pts3D_triangulated_fixed'] = (similarity_mat @ pts3D_triangulated)
if bundle_adjustment:
repeat = conf.get_bool('ba.repeat')
triangulation = conf.get_bool('ba.triangulation')
ba_res = ba_functions.euc_ba(xs, Rs=Rs_pred, ts=ts_pred, Ks=np.linalg.inv(Ns),
Xs_our=pts3D_pred.T, Ps=None,
Ns=Ns, repeat=repeat, triangulation=triangulation, return_repro=True) # Rs, ts, Ps, Xs
outputs['Rs_ba'] = ba_res['Rs']
outputs['ts_ba'] = ba_res['ts']
outputs['Xs_ba'] = ba_res['Xs'].T # 4,n
outputs['Ps_ba'] = ba_res['Ps']
R_ba_fixed, t_ba_fixed, similarity_mat = geo_utils.align_cameras(ba_res['Rs'], Rs_gt, ba_res['ts'], ts_gt,
return_alignment=True) # Align Rs_fixed, tx_fixed
outputs['Rs_ba_fixed'] = R_ba_fixed
outputs['ts_ba_fixed'] = t_ba_fixed
outputs['Xs_ba_fixed'] = (similarity_mat @ outputs['Xs_ba'])
else:
if bundle_adjustment:
repeat = conf.get_bool('ba.repeat')
triangulation = conf.get_bool('ba.triangulation')
ba_res = ba_functions.proj_ba(Ps=Ps, xs=xs, Xs_our=pts3D_pred.T, Ns=Ns, repeat=repeat,
triangulation=triangulation, return_repro=True, normalize_in_tri=True) # Ps, Xs
outputs['Xs_ba'] = ba_res['Xs'].T # 4,n
outputs['Ps_ba'] = ba_res['Ps']
return outputs
def compute_errors(outputs, conf, bundle_adjustment):
model_errors = {}
calibrated = conf.get_bool('dataset.calibrated')
Ps = outputs['Ps']
pts3D_pred = outputs['pts3D_pred']
xs = outputs['xs']
pts3D_triangulated = outputs['pts3D_triangulated']
model_errors["our_repro"] = np.nanmean(geo_utils.reprojection_error_with_points(Ps, pts3D_pred.T, xs))
model_errors["triangulated_repro"] = np.nanmean(geo_utils.reprojection_error_with_points(Ps, pts3D_triangulated.T, xs))
if calibrated:
Rs_fixed = outputs['Rs_fixed']
ts_fixed = outputs['ts_fixed']
Rs_gt = outputs['Rs_gt']
ts_gt = outputs['ts_gt']
Rs_error, ts_error = geo_utils.tranlsation_rotation_errors(Rs_fixed, ts_fixed, Rs_gt, ts_gt)
model_errors["ts_mean"] = np.mean(ts_error)
model_errors["ts_med"] = np.median(ts_error)
model_errors["Rs_mean"] = np.mean(Rs_error)
model_errors["Rs_med"] = np.median(Rs_error)
if bundle_adjustment:
Xs_ba = outputs['Xs_ba']
Ps_ba = outputs['Ps_ba']
model_errors['repro_ba'] = np.nanmean(geo_utils.reprojection_error_with_points(Ps_ba, Xs_ba.T, xs))
if calibrated:
Rs_fixed = outputs['Rs_ba_fixed']
ts_fixed = outputs['ts_ba_fixed']
Rs_gt = outputs['Rs_gt']
ts_gt = outputs['ts_gt']
Rs_ba_error, ts_ba_error = geo_utils.tranlsation_rotation_errors(Rs_fixed, ts_fixed, Rs_gt, ts_gt)
model_errors["ts_ba_mean"] = np.mean(ts_ba_error)
model_errors["ts_ba_med"] = np.median(ts_ba_error)
model_errors["Rs_ba_mean"] = np.mean(Rs_ba_error)
model_errors["Rs_ba_med"] = np.median(Rs_ba_error)
# Rs errors mean, ts errors mean, ba repro, rs ba mean, ts ba mean
projected_pts = geo_utils.get_positive_projected_pts_mask(Ps @ pts3D_pred, conf.get_float('loss.infinity_pts_margin'))
valid_pts = geo_utils.xs_valid_points(xs)
unprojected_pts = np.logical_and(~projected_pts, valid_pts)
part_unprojected = unprojected_pts.sum() / valid_pts.sum()
model_errors['unprojected'] = part_unprojected
return model_errors
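# --- Editorial usage sketch (not part of the original file) ---
# A minimal illustration of how the two functions above might be chained for a
# single scan; `data`, `pred_cam` and `conf` are assumed to come from the
# project's data loader, network output and config object, which are not shown here.
#
# outputs = prepare_predictions(data, pred_cam, conf, bundle_adjustment=True)
# errors = compute_errors(outputs, conf, bundle_adjustment=True)
# print(errors["our_repro"], errors.get("Rs_mean"), errors["unprojected"])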
|
416301
|
import numpy as np
import pytest
from neutralocean.eos.tools import vectorize_eos
from neutralocean.eos.jmd95 import rho as rho_jmd95
from neutralocean.eos.jmd95 import rho_s_t as rho_s_t_jmd95
from neutralocean.eos.jmd95 import rho_p as rho_p_jmd95
from neutralocean.eos.jmdfwg06 import rho as rho_jmdfwg06
from neutralocean.eos.jmdfwg06 import rho_s_t as rho_s_t_jmdfwg06
from neutralocean.eos.jmdfwg06 import rho_p as rho_p_jmdfwg06
from neutralocean.eos.gsw import rho as rho_gsw
from neutralocean.eos.gsw import rho_s_t as rho_s_t_gsw
from neutralocean.eos.gsw import rho_p as rho_p_gsw
checkval_jmd95 = (35.5, 3.0, 3000.0, 1041.83267)
rho_jmd95_ufunc = vectorize_eos(rho_jmd95)
# Check Values from Jackett and McDougall (1995) Appendix, p. 388
# and Jackett et al (2006) Appendix A, p. 1723
@pytest.mark.parametrize(
"rho,checkval,decimals",
[
(rho_jmd95, checkval_jmd95, 5),
(rho_jmdfwg06, (35.0, 25.0, 2000.0, 1031.65056056576), 11),
(rho_jmdfwg06, (20.0, 20.0, 1000.0, 1017.72886801964), 11),
(rho_jmdfwg06, (40.0, 12.0, 8000.0, 1062.95279820631), 11),
],
)
def test_checkval(rho, checkval, decimals):
assert np.round(rho(*checkval[:-1]), decimals=decimals) == checkval[-1]
def test_jmd95_ufunc_scalar():
res = rho_jmd95_ufunc(*checkval_jmd95[:-1])
assert np.round(res, decimals=5) == checkval_jmd95[-1]
def test_jmd95_ufunc_array():
# Smoketest: broadcasting
s = np.ones((4, 5), dtype=float) * checkval_jmd95[0]
t = np.ones((5,), dtype=float) * checkval_jmd95[1]
p = checkval_jmd95[2]
res = rho_jmd95_ufunc(s, t, p)
assert res.shape == s.shape
assert np.all(np.round(res, decimals=5) == checkval_jmd95[-1])
@pytest.mark.parametrize(
"rho,rho_s_t,rho_p",
[
(rho_jmd95, rho_s_t_jmd95, rho_p_jmd95),
(rho_jmdfwg06, rho_s_t_jmdfwg06, rho_p_jmdfwg06),
(rho_gsw, rho_s_t_gsw, rho_p_gsw),
],
)
def test_rho_derivs(rho, rho_s_t, rho_p):
"""Check rho_s_t and rho_p functions by centred differences
Parameters
----------
rho : function
Equation of State, in terms of (S, T, P). e.g. `.gsw.rho`
rho_s_t : function
Function of (S, T, P) returning a tuple of length two, giving the
partial derivatives of `rho` with respect to `S` and `T`. e.g. `.gsw.rho_s_t`
rho_p : function
Function of (S, T, P) returning the partial derivatives of `rho` with
respect to `P`. e.g. `.gsw.rho_p`
Returns
-------
None.
Raises
------
AssertionError
If the results of `rho_s_t` and `rho_p` at a (hardcoded) checkvalue
disagree considerably with an approximation of partial derivatives
calculated by evaluating `rho` using centred finite differences.
"""
s, t, p = (35.0, 25.0, 2000.0)
ds, dt, dp = (1e-4, 1e-4, 1e-1)
rs_centred = (rho(s + ds, t, p) - rho(s - ds, t, p)) / (2.0 * ds)
rt_centred = (rho(s, t + dt, p) - rho(s, t - dt, p)) / (2.0 * dt)
rp_centred = (rho(s, t, p + dp) - rho(s, t, p - dp)) / (2.0 * dp)
rs, rt = rho_s_t(s, t, p)
rp = rho_p(s, t, p)
assert np.isclose(rs, rs_centred, rtol=0, atol=1e-8)
assert np.isclose(rt, rt_centred, rtol=0, atol=1e-8)
assert np.isclose(rp, rp_centred, rtol=0, atol=1e-11)
|
416306
|
import pyarrow.parquet as pq
import pandas as pd
import numpy as np
import json
from typing import List, Callable, Iterator, Union, Optional
from sportsdataverse.errors import SeasonNotFoundError
from sportsdataverse.dl_utils import download
def espn_mbb_schedule(dates=None, groups=None, season_type=None, limit=500) -> pd.DataFrame:
"""espn_mbb_schedule - look up the men's college basketball scheduler for a given season
Args:
dates (int): Used to define different seasons. 2002 is the earliest available season.
groups (int): Used to define different divisions. 50 is Division I, 51 is Division II/Division III.
season_type (int): 2 for regular season, 3 for post-season, 4 for off-season.
limit (int): number of records to return, default: 500.
Returns:
pd.DataFrame: Pandas dataframe containing schedule dates for the requested season.
"""
if dates is None:
dates = ''
else:
dates = '&dates=' + str(dates)
if groups is None:
groups = '&groups=50'
else:
groups = '&groups=' + str(groups)
if season_type is None:
season_type = ''
else:
season_type = '&seasontype=' + str(season_type)
url = "http://site.api.espn.com/apis/site/v2/sports/basketball/mens-college-basketball/scoreboard?limit={}{}{}{}".format(limit, dates, groups, season_type)
resp = download(url=url)
ev = pd.DataFrame()
if resp is not None:
events_txt = json.loads(resp)
events = events_txt.get('events')
for event in events:
event.get('competitions')[0].get('competitors')[0].get('team').pop('links',None)
event.get('competitions')[0].get('competitors')[1].get('team').pop('links',None)
if event.get('competitions')[0].get('competitors')[0].get('homeAway')=='home':
event['competitions'][0]['home'] = event.get('competitions')[0].get('competitors')[0].get('team')
event['competitions'][0]['home']['score'] = event.get('competitions')[0].get('competitors')[0].get('score')
event['competitions'][0]['home']['winner'] = event.get('competitions')[0].get('competitors')[0].get('winner')
event['competitions'][0]['away'] = event.get('competitions')[0].get('competitors')[1].get('team')
event['competitions'][0]['away']['score'] = event.get('competitions')[0].get('competitors')[1].get('score')
event['competitions'][0]['away']['winner'] = event.get('competitions')[0].get('competitors')[1].get('winner')
else:
event['competitions'][0]['away'] = event.get('competitions')[0].get('competitors')[0].get('team')
event['competitions'][0]['away']['score'] = event.get('competitions')[0].get('competitors')[0].get('score')
event['competitions'][0]['away']['winner'] = event.get('competitions')[0].get('competitors')[0].get('winner')
event['competitions'][0]['home'] = event.get('competitions')[0].get('competitors')[1].get('team')
event['competitions'][0]['home']['score'] = event.get('competitions')[0].get('competitors')[1].get('score')
event['competitions'][0]['home']['winner'] = event.get('competitions')[0].get('competitors')[1].get('winner')
del_keys = ['broadcasts','geoBroadcasts', 'headlines', 'series', 'situation', 'tickets', 'odds']
for k in del_keys:
event.get('competitions')[0].pop(k, None)
if len(event.get('competitions')[0]['notes'])>0:
event.get('competitions')[0]['notes_type'] = event.get('competitions')[0]['notes'][0].get("type")
event.get('competitions')[0]['notes_headline'] = event.get('competitions')[0]['notes'][0].get("headline").replace('"','')
else:
event.get('competitions')[0]['notes_type'] = ''
event.get('competitions')[0]['notes_headline'] = ''
event.get('competitions')[0].pop('notes', None)
x = pd.json_normalize(event.get('competitions')[0])
x['game_id'] = x['id'].astype(int)
x['season'] = event.get('season').get('year')
x['season_type'] = event.get('season').get('type')
ev = ev.append(x)
ev = pd.DataFrame(ev)
# ev = ev.astype({
# 'id': int,
# 'uid': str,
# 'date': str,
# 'notes_type': str,
# 'notes_headline': str,
# 'type.id': int,
# 'type.abbreviation': str,
# 'venue.id': int,
# 'venue.fullName': str,
# 'venue.address.city': str,
# 'venue.address.state': str,
# 'venue.capacity': int,
# 'venue.indoor': bool,
# 'status.clock': str,
# 'status.displayClock': str,
# 'status.period ': int,
# 'status.type.id': int,
# 'status.type.name': str,
# 'status.type.state': str,
# 'status.type.completed': bool,
# 'status.type.description': str,
# 'status.type.detail': str,
# 'status.type.shortDetail': str,
# 'format.regulation.periods': int,
# 'home.id': int,
# 'home.uid': str,
# 'home.location': str,
# 'home.name': str,
# 'home.abbreviation': str,
# 'home.displayName': str,
# 'home.shortDisplayName': str,
# 'home.color': str,
# 'home.alternateColor': str,
# 'home.isActive': bool,
# 'home.venue.id': int,
# 'home.logo': str,
# 'home.conferenceId': int,
# 'home.score': int,
# 'home.winner': bool,
# 'away.id': int,
# 'away.uid': str,
# 'away.location': str,
# 'away.name': str,
# 'away.abbreviation': str,
# 'away.displayName': str,
# 'away.shortDisplayName': str,
# 'away.color': str,
# 'away.alternateColor': str,
# 'away.isActive': bool,
# 'away.venue.id': int,
# 'away.logo': str,
# 'away.conferenceId': int,
# 'away.score': int,
# 'away.winner': bool,
# 'tournamentId': int
# },errors='ignore')
# print(ev.columns)
return ev
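# Editorial usage sketch (illustrative arguments only, not from the original file):
# schedule_df = espn_mbb_schedule(dates=2022, groups=50, season_type=2, limit=500)
# print(schedule_df[["game_id", "date", "season", "season_type"]].head())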
def espn_mbb_calendar(season=None) -> pd.DataFrame:
"""espn_mbb_calendar - look up the men's college basketball calendar for a given season
Args:
season (int): Used to define different seasons. 2002 is the earliest available season.
Returns:
pd.DataFrame: Pandas dataframe containing schedule dates for the requested season.
"""
if int(season) < 2002:
raise SeasonNotFoundError("season cannot be less than 2002")
url = "http://site.api.espn.com/apis/site/v2/sports/basketball/mens-college-basketball/scoreboard?dates={}".format(season)
resp = download(url=url)
txt = json.loads(resp)['leagues'][0]['calendar']
datenum = list(map(lambda x: x[:10].replace("-",""),txt))
date = list(map(lambda x: x[:10],txt))
year = list(map(lambda x: x[:4],txt))
month = list(map(lambda x: x[5:7],txt))
day = list(map(lambda x: x[8:10],txt))
data = {
"season": season,
"datetime" : txt,
"date" : date,
"year": year,
"month": month,
"day": day,
"dateURL": datenum
}
df = pd.DataFrame(data)
df['url']="http://site.api.espn.com/apis/site/v2/sports/basketball/mens-college-basketball/scoreboard?dates="
df['url']= df['url'] + df['dateURL']
return df
|
416311
|
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
Lattice = SchLib(tool=SKIDL).add_parts(*[
Part(name='GAL16V8',dest=TEMPLATE,tool=SKIDL,keywords='GAL PLD 16V8',description='Programmable Logic Array, DIP-20/SOIC-20/PLCC-20',ref_prefix='U',num_units=1,fplist=['DIP*', 'PDIP*', 'SOIC*', 'SO*', 'PLCC*'],do_erc=True,pins=[
Pin(num='10',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='I1/CLK',do_erc=True),
Pin(num='2',name='I2',do_erc=True),
Pin(num='3',name='I3',do_erc=True),
Pin(num='4',name='I4',do_erc=True),
Pin(num='5',name='I5',do_erc=True),
Pin(num='6',name='I6',do_erc=True),
Pin(num='7',name='I7',do_erc=True),
Pin(num='8',name='I8',do_erc=True),
Pin(num='9',name='I9',do_erc=True),
Pin(num='11',name='I10/~OE~',do_erc=True),
Pin(num='12',name='IO8',func=Pin.TRISTATE,do_erc=True),
Pin(num='13',name='IO7',func=Pin.TRISTATE,do_erc=True),
Pin(num='14',name='IO6',func=Pin.TRISTATE,do_erc=True),
Pin(num='15',name='IO5',func=Pin.TRISTATE,do_erc=True),
Pin(num='16',name='IO4',func=Pin.TRISTATE,do_erc=True),
Pin(num='17',name='IO3',func=Pin.TRISTATE,do_erc=True),
Pin(num='18',name='IO2',func=Pin.TRISTATE,do_erc=True),
Pin(num='19',name='IO1',func=Pin.TRISTATE,do_erc=True)]),
Part(name='PAL16L8',dest=TEMPLATE,tool=SKIDL,keywords='PAL PLD 16L8',description='Programmable Logic Array, DIP-20',ref_prefix='U',num_units=1,fplist=['DIP*', 'PDIP*'],do_erc=True,pins=[
Pin(num='10',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='I1',do_erc=True),
Pin(num='2',name='I2',do_erc=True),
Pin(num='3',name='I3',do_erc=True),
Pin(num='4',name='I4',do_erc=True),
Pin(num='5',name='I5',do_erc=True),
Pin(num='6',name='I6',do_erc=True),
Pin(num='7',name='I7',do_erc=True),
Pin(num='8',name='I8',do_erc=True),
Pin(num='9',name='I9',do_erc=True),
Pin(num='11',name='I10',do_erc=True),
Pin(num='12',name='IO8',func=Pin.TRISTATE,do_erc=True),
Pin(num='13',name='IO7',func=Pin.TRISTATE,do_erc=True),
Pin(num='14',name='IO6',func=Pin.TRISTATE,do_erc=True),
Pin(num='15',name='IO5',func=Pin.TRISTATE,do_erc=True),
Pin(num='16',name='IO4',func=Pin.TRISTATE,do_erc=True),
Pin(num='17',name='IO3',func=Pin.TRISTATE,do_erc=True),
Pin(num='18',name='IO2',func=Pin.TRISTATE,do_erc=True),
Pin(num='19',name='IO1',func=Pin.TRISTATE,do_erc=True)]),
Part(name='PAL20L8',dest=TEMPLATE,tool=SKIDL,keywords='PAL PLD 20L8',description='Programmable Logic Array, DIP-24',ref_prefix='U',num_units=1,fplist=['DIP*', 'PDIP*'],do_erc=True,pins=[
Pin(num='12',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='I1',do_erc=True),
Pin(num='2',name='I2',do_erc=True),
Pin(num='3',name='I3',do_erc=True),
Pin(num='4',name='I4',do_erc=True),
Pin(num='5',name='I5',do_erc=True),
Pin(num='6',name='I6',do_erc=True),
Pin(num='7',name='I7',do_erc=True),
Pin(num='8',name='I8',do_erc=True),
Pin(num='9',name='I9',do_erc=True),
Pin(num='10',name='I10',do_erc=True),
Pin(num='20',name='IO20',func=Pin.TRISTATE,do_erc=True),
Pin(num='11',name='I11',do_erc=True),
Pin(num='21',name='IO21',func=Pin.TRISTATE,do_erc=True),
Pin(num='22',name='IO22',func=Pin.TRISTATE,do_erc=True),
Pin(num='13',name='I13',do_erc=True),
Pin(num='23',name='I23',do_erc=True),
Pin(num='14',name='I14',do_erc=True),
Pin(num='15',name='IO15',func=Pin.TRISTATE,do_erc=True),
Pin(num='16',name='IO16',func=Pin.TRISTATE,do_erc=True),
Pin(num='17',name='IO17',func=Pin.TRISTATE,do_erc=True),
Pin(num='18',name='IO18',func=Pin.TRISTATE,do_erc=True),
Pin(num='19',name='IO19',func=Pin.TRISTATE,do_erc=True)]),
Part(name='PAL20RS10',dest=TEMPLATE,tool=SKIDL,keywords='PAL PLD 20RS10',description='Programmable Logic Array, DIP-24 (Narrow)',ref_prefix='U',num_units=1,fplist=['DIP*', 'PDIP*'],do_erc=True,pins=[
Pin(num='1',name='CLK',do_erc=True),
Pin(num='2',name='I0',do_erc=True),
Pin(num='3',name='I1',do_erc=True),
Pin(num='4',name='I2',do_erc=True),
Pin(num='5',name='I3',do_erc=True),
Pin(num='6',name='I4',do_erc=True),
Pin(num='7',name='I5',do_erc=True),
Pin(num='8',name='I6',do_erc=True),
Pin(num='9',name='I7',do_erc=True),
Pin(num='10',name='I8',do_erc=True),
Pin(num='20',name='O3',func=Pin.OUTPUT,do_erc=True),
Pin(num='11',name='I9',do_erc=True),
Pin(num='21',name='O2',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='O1',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='~OE~',do_erc=True),
Pin(num='23',name='O0',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='O9',func=Pin.OUTPUT,do_erc=True),
Pin(num='24',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='O8',func=Pin.OUTPUT,do_erc=True),
Pin(num='16',name='O7',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='O6',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='O5',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='O4',func=Pin.OUTPUT,do_erc=True)])])
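# Editorial usage sketch (assumed typical SKiDL workflow, not part of the
# generated library file): a template from this library could be instantiated
# and wired roughly like so.
# from skidl import Part, Net
# pld = Part(Lattice, 'GAL16V8')
# vcc, gnd = Net('VCC'), Net('GND')
# vcc += pld['VCC']
# gnd += pld['GND']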
|
416329
|
import sys
import struct
import time
from i2cdriver import I2CDriver, EDS
if __name__ == '__main__':
i2 = I2CDriver(sys.argv[1])
d = EDS.Magnet(i2)
while 1:
print(d.measurement())
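# Editorial note: the script expects the I2CDriver serial port as its first
# command-line argument, e.g. (hypothetical device path):
#   python magnet_poll.py /dev/ttyUSB0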
|
416372
|
from datetime import datetime
from agent import source, cli
from ..test_zpipeline_base import TestInputBase
from ...conftest import generate_input
class TestClickhouse(TestInputBase):
__test__ = True
params = {
'test_source_create': [{'name': 'test_jdbc_clickhouse', 'type': 'clickhouse', 'conn': 'clickhouse://clickhouse:8123/test'}],
'test_create': [
{'name': 'test_clickhouse', 'source': 'test_jdbc_clickhouse', 'timestamp_type': '', 'timestamp_name': 'timestamp_unix'},
{'name': 'test_clickhouse_timestamp_ms', 'source': 'test_jdbc_clickhouse', 'timestamp_type': 'unix_ms',
'timestamp_name': 'timestamp_unix_ms'},
{'name': 'test_clickhouse_timestamp_datetime', 'source': 'test_jdbc_clickhouse', 'timestamp_type': 'datetime',
'timestamp_name': 'timestamp_datetime'}],
'test_create_advanced': [{'name': 'test_clickhouse_advanced', 'source': 'test_jdbc_clickhouse'}],
'test_create_with_file': [{'file_name': 'jdbc_pipelines_clickhouse'}],
'test_create_source_with_file': [{'file_name': 'clickhouse_sources'}],
}
def test_source_create(self, cli_runner, name, type, conn):
input_ = {
"type": type,
"source name": name,
"source connection string": conn,
"username": "",
"password": ""
}
result = cli_runner.invoke(cli.source.create, catch_exceptions=False,
input=generate_input(input_))
assert result.exit_code == 0
assert source.repository.exists(name)
def test_create(self, cli_runner, name, source, timestamp_type, timestamp_name):
days_to_backfill = (datetime.now() - datetime(year=2017, month=12, day=10)).days + 1
input_ = {
"source name": source,
"pipeline name": name,
"query": "SELECT * FROM test WHERE {TIMESTAMP_CONDITION}",
"see preview": "",
"interval": 86400,
"days to backfill": days_to_backfill,
"delay": 1,
"timestamp name": timestamp_name,
"timestamp_type": timestamp_type,
"count records": "",
"values": "clicks:gauge impressions:gauge",
"dimensions": "adsize country",
"static dimensions": "",
"tags": "",
"preview": ""
}
result = cli_runner.invoke(cli.pipeline.create, catch_exceptions=False, input=generate_input(input_))
assert result.exit_code == 0
def test_create_advanced(self, cli_runner, name, source):
days_to_backfill = (datetime.now() - datetime(year=2017, month=12, day=10)).days + 1
input_ = {
"source name": source,
"pipeline name": name,
"query": "SELECT * FROM test WHERE {TIMESTAMP_CONDITION} AND country = 'USA'",
"see preview": "",
"interval": 86400,
"days to backfill": days_to_backfill,
"delay": 1,
"timestamp name": "timestamp_unix",
"timestamp_type": "unix",
"count records": "y",
"count records measurement name": "test",
"values": "clicks:gauge impressions:gauge",
"dimensions": "adsize country",
"static dimensions": "key1:val1 key2:val2",
"tags": "",
"preview": ""
}
result = cli_runner.invoke(cli.pipeline.create, ['-a'], catch_exceptions=False, input=generate_input(input_))
assert result.exit_code == 0
|
416472
|
import numpy as np;
m=5;
n=7;
c = np.array( [2, 1.5, 0, 0, 0, 0, 0] );
A = np.array( [ [12, 24, -1, 0, 0, 0, 0],
[16, 16, 0, -1, 0, 0, 0],
[30, 12, 0, 0, -1, 0, 0],
[1, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 1] ] );
b = np.array( [ 120, 120, 120, 15, 15] );
x = np.array( [14, 1, 72, 120, 312, 1, 14] );
y = np.array( [0.01, 0.01, 0.01, -1, -1 ] );
s = np.zeros( n );
s = c - np.dot( np.transpose(A), y );
alpha = 0.995;
epsilon = 0.001;
num = 0;
theta = 0;
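# Editorial comment: the loop below appears to implement a primal-dual
# affine-scaling interior-point iteration for the linear program Ax = b, x >= 0.
# Each pass solves the normal equations (A * X * S^-1 * A^T) dy = b for the dual
# step, back-substitutes dx and ds, chooses a damped step length theta that keeps
# x and s strictly positive, and stops once every complementarity product x_i*s_i
# falls below epsilon.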
print(" iter x1 x2 theta max(xi*si) min(xi*si) #");
while ( True ):
found = True;
print("============== iter: %d ============" % (num) );
maxxs = max( x * s );
minxs = min( x * s );
print(" %d %lf %lf %lf %lf %lf #" % (num, x[0], x[1], theta, maxxs, minxs) );
num = num + 1;
for i in range(n) :
print("x[%d]: %lf s[%d]: %lf x*s[%d]: %lf " % (i, x[i], i, s[i], i, x[i]*s[i] ) );
if ( x[i] * s[i] > epsilon ):
found = False;
if (found == True ):
break;
XSinv = np.zeros( shape=(n,n) );
for i in range(n) :
XSinv[i, i] = x[i] / s[i];
XinvS = np.zeros( shape=(n,n) );
for i in range(n) :
XinvS[i, i] = s[i] / x[i];
AXSinvAt = np.dot( np.dot( A, XSinv ), np.transpose(A) );
dy = np.linalg.solve( AXSinvAt, b );
dx = -x + np.dot( np.dot( XSinv, np.transpose(A) ), dy );
ds = -s - np.dot( XinvS, dx );
for i in range(n):
print("dx[%d]: %lf" % (i, dx[i] ) );
for i in range(m):
print("dy[%d]: %lf" % (i, dy[i] ) );
for i in range(n):
print("ds[%d]: %lf" % (i, ds[i] ) );
thetax = -1;
for i in range(n) :
if (dx[i] < 0 ):
t = x[i] / (-dx[i]);
print("t[%d]: %lf" % (i, t) );
if ( t < thetax or thetax < 0 ):
thetax = t;
thetas = -1;
for i in range(n) :
if (ds[i] < 0 ):
t = s[i] / (-ds[i]);
if ( t < thetas or thetas < 0 ):
thetas = t;
theta = min( 1, thetax*alpha, thetas*alpha);
print("thetax: %lf thetas: %lf theta: %lf" % (thetax, thetas, theta) );
x = x + theta * dx;
s = s + theta * ds;
y = y + theta * dy;
|
416491
|
import trio
import serverwamp
from serverwamp.adapters.trio import TrioAsyncSupport
async def long_running_job(session):
await session.send_event('job_events', job_status='STARTED')
await trio.sleep(3600)
await session.send_event('job_events', job_status='COMPLETED')
rpc_api = serverwamp.RPCRouteSet()
@rpc_api.route('doJob')
async def do_job(nursery, session):
nursery.start_soon(long_running_job, session)  # start_soon takes the function and its args, not a coroutine object
return 'Job scheduled.'
async def application(*args, **kwargs):
async with trio.open_nursery() as rpc_nursery:
wamp = serverwamp.Application(async_support=TrioAsyncSupport)
wamp.set_default_arg('nursery', rpc_nursery)
wamp.add_rpc_routes(rpc_api)
return await wamp.asgi_application(*args, **kwargs)
|
416501
|
import asyncio
# This example uses the sounddevice library to get an audio stream from the
# microphone. It's not a dependency of the project but can be installed with
# `pip install sounddevice`.
import sounddevice
from amazon_transcribe.client import TranscribeStreamingClient
from amazon_transcribe.handlers import TranscriptResultStreamHandler
from amazon_transcribe.model import TranscriptEvent
"""
Here's an example of a custom event handler you can extend to
process the returned transcription results as needed. This
handler will simply print the text out to your interpreter.
"""
class MyEventHandler(TranscriptResultStreamHandler):
async def handle_transcript_event(self, transcript_event: TranscriptEvent):
# This handler can be implemented to handle transcriptions as needed.
# Here's an example to get started.
results = transcript_event.transcript.results
for result in results:
for alt in result.alternatives:
print(alt.transcript)
async def mic_stream():
# This function wraps the raw input stream from the microphone forwarding
# the blocks to an asyncio.Queue.
loop = asyncio.get_event_loop()
input_queue = asyncio.Queue()
def callback(indata, frame_count, time_info, status):
loop.call_soon_threadsafe(input_queue.put_nowait, (bytes(indata), status))
# Be sure to use the correct parameters for the audio stream that matches
# the audio formats described for the source language you'll be using:
# https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
stream = sounddevice.RawInputStream(
channels=1,
samplerate=16000,
callback=callback,
blocksize=1024 * 2,
dtype="int16",
)
# Initiate the audio stream and asynchronously yield the audio chunks
# as they become available.
with stream:
while True:
indata, status = await input_queue.get()
yield indata, status
async def write_chunks(stream):
# This connects the raw audio chunks generator coming from the microphone
# and passes them along to the transcription stream.
async for chunk, status in mic_stream():
await stream.input_stream.send_audio_event(audio_chunk=chunk)
await stream.input_stream.end_stream()
async def basic_transcribe():
# Set up our client with our chosen AWS region
client = TranscribeStreamingClient(region="us-west-2")
# Start transcription to generate our async stream
stream = await client.start_stream_transcription(
language_code="en-US",
media_sample_rate_hz=16000,
media_encoding="pcm",
)
# Instantiate our handler and start processing events
handler = MyEventHandler(stream.output_stream)
await asyncio.gather(write_chunks(stream), handler.handle_events())
loop = asyncio.get_event_loop()
loop.run_until_complete(basic_transcribe())
loop.close()
|
416522
|
import numpy as np
class Rotor:
def __init__(self, position_body, direction_body, clockwise, torque_coef):
self.position = position_body
self.direction = direction_body
self.direction = self.direction / np.linalg.norm(self.direction)
self.clockwise = clockwise
self.torque_coef = torque_coef
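# Editorial usage sketch (hypothetical values, not taken from the original project):
# a rotor mounted 0.2 m along the body x-axis, thrusting along body z, spinning clockwise.
# rotor = Rotor(position_body=np.array([0.2, 0.0, 0.0]),
#               direction_body=np.array([0.0, 0.0, 1.0]),
#               clockwise=True,
#               torque_coef=0.01)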
|
416566
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
#import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.SolidMechanicsApplication as KratosSolid
## This process sets the value of a scalar variable to conditions
from KratosMultiphysics.SolidMechanicsApplication.assign_modulus_and_direction_to_conditions_process import AssignModulusAndDirectionToConditionsProcess
def Factory(custom_settings, Model):
if( not isinstance(custom_settings,KratosMultiphysics.Parameters) ):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return AssignModulusAndDirectionToConditionsProcess(Model, custom_settings["Parameters"])
## All the processes python should be derived from "Process"
class AssignTorqueToConditionsProcess(AssignModulusAndDirectionToConditionsProcess):
def __init__(self, Model, custom_settings ):
KratosMultiphysics.Process.__init__(self)
##settings string in json format
default_settings = KratosMultiphysics.Parameters("""
{
"help" : "This process assigns a torque to conditions",
"model_part_name": "MODEL_PART_NAME",
"variable_name": "VARIABLE_NAME",
"modulus" : 0.0,
"direction": [0.0, 0.0, 0.0],
"center": [0.0, 0.0, 0.0],
"constrained": false,
"interval": [0.0, "End"]
}
""")
#trick to allow "value" to be a string or a double value
if(custom_settings.Has("modulus")):
if(custom_settings["modulus"].IsString()):
default_settings["modulus"].SetString("0.0")
##overwrite the default settings with user-provided parameters
self.settings = custom_settings
self.settings.ValidateAndAssignDefaults(default_settings)
self.custom_settings = custom_settings
###assign scalar process
params = KratosMultiphysics.Parameters("{}")
params.AddValue("model_part_name", self.settings["model_part_name"])
params.AddValue("variable_name",self.settings["variable_name"])
params.AddValue("modulus", self.settings["modulus"])
params.AddValue("direction", self.settings["direction"])
params.AddValue("constrained", self.settings["constrained"])
params.AddValue("interval",self.settings["interval"])
AssignModulusAndDirectionToConditionsProcess.__init__(self, Model, params)
def ExecuteInitialize(self):
# set model part
self.model_part = self.model[self.settings["model_part_name"].GetString()]
if not self.model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]:
self.model_part.ProcessInfo.SetValue(KratosMultiphysics.INTERVAL_END_TIME, self.interval[1])
# set processes
params = KratosMultiphysics.Parameters("{}")
params.AddValue("model_part_name", self.settings["model_part_name"])
params.AddValue("variable_name", self.settings["variable_name"])
params.AddValue("modulus", self.custom_settings["modulus"])
params.AddValue("direction", self.custom_settings["direction"])
params.AddValue("center", self.custom_settings["center"])
self.CreateAssignmentProcess(params)
if( self.IsInsideInterval() and (self.interval_string == "initial" or self.interval_string == "start") ):
self.AssignValueProcess.Execute()
#
def CreateAssignmentProcess(self, params):
if( self.value_is_numeric ):
self.AssignValueProcess = KratosSolid.AssignTorqueAboutAnAxisToConditionsProcess(self.model_part, params)
else:
self.AssignValueProcess = KratosSolid.AssignTorqueFieldAboutAnAxisToConditionsProcess(self.model_part, self.compiled_function, "function", self.value_is_spatial_function, params)
|
416572
|
import datetime
import logging
import pathlib
from jinja2 import Environment, PackageLoader, select_autoescape
import async_imgkit.api
import pi_weatherstation.helpers as helpers
try:
import pi_weatherstation.output.display.ST7789_display as display
except ModuleNotFoundError:
logging.warning("Problem loading display module, using debug display")
import pi_weatherstation.output.display.debug_display as display
RESOURCES_PATH = pathlib.Path(
pathlib.Path(__file__).parent, "..", "template", "resources"
)
env = Environment(
loader=PackageLoader("pi_weatherstation", "template"),
autoescape=select_autoescape(["html"]),
)
template = env.get_template("index.html")
class ScreenOutput:
def __init__(self, store):
self.store = store
self.running = False
self.imgkit_config = async_imgkit.api.config()
self.display = display.Display()
async def _render_image(self):
rendered = template.render(
resources_folder=RESOURCES_PATH,
weather=self.store.get("weather_sensor"),
date=datetime.datetime.now(),
helpers=helpers,
)
img = await async_imgkit.api.from_string(
rendered,
False,
config=self.imgkit_config,
options={
"format": "png",
"width": "240",
"height": "240",
"enable-local-file-access": "",
"encoding": "UTF-8",
"quiet": "",
},
)
return img
async def output(self):
logging.debug("Screen render started")
try:
img_data = await self._render_image()
self.display.display_image(img_data)
except Exception as e:
logging.error(e)
finally:
logging.debug("Screen render done")
|
416576
|
import os
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'apps.api',
'apps.core',
'apps.dev',
'apps.web',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
AUTHENTICATION_BACKENDS = [
'apps.core.backends.EmailLoginBackend',
'apps.core.backends.LDAPLoginBackend',
'apps.core.backends.PasswordlessLoginBackend',
]
ROOT_URLCONF = 'sparcssso.urls'
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'apps.core.backends.context_processors.version',
],
},
}]
WSGI_APPLICATION = 'sparcssso.wsgi.application'
LOGIN_URL = '/account/login/'
# Security
CSRF_USE_SESSIONS = True
CSRF_FAILURE_VIEW = 'apps.core.views.general.csrf_failure'
# Facebook, Twitter, KAIST API keys
FACEBOOK_APP_ID = os.environ.get('FACEBOOK_APP_ID', '')
FACEBOOK_APP_SECRET = os.environ.get('FACEBOOK_APP_SECRET', '')
TWITTER_APP_ID = os.environ.get('TWITTER_APP_ID', '')
TWITTER_APP_SECRET = os.environ.get('TWITTER_APP_SECRET', '')
KAIST_APP_ENABLED = True if os.environ.get('KAIST_APP_ENABLED', '0') == '1' else False
KAIST_APP_SECRET = os.environ.get('KAIST_APP_SECRET', '')
RECAPTCHA_SECRET = os.environ.get('RECAPTCHA_SECRET', '')
# E-mail settings
EMAIL_HOST = 'localhost'
EMAIL_PORT = int(os.environ.get('SSO_EMAIL_PORT', '25'))
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = []
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('SSO_DB_NAME'),
'USER': os.environ.get('SSO_DB_USER'),
'PASSWORD': os.environ.get('SSO_DB_PASSWORD'),
'HOST': os.environ.get('SSO_DB_HOST'),
'PORT': os.environ.get('SSO_DB_PORT', '3306'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = (
('en', 'English'),
('ko', '한국어'),
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Admins & Logging
TEAM_EMAILS = ['<EMAIL>']
ADMINS = (('SSO SYSOP', '<EMAIL>'),)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'apps.logger.SSOLogHandler',
},
'mail': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
},
'loggers': {
'sso': {
'handlers': ['file'],
'level': 'INFO',
},
},
}
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
SENTRY_DSN = os.environ.get('SENTRY_DSN', '')
if SENTRY_DSN != '':
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration()],
send_default_pii=True,
)
else:
print('SENTRY_DSN not provided. Metrics will not be sent.') # noqa: T001
|
416719
|
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Activation
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.regularizers import l2
from ..utils.net_utils import BilinearUpSampling, bn_act_convtranspose, bn_act_conv_block
from ..encoder import scope_table, build_encoder
def interp_block(inputs,
feature_map_shape,
level=1,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99):
"""
:param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
:param feature_map_shape: tuple, target shape of feature map.
:param level: int, default 1.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:return: 4-D tensor, shape of (batch_size, height, width, channel).
"""
ksize = (int(round(float(feature_map_shape[0]) / float(level))),
int(round(float(feature_map_shape[1]) / float(level))))
stride_size = ksize
x = MaxPooling2D(pool_size=ksize, strides=stride_size)(inputs)
x = Conv2D(512, (1, 1), activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
x = Activation("relu")(x)
x = BilinearUpSampling(target_size=feature_map_shape)(x)
return x
def pyramid_scene_pooling(inputs,
feature_map_shape,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99):
""" PSP module.
:param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
:param feature_map_shape: tuple, target shape of feature map.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:return: 4-D tensor, shape of (batch_size, height, width, channel).
"""
interp_block1 = interp_block(inputs, feature_map_shape, level=1,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
interp_block2 = interp_block(inputs, feature_map_shape, level=2,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
interp_block3 = interp_block(inputs, feature_map_shape, level=3,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
interp_block6 = interp_block(inputs, feature_map_shape, level=6,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
return Concatenate()([interp_block1, interp_block2, interp_block3, interp_block6])
def PSPNet(input_shape,
n_class,
encoder_name,
encoder_weights=None,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99,
upscaling_method="bilinear"):
""" implementation of PSPNet for semantic segmentation.
ref: <NAME>, <NAME>, <NAME>, et al. Pyramid Scene Parsing Network[J]. arXiv preprint arXiv:1612.01105, 2016.
:param input_shape: tuple, i.e., (height, width, channel).
:param n_class: int, number of classes, must be >= 2.
:param encoder_name: string, name of encoder.
:param encoder_weights: string, path of weights, default None.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:param upscaling_method: string, "bilinear" or "conv", default "bilinear".
:return: a Keras Model instance.
"""
encoder = build_encoder(input_shape, encoder_name, encoder_weights=encoder_weights,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = encoder.get_layer(scope_table[encoder_name]["pool4"]).output
feature_map_shape = (int(input_shape[0]/16), int(input_shape[1]/16))
features = pyramid_scene_pooling(features, feature_map_shape,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = Conv2D(512, (3, 3), padding="same", use_bias=False, activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(features)
features = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(features)
features = Activation("relu")(features)
# upsample
if upscaling_method == "conv":
features = bn_act_convtranspose(features, 512, (3, 3), 2,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_conv_block(features, 512, (3, 3),
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_convtranspose(features, 256, (3, 3), 2,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_conv_block(features, 256, (3, 3),
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_convtranspose(features, 128, (3, 3), 2,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_conv_block(features, 128, (3, 3),
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_convtranspose(features, 64, (3, 3), 2,
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
features = bn_act_conv_block(features, 64, (3, 3),
weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
else:
features = BilinearUpSampling(target_size=(input_shape[0], input_shape[1]))(features)
output = Conv2D(n_class, (1, 1), activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(features)
output = Activation("softmax")(output)
return Model(encoder.input, output)
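# Editorial usage sketch: encoder_name must be one of the keys of scope_table
# from the project's encoder module; the name below is only a guess and may need
# to be replaced with an encoder that is actually registered.
# model = PSPNet(input_shape=(473, 473, 3), n_class=21,
#                encoder_name="resnet_v2_50", upscaling_method="bilinear")
# model.summary()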
|
416757
|
import torch
from torchvision.transforms import functional as F
class Compose():
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class ToTensor():
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize():
def __init__(self, mean, std, to_bgr=True):
self.mean = mean
self.std = std
self.to_bgr = to_bgr
def __call__(self, image, target):
if self.to_bgr:
image = image[[2, 1, 0]]
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
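# Editorial usage sketch (illustrative mean/std values, not taken from the original project):
# transform = Compose([
#     ToTensor(),
#     Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], to_bgr=False),
# ])
# image, target = transform(pil_image, target)  # pil_image and target come from the dataset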
|
416774
|
import wx
from meerk40t.gui.laserrender import swizzlecolor
from meerk40t.kernel import Context
from meerk40t.svgelements import Color
_ = wx.GetTranslation
class PropertiesPanel(wx.Panel):
"""
PropertiesPanel is a generic panel that presents a simple list of properties to be viewed and edited.
In most cases it can be initialized by passing a choices value which will read the registered choice values
and display the given properties, automatically generating an appropriate changer for that property.
"""
def __init__(self, *args, context: Context = None, choices=None, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.context = context
if choices is None:
return
if isinstance(choices, str):
try:
choices = self.context.registered["choices/%s" % choices]
except KeyError:
return
self.choices = choices
sizer_main = wx.BoxSizer(wx.VERTICAL)
for i, c in enumerate(self.choices):
if isinstance(c, tuple):
# If c is tuple
dict_c = dict()
try:
dict_c["object"] = c[0]
dict_c["attr"] = c[1]
dict_c["default"] = c[2]
dict_c["label"] = c[3]
dict_c["tip"] = c[4]
dict_c["type"] = c[5]
except IndexError:
pass
c = dict_c
try:
attr = c["attr"]
obj = c["object"]
except KeyError:
continue
# get default value
if hasattr(obj, attr):
data = getattr(obj, attr)
else:
# if obj can lack attr, default must have been assigned.
try:
data = c["default"]
except KeyError:
continue
data_type = type(data)
try:
# if type is explicitly given, use that to define data_type.
data_type = c["type"]
except KeyError:
pass
try:
# Get label
label = c["label"]
except KeyError:
# Undefined label is the attr
label = attr
if data_type == bool:
control = wx.CheckBox(self, label=label)
control.SetValue(data)
def on_checkbox_check(param, ctrl):
def check(event=None):
v = ctrl.GetValue()
setattr(obj, param, v)
return check
control.Bind(wx.EVT_CHECKBOX, on_checkbox_check(attr, control))
sizer_main.Add(control, 0, wx.EXPAND, 0)
elif data_type in (str, int, float):
control_sizer = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, label), wx.HORIZONTAL
)
control = wx.TextCtrl(self, -1)
control.SetValue(str(data))
control_sizer.Add(control)
def on_textbox_text(param, ctrl):
def text(event=None):
v = ctrl.GetValue()
try:
setattr(obj, param, data_type(v))
except ValueError:
# If cannot cast to data_type, pass
pass
return text
control.Bind(wx.EVT_TEXT, on_textbox_text(attr, control))
sizer_main.Add(control_sizer, 0, wx.EXPAND, 0)
elif data_type == Color:
control_sizer = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, label), wx.HORIZONTAL
)
control = wx.Button(self, -1)
def set_color(color: Color):
control.SetLabel(str(color.hex))
control.SetBackgroundColour(wx.Colour(swizzlecolor(color)))
control.color = color
def on_button_color(param, ctrl):
def click(event=None):
color_data = wx.ColourData()
color_data.SetColour(wx.Colour(swizzlecolor(ctrl.color)))
dlg = wx.ColourDialog(self, color_data)
if dlg.ShowModal() == wx.ID_OK:
color_data = dlg.GetColourData()
data = Color(
swizzlecolor(color_data.GetColour().GetRGB()), 1.0
)
set_color(data)
try:
setattr(obj, param, data_type(data))
except ValueError:
# If cannot cast to data_type, pass
pass
return click
set_color(data)
control_sizer.Add(control)
control.Bind(wx.EVT_BUTTON, on_button_color(attr, control))
sizer_main.Add(control_sizer, 0, wx.EXPAND, 0)
else:
# Requires a registered data_type
continue
try:
# Set the tool tip if 'tip' is available
control.SetToolTip(c["tip"])
except KeyError:
pass
self.SetSizer(sizer_main)
sizer_main.Fit(self)
|
416799
|
import torch
from typing import Dict, Any
from .result import Result
class ModelInterface:
def create_input(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:
raise NotImplementedError
def decode_outputs(self, outputs: Result) -> Any:
raise NotImplementedError
def __call__(self, data: Dict[str, torch.Tensor]) -> Result:
raise NotImplementedError
|
416842
|
import json
from datetime import datetime
from decimal import Decimal
import numpy as np
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
"""If input object is an ndarray it will be converted into a dict
holding dtype, shape and the data, base64 encoded.
"""
numpy_types = (
np.bool_,
# np.bytes_, -- python `bytes` class is not json serializable
# np.complex64, -- python `complex` class is not json serializable
# np.complex128, -- python `complex` class is not json serializable
# np.complex256, -- python `complex` class is not json serializable
# np.datetime64, -- python `datetime.datetime` class is not json serializable
np.float16,
np.float32,
np.float64,
# np.float128, -- special handling below
np.int8,
np.int16,
np.int32,
np.int64,
# np.object_ -- should already be evaluated as python native
np.str_,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.void,
)
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, numpy_types):
return obj.item()
elif isinstance(obj, np.float128):
return obj.astype(np.float64).item()
elif isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime):
return str(obj)
elif obj is np.ma.masked:
return str(np.NaN)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
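# Editorial usage sketch: serializing a mixed payload with the encoder.
# payload = {"arr": np.arange(3), "when": datetime(2020, 1, 1), "amount": Decimal("1.50")}
# print(json.dumps(payload, cls=CustomEncoder))
# # e.g. {"arr": [0, 1, 2], "when": "2020-01-01 00:00:00", "amount": "1.50"}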
|
416847
|
from __future__ import division, print_function, unicode_literals
import os
import re
from DateTime import DateTime
from HTMLParser import HTMLParser
def seperate_metadata_and_content(s):
"""
Given a string, the metadata is separated from the content. The string must be in the following format:
metadata_tag1: metadata_value1
metadata_tag2: metadata_value2
...
metadata_tagn: metadata_valuen
<Two newlines>
content
content
content
...
Returns (metadata, content)
If no metadata is present then an empty string is returned, same with content.
"""
m_and_c = s.split('\n\n', 1)
metadata = ''
content = ''
# normal case where we have metadata and content
if len(m_and_c) == 2:
metadata = m_and_c[0].strip()
content = m_and_c[1].strip()
# if we only have one of content or metadata this process determines which it is
elif len(m_and_c) == 1:
value = m_and_c[0].strip()
if not len(value) == 0:
lines = value.split('\n')
is_metadata = True
for l in lines:
key_value = l.split(':', 1)
if len(key_value) != 2:
is_metadata = False
break
if is_metadata:
metadata = value
else:
content = value
return (metadata, content)
def metadata_to_dict(metadata):
"""
Takes metadata as specified in seperate_metadata_and_content() documentation and\
converts it to a dictionary.
Note that white space on either side of the metadata tags and values will be stripped; \
also, if there are any duplicate metadata key names, the value of the last duplicate will \
be in the resulting dictionary.
Also, the tags will have any spaces replaced with underscores and be set to lowercase.
"""
result = {}
lines = metadata.split('\n')
for l in lines:
key_value = l.split(':', 1)
if len(key_value) == 2:
key = key_value[0].strip().replace(' ', '_').lower()
value = key_value[1].strip()
result[key] = value
return result
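# Editorial usage sketch of the two helpers above on a tiny document string:
# raw = "Title: Example\nAuthor: Someone\n\nBody text starts here."
# metadata, content = seperate_metadata_and_content(raw)
# meta = metadata_to_dict(metadata)  # {'title': 'Example', 'author': 'Someone'}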
def get_all_files_from_directory(directory, recursive=False):
"""
Set recursive to True to recursively find files.
Return a list of absolute file paths starting at the given directory.
"""
list_of_files = []
for root, dirs, files in os.walk(directory, followlinks=recursive):
for file in files:
list_of_files.append(os.path.join(root, file))
return list_of_files
def get_type(value):
"""
Returns the type of value, value is a string.
Returns one of the known types in the form of a string: 'int', 'float', 'bool', and 'text'.
Example: If value = '34' then the type returned is 'int'.
"""
try:
int(value)
return 'int'
except:
pass
try:
float(value)
return 'float'
except:
pass
lower_value = value.lower()
if lower_value == 'true' or lower_value == 'false' or lower_value == 't' or lower_value == 'f':
return 'bool'
try:
DateTime(value)
return 'datetime'
except:
pass
return 'text'
def collect_types(metadata_types, metadata):
"""
Takes a dictionary metadata_types that keeps track of the types so far.
For each key, value pair in metadata if the key is not present in \
metadata_types then it is added and the type of the value is also added.
Note that if there are conflicting types then the type in metadata_types is \
degraded to 'text'.
"""
for meta_key in metadata:
t = get_type(metadata[meta_key])
if not meta_key in metadata_types:
metadata_types[meta_key] = t
else:
if not metadata_types[meta_key] == t and metadata_types[meta_key] != 'text':
if t == 'float' and metadata_types[meta_key] == 'int':
metadata_types[meta_key] = 'float'
elif t == 'int' and metadata_types[meta_key] == 'float':
pass
elif t == 'text' and metadata_types[meta_key] == 'date':
metadata_types[meta_key] = 'date'
else:
metadata_types[meta_key] = 'text'
def create_subdocuments(name, content, major_delimiter='\n', min_chars=1000):
"""
Return a list of tuples where each tuple contains a subdocument name \
and a subsequence of the original content. Recombining each subdocument \
content in the order the list is iterated over will yield the original content \
(with varying white space.) The argument min_chars specifies the minimum number of \
characters per subdocument. Note that min_chars may be exceeded on some occasions \
if the remaining text doesn't constitute enough for another subdocument. The \
basic algorithm tries to split on new-line boundaries since splitting on \
white space alone may generate odd subdocuments.
"""
subdoc_contents = {}
subdoc_number = 0
if content == '':
subdoc_contents[subdoc_number] = ''
subdoc_number += 1
while content != '':
#~ content = content.strip()
index = 0
while index < min_chars:
index = content.find(major_delimiter, index + 1)
if index < 0:
subdoc_contents[subdoc_number] = content
subdoc_number += 1
content = ''
break
elif index >= min_chars:
subdoc_contents[subdoc_number] = content[0: index]
content = content[index:]
if len(content) < min_chars:
subdoc_contents[subdoc_number] = subdoc_contents[subdoc_number] + content
content = ''
subdoc_number += 1
result = []
for index in xrange(0, subdoc_number):
result.append((name + '_subdoc' + str(index), subdoc_contents[index]))
return result
def remove_html_tags(text, remove_entities=False):
"""Return string that has no HTML tags and removes HTML entities if specified."""
class HTMLStripper(HTMLParser):
def __init__(self):
self.reset()
self.content = []
def handle_data(self, data):
self.content.append(data)
def handle_entityref(self, name):
if not remove_entities:
self.content.append('&' + name + ';')
def handle_charref(self, name):
if not remove_entities:
self.content.append('&#' + name + ';')
def get_content(self):
return ''.join(self.content)
stripper = HTMLStripper()
stripper.feed(text)
stripper.close()
return stripper.get_content()
def replace_html_entities(text):
"""Return string with HTML entities replaced with unicode characters."""
parser = HTMLParser()
return parser.unescape(text)
def get_unicode_content(file_path, encoding=None):
"""
Return a unicode string of the files contents using the given encoding. If no encoding is given
then chardet will be used to determine the encoding.
Note that this uses the chardet library and may cause problems; if an error is thrown then
a utf-8 encoding is assumed and unrecognized characters are discarded.
"""
from chardet.universaldetector import UniversalDetector
try:
if not encoding:
detector = UniversalDetector()
contents = ''
with open(file_path, 'rb') as f:
contents = f.read()
detector.feed(contents)
detector.close()
determined_encoding = detector.result['encoding']
return contents.decode(encoding=determined_encoding)
else:
with open(file_path, 'r') as f:
return unicode(f.read(), encoding=encoding, errors='ignore')
except UnicodeError:
with open(file_path, 'r') as f:
return unicode(f.read(), encoding='utf-8', errors='ignore')
def remove_punctuation(s):
"""Return a string without punctuation (only alpha, numeric, underscore and whitespace characters survive)."""
result = ''
for sub in re.finditer(r'[\w\s]+', s, re.UNICODE):
result += sub.group(0)
return result
# vim: et sw=4 sts=4
|
416896
|
import os
from models.inference_model import InferenceModel
import config
import utils.utils as utils
import torch
import PIL.Image as Image
import torchvision.transforms as transforms
from tqdm import tqdm
import argparse
# inference configurations #
network_name = 'on_white_II'
use_saved_config = False # use the configuration saved at training time (if saved)
set_net_version = 'dual' # None/normal/dual, set to None if you want to use saved config file
alpha_0s = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] # alpha_0 values for normal version
alpha_1s = [None] # alpha_1 values for normal version (if None alpha_0=alpha_1=alpha_2)
alpha_2s = [None] # alpha_2 values for normal version (if None alpha_0=alpha_1=alpha_2)
alpha_0s_dual = [-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1] + alpha_0s # alpha_0 values for dual version
alpha_1s_dual = [None] # alpha_1 values for dual version (if None alpha_0=alpha_1=alpha_2)
alpha_2s_dual = [None] # alpha_2 values for dual version (if None alpha_0=alpha_1=alpha_2)
# ------------------------ #
parser = argparse.ArgumentParser()
parser.add_argument('--network_name', default=network_name)
parser.add_argument('--use_saved_config', default=use_saved_config, type=lambda x:bool(utils.str2bool(x)))
parser.add_argument('--set_net_version', default=set_net_version)
inference_opt = parser.parse_args()
set_net_version = inference_opt.set_net_version
network_name = inference_opt.network_name
use_saved_config = inference_opt.use_saved_config
if set_net_version == 'None':
set_net_version = None
networks_path = os.path.join('trained_nets', network_name)
model_path = os.path.join(networks_path, 'model_dir', 'dynamic_net.pth')
config_path = os.path.join(networks_path, 'config.txt')
inference_images_path = os.path.join('images', 'inference_images')
save_path = os.path.join('results', 'inference_results', network_name)
if not os.path.exists(save_path):
utils.make_dirs(save_path)
opt = config.get_configurations(parser=parser)
if use_saved_config:
if os.path.exists(config_path):
utils.read_config_and_arrange_opt(config_path, opt)
set_net_version = None
else:
raise ValueError('config_path does not exist')
elif set_net_version is None:
raise ValueError('if use_saved_config=False you must set set_net_version != None')
dynamic_model = InferenceModel(opt, set_net_version=set_net_version)
dynamic_model.load_network(model_path)
inference_images_list = list(os.listdir(inference_images_path))
inference_images_list.sort()
to_tensor = transforms.ToTensor()
to_pil_image = transforms.ToPILImage()
if set_net_version == 'dual' or (use_saved_config and opt.network_version == 'dual'):
alpha_0s = alpha_0s_dual
alpha_1s = alpha_1s_dual
alpha_2s = alpha_2s_dual
for image_name in inference_images_list:
input_image = Image.open(os.path.join(inference_images_path, image_name))
input_tensor = to_tensor(input_image).to(dynamic_model.device)
input_tensor = dynamic_model.normalize(input_tensor)
input_tensor = input_tensor.expand(1, -1, -1, -1)
save_name = image_name.split('.')[0]
for alpha_0 in tqdm(alpha_0s):
for alpha_1 in alpha_1s:
for alpha_2 in alpha_2s:
output_tensor = dynamic_model.forward_and_recover(input_tensor.requires_grad_(False), alpha_0=alpha_0, alpha_1=alpha_1, alpha_2=alpha_2)
output_image = to_pil_image(output_tensor.clamp(min=0.0, max=1).cpu().squeeze(dim=0))
if alpha_1 is not None and alpha_2 is not None:
output_image.save(os.path.join(save_path, '%s_%.3f_%.3f_%.3f.png' % (save_name, alpha_0, alpha_1, alpha_2)))
else:
output_image.save(os.path.join(save_path, '%s_%.3f.png' % (save_name, alpha_0)))
|
416919
|
import numpy as np
import torch
import torch.distributions as dist
from torch import nn
from modules.commons.conv import ConditionalConvBlocks
from modules.commons.normalizing_flow.res_flow import ResFlow
from modules.commons.wavenet import WN
class FVAEEncoder(nn.Module):
def __init__(self, c_in, hidden_size, c_latent, kernel_size,
n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'):
super().__init__()
self.strides = strides
self.hidden_size = hidden_size
if np.prod(strides) == 1:
self.pre_net = nn.Conv1d(c_in, hidden_size, kernel_size=1)
else:
self.pre_net = nn.Sequential(*[
nn.Conv1d(c_in, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2)
if i == 0 else
nn.Conv1d(hidden_size, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2)
for i, s in enumerate(strides)
])
if nn_type == 'wn':
self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout)
elif nn_type == 'conv':
self.nn = ConditionalConvBlocks(
hidden_size, c_cond, hidden_size, None, kernel_size,
layers_in_block=2, is_BTC=False, num_layers=n_layers)
self.out_proj = nn.Conv1d(hidden_size, c_latent * 2, 1)
self.latent_channels = c_latent
def forward(self, x, nonpadding, cond):
x = self.pre_net(x)
nonpadding = nonpadding[:, :, ::np.prod(self.strides)][:, :, :x.shape[-1]]
x = x * nonpadding
x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding
x = self.out_proj(x)
m, logs = torch.split(x, self.latent_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs))
return z, m, logs, nonpadding
class FVAEDecoder(nn.Module):
def __init__(self, c_latent, hidden_size, out_channels, kernel_size,
n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'):
super().__init__()
self.strides = strides
self.hidden_size = hidden_size
self.pre_net = nn.Sequential(*[
nn.ConvTranspose1d(c_latent, hidden_size, kernel_size=s, stride=s)
if i == 0 else
nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=s, stride=s)
for i, s in enumerate(strides)
])
if nn_type == 'wn':
self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout)
elif nn_type == 'conv':
self.nn = ConditionalConvBlocks(
hidden_size, c_cond, hidden_size, [1] * n_layers, kernel_size,
layers_in_block=2, is_BTC=False)
self.out_proj = nn.Conv1d(hidden_size, out_channels, 1)
def forward(self, x, nonpadding, cond):
x = self.pre_net(x)
x = x * nonpadding
x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding
x = self.out_proj(x)
return x
class FVAE(nn.Module):
def __init__(self,
c_in_out, hidden_size, c_latent,
kernel_size, enc_n_layers, dec_n_layers, c_cond, strides,
use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None,
encoder_type='wn', decoder_type='wn'):
super(FVAE, self).__init__()
self.strides = strides
self.hidden_size = hidden_size
self.latent_size = c_latent
self.use_prior_flow = use_prior_flow
if np.prod(strides) == 1:
self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1)
else:
self.g_pre_net = nn.Sequential(*[
nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2)
for i, s in enumerate(strides)
])
self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size,
enc_n_layers, c_cond, strides=strides, nn_type=encoder_type)
if use_prior_flow:
self.prior_flow = ResFlow(
c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond)
self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size,
dec_n_layers, c_cond, strides=strides, nn_type=decoder_type)
self.prior_dist = dist.Normal(0, 1)
def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0):
"""
:param x: [B, C_in_out, T]
:param nonpadding: [B, 1, T]
:param cond: [B, C_g, T]
:return:
"""
if nonpadding is None:
nonpadding = 1
cond_sqz = self.g_pre_net(cond)
if not infer:
z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz)
q_dist = dist.Normal(m_q, logs_q.exp())
if self.use_prior_flow:
logqx = q_dist.log_prob(z_q)
z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz)
logpx = self.prior_dist.log_prob(z_p)
loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1]
else:
loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist)
loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1]
z_p = None
return z_q, loss_kl, z_p, m_q, logs_q
else:
latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]]
z_p = torch.randn(latent_shape).to(cond.device) * noise_scale
if self.use_prior_flow:
z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True)
return z_p
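# Hedged usage sketch (illustrative shapes and hyperparameters only; the real
# callers of FVAE live elsewhere in the codebase):
#   fvae = FVAE(c_in_out=80, hidden_size=192, c_latent=16, kernel_size=5,
#               enc_n_layers=8, dec_n_layers=4, c_cond=192, strides=[4],
#               use_prior_flow=False)
#   x = torch.randn(2, 80, 400)            # [B, C_in_out, T]
#   nonpadding = torch.ones(2, 1, 400)     # [B, 1, T]
#   cond = torch.randn(2, 192, 400)        # [B, C_g, T]
#   z_q, loss_kl, z_p, m_q, logs_q = fvae(x, nonpadding, cond)      # training path
#   z_p = fvae(cond=cond, nonpadding=nonpadding, infer=True)        # sampling path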
|
416956
|
class ArrayParam:
def mfouri(self, oper="", coeff="", mode="", isym="", theta="", curve="", **kwargs):
"""Calculates the coefficients for, or evaluates, a Fourier series.
APDL Command: ``*MFOURI``
Parameters
----------
oper
Type of Fourier operation:
Calculate Fourier coefficients COEFF from MODE, ISYM,
THETA, and CURVE. - Evaluate the Fourier curve CURVE from
COEFF, MODE, ISYM and THETA.
coeff
Name of the array parameter vector containing the Fourier
coefficients (calculated if Oper = FIT, required as input if Oper =
EVAL). See ``*SET`` for name restrictions.
mode
Name of the array parameter vector containing the mode numbers of
the desired Fourier terms.
isym
Name of the array parameter vector containing the symmetry key for
the corresponding Fourier terms. The vector should contain keys
for each term as follows:
Symmetric (cosine) term - Antisymmetric (sine) term.
theta, curve
Names of the array parameter vectors containing the theta vs. curve
description, respectively. Theta values should be input in
degrees. If Oper = FIT, one curve value should be supplied with
each theta value. If Oper = EVAL, one curve value will be
calculated for each theta value.
Notes
-----
Calculates the coefficients of a Fourier series for a given
curve, or evaluates the Fourier curve from the given (or
previously calculated) coefficients. The lengths of the
COEFF, MODE, and ISYM vectors must be the same--typically two
times the number of modes desired, since two terms (sine and
cosine) are generally required for each mode. The lengths of
the CURVE and THETA vectors should be the same or the smaller
of the two will be used. There should be a sufficient number
of points to adequately define the curve--at least two times
the number of coefficients. A starting array element number
(1) must be defined for each array parameter vector.
The vector specifications ``*VLEN``, ``*VCOL``, ``*VABS``,
``*VFACT``, and ``*VCUM`` do not apply to this command. Array
elements should not be skipped with the ``*VMASK`` and the
NINC value of the ``*VLEN`` specifications. The vector being
calculated (COEFF if Oper is FIT, or CURVE if Oper is EVAL)
must exist as a dimensioned array [``*DIM``].
This command is valid in any processor.
"""
command = f"*MFOURI,{oper},{coeff},{mode},{isym},{theta},{curve}"
return self.run(command, **kwargs)
def mfun(self, parr="", func="", par1="", **kwargs):
"""Copies or transposes an array parameter matrix.
APDL Command: ``*MFUN``
Parameters
----------
parr
The name of the resulting array parameter matrix. See ``*SET`` for
name restrictions.
func
Copy or transpose function:
Par1 is copied to ParR - Par1 is transposed to ParR. Rows
(m) and columns (n) of Par1 matrix are transposed to
resulting ParR matrix of shape (n,m).
par1
Array parameter matrix input to the operation.
Notes
-----
Operates on one input array parameter matrix and produces one output
array parameter matrix according to:
ParR = f(Par1)
where the function (f) is either a copy or transpose, as described
above.
Functions are based on the standard FORTRAN definitions where
possible. ParR may be the same as Par1. Starting array
element numbers must be defined for each array parameter
matrix if it does not start at the first location. For
example, ``*MFUN,A(1,5),COPY,B(2,3)`` copies matrix B
(starting at element (2,3)) to matrix A (starting at element
(1,5)). The diagonal corner elements for each submatrix must
be defined: the upper left corner by the array starting
element (on this command), the lower right corner by the
current values from the ``*VCOL`` and ``*VLEN`` commands. The
default values are the (1,1) element and the last element in
the matrix. No operations progress across matrix planes (in
the 3rd dimension). Absolute values and scale factors may be
applied to all parameters [``*VABS``, ``*VFACT``]. Results
may be cumulative [``*VCUM``].
Array elements should not be skipped with the ``*VMASK`` and the
NINC value of the ``*VLEN`` specifications. The number of
rows [``*VLEN``] applies to the Par1 array. See the
``*VOPER`` command for details.
This command is valid in any processor.
"""
command = f"*MFUN,{parr},{func},{par1}"
return self.run(command, **kwargs)
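# Hedged usage sketch (assumes ``mapdl`` is an active MAPDL session exposing this
# mixin; array names are illustrative only):
#   mapdl.dim("A", "ARRAY", 3, 2)          # 3 x 2 input matrix
#   mapdl.mfun("B", "TRAN", "A")           # B becomes the 2 x 3 transpose of A
#   mapdl.mfun("C", "COPY", "A")           # C is a copy of A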
def moper(
self,
parr="",
par1="",
oper="",
val1="",
val2="",
val3="",
val4="",
val5="",
val6="",
**kwargs,
):
"""Performs matrix operations on array parameter matrices.
APDL Command: ``*MOPER``
Parameters
----------
parr
The name of the resulting array parameter matrix. See ``*SET`` for
name restrictions.
par1
First array parameter matrix input to the operation. For Oper =
MAP, this is an N x 3 array of coordinate locations at which to
interpolate. ParR will then be an N(out) x M array containing the
interpolated values.
oper
Matrix operations:
INVERT - ``(*MOPER, ParR, Par1, INVERT)``
Square matrix invert: Inverts the n x n matrix in Par1
into ParR. The matrix must be well conditioned.
Warning: Non-independent or ill-conditioned equations can
cause erroneous results. - For large matrices, use the
APDL Math operation ``*LSFACTOR`` for efficiency (see APDL
Math).
MULT - ``(*MOPER, ParR, Par1, MULT, Par2)``
Matrix multiply: Multiplies Par1 by Par2. The number of
rows of Par2 must equal the number of columns of Par1 for
the operation. If Par2 is input with a number of rows
greater than the number of columns of Par1, matrices are
still multiplied. However, the operation only uses a
number of rows of Par2 equal to the number of columns of
Par1.
COVAR - ``(*MOPER, ParR, Par1, COVAR, Par2)``
Covariance: The measure of association between two columns
of the input matrix (Par1). Par1, of size m runs (rows)
by n data (columns) is first processed to produce a row
vector containing the mean of each column which is
transposed to a column vector (Par2) of n array elements.
The Par1 and Par2 operation then produces a resulting n x
n matrix (ParR) of covariances (with the variances as the
diagonal terms).
CORR - ``(*MOPER, ParR, Par1, CORR, Par2)``
Correlation: The correlation coefficient between two
variables. The input matrix (Par1), of size m runs (rows)
by n data (columns), is first processed to produce a row
vector containing the mean of each column which is then
transposed to a column vector (Par2) of n array elements.
The Par1 and Par2 operation then produces a resulting n x
n matrix (ParR) of correlation coefficients (with a value
of 1.0 for the diagonal terms).
SOLV - ``(*MOPER, ParR, Par1, SOLV, Par2)``
Solution of simultaneous equations: Solves the set of n
equations of n terms of the form a_n1*x_1 + a_n2*x_2 + ... +
a_nn*x_n = b_n, where Par1 contains the matrix of
a-coefficients, Par2 the vector(s) of b-values, and ParR
the vector(s) of x-results. Par1 must be a square matrix.
The equations must be linear, independent, and well
conditioned.
Warning: Non-independent or ill-conditioned equations can
cause erroneous results. - For large matrices, use the
APDL Math operation ``*LSFACTOR`` for efficiency (see APDL
Math).
SORT - ``(*MOPER, ParR, Par1, SORT, Par2, n1, n2, n3)``
Matrix sort: Sorts matrix Par1 according to sort vector
Par2 and places the result back in Par1. Rows of Par1 are
moved to the corresponding positions indicated by the
values of Par2. Par2 may be a column of Par1 (in which
case it will also be reordered). Alternatively, you may
specify the column of Par1 to sort using n1 (leaving Par2
blank). A secondary sort can be specified by column n2,
and a third sort using n3. ParR is the vector of initial
row positions (the permutation vector). Sorting Par1
according to ParR should reproduce the initial ordering.
NNEAR - ``(*MOPER, ParR, Par1, NNEAR, Toler)``
Nearest Node: Quickly determine all the nodes within a
specified tolerance of a given array. ParR is a vector of
the nearest selected nodes, or 0 if no nodes are nearer
than Toler. Par1 is the n x 3 array of coordinate
locations. Toler defaults to 1 and is limited to the
maximum model size.
ENEAR - ``(*MOPER, ParR, Par1, ENEAR, Toler)``
Nearest Element: Quickly determine the elements with
centroids that are within a specified tolerance of the
points in a given array. - ParR is a vector of the nearest
selected elements, or 0 if no element centroids are nearer
than Toler. Par1 is the n x 3 array of coordinate
locations.
MAP - ``(*MOPER, ParR, Par1, MAP, Par2, Par3, kDim, --, kOut, LIMIT)``
Maps the results from one set of points to another. For
example, you can map pressures from a CFD analysis onto
your model for a structural analysis.
Par1 is the Nout x 3 array of points that will be mapped
to. Par2 is the Nin x M array that contains M values of
data to be interpolated at each point and corresponds to
the Nin x 3 points in Par3. The resulting ParR is the Nout
x M array of mapped data points.
For each point in the destination mesh, all possible
triangles in the source mesh are searched to find the best
triangle containing each point. It then does a linear
interpolation inside this triangle. You should carefully
specify your interpolation method and search criteria in
order to provide faster and more accurate results (see
LIMIT, below).
kDim is the interpolation criteria. If kDim = 2 or 0, two
dimensional interpolation is applied (interpolate on a
surface). If kDim = 3, three dimensional interpolation is
applied (interpolate on a volume).
kOut specified how points outside of the domain are
handled. If kOut = 0, use the value(s) of the nearest
region point for points outside of the region. If kOut =
1, set results outside of the region to zero.
LIMIT specifies the number of nearby points considered for
interpolation. The default is 20, and the minimum is
5. Lower values will reduce processing time; however, some
distorted or irregular sets of points will require a
higher LIMIT value to encounter three nodes for
triangulation.
Output points are incorrect if they are not within the
domain (area or volume) defined by the specified input
points. Also, calculations for out-of-bound points require
much more processing time than do points that are within
bounds. Results mapping is available from the command line
only.
INTP - ``(*MOPER, ParR, Par1, INTP, Par2)``
Finds the elements that contain each point in the array of
n x 3 points in Par1. Par2 will contain the set of element
ID numbers and ParR will contain their n x 3 set of
natural element coordinates (values between -1 and
1). Par1 must be in global Cartesian coordinates.
SGET - ``(*MOPER, ParR, Par1, SGET, Par2, Label, Comp)``
Gets the nodal solution item corresponding to Label and
Comp (see the PLNSOL command) and interpolates it to the
given element locations. Par1 contains the n x 3 array of
natural element coordinates (values between -1 and 1) of
the n element ID numbers in Par2. Par1 and Par2 are
usually the output of the ``*MOPER,,,INTP`` operation. ParR
contains the n interpolated results.
Val1, Val2, ..., Val6
Additional input used in the operation. The meanings of
Val1 through Val6 vary depending on the specified matrix
operation. See the description of Oper for details.
"""
command = (
f"*MOPER,{parr},{par1},{oper},{val1},{val2},{val3},{val4},{val5},{val6}"
)
return self.run(command, **kwargs)
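# Hedged usage sketch (illustrative only; assumes ``mapdl`` is an active MAPDL
# session and that A and B are previously dimensioned and filled arrays):
#   mapdl.moper("C", "A", "MULT", "B")     # C = A * B (matrix multiply)
#   mapdl.moper("X", "A", "SOLV", "B")     # solve A x = B for x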
def mwrite(
self, parr="", fname="", ext="", label="", n1="", n2="", n3="", **kwargs
):
"""Writes a matrix to a file in a formatted sequence.
APDL Command: ``*MWRITE``
Parameters
----------
parr
The name of the array parameter. See ``*SET`` for name restrictions.
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
label
Can use a value of IJK, IKJ, JIK, JKI, KIJ, KJI, or blank (JIK).
n1, n2, n3
Write as (((ParR(i,j,k), k = 1,n1), i = 1, n2), j = 1, n3) for
Label = KIJ. n1, n2, and n3 default to the corresponding dimensions
of the array parameter ParR.
Notes
-----
Writes a matrix or vector to a specified file in a formatted sequence.
You can also use the ``*VWRITE`` command to write data to a specified file.
Both commands contain format descriptors on the line immediately
following the command. The format descriptors can be in either Fortran
or C format.
Fortran format descriptors are enclosed in parentheses. They must
immediately follow the ``*MWRITE`` command on a separate line of the same
input file. The word FORMAT should not be included. The format must
specify the number of fields to be written per line, the field width,
the placement of the decimal point, etc. There should be one field
descriptor for each data item written. The write operation uses the
available system FORTRAN FORMAT conventions (see your system FORTRAN
manual). Any standard FORTRAN real format (such as (4F6.0),
(E10.3,2X,D8.2), etc.) and character format (A) may be used. Integer
(I) and list-directed (``*``) descriptors may not be used. Text may be
included in the format as a quoted string. The FORTRAN descriptor must
be enclosed in parentheses and the format must not exceed 80 characters
(including parentheses).
The "C" format descriptors are used if the first character of the
format descriptor line is not a left parenthesis. "C" format
descriptors may be up to 80 characters long, consisting of text strings
and predefined "data descriptors" between the strings where numeric or
alphanumeric character data are to be inserted. The normal descriptors
are %I for integer data, %G for double precision data, %C for
alphanumeric character data, and %/ for a line break. There must be one
data descriptor for each specified value in the order of the specified
values. The enhanced formats described in ``*MSG`` may also be used.
The starting array element number must be defined. Looping continues in
the directions indicated by the Label argument. The number of loops and
loop skipping may also be controlled with the ``*VLEN`` and ``*VMASK``
commands, which work in the n2 direction (by row on the output file),
and by the ``*VCOL`` command, which works in the n1 direction (by column
in the output file). The vector specifications ``*VABS`` and ``*VFACT`` apply
to this command, while ``*VCUM`` does not apply to this command. See the
``*VOPER`` command for details. If you are in the GUI, the ``*MWRITE`` command
must be contained in an externally prepared file and read into ANSYS
(i.e., ``*USE``, /INPUT, etc.).
This command is valid in any processor.
"""
command = f"*MWRITE,{parr},{fname},{ext},,{label},{n1},{n2},{n3}"
return self.run(command, **kwargs)
def starvput(
self,
parr="",
entity="",
entnum="",
item1="",
it1num="",
item2="",
it2num="",
kloop="",
**kwargs,
):
"""Restores array parameter values into the ANSYS database.
APDL Command: ``*VPUT``
Parameters
----------
parr
The name of the input vector array parameter. See ``*SET`` for name
restrictions. The parameter must exist as a dimensioned array
[``*DIM``] with data input.
entity
Entity keyword. Valid keywords are shown for Entity = in the table
below.
entnum
The number of the entity (as shown for ENTNUM= in the table below).
item1
The name of a particular item for the given entity. Valid items
are as shown in the Item1 columns of the table below.
it1num
The number (or label) for the specified Item1 (if any). Valid
IT1NUM values are as shown in the IT1NUM columns of the table
below. Some Item1 labels do not require an IT1NUM value.
item2, it2num
A second set of item labels and numbers to further qualify the item
for which data is to be stored. Most items do not require this
level of information.
kloop
Field to be looped on:
Loop on the ENTNUM field (default). - Loop on the Item1 field.
Loop on the IT1NUM field. Successive items are as shown with IT1NUM. - Loop on the Item2 field.
Notes
-----
The ``*VPUT`` command is not supported for PowerGraphics
displays. Inconsistent results may be obtained if this
command is not used in /GRAPHICS, FULL.
Plot and print operations entered via the GUI (Utility Menu>
Pltcrtls, Utility Menu> Plot) incorporate the AVPRIN
command. This means that the principal and equivalent values
are recalculated. If you use ``*VPUT`` to put data back into
the database, issue the plot commands from the command line to
preserve your data.
This operation is basically the inverse of the ``*VGET``
operation. Vector items are put directly (without any
coordinate system transformation) into the ANSYS database.
Items can only replace existing items of the database and not
create new items. Degree of freedom results that are replaced
in the database are available for all subsequent
postprocessing operations. Other results are changed
temporarily and are available mainly for the immediately
following print and display operations. The vector
specification ``*VCUM`` does not apply to this command. The
valid labels for the location fields (Entity, ENTNUM, Item1,
and IT1NUM) are listed below. Item2 and IT2NUM are not
currently used. Not all items from the ``*VGET`` list are
allowed on ``*VPUT`` since putting values into some locations
could cause the database to be inconsistent.
This command is valid in any processor.
"""
command = (
f"*VPUT,{parr},{entity},{entnum},{item1},{it1num},{item2},{it2num},{kloop}"
)
return self.run(command, **kwargs)
def sread(
self, strarray="", fname="", ext="", nchar="", nskip="", nread="", **kwargs
):
"""Reads a file into a string array parameter.
APDL Command: ``*SREAD``
Parameters
----------
strarray
Name of the "string array" parameter which will hold the read file.
String array parameters are similar to character arrays, but each
array element can be as long as 128 characters. If the string
parameter does not exist, it will be created. The array will be
created as: ``*DIM,StrArray,STRING,nChar,nRead``
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
nchar
Number of characters per line to read (default is length of the
longest line in the file).
nskip
Number of lines to skip at the start of the file (default is 0).
nread
Number of lines to read from the file (default is the entire file).
Notes
-----
The ``*SREAD`` command reads from a file into a string array
parameter. The file must be an ASCII text file.
"""
command = f"*SREAD,{strarray},{fname},{ext},,{nchar},{nskip},{nread}"
return self.run(command, **kwargs)
def toper(
self, parr="", par1="", oper="", par2="", fact1="", fact2="", con1="", **kwargs
):
"""Operates on table parameters.
APDL Command: ``*TOPER``
Parameters
----------
parr
Name of the resulting table parameter. The command will create a
table array parameter with this name. Any existing parameter with
this name will be overwritten.
par1
Name of the first table parameter.
oper
The operation to be performed: ADD. The operation is:
``ParR(i,j,k) = FACT1*Par1(i,j,k) + FACT2 *Par2(i,j,k) +CON1``
par2
Name of the second table parameter.
fact1
The first table parameter multiplying constant. Defaults to 1.
fact2
The second table parameter multiplying constant. Defaults to 1.
con1
The constant increment for offset. Defaults to 0.
Notes
-----
``*TOPER`` operates on table parameters according to:
``ParR(i,j,k) = FACT1*Par1(i,j,k) + FACT2 *Par2(i,j,k) +CON1``
Par1 and Par2 must have the same dimensions and the same variable names
corresponding to those dimensions. Par1 and Par2 must also have
identical index values for rows, columns, etc.
If you want a local coordinate system for the resulting array, you must
dimension it as such using the ``*DIM`` command before issuing ``*TOPER``.
This command is valid in any processor.
"""
command = f"*TOPER,{parr},{par1},{oper},{par2},{fact1},{fact2},{con1}"
return self.run(command, **kwargs)
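# Hedged usage sketch (illustrative only): ParR = 2*A + 0.5*B + 1 for two
# identically dimensioned table parameters A and B.
#   mapdl.toper("R", "A", "ADD", "B", 2, 0.5, 1)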
def vabs(self, kabsr="", kabs1="", kabs2="", kabs3="", **kwargs):
"""Applies the absolute value function to array parameters.
APDL Command: ``*VABS``
Parameters
----------
kabsr
Absolute value of results parameter:
Do not take absolute value of results parameter (ParR). - Take absolute value.
kabs1
Absolute value of first parameter:
Do not take absolute value of first parameter (Par1 or ParI). - Take absolute value.
kabs2
Absolute value of second parameter:
Do not take absolute value of second parameter (Par2 or ParJ). - Take absolute value.
kabs3
Absolute value of third parameter:
Do not take absolute value of third parameter (Par3 or ParK). - Take absolute value.
Notes
-----
Applies an absolute value to parameters used in certain ``*VXX`` and ``*MXX``
operations. Typical absolute value applications are of the form:
``ParR = |f(|Par1|)|``
or
``ParR = |(|Par1| o |Par2|)|``
The absolute values are applied to each input parameter value before
the operation and to the result value after the operation. Absolute
values are applied before the scale factors so that negative scale
factors may be used. The absolute value settings are reset to the
default (no absolute value) after each ``*VXX`` or ``*MXX`` operation. Use
``*VSTAT`` to list settings.
This command is valid in any processor.
"""
command = f"*VABS,{kabsr},{kabs1},{kabs2},{kabs3}"
return self.run(command, **kwargs)
def vcol(self, ncol1="", ncol2="", **kwargs):
"""Specifies the number of columns in matrix operations.
APDL Command: ``*VCOL``
Parameters
----------
ncol1
Number of columns to be used for Par1 with ``*MXX`` operations.
Defaults to whatever is needed to fill the result array.
ncol2
Number of columns to be used for Par2 with ``*MXX`` operations.
Defaults to whatever is needed to fill the result array.
Notes
-----
Specifies the number of columns to be used in array parameter matrix
operations. The size of the submatrix used is determined from the
upper left starting array element (defined on the operation command) to
the lower right array element (defined by the number of columns on this
command and the number of rows on the ``*VLEN`` command).
The default NCOL is calculated from the maximum number of columns of
the result array (the ``*DIM`` column dimension) minus the starting
location + 1. For example, ``*DIM,R,,1,10`` and a starting location of
R(1,7) gives a default of 4 columns ( starting with R(1,7), R(1,8),
R(1,9), and R(1,10)). Repeat operations automatically terminate at the
last column of the result array. Existing values in the rows and
columns of the results matrix remain unchanged where not overwritten by
the requested input or operation values.
The column control settings are reset to the defaults after each ``*MXX``
operation. Use ``*VSTAT`` to list settings.
This command is valid in any processor.
"""
command = f"*VCOL,{ncol1},{ncol2}"
return self.run(command, **kwargs)
def vcum(self, key="", **kwargs):
"""Allows array parameter results to add to existing results.
APDL Command: ``*VCUM``
Parameters
----------
key
Accumulation key:
Overwrite results. - Add results to the current value of the results parameter.
Notes
-----
Allows results from certain ``*VXX`` and ``*MXX`` operations to overwrite or
add to existing results. The cumulative operation is of the form:
``ParR = ParR + ParR(Previous)``
The cumulative setting is reset to the default (overwrite) after each
``*VXX`` or ``*MXX`` operation. Use ``*VSTAT`` to list settings.
This command is valid in any processor.
"""
command = f"*VCUM,{key}"
return self.run(command, **kwargs)
def vfact(self, factr="", fact1="", fact2="", fact3="", **kwargs):
"""Applies a scale factor to array parameters.
APDL Command: ``*VFACT``
Parameters
----------
factr
Scale factor applied to results (ParR) parameter. Defaults to 1.0.
fact1
Scale factor applied to first parameter (Par1 or ParI). Defaults
to 1.0.
fact2
Scale factor applied to second parameter (Par2 or ParJ). Defaults
to 1.0.
fact3
Scale factor applied to third parameter (Par3 or ParK). Defaults
to 1.0.
Notes
-----
Applies a scale factor to parameters used in certain ``*VXX`` and ``*MXX``
operations. Typical scale factor applications are of the form:
``ParR = FACTR*f(FACT1*Par1)``
or
``ParR = FACTR*((FACT1*Par1) o (FACT2*Par2))``
The factors are applied to each input parameter value before the
operation and to the result value after the operation. The scale
factor settings are reset to the default (1.0) after each ``*VXX`` or ``*MXX``
operation. Use ``*VSTAT`` to list settings.
This command is valid in any processor.
"""
command = f"*VFACT,{factr},{fact1},{fact2},{fact3}"
return self.run(command, **kwargs)
def vfun(self, parr="", func="", par1="", con1="", con2="", con3="", **kwargs):
"""Performs a function on a single array parameter.
APDL Command: ``*VFUN``
Parameters
----------
parr
The name of the resulting numeric array parameter vector. See ``*SET``
for name restrictions.
func
Function to be performed:
Arccosine: ACOS(Par1). - Arcsine: ASIN(Par1).
Par1 is sorted in ascending order. ``*VCOL``, ``*VMASK``,
``*VCUM``, and ``*VLEN,,NINC`` do not apply.
``*VLEN,NROW`` does apply.
Compress: Selectively compresses data set. "True"
(``*VMASK``) values of Par1 (or row positions to be considered
according to the NINC value on the ``*VLEN`` command) are
written in compressed form to ParR, starting at the
specified position. - Copy: Par1 copied to ParR.
Cosine: COS(Par1). - Hyperbolic cosine: COSH(Par1).
Direction cosines of the principal stresses (nX9). Par1
contains the nX6 component stresses for the n locations of
the calculations. - Par1 is sorted in descending order.
``*VCOL``, ``*VMASK``, ``*VCUM``, and ``*VLEN,,NINC`` do not apply.
``*VLEN,NROW`` does apply.
Euler angles of the principal stresses (nX3). Par1
contains the nX6 component stresses for the n locations of
the calculations. - Exponential: EXP(Par1).
Expand: Reverse of the COMP function. All elements of
Par1 (starting at the position specified) are written in
expanded form to corresponding "true" (``*VMASK``) positions
(or row positions to be considered according to the NINC
value on the ``*VLEN`` command) of ParR. - Natural logarithm:
LOG(Par1).
Common logarithm: LOG10(Par1). - Nearest integer: 2.783
becomes 3.0, -1.75 becomes -2.0.
Logical complement: values 0.0 (false) become 1.0 (true).
Values > 0.0 (true) become 0.0 (false). - Principal
stresses (nX5). Par1 contains the nX6 component stresses
for the n locations of the calculations.
Power function: ``Par1**CON1``. Exponentiation of any negative
number in the vector Par1 to a non-integer power is
performed by exponentiating the positive number and
prepending the minus sign. For example, ``-4**2.3`` is
``-(4**2.3)``.
Hyperbolic sine: SINH(Par1). - Square root: SQRT(Par1).
Tangent: TAN(Par1). - Hyperbolic tangent: TANH(Par1).
par1
Array parameter vector in the operation.
con1, con2, con3
Constants (used only with the PWR, NORM, LOCAL, and GLOBAL
functions).
Notes
-----
Operates on one input array parameter vector and produces one output
array parameter vector according to:
``ParR = f(Par1)``
"""
command = f"*VFUN,{parr},{func},{par1},{con1},{con2},{con3}"
return self.run(command, **kwargs)
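# Hedged usage sketch (illustrative only): element-wise functions on a
# previously dimensioned and filled vector A.
#   mapdl.vfun("B", "SQRT", "A(1)")        # B(i) = sqrt(A(i))
#   mapdl.vfun("C", "PWR", "A(1)", 2)      # C(i) = A(i)**2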
def vitrp(self, parr="", part="", pari="", parj="", park="", **kwargs):
"""Forms an array parameter by interpolation of a table.
APDL Command: ``*VITRP``
Parameters
----------
parr
The name of the resulting array parameter. See ``*SET`` for name
restrictions.
part
The name of the TABLE array parameter. The parameter must exist as
a dimensioned array of type TABLE [``*DIM``].
pari
Array parameter vector of I (row) index values for interpolation in
ParT.
parj
Array parameter vector of J (column) index values for interpolation
in ParT (which must be at least 2-D).
park
Array parameter vector of K (depth) index values for interpolation
in ParT (which must be 3-D).
Notes
-----
Forms an array parameter (of type ARRAY) by interpolating values of an
array parameter (of type TABLE) at specified table index locations
according to:
ParR = f(ParT, Parl, ParJ, ParK)
where ParT is the type TABLE array parameter, and ParI, ParJ, ParK are
the type ARRAY array parameter vectors of index values for
interpolation in ParT. See the ``*DIM`` command for TABLE and ARRAY
declaration types. Linear interpolation is used. The starting array
element number for the TABLE array (ParT) is not used (but a value must
be input). Starting array element numbers must be defined for each
array parameter vector if it does not start at the first location. For
example, ``*VITRP,R(5),TAB(1,1),X(2),Y(4)`` uses the second element of X
and the fourth element of Y as index values (row and column) for a 2-D
interpolation in TAB and stores the result in the fifth element of R.
Operations continue on successive array elements ``[*VLEN, *VMASK]`` with
the default being all successive elements. Absolute values and scale
factors may be applied to the result parameter ``[*VABS, *VFACT]``.
Results may be cumulative ``[*VCUM]``. See the ``*VOPER`` command for details.
This command is valid in any processor.
"""
command = f"*VITRP,{parr},{part},{pari},{parj},{park}"
return self.run(command, **kwargs)
def vlen(self, nrow="", ninc="", **kwargs):
"""Specifies the number of rows to be used in array parameter operations.
APDL Command: ``*VLEN``
Parameters
----------
nrow
Number of rows to be used with the ``*VXX`` or ``*MXX`` operations.
Defaults to the number of rows needed to fill the result array.
ninc
Perform the operation on every NINC row (defaults to 1).
Notes
-----
Specifies the number of rows to be used in array parameter operations.
The size of the submatrix used is determined from the upper left
starting array element (defined on the operation command) to the lower
right array element (defined by the number of rows on this command and
the number of columns on the ``*VCOL`` command). NINC allows skipping row
operations for some operation commands. Skipped rows are included in
the row count. The starting row number must be defined on the
operation command for each parameter read and for the result written.
The default NROW is calculated from the maximum number of rows of the
result array (the ``*DIM`` row dimension) minus the starting location + 1.
For example, ``*DIM,R,,10`` and a starting location of R(7) gives a default
of 4 loops (filling R(7), R(8), R(9), and R(10)). Repeat operations
automatically terminate at the last row of the result array. Existing
values in the rows and columns of the results matrix remain unchanged
where not overwritten by the requested input or operation values.
The stride (NINC) allows operations to be performed at regular
intervals. It has no effect on the total number of row operations.
Skipped operations retain the previous result. For example, ``*DIM,R,,6,``
with a starting location of R(1), NROW = 10, and NINC = 2 calculates
values for locations R(1), R(3), and R(5) and retains values for
locations R(2), R(4), and R(6). A more general skip control may be
done by masking ``[*VMASK]``. The row control settings are reset to the
defaults after each ``*VXX`` or ``*MXX`` operation. Use ``*VSTAT`` to list
settings.
This command is valid in any processor.
"""
command = f"*VLEN,{nrow},{ninc}"
return self.run(command, **kwargs)
def vmask(self, par="", **kwargs):
"""Specifies an array parameter as a masking vector.
APDL Command: ``*VMASK``
Parameters
----------
par
Name of the mask parameter. The starting subscript must also be
specified.
Notes
-----
Specifies the name of the parameter whose values are to be checked for
each resulting row operation. The mask vector usually contains only 0
(for false) and 1 (for true) values. For each row operation the
corresponding mask vector value is checked. A true value allows the
operation to be done. A false value skips the operation (and retains
the previous results). A mask vector can be created from direct input,
such as M(1) = 1,0,0,1,1,0,1; or from the DATA function of the ``*VFILL``
command. The NOT function of the ``*VFUN`` command can be used to reverse
the logical sense of the mask vector. The logical compare operations
(LT, LE, EQ, NE, GE, and GT) of the ``*VOPER`` command also produce a mask
vector by operating on two other vectors. Any numeric vector can be
used as a mask vector since the actual interpretation assumes values
less than 0.0 are 0.0 (false) and values greater than 0.0 are 1.0
(true). If the mask vector is not specified (or has fewer values than
the result vector), true (1.0) values are assumed for the unspecified
values. Another skip control may be input with NINC on the ``*VLEN``
command. If both are present, operations occur only when both are
true. The mask setting is reset to the default (no mask) after each
``*VXX`` or ``*MXX`` operation. Use ``*VSTAT`` to list settings.
This command is valid in any processor.
"""
command = f"*VMASK,{par}"
return self.run(command, **kwargs)
def voper(self, parr="", par1="", oper="", par2="", con1="", con2="", **kwargs):
"""Operates on two array parameters.
APDL Command: ``*VOPER``
Parameters
----------
parr
The name of the resulting array parameter vector. See ``*SET`` for
name restrictions.
par1
First array parameter vector in the operation. May also be a
scalar parameter or a literal constant.
oper
Operations:
Addition: Par1+Par2. - Subtraction: Par1-Par2.
Multiplication: ``Par1*Par2``.
Division: Par1/Par2 (a divide by zero results in a value of zero).
Minimum: minimum of Par1 and Par2. - Maximum: maximum of Par1 and Par2.
Less than comparison: Par1<Par2 gives 1.0 if true, 0.0 if
false. - Less than or equal comparison: Par1 <= Par2 gives
1.0 if true, 0.0 if false.
Equal comparison: Par1 = Par2 gives 1.0 if true, 0.0 if
false. - Not equal comparison: Par1 ≠ Par2 gives 1.0 if
true, 0.0 if false.
Greater than or equal comparison: Par1 >= Par2 gives 1.0 if
true, 0.0 if false. - Greater than comparison: Par1>Par2
gives 1.0 if true, 0.0 if false.
First derivative: d(Par1)/d(Par2). The derivative at a
point is determined over points half way between the
previous and next points (by linear interpolation). Par1
must be a function (a unique Par1 value for each Par2
value) and Par2 must be in ascending order. - Second
derivative: d^2(Par1)/d(Par2)^2. See also DER1.
Single integral: ∫ Par1 d(Par2), where CON1 is the
integration constant. The integral at a point is
determined by using the single integration procedure
described in the Mechanical APDL Theory Reference. -
Double integral: ∫∫ Par1 d(Par2), where CON1 is the
integration constant of the first integral and CON2 is the
integration constant of the second integral. If Par1
contains acceleration data, CON1 is the initial velocity
and CON2 is the initial displacement. See also INT1.
Dot product: Par1 . Par2. Par1 and Par2 must each have
three consecutive columns of data, with the columns
containing the i, j, and k vector components,
respectively. Only the starting row index and the column
index for the i components are specified for Par1 and
Par2, such as A(1,1). The j and k components of the
vector are assumed to begin in the corresponding next
columns, such as A(1,2) and A(1,3). - Cross product: Par1
x Par2. Par1, Par2, and ParR must each have 3 components,
respectively. Only the starting row index and the column
index for the i components are specified for Par1, Par2,
and ParR, such as A(1,1). The j and k components of the
vector are assumed to begin in the corresponding next
columns, such as A(1,2) and A(1,3).
Gather: For a vector of position numbers, Par2, copy the
value of Par1 at each position number to ParR. Example:
for Par1 = 10,20,30,40 and Par2 = 2,4,1; ParR =
20,40,10. - Scatter: Opposite of GATH operation. For a
vector of position numbers, Par2, copy the value of Par1
to that position number in ParR. Example: for Par1 =
10,20,30,40,50 and Par2 = 2,1,0,5,3; ParR = 20,10,50,0,40.
Arctangent: arctangent of Par1/Par2 with the sign of each
component considered. - Transform the data in Par1 from
the global Cartesian coordinate system to the local
coordinate system given in CON1. Par1 must be an N x 3
(i.e., vector) or an N x 6 (i.e., stress or strain tensor)
array. If the local coordinate system is a cylindrical,
spherical, or toroidal system, then you must provide the
global Cartesian coordinates in Par2 as an N x 3 array.
Set CON2 = 1 if the data is strain data.
par2
Second array parameter vector in the operation. May also be a
scalar parameter or a literal constant.
con1
First constant (used only with the INT1 and INT2 operations).
con2
Second constant (used only with the INT2 operation).
Notes
-----
Operates on two input array parameter vectors and produces one output
array parameter vector according to:
ParR = Par1 o Par2
where the operations (o) are described below. ParR may be the same as
Par1 or Par2. Absolute values and scale factors may be applied to all
parameters [``*VABS``, ``*VFACT``]. Results may be cumulative [``*VCUM``].
Starting array element numbers must be defined for each array parameter
vector if it does not start at the first location, such as
``*VOPER,A,B(5),ADD,C(3)`` which adds the third element of C to the fifth
element of B and stores the result in the first element of A.
Operations continue on successive array elements ``[*VLEN, *VMASK]`` with
the default being all successive elements. Skipping array elements via
``*VMASK`` or ``*VLEN`` for the DER and INT functions skips only the writing
of the results (skipped array element data are used in all
calculations).
Parameter functions and operations are available to operate on a scalar
parameter or a single element of an array parameter, such as SQRT(B) or
SQRT(A(4)). See the ``*SET`` command for details. Operations on a
sequence of array elements can be done by repeating the desired
function or operation in a do-loop ``[*DO]``. The vector operations within
the ANSYS program (``*VXX`` commands) are internally programmed do-loops
that conveniently perform the indicated operation over a sequence of
array elements. If the array is multidimensional, only the first
subscript is incremented in the do-loop, that is, the operation repeats
in column vector fashion "down" the array. For example, for A(1,5),
A(2,5), A(3,5), etc. The starting location of the row index must be
defined for each parameter read and for the result written.
The default number of loops is from the starting result location to the
last result location and can be altered with the ``*VLEN`` command. A
logical mask vector may be defined to control at which locations the
operations are to be skipped [``*VMASK``]. The default is to skip no
locations. Repeat operations automatically terminate at the last array
element of the result array column if the number of loops is undefined
or if it exceeds the last result array element. Zeroes are used in
operations for values read beyond the last array element of an input
array column. Existing values in the rows and columns of the results
matrix
"""
command = f"*VOPER,{parr},{par1},{oper},{par2},{con1},{con2}"
return self.run(command, **kwargs)
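# Hedged usage sketch (illustrative only): element-wise operations on two
# previously filled vectors A and B.
#   mapdl.voper("C", "A(1)", "ADD", "B(1)")      # C(i) = A(i) + B(i)
#   mapdl.voper("D", "A(1)", "MULT", "B(1)")     # D(i) = A(i) * B(i)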
def vscfun(self, parr="", func="", par1="", **kwargs):
"""Determines properties of an array parameter.
APDL Command: ``*VSCFUN``
Parameters
----------
parr
The name of the resulting scalar parameter. See ``*SET`` for name
restrictions.
func
Functions:
Maximum: the maximum Par1 array element value. - Minimum:
the minimum Par1 array element value.
Index location of the maximum Par1 array element value.
Array Par1 is searched starting from its specified
index. - Index location of the minimum Par1 array element
value. Array Par1 is searched starting from its specified
index.
Index location of the first nonzero value in array Par1.
Array Par1 is searched starting from its specified
index. - Index location of the last nonzero value in array
Par1. Array Par1 is searched starting from its specified
index.
Sum: Σ Par1 (the summation of the Par1 array element
values). - Median: value of Par1 at which there are an
equal number of values above and below.
Mean: (Σ Par1)/NUM, where NUM is the number of summed
values.
Variance: ``(Σ ((Par1-MEAN)**2))/NUM``.
Standard deviation: square root of VARI. -
Root-mean-square: square root of ``(Σ (Par1**2))/NUM``.
par1
Array parameter vector in the operation.
Notes
-----
Operates on one input array parameter vector and produces one output
scalar parameter according to:
ParR = f(Par1)
where the functions (f) are described below. The starting array element
number must be defined for the array parameter vector. For example,
``*VSCFUN,MU,MEAN,A(1)`` finds the mean of the A vector values, starting
from the first value and stores the result as parameter MU. Operations
use successive array elements ``[*VLEN, *VMASK]`` with the default being
all successive array elements. Absolute values and scale factors may
be applied to all parameters ``[*VABS, *VFACT]``. Results may be
cumulative ``[*VCUM]``. See the ``*VOPER`` command for details.
This command is valid in any processor.
"""
command = f"*VSCFUN,{parr},{func},{par1}"
return self.run(command, **kwargs)
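# Hedged usage sketch mirroring the docstring example: store the mean of vector A
# in scalar parameter MU (assumes ``mapdl`` is an active MAPDL session).
#   mapdl.vscfun("MU", "MEAN", "A(1)")
#   mapdl.vscfun("AMAX", "MAX", "A(1)")    # illustrative: maximum of A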
def vstat(self, **kwargs):
"""Lists the current specifications for the array parameters.
APDL Command: ``*VSTAT``
Notes
-----
Lists the current specifications for the ``*VABS``, ``*VCOL``,
``*VCUM``, ``*VFACT``, ``*VLEN``, and ``*VMASK`` commands.
This command is valid in any processor.
"""
command = f"*VSTAT,"
return self.run(command, **kwargs)
def vwrite(
self,
par1="",
par2="",
par3="",
par4="",
par5="",
par6="",
par7="",
par8="",
par9="",
par10="",
par11="",
par12="",
par13="",
par14="",
par15="",
par16="",
par17="",
par18="",
par19="",
**kwargs,
):
"""Writes data to a file in a formatted sequence.
APDL Command: ``*VWRITE``
.. warning::
This command cannot be run interactively. See
:func:`non_interactive <ansys.mapdl.core.Mapdl.non_interactive>`.
Parameters
----------
par1, par2, par3, . . . , par19
You can write up to 19 parameters (or constants) at a time. Any Par
values after a blank Par value are ignored. If you leave them all
blank, one line will be written (to write a title or a blank line).
If you input the keyword SEQU, a sequence of numbers (starting from
1) will be written for that item.
Notes
-----
You use ``*VWRITE`` to write data to a file in a formatted sequence. Data
items (Par1, Par2, etc.) may be array parameters, scalar parameters,
character parameters (scalar or array), or constants. You must
evaluate expressions and functions in the data item fields before using
the ``*VWRITE`` command, since initially they will be evaluated to a
constant and remain constant throughout the operation. Unless a file
is defined with the ``*CFOPEN`` command, data is written to the standard
output file. Data written to the standard output file may be diverted
to a different file by first switching the current output file with the
/OUTPUT command. You can also use the ``*MWRITE`` command to write data to
a specified file. Both commands contain format descriptors on the line
immediately following the command. The format descriptors can be in
either Fortran or C format.
You must enclose Fortran format descriptors in parentheses. They must
immediately follow the ``*VWRITE`` command on a separate line of the same
input file. Do not include the word FORMAT. The format must specify
the number of fields to be written per line, the field width, the
placement of the decimal point, etc. You should use one field
descriptor for each data item written. The write operation uses your
system's available FORTRAN FORMAT conventions (see your system FORTRAN
manual). You can use any standard FORTRAN real format (such as
(4F6.0), (E10.3,2X,D8.2), etc.) and alphanumeric format (A).
Alphanumeric strings are limited to a maximum of 8 characters for any
field (A8) using the Fortran format. Use the "C" format for string
arrays larger than 8 characters. Integer (I) and list-directed (*)
descriptors may not be used. You can include text in the format as a
quoted string. The parentheses must be included in the format and the
format must not exceed 80 characters (including parentheses). The
output line length is limited to 128 characters.
The "C" format descriptors are used if the first character of the
format descriptor line is not a left parenthesis. "C" format
descriptors are up to 80 characters long, consisting of text strings
and predefined "data descriptors" between the strings where numeric or
alphanumeric character data will be inserted. The normal descriptors
are %I for integer data, %G for double precision data, %C for
alphanumeric character data, and %/ for a line break. There must be one
data descriptor for each specified value (8 maximum) in the order of
the specified values. The enhanced formats described in ``*MSG`` may also
be used.
For array parameter items, you must define the starting array
element number. Looping continues (incrementing the vector
index number of each array parameter by one) each time you
output a line, until the maximum array vector element is
written. For example, ``*VWRITE,A(1)`` followed by (F6.0)
will write one value per output line, i.e., A(1), A(2), A(3),
A(4), etc. You write constants and scalar parameters with the
same values for each loop. You can also control the number of
loops and loop skipping with the ``*VLEN`` and ``*VMASK``
commands. The vector specifications ``*VABS``, ``*VFACT``,
and ``*VCUM`` do not apply to this command. If looping
continues beyond the supplied data array's length, zeros will
be output for numeric array parameters and blanks for
character array parameters. For multi-dimensioned array
parameters, only the first (row) subscript is incremented.
See the ``*VOPER`` command for details. If you are in the GUI,
the ``*VWRITE`` command must be contained in an externally
prepared file and read into ANSYS (i.e., ``*USE``, /INPUT, etc.).
This command is valid in any processor.
"""
# cannot be in interactive mode
if not self._store_commands:
raise RuntimeError(
"VWRTIE cannot run interactively. \n\nPlease use "
"``with mapdl.non_interactive:``"
)
command = f"*VWRITE,{par1},{par2},{par3},{par4},{par5},{par6},{par7},{par8},{par9},{par10},{par11},{par12},{par13},{par14},{par15},{par16},{par17},{par18},{par19}"
return self.run(command, **kwargs)
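# Hedged usage sketch (illustrative only): *VWRITE must be wrapped in
# non_interactive mode and followed by a format descriptor line, e.g.
#   with mapdl.non_interactive:
#       mapdl.vwrite("A(1)")
#       mapdl.run("(F10.4)")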
|
416962
|
import os
import tempfile
import torch
from pytorch_transformers import BertTokenizer
from onir.interfaces import bert_models
from onir import vocab, util, config
from onir.modules import PrettrBertModel
import tokenizers as tk
@vocab.register('prettr_bert')
class PrettrBertVocab(vocab.Vocab):
@staticmethod
def default_config():
return {
'bert_base': 'bert-base-uncased',
'bert_weights': '',
'join_layer': 0, # all layers
'compress_size': 0, # disable
'compress_fp16': False,
}
def __init__(self, config, logger):
super().__init__(config, logger)
bert_model = bert_models.get_model(config['bert_base'], self.logger)
self.tokenizer = BertTokenizer.from_pretrained(bert_model)
# HACK! Until the transformers library adopts tokenizers, save and re-load vocab
with tempfile.TemporaryDirectory() as d:
self.tokenizer.save_vocabulary(d)
# this tokenizer is ~4x faster than the BertTokenizer, per my measurements
self.tokenizer = tk.BertWordPieceTokenizer(os.path.join(d, 'vocab.txt'))
def tokenize(self, text):
# return self.tokenizer.tokenize(text)
return self.tokenizer.encode(text).tokens[1:-1] # removes leading [CLS] and trailing [SEP]
def tok2id(self, tok):
# return self.tokenizer.vocab[tok]
return self.tokenizer.token_to_id(tok)
def id2tok(self, idx):
if torch.is_tensor(idx):
if len(idx.shape) == 0:
return self.id2tok(idx.item())
return [self.id2tok(x) for x in idx]
# return self.tokenizer.ids_to_tokens[idx]
return self.tokenizer.id_to_token(idx)
def encoder(self):
return PrettrBertEncoder(self)
def path_segment(self):
result = '{name}_{bert_base}'.format(name=self.name, **self.config)
if self.config['bert_weights']:
result += '_{}'.format(self.config['bert_weights'])
if self.config['join_layer'] != 0:
result += '_join-{}'.format(self.config['join_layer'])
if self.config['compress_size'] != 0:
result += '_compress-{}'.format(self.config['compress_size'])
if self.config['compress_fp16'] != 0:
result += '_compress-fp16'
return result
def lexicon_path_segment(self):
return 'bert_{bert_base}'.format(**self.config)
def lexicon_size(self) -> int:
return self.tokenizer._tokenizer.get_vocab_size()
class PrettrBertEncoder(vocab.VocabEncoder):
def __init__(self, vocabulary):
super().__init__(vocabulary)
bert_model = bert_models.get_model(vocabulary.config['bert_base'], vocabulary.logger)
self.bert = PrettrBertModel.from_pretrained(bert_model,
join_layer=vocabulary.config['join_layer'],
compress_size=vocabulary.config['compress_size'],
compress_fp16=vocabulary.config['compress_fp16'])
if vocabulary.config['bert_weights']:
weight_path = os.path.join(util.path_vocab(vocabulary), vocabulary.config['bert_weights'])
with vocabulary.logger.duration('loading BERT weights from {}'.format(weight_path)):
_, unexpected = self.bert.load_state_dict(torch.load(weight_path), strict=False)
if unexpected:
vocabulary.logger.warn('Unexpected keys found when loading {}: {}. Be sure it '
'is properly prefixed (e.g., without bert.)'.format(vocabulary.config['bert_weights'], unexpected))
self.CLS = vocabulary.tok2id('[CLS]')
self.SEP = vocabulary.tok2id('[SEP]')
def enc_query_doc(self, **inputs):
query_tok, query_len = inputs['query_tok'], inputs['query_len']
doc_tok, doc_len = inputs['doc_tok'], inputs['doc_len']
BATCH, QLEN = query_tok.shape
maxlen = self.bert.config.max_position_embeddings
MAX_DOC_TOK_LEN = maxlen - QLEN - 3 # -3 [CLS] and 2x[SEP]
doc_toks, sbcount = util.subbatch(doc_tok, MAX_DOC_TOK_LEN)
doc_mask = util.lens2mask(doc_len, doc_tok.shape[1])
doc_mask, _ = util.subbatch(doc_mask, MAX_DOC_TOK_LEN)
query_toks = torch.cat([query_tok] * sbcount, dim=0)
query_mask = util.lens2mask(query_len, query_toks.shape[1])
query_mask = torch.cat([query_mask] * sbcount, dim=0)
CLSS = torch.full_like(query_toks[:, :1], self.CLS)
SEPS = torch.full_like(query_toks[:, :1], self.SEP)
ONES = torch.ones_like(query_mask[:, :1])
NILS = torch.zeros_like(query_mask[:, :1])
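# Assemble the joint BERT input as [CLS] query [SEP] doc [SEP]; the attention
# mask and segment ids below follow the same column layout.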
toks = torch.cat([CLSS, query_toks, SEPS, doc_toks, SEPS], dim=1)
mask = torch.cat([ONES, query_mask, ONES, doc_mask, ONES], dim=1)
segment_ids = torch.cat([NILS] * (2 + QLEN) + [ONES] * (1 + doc_toks.shape[1]), dim=1)
# Change -1 padding to 0-padding (will be masked)
toks = torch.where(toks == -1, torch.zeros_like(toks), toks)
result = self.bert(toks, segment_ids, mask)
# extract relevant subsequences for query and doc
query_results = [r[:BATCH, 1:QLEN+1] for r in result]
doc_results = [r[:, QLEN+2:-1] for r in result]
doc_results = [util.un_subbatch(r, doc_tok, MAX_DOC_TOK_LEN) for r in doc_results]
cls_results = []
for layer in range(len(result)):
cls_output = result[layer][:, 0]
cls_result = []
for i in range(cls_output.shape[0] // BATCH):
cls_result.append(cls_output[i*BATCH:(i+1)*BATCH])
cls_result = torch.stack(cls_result, dim=2).mean(dim=2)
cls_results.append(cls_result)
return {
'query': query_results,
'doc': doc_results,
'cls': cls_results
}
def _enc_spec(self) -> dict:
return {
'dim': self.bert.config.hidden_size,
'views': self.bert.config.num_hidden_layers + 1,
'static': False,
'supports_forward': False,
'joint_fields': ['query', 'doc', 'cls']
}
|
416973
|
import time
import argparse
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
import networkx as nx
import dgl
from models import MLP, InteractionNet, PrepareLayer
from dataloader import MultiBodyGraphCollator, MultiBodyTrainDataset,\
MultiBodyValidDataset, MultiBodyTestDataset
from utils import make_video
def train(optimizer, loss_fn, reg_fn, model, prep, dataloader, lambda_reg, device):
total_loss = 0
model.train()
for i, (graph_batch, data_batch, label_batch) in enumerate(dataloader):
graph_batch = graph_batch.to(device)
data_batch = data_batch.to(device)
label_batch = label_batch.to(device)
optimizer.zero_grad()
node_feat, edge_feat = prep(graph_batch, data_batch)
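        # The interaction network interface expects relation and global features;
        # zero tensors are passed here as placeholders (they appear unused for this task).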
dummy_relation = torch.zeros(edge_feat.shape[0], 1).float().to(device)
dummy_global = torch.zeros(node_feat.shape[0], 1).float().to(device)
v_pred, out_e = model(graph_batch, node_feat[:, 3:5].float(
), edge_feat.float(), dummy_global, dummy_relation)
loss = loss_fn(v_pred, label_batch)
total_loss += float(loss)
zero_target = torch.zeros_like(out_e)
loss = loss + lambda_reg*reg_fn(out_e, zero_target)
reg_loss = 0
for param in model.parameters():
reg_loss = reg_loss + lambda_reg * \
reg_fn(param, torch.zeros_like(
param).float().to(device))
loss = loss + reg_loss
loss.backward()
optimizer.step()
return total_loss/(i+1)
# One step evaluation
def eval(loss_fn, model, prep, dataloader, device):
total_loss = 0
model.eval()
for i, (graph_batch, data_batch, label_batch) in enumerate(dataloader):
graph_batch = graph_batch.to(device)
data_batch = data_batch.to(device)
label_batch = label_batch.to(device)
node_feat, edge_feat = prep(graph_batch, data_batch)
dummy_relation = torch.zeros(
edge_feat.shape[0], 1).float().to(device)
dummy_global = torch.zeros(
node_feat.shape[0], 1).float().to(device)
v_pred, _ = model(graph_batch, node_feat[:, 3:5].float(
), edge_feat.float(), dummy_global, dummy_relation)
loss = loss_fn(v_pred, label_batch)
total_loss += float(loss)
return total_loss/(i+1)
# Rollout evaluation starting from the initial state
# (predicted velocities are numerically integrated to update positions)
def eval_rollout(model, prep, initial_frame, n_object, device):
current_frame = initial_frame.to(device)
base_graph = nx.complete_graph(n_object)
graph = dgl.from_networkx(base_graph).to(device)
pos_buffer = []
model.eval()
for step in range(100):
node_feats, edge_feats = prep(graph, current_frame)
dummy_relation = torch.zeros(
edge_feats.shape[0], 1).float().to(device)
dummy_global = torch.zeros(
node_feats.shape[0], 1).float().to(device)
v_pred, _ = model(graph, node_feats[:, 3:5].float(
), edge_feats.float(), dummy_global, dummy_relation)
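        # Explicit Euler step: advance positions with the predicted velocities
        # using the hard-coded timestep of 0.001, then record the updated state.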
current_frame[:, [1, 2]] += v_pred*0.001
current_frame[:, 3:5] = v_pred
pos_buffer.append(current_frame[:, [1, 2]].cpu().numpy())
pos_buffer = np.vstack(pos_buffer).reshape(100, n_object, -1)
make_video(pos_buffer, 'video_model.mp4')
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
argparser.add_argument('--epochs', type=int, default=40000,
help='Number of epochs in training')
argparser.add_argument('--lambda_reg', type=float, default=0.001,
help='regularization weight')
argparser.add_argument('--gpu', type=int, default=-1,
help='gpu device code, -1 means cpu')
argparser.add_argument('--batch_size', type=int, default=100,
help='size of each mini batch')
argparser.add_argument('--num_workers', type=int, default=0,
help='number of workers for dataloading')
argparser.add_argument('--visualize', action='store_true', default=False,
help='Whether enable trajectory rollout mode for visualization')
args = argparser.parse_args()
# Select Device to be CPU or GPU
if args.gpu != -1:
device = torch.device('cuda:{}'.format(args.gpu))
else:
device = torch.device('cpu')
train_data = MultiBodyTrainDataset()
valid_data = MultiBodyValidDataset()
test_data = MultiBodyTestDataset()
collator = MultiBodyGraphCollator(train_data.n_particles)
train_dataloader = DataLoader(
train_data, args.batch_size, True, collate_fn=collator, num_workers=args.num_workers)
valid_dataloader = DataLoader(
valid_data, args.batch_size, True, collate_fn=collator, num_workers=args.num_workers)
test_full_dataloader = DataLoader(
test_data, args.batch_size, True, collate_fn=collator, num_workers=args.num_workers)
node_feats = 5
stat = {'median': torch.from_numpy(train_data.stat_median).to(device),
'max': torch.from_numpy(train_data.stat_max).to(device),
'min': torch.from_numpy(train_data.stat_min).to(device)}
print("Weight: ", train_data.stat_median[0],
train_data.stat_max[0], train_data.stat_min[0])
print("Position: ", train_data.stat_median[[
1, 2]], train_data.stat_max[[1, 2]], train_data.stat_min[[1, 2]])
print("Velocity: ", train_data.stat_median[[
3, 4]], train_data.stat_max[[3, 4]], train_data.stat_min[[3, 4]])
prepare_layer = PrepareLayer(node_feats, stat).to(device)
interaction_net = InteractionNet(node_feats, stat).to(device)
print(interaction_net)
optimizer = torch.optim.Adam(interaction_net.parameters(), lr=args.lr)
state_dict = interaction_net.state_dict()
loss_fn = torch.nn.MSELoss()
reg_fn = torch.nn.MSELoss(reduction='sum')
try:
for e in range(args.epochs):
last_t = time.time()
            loss = train(optimizer, loss_fn, reg_fn, interaction_net,
                         prepare_layer, train_dataloader, args.lambda_reg, device)
print("Epoch time: ", time.time()-last_t)
if e % 1 == 0:
valid_loss = eval(loss_fn, interaction_net,
prepare_layer, valid_dataloader, device)
test_full_loss = eval(
loss_fn, interaction_net, prepare_layer, test_full_dataloader, device)
print("Epoch: {}.Loss: Valid: {} Full: {}".format(
e, valid_loss, test_full_loss))
except:
traceback.print_exc()
finally:
if args.visualize:
eval_rollout(interaction_net, prepare_layer,
test_data.first_frame, test_data.n_particles, device)
make_video(test_data.test_traj[:100, :, [1, 2]], 'video_truth.mp4')
|
417017
|
import unittest
import datetime
from google.cloud import bigquery
from .. import bqclient_test
from ..bqclient import BqClient
class Test(unittest.TestCase):
def setUp(self):
self.query = 'SELECT count(*) FROM `bigquery-public-data.usa_names.usa_1910_current` WHERE year=2017 AND number>1000;'
def test_run_test(self):
self.assertTrue(bqclient_test.run_test(self.query))
def test_set_bq_instance(self):
self.assertIsInstance(
bqclient_test.set_bq_instance(), BqClient)
def test_set_client(self):
bq = bqclient_test.set_bq_instance()
self.assertIsNone(bqclient_test.set_client(bq))
def test_get_client(self):
bq = bqclient_test.set_bq_instance()
bqclient_test.set_client(bq)
self.assertIsInstance(bqclient_test.get_client(
bq), bigquery.client.Client)
def test_run_query(self):
bq = bqclient_test.set_bq_instance()
bq.location = 'US'
bqclient_test.set_client(bq)
self.assertIsNone(bqclient_test.run_query(bq, self.query))
# To flush results
bqclient_test.read_results(bq)
def test_get_query_job(self):
bq = bqclient_test.set_bq_instance()
bq.location = 'US'
bqclient_test.set_client(bq)
bqclient_test.run_query(bq, self.query)
self.assertIsInstance(
bqclient_test.get_query_job(bq), bigquery.job.QueryJob)
# To flush results
bqclient_test.read_results(bq)
|
417023
|
from datetime import timedelta, datetime
import pytest
from pandas_to_sql.testing.utils.asserters import assert_, get_expected_and_actual
from copy import copy
import pandas as pd
import pandas_to_sql
def test_replace():
df = pytest.df1
df['new_value'] = df.random_str.str.replace('m','v').str.replace('z','_3')
assert_(df)
def test_lower():
df = pytest.df1
df['new_value'] = df.random_str.str.lower()
assert_(df)
def test_upper():
df = pytest.df1
df['new_value'] = df.random_str.str.upper()
assert_(df)
def test_slice1():
df = pytest.df1
df['new_value'] = df.random_str.str.slice(1,3)
assert_(df)
def test_slice2():
df = pytest.df1
df['new_value'] = df.random_str.str.slice(2)
assert_(df)
def test_slice3():
df = pytest.df1
df['new_value'] = df.random_str.str.slice(stop=4)
assert_(df)
def test_slice4():
df = pytest.df1
df['new_value'] = df.random_str.str.slice(-1,-3)
assert_(df)
def test_strip():
df = pytest.df1
df['new_value'] = df.random_str.str.strip('ABCKSLFjadkj')
assert_(df)
def test_strip_none_chars():
df = pytest.df1
df['new_value1'] = df.random_str + ' '
df['new_value2'] = df.random_str.str.strip()
assert_(df)
def test_lstrip():
df = pytest.df1
df['new_value'] = df.random_str.str.lstrip('ABCKSLFjadkj')
assert_(df)
def test_rstrip():
df = pytest.df1
df['new_value'] = df.random_str.str.rstrip('ABCKSLFjadkj')
assert_(df)
def test_len():
df = pytest.df1
df['new_value'] = df.random_str.str.len()
assert_(df)
def test_contains():
df = pytest.df1
df['new_value1'] = df.random_str.str.contains('a')
df['new_value2'] = df.random_str.str.contains('B')
assert_(df)
def test_contains_case_false():
df = pytest.df1
df['new_value1'] = df.random_str.str.contains('a', case=False)
df['new_value2'] = df.random_str.str.contains('B', case=False)
assert_(df)
|
417039
|
from flask import session, Blueprint
from lexos.helpers import constants as constants
from lexos.managers import session_manager as session_manager
from lexos.models.k_means_model import KMeansModel
from lexos.views.base import render
k_means_blueprint = Blueprint("k-means", __name__)
@k_means_blueprint.route("/k-means", methods=["GET"])
def k_means() -> str:
"""Gets the k-means clustering page.
:return: The k-means clustering page.
"""
# Set default options
if "analyoption" not in session:
session["analyoption"] = constants.DEFAULT_ANALYZE_OPTIONS
if "kmeanoption" not in session:
session["kmeanoption"] = constants.DEFAULT_KMEAN_OPTIONS
# Return the k-means clustering page
return render("k-means.html")
@k_means_blueprint.route("/k-means/results", methods=["POST"])
def results():
"""Gets the k-means results.
:return: The k-means results.
"""
# Cache options
session_manager.cache_analysis_option()
session_manager.cache_k_mean_option()
# Get the k-means results
return KMeansModel().get_results()
|
417065
|
from typing import Optional
from abc import abstractmethod
from mobilium_proto_messages.message_processor import MessageProcessor
from mobilium_proto_messages.message_sender import MessageSender
from mobilium_server.utils.shell_executor import ShellExecutor
class ShellMessageProcessor(MessageProcessor):
    def __init__(self, shell_executor: ShellExecutor, message_sender: MessageSender,
                 successor: Optional['MessageProcessor'] = None):
        super().__init__(message_sender, successor)
        self.shell_executor = shell_executor
@abstractmethod
async def _process(self, data: bytes):
pass
|
417066
|
from pyravendb.tests.test_base import TestBase
from pyravendb.data.query import Facet, FacetMode
from pyravendb.raven_operations.maintenance_operations import PutIndexesOperation
from pyravendb.data.indexes import IndexDefinition
import unittest
class Product(object):
def __init__(self, name, price_per_unit):
self.name = name
self.price_per_unit = price_per_unit
class ProductsAndPricePerUnit:
def __init__(self):
self.maps = ("from product in docs.Products "
"select new {"
"price_per_unit = product.price_per_unit}")
self.index_definition = IndexDefinition(name=ProductsAndPricePerUnit.__name__, maps=self.maps)
def execute(self, store):
store.maintenance.send(PutIndexesOperation(self.index_definition))
class TestFacets(TestBase):
def setUp(self):
super(TestFacets, self).setUp()
with self.store.open_session() as session:
session.store(Product("TV", 1022))
session.store(Product("jacket", 100))
session.save_changes()
ProductsAndPricePerUnit().execute(self.store)
self.facets = [Facet("Products", mode=FacetMode.ranges),
Facet("price_per_unit_L_Range", ranges=["{100 TO 3000]", "[402 TO 2000]"], mode=FacetMode.ranges)]
def tearDown(self):
super(TestFacets, self).tearDown()
self.delete_all_topology_files()
def test_facets_with_documents(self):
with self.store.open_session() as session:
query_results = session.query(object_type=Product,
index_name=ProductsAndPricePerUnit.__name__).where_greater_than(
"price_per_unit", 99).to_facets(self.facets)
assert len(query_results['Results']) > 0
if __name__ == "__main__":
unittest.main()
|
417142
|
from django.contrib import admin
# Register your models here.
from .models import Spot, Comment
class SpotAdmin(admin.ModelAdmin):
list_display = ('id', 'city', 'name', 'longitude', 'latitude',
'commit_user_name', 'commit_message', 'commit_date')
search_fields = (
'city', 'name', 'commit_user_name', 'commit_message')
# list_filter = ('data_joined',)
# date_hierarchy = 'data_joined'
class CommentAdmin(admin.ModelAdmin):
list_display = ('id','comment_user_name', 'comment_message', 'comment_date', 'comment_mark')
search_fields = (
'comment_user_name', 'comment_message', 'comment_mark')
# list_filter = ('data_joined',)
# date_hierarchy = 'data_joined'
admin.site.register(Spot, SpotAdmin)
admin.site.register(Comment, CommentAdmin)
|
417175
|
from . import sql
CUSTOM_REPORTS = (
('Custom Reports', (
sql.DistrictMonthly,
sql.HeathFacilityMonthly,
sql.DistrictWeekly,
sql.HealthFacilityWeekly,
)),
)
|
417179
|
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
from hazelcast.protocol.builtin import CodecUtil
# hex: 0x013900
_REQUEST_MESSAGE_TYPE = 80128
# hex: 0x013901
_RESPONSE_MESSAGE_TYPE = 80129
_REQUEST_INITIAL_FRAME_SIZE = REQUEST_HEADER_SIZE
def encode_request(name, aggregator):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
StringCodec.encode(buf, name)
DataCodec.encode(buf, aggregator, True)
return OutboundMessage(buf, True)
def decode_response(msg):
msg.next_frame()
return CodecUtil.decode_nullable(msg, DataCodec.decode)
|
417224
|
def LCSlength(X, Y, lx, ly): # Parameters are the two strings and their lengths
if lx == 0 or ly == 0:
return 0
if X[lx - 1] == Y[ly - 1]:
return LCSlength(X, Y, lx - 1, ly - 1) + 1
return max(LCSlength(X, Y, lx - 1, ly), LCSlength(X, Y, lx, ly - 1))
print("Enter the first string : \n")
X = input()
print("Enter the second string: \n")
Y = input()
print("The length of the LCS is : {}".format(LCSlength(X, Y, len(X), len(Y))))
# This solution has a time complexity of O(2^(lx+ly))
# Also, this LCS problem has overlapping subproblems
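# A memoized sketch of the same recursion, exploiting the overlapping
# subproblems noted above; this brings the complexity down to O(lx * ly).
def LCSlength_memo(X, Y, lx, ly, memo=None):
    if memo is None:
        memo = {}
    if lx == 0 or ly == 0:
        return 0
    if (lx, ly) in memo:
        return memo[(lx, ly)]
    if X[lx - 1] == Y[ly - 1]:
        memo[(lx, ly)] = LCSlength_memo(X, Y, lx - 1, ly - 1, memo) + 1
    else:
        memo[(lx, ly)] = max(LCSlength_memo(X, Y, lx - 1, ly, memo),
                             LCSlength_memo(X, Y, lx, ly - 1, memo))
    return memo[(lx, ly)]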
|
417247
|
import chainer
import chainer.functions as F
import chainer.links as L
class NumberRecognizeNN(chainer.Chain):
def __init__(self, input_size, output_size, hidden_size=200, layer_size=3):
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.layer_size = layer_size
super(NumberRecognizeNN, self).__init__(
l1=L.Linear(self.input_size, hidden_size),
l2=L.Linear(hidden_size, hidden_size),
l3=L.Linear(hidden_size, self.output_size),
)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
o = F.sigmoid(self.l3(h2))
return o
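if __name__ == "__main__":
    # Minimal smoke test (illustrative only): the dimensions below are assumed,
    # e.g. flattened 8x8 digit images (64 features) mapped to 10 classes.
    import numpy as np
    model = NumberRecognizeNN(input_size=64, output_size=10)
    x = np.random.rand(5, 64).astype(np.float32)
    y = model(x)
    print(y.shape)  # expected: (5, 10)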
|
417280
|
from src.automata.ldba import LDBA
# an example automaton for "goal1 or goal2 while avoiding unsafe" or "(FG goal1 | FG goal2) & G !unsafe"
# automaton image is available in "./assets" or "https://i.imgur.com/gyDED4O.png"
# only the automaton "step" function and the "accepting_sets" attribute need to be specified
# "accepting_sets" for Generalised Büchi Accepting (more details here https://bit.ly/ldba_paper)
goal1_or_goal2 = LDBA(accepting_sets=[[1, 2]])
# "step" function for the automaton transitions (input: label, output: automaton_state, un-accepting sink state is "-1")
def step(self, label):
# state 0
if self.automaton_state == 0:
if 'epsilon_1' in label:
self.automaton_state = 1
elif 'epsilon_2' in label:
self.automaton_state = 2
elif 'unsafe' in label:
self.automaton_state = -1 # un-accepting sink state
else:
self.automaton_state = 0
# state 1
elif self.automaton_state == 1:
if 'goal1' in label and 'unsafe' not in label:
self.automaton_state = 1
else:
self.automaton_state = -1 # un-accepting sink state
# state 2
elif self.automaton_state == 2:
if 'goal2' in label and 'unsafe' not in label:
self.automaton_state = 2
else:
self.automaton_state = -1 # un-accepting sink state
# step function returns the new automaton state
return self.automaton_state
# now override the step function
LDBA.step = step.__get__(goal1_or_goal2, LDBA)
# finally, does the LDBA contain an epsilon transition? If so, then
# for each state with outgoing epsilon-transition define a different epsilon
# example: <LDBA_object>.epsilon_transitions = {0: ['epsilon_0'], 4: ['epsilon_1']}
# "0" and "4" are automaton_states
# (for more details refer to https://bit.ly/ldba_paper)
goal1_or_goal2.epsilon_transitions = {0: ['epsilon_0', 'epsilon_1']}
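# usage sketch (illustrative only; label names come from the step function above,
# and the automaton is assumed to start in automaton_state 0):
# goal1_or_goal2.step({'epsilon_1'})  # -> 1 (commit to pursuing goal1)
# goal1_or_goal2.step({'goal1'})      # -> 1 (stays in the accepting state)
# goal1_or_goal2.step({'unsafe'})     # -> -1 (un-accepting sink state)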
|
417289
|
from pyziabm.orderbook3 import Orderbook
import unittest
class TestOrderbook(unittest.TestCase):
'''
Attribute objects in the Orderbook class include:
order_history: list
_bid_book: dictionary
_bid_book_prices: sorted list
_ask_book: dictionary
_ask_book_prices: sorted list
confirm_modify_collector: list
confirm_trade_collector: list
sip_collector: list
trade_book: list
Each method impacts one or more of these attributes.
'''
def setUp(self):
'''
setUp creates the Orderbook instance and a set of orders
'''
self.ex1 = Orderbook()
self.q1_buy = {'order_id': 't1_1', 'timestamp': 2, 'type': 'add', 'quantity': 1, 'side': 'buy',
'price': 50}
self.q2_buy = {'order_id': 't1_2', 'timestamp': 3, 'type': 'add', 'quantity': 1, 'side': 'buy',
'price': 50}
self.q3_buy = {'order_id': 't10_1', 'timestamp': 4, 'type': 'add', 'quantity': 3, 'side': 'buy',
'price': 49}
self.q4_buy = {'order_id': 't11_1', 'timestamp': 5, 'type': 'add', 'quantity': 3, 'side': 'buy',
'price': 47}
self.q1_sell = {'order_id': 't1_3', 'timestamp': 2, 'type': 'add', 'quantity': 1, 'side': 'sell',
'price': 52}
self.q2_sell = {'order_id': 't1_4', 'timestamp': 3, 'type': 'add', 'quantity': 1, 'side': 'sell',
'price': 52}
self.q3_sell = {'order_id': 't10_2', 'timestamp': 4, 'type': 'add', 'quantity': 3, 'side': 'sell',
'price': 53}
self.q4_sell = {'order_id': 't11_2', 'timestamp': 5, 'type': 'add', 'quantity': 3, 'side': 'sell',
'price': 55}
def test_add_order_to_history(self):
'''
add_order_to_history() impacts the order_history list
'''
h1 = {'order_id': 't1_5', 'timestamp': 4, 'type': 'add', 'quantity': 5, 'side': 'sell', 'price': 55}
self.assertFalse(self.ex1.order_history)
h1['exid'] = 1
self.ex1._add_order_to_history(h1)
self.assertDictEqual(h1, self.ex1.order_history[0])
def test_add_order_to_book(self):
'''
add_order_to_book() impacts _bid_book and _bid_book_prices or _ask_book and _ask_book_prices
Add two buy orders, then two sell orders
'''
# 2 buy orders
self.assertFalse(self.ex1._bid_book_prices)
self.assertFalse(self.ex1._bid_book)
self.ex1.add_order_to_book(self.q1_buy)
self.assertTrue(50 in self.ex1._bid_book_prices)
self.assertTrue(50 in self.ex1._bid_book.keys())
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 1)
self.assertEqual(self.ex1._bid_book[50]['size'], 1)
self.assertEqual(self.ex1._bid_book[50]['order_ids'][0], self.q1_buy['order_id'])
self.assertDictEqual(self.ex1._bid_book[50]['orders'][self.q1_buy['order_id']], self.q1_buy)
self.ex1.add_order_to_book(self.q2_buy)
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 2)
self.assertEqual(self.ex1._bid_book[50]['size'], 2)
self.assertEqual(self.ex1._bid_book[50]['order_ids'][1], self.q2_buy['order_id'])
self.assertDictEqual(self.ex1._bid_book[50]['orders'][self.q2_buy['order_id']], self.q2_buy)
# 2 sell orders
self.assertFalse(self.ex1._ask_book_prices)
self.assertFalse(self.ex1._ask_book)
self.ex1.add_order_to_book(self.q1_sell)
self.assertTrue(52 in self.ex1._ask_book_prices)
self.assertTrue(52 in self.ex1._ask_book.keys())
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 1)
self.assertEqual(self.ex1._ask_book[52]['size'], 1)
self.assertEqual(self.ex1._ask_book[52]['order_ids'][0], self.q1_sell['order_id'])
self.assertDictEqual(self.ex1._ask_book[52]['orders'][self.q1_sell['order_id']], self.q1_sell)
self.ex1.add_order_to_book(self.q2_sell)
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 2)
self.assertEqual(self.ex1._ask_book[52]['size'], 2)
self.assertEqual(self.ex1._ask_book[52]['order_ids'][1], self.q2_sell['order_id'])
self.assertDictEqual(self.ex1._ask_book[52]['orders'][self.q2_sell['order_id']], self.q2_sell)
def test_remove_order(self):
'''
_remove_order() impacts _bid_book and _bid_book_prices or _ask_book and _ask_book_prices
Add two orders, remove the second order twice
'''
# buy orders
self.ex1.add_order_to_book(self.q1_buy)
self.ex1.add_order_to_book(self.q2_buy)
self.assertTrue(50 in self.ex1._bid_book_prices)
self.assertTrue(50 in self.ex1._bid_book.keys())
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 2)
self.assertEqual(self.ex1._bid_book[50]['size'], 2)
self.assertEqual(len(self.ex1._bid_book[50]['order_ids']), 2)
# remove first order
self.ex1._remove_order('buy', 50, 't1_1')
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 1)
self.assertEqual(self.ex1._bid_book[50]['size'], 1)
self.assertEqual(len(self.ex1._bid_book[50]['order_ids']), 1)
self.assertFalse('t1_1' in self.ex1._bid_book[50]['orders'].keys())
self.assertTrue(50 in self.ex1._bid_book_prices)
# remove second order
self.ex1._remove_order('buy', 50, 't1_2')
self.assertFalse(self.ex1._bid_book_prices)
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 0)
self.assertEqual(self.ex1._bid_book[50]['size'], 0)
self.assertEqual(len(self.ex1._bid_book[50]['order_ids']), 0)
self.assertFalse('t1_2' in self.ex1._bid_book[50]['orders'].keys())
self.assertFalse(50 in self.ex1._bid_book_prices)
# remove second order again
self.ex1._remove_order('buy', 50, 't1_2')
self.assertFalse(self.ex1._bid_book_prices)
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 0)
self.assertEqual(self.ex1._bid_book[50]['size'], 0)
self.assertEqual(len(self.ex1._bid_book[50]['order_ids']), 0)
self.assertFalse('t1_2' in self.ex1._bid_book[50]['orders'].keys())
# sell orders
self.ex1.add_order_to_book(self.q1_sell)
self.ex1.add_order_to_book(self.q2_sell)
self.assertTrue(52 in self.ex1._ask_book_prices)
self.assertTrue(52 in self.ex1._ask_book.keys())
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 2)
self.assertEqual(self.ex1._ask_book[52]['size'], 2)
self.assertEqual(len(self.ex1._ask_book[52]['order_ids']), 2)
# remove first order
self.ex1._remove_order('sell', 52, 't1_3')
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 1)
self.assertEqual(self.ex1._ask_book[52]['size'], 1)
self.assertEqual(len(self.ex1._ask_book[52]['order_ids']), 1)
self.assertFalse('t1_1' in self.ex1._ask_book[52]['orders'].keys())
self.assertTrue(52 in self.ex1._ask_book_prices)
# remove second order
self.ex1._remove_order('sell', 52, 't1_4')
self.assertFalse(self.ex1._ask_book_prices)
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 0)
self.assertEqual(self.ex1._ask_book[52]['size'], 0)
self.assertEqual(len(self.ex1._ask_book[52]['order_ids']), 0)
self.assertFalse('t1_2' in self.ex1._ask_book[52]['orders'].keys())
self.assertFalse(52 in self.ex1._ask_book_prices)
# remove second order again
self.ex1._remove_order('sell', 52, 't1_4')
self.assertFalse(self.ex1._ask_book_prices)
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 0)
self.assertEqual(self.ex1._ask_book[52]['size'], 0)
self.assertEqual(len(self.ex1._ask_book[52]['order_ids']), 0)
self.assertFalse('t1_2' in self.ex1._ask_book[52]['orders'].keys())
def test_modify_order(self):
'''
_modify_order() primarily impacts _bid_book or _ask_book
_modify_order() could impact _bid_book_prices or _ask_book_prices if the order results
in removing the full quantity with a call to _remove_order()
Add 1 order, remove partial, then remainder
'''
# Buy order
q1 = {'order_id': 't1_1', 'timestamp': 5, 'type': 'add', 'quantity': 2, 'side': 'buy',
'price': 50}
self.ex1.add_order_to_book(q1)
self.assertEqual(self.ex1._bid_book[50]['size'], 2)
# remove 1
self.ex1._modify_order('buy', 1, 't1_1', 50)
self.assertEqual(self.ex1._bid_book[50]['size'], 1)
self.assertEqual(self.ex1._bid_book[50]['orders']['t1_1']['quantity'], 1)
self.assertTrue(self.ex1._bid_book_prices)
# remove remainder
self.ex1._modify_order('buy', 1, 't1_1', 50)
self.assertFalse(self.ex1._bid_book_prices)
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 0)
self.assertEqual(self.ex1._bid_book[50]['size'], 0)
self.assertFalse('t1_1' in self.ex1._bid_book[50]['orders'].keys())
# Sell order
q2 = {'order_id': 't1_1', 'timestamp': 5, 'type': 'add', 'quantity': 2, 'side': 'sell',
'price': 50}
self.ex1.add_order_to_book(q2)
self.assertEqual(self.ex1._ask_book[50]['size'], 2)
# remove 1
self.ex1._modify_order('sell', 1, 't1_1', 50)
self.assertEqual(self.ex1._ask_book[50]['size'], 1)
self.assertEqual(self.ex1._ask_book[50]['orders']['t1_1']['quantity'], 1)
self.assertTrue(self.ex1._ask_book_prices)
# remove remainder
self.ex1._modify_order('sell', 1, 't1_1', 50)
self.assertFalse(self.ex1._ask_book_prices)
self.assertEqual(self.ex1._ask_book[50]['num_orders'], 0)
self.assertEqual(self.ex1._ask_book[50]['size'], 0)
self.assertFalse('t1_1' in self.ex1._ask_book[50]['orders'].keys())
def test_add_trade_to_book(self):
'''
add_trade_to_book() impacts trade_book
Check trade book empty, add a trade, check non-empty, verify dict equality
'''
t1 = dict(resting_order_id='t1_1', resting_timestamp=2, incoming_order_id='t2_1',
timestamp=5, price=50, quantity=1, side='buy')
self.assertFalse(self.ex1.trade_book)
self.ex1._add_trade_to_book('t1_1', 2, 't2_1', 5, 50, 1, 'buy')
self.assertTrue(self.ex1.trade_book)
self.assertDictEqual(t1, self.ex1.trade_book[0])
def test_confirm_trade(self):
'''
confirm_trade() impacts confirm_trade_collector
Check confirm trade collector empty, add a trade, check non-empty, verify dict equality
'''
t2 = dict(timestamp=5, trader='t3', order_id='t3_1', quantity=1,
side='sell', price=50)
self.assertFalse(self.ex1.confirm_trade_collector)
self.ex1._confirm_trade(5, 'sell', 1, 't3_1', 50)
self.assertTrue(self.ex1.confirm_trade_collector)
self.assertDictEqual(t2, self.ex1.confirm_trade_collector[0])
def test_confirm_modify(self):
'''
confirm_modify() impacts confirm_modify_collector
Check confirm modify collector empty, add a trade, check non-empty, verify dict equality
'''
m1 = dict(timestamp=7, trader='t5', order_id='t5_10', quantity=5, side='buy')
self.assertFalse(self.ex1.confirm_modify_collector)
self.ex1._confirm_modify(7, 'buy', 5, 't5_10')
self.assertTrue(self.ex1.confirm_modify_collector)
self.assertDictEqual(m1, self.ex1.confirm_modify_collector[0])
def test_process_order(self):
'''
process_order() impacts confirm_modify_collector, traded indicator, order_history,
_bid_book and _bid_book_prices or _ask_book and _ask_book_prices.
process_order() is a traffic manager. An order is either an add order or not. If it is an add order,
it is either priced to go directly to the book or is sent to match_trade (which is tested below). If it
is not an add order, it is either modified or cancelled. To test, we will add some buy and sell orders,
then test for trades, cancels and modifies. process_order() also resets some object collectors.
'''
self.q2_buy['quantity'] = 2
self.q2_sell['quantity'] = 2
self.assertEqual(len(self.ex1._ask_book_prices), 0)
self.assertEqual(len(self.ex1._bid_book_prices), 0)
self.assertFalse(self.ex1.confirm_modify_collector)
self.assertFalse(self.ex1.order_history)
self.assertFalse(self.ex1.traded)
# seed order book
self.ex1.add_order_to_book(self.q1_buy)
self.ex1.add_order_to_book(self.q1_sell)
# process new orders
self.ex1.process_order(self.q2_buy)
self.ex1.process_order(self.q2_sell)
self.assertEqual(len(self.ex1._ask_book_prices), 1)
self.assertEqual(len(self.ex1._bid_book_prices), 1)
self.assertEqual(len(self.ex1.order_history), 2)
# marketable sell takes out 1 share
q3_sell = {'order_id': 't3_1', 'timestamp': 5, 'type': 'add', 'quantity': 1, 'side': 'sell',
'price': 0}
self.ex1.process_order(q3_sell)
self.assertEqual(len(self.ex1.order_history), 3)
self.assertEqual(self.ex1._bid_book[50]['num_orders'], 1)
self.assertEqual(self.ex1._bid_book[50]['size'], 2)
self.assertTrue(self.ex1.traded)
# marketable buy takes out 1 share
q3_buy = {'order_id': 't3_2', 'timestamp': 5, 'type': 'add', 'quantity': 1, 'side': 'buy',
'price': 10000}
self.ex1.process_order(q3_buy)
self.assertEqual(len(self.ex1.order_history), 4)
self.assertEqual(self.ex1._ask_book[52]['num_orders'], 1)
self.assertEqual(self.ex1._ask_book[52]['size'], 2)
self.assertTrue(self.ex1.traded)
# add/cancel buy order
q4_buy = {'order_id': 't4_1', 'timestamp': 10, 'type': 'add', 'quantity': 1, 'side': 'buy',
'price': 48}
self.ex1.process_order(q4_buy)
self.assertEqual(len(self.ex1.order_history), 5)
self.assertEqual(len(self.ex1._bid_book_prices), 2)
self.assertEqual(self.ex1._bid_book[48]['num_orders'], 1)
self.assertEqual(self.ex1._bid_book[48]['size'], 1)
self.assertFalse(self.ex1.traded)
q4_cancel1 = {'order_id': 't4_1', 'timestamp': 10, 'type': 'cancel', 'quantity': 1, 'side': 'buy',
'price': 48}
self.ex1.process_order(q4_cancel1)
self.assertEqual(len(self.ex1.order_history), 6)
self.assertEqual(len(self.ex1._bid_book_prices), 1)
self.assertFalse(self.ex1.traded)
# add/cancel sell order
q4_sell = {'order_id': 't4_2', 'timestamp': 10, 'type': 'add', 'quantity': 1, 'side': 'sell',
'price': 54}
self.ex1.process_order(q4_sell)
self.assertEqual(len(self.ex1.order_history), 7)
self.assertEqual(len(self.ex1._ask_book_prices), 2)
self.assertEqual(self.ex1._ask_book[54]['num_orders'], 1)
self.assertEqual(self.ex1._ask_book[54]['size'], 1)
self.assertFalse(self.ex1.traded)
q4_cancel2 = {'order_id': 't4_2', 'timestamp': 10, 'type': 'cancel', 'quantity': 1, 'side': 'sell',
'price': 54}
self.ex1.process_order(q4_cancel2)
self.assertEqual(len(self.ex1.order_history), 8)
self.assertEqual(len(self.ex1._ask_book_prices), 1)
self.assertFalse(self.ex1.traded)
# add/modify buy order
q5_buy = {'order_id': 't5_1', 'timestamp': 10, 'type': 'add', 'quantity': 5, 'side': 'buy',
'price': 48}
self.ex1.process_order(q5_buy)
self.assertEqual(len(self.ex1.order_history), 9)
self.assertEqual(len(self.ex1._bid_book_prices), 2)
self.assertEqual(self.ex1._bid_book[48]['num_orders'], 1)
self.assertEqual(self.ex1._bid_book[48]['size'], 5)
q5_modify1 = {'order_id': 't5_1', 'timestamp': 10, 'type': 'modify', 'quantity': 2, 'side': 'buy',
'price': 48}
self.ex1.process_order(q5_modify1)
self.assertEqual(len(self.ex1.order_history), 10)
self.assertEqual(len(self.ex1._bid_book_prices), 2)
self.assertEqual(self.ex1._bid_book[48]['size'], 3)
self.assertEqual(self.ex1._bid_book[48]['orders']['t5_1']['quantity'], 3)
self.assertEqual(len(self.ex1.confirm_modify_collector), 1)
self.assertFalse(self.ex1.traded)
# add/modify sell order
q5_sell = {'order_id': 't5_1', 'timestamp': 10, 'type': 'add', 'quantity': 5, 'side': 'sell',
'price': 54}
self.ex1.process_order(q5_sell)
self.assertEqual(len(self.ex1.order_history), 11)
self.assertEqual(len(self.ex1._ask_book_prices), 2)
self.assertEqual(self.ex1._ask_book[54]['num_orders'], 1)
self.assertEqual(self.ex1._ask_book[54]['size'], 5)
q5_modify2 = {'order_id': 't5_1', 'timestamp': 10, 'type': 'modify', 'quantity': 2, 'side': 'sell',
'price': 54}
self.ex1.process_order(q5_modify2)
self.assertEqual(len(self.ex1.order_history), 12)
self.assertEqual(len(self.ex1._ask_book_prices), 2)
self.assertEqual(self.ex1._ask_book[54]['size'], 3)
self.assertEqual(self.ex1._ask_book[54]['orders']['t5_1']['quantity'], 3)
self.assertEqual(len(self.ex1.confirm_modify_collector), 1)
self.assertFalse(self.ex1.traded)
def test_match_trade_sell(self):
'''
An incoming order can:
1. take out part of an order,
2. take out an entire price level,
3. if priced, take out a price level and make a new inside market.
'''
# seed order book
self.ex1.add_order_to_book(self.q1_buy)
self.ex1.add_order_to_book(self.q1_sell)
# process new orders
self.ex1.process_order(self.q2_buy)
self.ex1.process_order(self.q2_sell)
self.ex1.process_order(self.q3_buy)
self.ex1.process_order(self.q3_sell)
self.ex1.process_order(self.q4_buy)
self.ex1.process_order(self.q4_sell)
# The book: bids: 2@50, 3@49, 3@47 ; asks: 2@52, 3@53, 3@55
self.assertEqual(self.ex1._bid_book[47]['size'], 3)
self.assertEqual(self.ex1._bid_book[49]['size'], 3)
self.assertEqual(self.ex1._bid_book[50]['size'], 2)
self.assertEqual(self.ex1._ask_book[52]['size'], 2)
self.assertEqual(self.ex1._ask_book[53]['size'], 3)
self.assertEqual(self.ex1._ask_book[55]['size'], 3)
#self.assertFalse(self.ex1.sip_collector)
# market sell order takes out part of first best bid
q1 = {'order_id': 't100_1', 'timestamp': 10, 'type': 'add', 'quantity': 1, 'side': 'sell',
'price': 0}
self.ex1.process_order(q1)
self.assertEqual(self.ex1._bid_book[50]['size'], 1)
self.assertTrue(50 in self.ex1._bid_book_prices)
self.assertEqual(self.ex1._bid_book[49]['size'], 3)
self.assertEqual(self.ex1._bid_book[47]['size'], 3)
self.assertEqual(self.ex1._bid_book[50]['orders'][self.ex1._bid_book[50]['order_ids'][0]]['quantity'], 1)
#self.assertEqual(len(self.ex1.sip_collector), 1)
# market sell order takes out remainder first best bid and all of the next level
self.assertEqual(len(self.ex1._bid_book_prices), 3)
q2 = {'order_id': 't100_2', 'timestamp': 11, 'type': 'add', 'quantity': 4, 'side': 'sell',
'price': 0}
self.ex1.process_order(q2)
self.assertEqual(len(self.ex1._bid_book_prices), 1)
self.assertFalse(50 in self.ex1._bid_book_prices)
self.assertFalse(49 in self.ex1._bid_book_prices)
self.assertTrue(47 in self.ex1._bid_book_prices)
#self.assertEqual(len(self.ex1.sip_collector), 3)
# make new market
q3 = {'order_id': 't101_1', 'timestamp': 12, 'type': 'add', 'quantity': 2, 'side': 'buy',
'price': 48}
q4 = {'order_id': 't102_1', 'timestamp': 13, 'type': 'add', 'quantity': 3, 'side': 'sell',
'price': 48}
self.ex1.process_order(q3)
self.assertEqual(len(self.ex1._bid_book_prices), 2)
self.assertTrue(48 in self.ex1._bid_book_prices)
self.assertTrue(47 in self.ex1._bid_book_prices)
self.assertEqual(self.ex1._bid_book_prices[-1], 48)
self.assertEqual(self.ex1._bid_book_prices[-2], 47)
# sip_collector does not reset until new trade at new time
#self.assertEqual(len(self.ex1.sip_collector), 3)
self.ex1.process_order(q4)
self.assertEqual(len(self.ex1._bid_book_prices), 1)
self.assertFalse(48 in self.ex1._bid_book_prices)
self.assertTrue(47 in self.ex1._bid_book_prices)
self.assertEqual(len(self.ex1._ask_book_prices), 4)
self.assertTrue(48 in self.ex1._ask_book_prices)
self.assertEqual(self.ex1._ask_book_prices[0], 48)
self.assertEqual(self.ex1._bid_book_prices[-1], 47)
#self.assertEqual(len(self.ex1.sip_collector), 1)
def test_match_trade_buy(self):
'''
An incoming order can:
1. take out part of an order,
2. take out an entire price level,
3. if priced, take out a price level and make a new inside market.
'''
# seed order book
self.ex1.add_order_to_book(self.q1_buy)
self.ex1.add_order_to_book(self.q1_sell)
# process new orders
self.ex1.process_order(self.q2_buy)
self.ex1.process_order(self.q2_sell)
self.ex1.process_order(self.q3_buy)
self.ex1.process_order(self.q3_sell)
self.ex1.process_order(self.q4_buy)
self.ex1.process_order(self.q4_sell)
# The book: bids: 2@50, 3@49, 3@47 ; asks: 2@52, 3@53, 3@55
self.assertEqual(self.ex1._bid_book[47]['size'], 3)
self.assertEqual(self.ex1._bid_book[49]['size'], 3)
self.assertEqual(self.ex1._bid_book[50]['size'], 2)
self.assertEqual(self.ex1._ask_book[52]['size'], 2)
self.assertEqual(self.ex1._ask_book[53]['size'], 3)
self.assertEqual(self.ex1._ask_book[55]['size'], 3)
# market buy order takes out part of first best ask
q1 = {'order_id': 't100_1', 'timestamp': 10, 'type': 'add', 'quantity': 1, 'side': 'buy',
'price': 100000}
self.ex1.process_order(q1)
self.assertEqual(self.ex1._ask_book[52]['size'], 1)
self.assertTrue(52 in self.ex1._ask_book_prices)
self.assertEqual(self.ex1._ask_book[53]['size'], 3)
self.assertEqual(self.ex1._ask_book[55]['size'], 3)
self.assertEqual(self.ex1._ask_book[52]['orders'][self.ex1._ask_book[52]['order_ids'][0]]['quantity'], 1)
# market buy order takes out remainder first best ask and all of the next level
self.assertEqual(len(self.ex1._ask_book_prices), 3)
q2 = {'order_id': 't100_2', 'timestamp': 11, 'type': 'add', 'quantity': 4, 'side': 'buy',
'price': 100000}
self.ex1.process_order(q2)
self.assertEqual(len(self.ex1._ask_book_prices), 1)
self.assertFalse(52 in self.ex1._ask_book_prices)
self.assertFalse(53 in self.ex1._ask_book_prices)
self.assertTrue(55 in self.ex1._ask_book_prices)
# make new market
q3 = {'order_id': 't101_1', 'timestamp': 12, 'type': 'add', 'quantity': 2, 'side': 'sell',
'price': 54}
q4 = {'order_id': 't102_1', 'timestamp': 13, 'type': 'add', 'quantity': 3, 'side': 'buy',
'price': 54}
self.ex1.process_order(q3)
self.assertEqual(len(self.ex1._ask_book_prices), 2)
self.assertTrue(55 in self.ex1._ask_book_prices)
self.assertTrue(54 in self.ex1._ask_book_prices)
self.assertEqual(self.ex1._ask_book_prices[0], 54)
self.assertEqual(self.ex1._ask_book_prices[1], 55)
self.ex1.process_order(q4)
self.assertEqual(len(self.ex1._ask_book_prices), 1)
self.assertFalse(54 in self.ex1._ask_book_prices)
self.assertTrue(55 in self.ex1._ask_book_prices)
self.assertEqual(len(self.ex1._bid_book_prices), 4)
self.assertTrue(54 in self.ex1._bid_book_prices)
self.assertEqual(self.ex1._ask_book_prices[0], 55)
self.assertEqual(self.ex1._bid_book_prices[-1], 54)
def test_report_top_of_book(self):
'''
At setup(), top of book has 2 to sell at 52 and 2 to buy at 50
at time = 3
'''
self.ex1.add_order_to_book(self.q1_buy)
self.ex1.add_order_to_book(self.q2_buy)
self.ex1.add_order_to_book(self.q1_sell)
self.ex1.add_order_to_book(self.q2_sell)
tob_check = {'timestamp': 5, 'best_bid': 50, 'best_ask': 52, 'bid_size': 2, 'ask_size': 2}
self.ex1.report_top_of_book(5)
self.assertDictEqual(self.ex1._sip_collector[0], tob_check)
def test_market_collapse(self):
'''
At setup(), there is 8 total bid size and 8 total ask size
A trade for 8 or more should collapse the market
'''
print('Market Collapse Tests to stdout:\n')
# seed order book
self.ex1.add_order_to_book(self.q1_buy)
self.ex1.add_order_to_book(self.q1_sell)
# process new orders
self.ex1.process_order(self.q2_buy)
self.ex1.process_order(self.q2_sell)
self.ex1.process_order(self.q3_buy)
self.ex1.process_order(self.q3_sell)
self.ex1.process_order(self.q4_buy)
self.ex1.process_order(self.q4_sell)
# The book: bids: 2@50, 3@49, 3@47 ; asks: 2@52, 3@53, 3@55
# market buy order takes out part of the asks: no collapse
q1 = {'order_id': 't100_1', 'timestamp': 10, 'type': 'add', 'quantity': 4, 'side': 'buy',
'price': 100000}
self.ex1.process_order(q1)
# next market buy order takes out the asks: market collapse
q2 = {'order_id': 't100_2', 'timestamp': 10, 'type': 'add', 'quantity': 5, 'side': 'buy',
'price': 100000}
self.ex1.process_order(q2)
# market sell order takes out part of the bids: no collapse
q3 = {'order_id': 't100_3', 'timestamp': 10, 'type': 'add', 'quantity': 4, 'side': 'sell',
'price': 0}
self.ex1.process_order(q3)
# next market sell order takes out the asks: market collapse
q4 = {'order_id': 't100_4', 'timestamp': 10, 'type': 'add', 'quantity': 5, 'side': 'sell',
'price': 0}
self.ex1.process_order(q4)
|
417316
|
import sys
import os
from . import performance_tester as pt
from argparse import ArgumentParser
def main(args=None):
if args is None:
# command line arguments
parser = ArgumentParser(description='performance_tester.py: '
'tests pyJac performance'
)
parser.add_argument('-w', '--working_directory',
type=str,
default='performance',
help='Directory storing the mechanisms / data.'
)
parser.add_argument('-uoo', '--use_old_opt',
action='store_true',
default=False,
required=False,
help='If True, allows performance_tester to use '
'any old optimization files found'
)
args = parser.parse_args()
pt.performance_tester(os.path.dirname(os.path.abspath(pt.__file__)),
args.working_directory,
args.use_old_opt)
if __name__ == '__main__':
sys.exit(main())
|
417350
|
import torch
from torch.nn.parameter import Parameter
import torch.nn.init as init
class ScaleLayer(torch.nn.Module):
def __init__(self, num_features, use_bias=True):
super(ScaleLayer, self).__init__()
self.weight = Parameter(torch.Tensor(num_features))
init.ones_(self.weight)
self.num_features = num_features
if use_bias:
self.bias = Parameter(torch.Tensor(num_features))
init.zeros_(self.bias)
else:
self.bias = None
def forward(self, inputs):
if self.bias is None:
return inputs * self.weight.view(1, self.num_features, 1, 1)
else:
            return inputs * self.weight.view(1, self.num_features, 1, 1) + self.bias.view(1, self.num_features, 1, 1)
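if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): apply the learnable per-channel
    # scale and bias to an NCHW feature map; the shapes below are assumptions.
    layer = ScaleLayer(num_features=8)
    x = torch.randn(2, 8, 16, 16)  # batch of 2, 8 channels, 16x16 spatial
    y = layer(x)
    print(y.shape)  # expected: torch.Size([2, 8, 16, 16])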
|
417384
|
import os
from os import path
import sys
import shutil, glob
import logging
import uuid
import hashlib
from subprocess import PIPE, STDOUT
import lib
from lib import CouldNotLocate, task
LOG = logging.getLogger(__name__)
class IEError(Exception):
pass
@task
def package_ie(build, **kw):
'Sign executables, Run NSIS'
# On OS X use the nsis executable and files we ship
if sys.platform.startswith('darwin'):
nsis_osx = os.path.realpath(os.path.join(os.path.dirname(__file__), '../lib/nsis_osx'))
nsis_cmd = 'NSISDIR={}/share/nsis PATH={}/bin/:$PATH makensis'.format(nsis_osx, nsis_osx)
else:
nsis_cmd = 'makensis'
LOG.debug("Using nsis command: {nsis_cmd}".format(nsis_cmd=nsis_cmd))
nsis_check = lib.PopenWithoutNewConsole("{nsis_cmd} -VERSION".format(nsis_cmd=nsis_cmd),
shell=True, stdout=PIPE, stderr=STDOUT)
stdout, stderr = nsis_check.communicate()
if nsis_check.returncode != 0:
raise CouldNotLocate("Make sure the 'makensis' executable is in your path")
# JCB: need to check nsis version in stdout here?
# Sign executables
certificate = build.tool_config.get('ie.profile.developer_certificate')
certificate_path = build.tool_config.get('ie.profile.developer_certificate_path')
certificate_password = build.tool_config.get('ie.profile.developer_certificate_password')
if certificate:
# Figure out which signtool to use
signtool = _check_signtool(build)
if signtool == None:
raise CouldNotLocate("Make sure the 'signtool' or 'osslsigncode' executable is in your path")
LOG.info('Signing IE executables with: {signtool}'.format(signtool=signtool))
_sign_app(build=build,
signtool=signtool,
certificate=certificate,
certificate_path=certificate_path,
certificate_password=certificate_password)
development_dir = path.join("development", "ie")
release_dir = path.join("release", "ie")
if not path.isdir(release_dir):
os.makedirs(release_dir)
for arch in ('x86', 'x64'):
nsi_filename = "setup-{arch}.nsi".format(arch=arch)
package = lib.PopenWithoutNewConsole('{nsis_cmd} {nsi}'.format(
nsis_cmd=nsis_cmd,
nsi=path.join(development_dir, "dist", nsi_filename)),
stdout=PIPE, stderr=STDOUT, shell=True
)
out, err = package.communicate()
if package.returncode != 0:
raise IEError("problem running {arch} IE build: {stdout}".format(arch=arch, stdout=out))
# move output to release directory of IE directory and sign it
for exe in glob.glob(development_dir+"/dist/*.exe"):
destination = path.join(release_dir, "{name}-{version}-{arch}.exe".format(
name=build.config.get('name', 'Forge App'),
version=build.config.get('version', '0.1'),
arch=arch
))
shutil.move(exe, destination)
if certificate:
_sign_executable(build=build,
signtool=signtool,
target=destination,
certificate=certificate,
certificate_path=certificate_path,
certificate_password=certificate_password)
def _generate_package_name(build):
if "core" not in build.config:
build.config["core"] = {}
if "ie" not in build.config["core"]:
build.config["core"]["ie"] = {}
build.config["core"]["ie"]["package_name"] = _uuid_to_ms_clsid(build)
return build.config["core"]["ie"]["package_name"]
def _uuid_to_ms_clsid(build):
md5 = hashlib.md5(build.config['uuid'])
guid = uuid.UUID(md5.hexdigest())
clsid = uuid.UUID(guid.bytes_le.encode('hex'))
return "{" + str(clsid).upper() + "}"
def _check_signtool(build):
options = ["signtool /?", "osslsigncode -v"]
    # Note: The following code can be uncommented once osslsigncode_osx has been rebuilt to work
# on stock OS X. At the moment it is dynamically linked against
# /opt/local/lib/libcrypto.1.0.0.dylib which is not part of the OS and probably a
# homebrew library. The system libcrypto is at /usr/lib/libcrypto.dylib
#
# lib_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), '../lib'))
# if sys.platform.startswith('darwin'):
# options.append('{lib_dir}/osslsigncode_osx -v'.format(lib_dir=lib_dir))
# elif sys.platform.startswith('linux'):
# options.append('{lib_dir}/osslsigncode_linux -v'.format(lib_dir=lib_dir))
for option in options:
LOG.info("Checking: %s", option[:-3])
check = lib.PopenWithoutNewConsole(option, shell=True, stdout=PIPE, stderr=STDOUT)
stdout, stderr = check.communicate()
if check.returncode == 0:
return option[:-3]
LOG.info("Could not find anything: %s" % stdout)
return None
def _sign_app(build, signtool=None, certificate=None, certificate_path=None, certificate_password=""):
'Sign all executable code'
path_win32 = path.join("development", "ie", "build", "Win32", "Release")
path_x64 = path.join("development", "ie", "build", "x64", "Release")
_sign_executable(build, signtool, path.join(path_win32, "bho32.dll"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_win32, "forge32.dll"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_win32, "forge32.exe"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_win32, "frame32.dll"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_x64, "bho64.dll"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_x64, "forge64.dll"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_x64, "forge64.exe"),
certificate, certificate_path, certificate_password)
_sign_executable(build, signtool, path.join(path_x64, "frame64.dll"),
certificate, certificate_path, certificate_password)
def _sign_executable(build, signtool, target, certificate = None, certificate_path = None, certificate_password = ""):
'Sign a single executable file'
LOG.info('Signing {target}'.format(target=target))
if signtool == 'signtool':
command = lib.PopenWithoutNewConsole('signtool sign /f {cert} /p {password} /v /t {time} "{target}"'.format(
cert=path.join(certificate_path, certificate),
            password=certificate_password,
time='http://timestamp.comodoca.com/authenticode',
target=target),
stdout=PIPE, stderr=STDOUT, shell=True
)
elif signtool == 'osslsigncode':
command = lib.PopenWithoutNewConsole('osslsigncode -pkcs12 {cert} -pass {password} -t {time} -in "{target}" -out "{target}.signed"'.format(
cert=path.join(certificate_path, certificate),
            password=certificate_password,
time='http://timestamp.comodoca.com/authenticode',
target=target),
stdout=PIPE, stderr=STDOUT, shell=True
)
else:
raise IEError("problem signing IE build, unknown code sign tool: {signtool}".format(signtool=signtool))
out, err = command.communicate()
if command.returncode != 0:
raise IEError("problem signing IE build: {stdout}".format(stdout=out))
if signtool == 'osslsigncode':
shutil.move(target + ".signed", target)
@task
def run_ie(build):
msg = """Currently it is not possible to launch an Internet Explorer extension via this interface."""
LOG.info(msg)
|
417385
|
import pandas
class Static:
CURRENT_PLOT = None
PLOTS = []
class Plot:
def __init__(self, **kwargs):
        # TODO - fix how the chart type is assigned; the current approach is messy
self.layers = []
if "chart_type" in kwargs:
self.chart_type = kwargs["chart_type"]
else:
self.chart_type = "line"
def add_layer(self, *args, **kwargs):
if (len(args) == 1):
# TODO - how do we parse dims?
            data_temp = pandas.Series(args[0])
            x = data_temp.get_index().to_plot_data_v2()
            y = data_temp.to_plot_data_v2()
            data = {"x": x, "y": y}
self.layers.append({"data": data, "options": kwargs})
elif (len(args) == 2):
x = pandas.Series(args[0],name="x").to_plot_data_v2()
y = pandas.Series(args[1],name="y").to_plot_data_v2()
data = {"x": x, "y": y}
self.layers.append({"data": data, "options": kwargs})
elif (len(args) == 3):
# We don't know format strings
pass
def get_current_plot(**kwargs):
if Static.CURRENT_PLOT is None:
Static.CURRENT_PLOT = Plot(**kwargs)
return Static.CURRENT_PLOT
def close():
""" closes the current plot """
if Static.CURRENT_PLOT is not None:
Static.PLOTS.append(Static.CURRENT_PLOT)
Static.CURRENT_PLOT = None
def get_plots():
close()
old = map(lambda p: p.__dict__, Static.PLOTS)
Static.PLOTS = []
return old
def plot(*args, **kwargs): # kwargs could include (linewidth, lw, color, marker, linestyle, ls)
"""
from: http://matplotlib.org/api/pyplot_api.html
plot(x, y)
plot(x, y, 'formatstring')
plot(y) # plot y using x as index array
if first argument (?) is 2-dim, those columns will be plotted
"""
current_plot = get_current_plot(**kwargs)
current_plot.add_layer(*args, **kwargs)
def scatter(x, y, **kwargs):
plot(x, y, chart_type="scatter", **kwargs)
def bar(left, height, **kwargs):
plot(left, height, chart_type="bar", **kwargs)
def hist(*args, **kwargs):
pass
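# Usage sketch (illustrative only): build up layers on the current plot, then
# collect the accumulated plot dictionaries. Data conversion relies on the
# to_plot_data_v2 helpers assumed to be attached to pandas objects in this environment.
# plot([1, 2, 3], [4, 5, 6], color="red")
# scatter([1, 2, 3], [2, 4, 6])
# layers = get_plots()  # closes the current plot and returns a list of plot dicts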
|
417437
|
from mpas_analysis.shared.regions.compute_region_masks_subtask \
import ComputeRegionMasksSubtask, get_feature_list
from mpas_analysis.shared.regions.compute_region_masks \
import ComputeRegionMasks
|
417440
|
from enum import Enum
from typing import Optional
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.utils._encode import _check_unknown
from sklearn.utils._encode import _encode
from etna.datasets import TSDataset
from etna.transforms.base import Transform
class ImputerMode(str, Enum):
"""Enum for different imputation strategy."""
new_value = "new_value"
mean = "mean"
none = "none"
class _LabelEncoder(preprocessing.LabelEncoder):
def transform(self, y: pd.Series, strategy: str):
diff = _check_unknown(y, known_values=self.classes_)
index = np.where(np.isin(y, diff))[0]
encoded = _encode(y, uniques=self.classes_, check_unknown=False).astype(float)
if strategy == ImputerMode.none:
filling_value = None
elif strategy == ImputerMode.new_value:
filling_value = -1
elif strategy == ImputerMode.mean:
filling_value = np.mean(encoded[~np.isin(y, diff)])
else:
raise ValueError(f"The strategy '{strategy}' doesn't exist")
encoded[index] = filling_value
return encoded
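# Behaviour sketch for the strategies above (illustrative only; numeric categories assumed):
#   le = _LabelEncoder().fit(pd.Series([10, 20, 20]))
#   le.transform(pd.Series([10, 20, 30]), strategy="new_value")  # -> [0., 1., -1.]
#   le.transform(pd.Series([10, 20, 30]), strategy="mean")       # -> [0., 1., 0.5]
#   le.transform(pd.Series([10, 20, 30]), strategy="none")       # -> [0., 1., nan]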
class LabelEncoderTransform(Transform):
"""Encode categorical feature with value between 0 and n_classes-1."""
def __init__(self, in_column: str, out_column: Optional[str] = None, strategy: str = ImputerMode.mean):
"""
Init LabelEncoderTransform.
Parameters
----------
in_column:
Name of column to be transformed
out_column:
Name of added column. If not given, use ``self.__repr__()``
strategy:
            Strategy for filling values that were not seen during ``fit``:
- If "new_value", then replace missing values with '-1'
- If "mean", then replace missing values using the mean in encoded column
- If "none", then replace missing values with None
"""
self.in_column = in_column
self.out_column = out_column
self.strategy = strategy
self.le = _LabelEncoder()
def fit(self, df: pd.DataFrame) -> "LabelEncoderTransform":
"""
Fit Label encoder.
Parameters
----------
df:
Dataframe with data to fit the transform
Returns
-------
:
Fitted transform
"""
y = TSDataset.to_flatten(df)[self.in_column]
self.le.fit(y=y)
return self
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
        Encode the ``in_column`` with the fitted Label encoder.
Parameters
----------
df
Dataframe with data to transform
Returns
-------
:
Dataframe with column with encoded values
"""
out_column = self._get_column_name()
result_df = TSDataset.to_flatten(df)
result_df[out_column] = self.le.transform(result_df[self.in_column], self.strategy)
result_df[out_column] = result_df[out_column].astype("category")
result_df = TSDataset.to_dataset(result_df)
return result_df
def _get_column_name(self) -> str:
"""Get the ``out_column`` depending on the transform's parameters."""
if self.out_column:
return self.out_column
return self.__repr__()
class OneHotEncoderTransform(Transform):
"""Encode categorical feature as a one-hot numeric features.
If unknown category is encountered during transform, the resulting one-hot
encoded columns for this feature will be all zeros.
"""
def __init__(self, in_column: str, out_column: Optional[str] = None):
"""
Init OneHotEncoderTransform.
Parameters
----------
in_column:
Name of column to be encoded
out_column:
Prefix of names of added columns. If not given, use ``self.__repr__()``
"""
self.in_column = in_column
self.out_column = out_column
self.ohe = preprocessing.OneHotEncoder(handle_unknown="ignore", sparse=False)
def fit(self, df: pd.DataFrame) -> "OneHotEncoderTransform":
"""
Fit One Hot encoder.
Parameters
----------
df:
Dataframe with data to fit the transform
Returns
-------
:
Fitted transform
"""
x = TSDataset.to_flatten(df)[self.in_column].values.reshape(-1, 1)
self.ohe.fit(X=x)
return self
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
        Encode the ``in_column`` with the fitted One Hot encoder.
Parameters
----------
df
Dataframe with data to transform
Returns
-------
:
Dataframe with column with encoded values
"""
out_column = self._get_column_name()
out_columns = [out_column + "_" + str(i) for i in range(len(self.ohe.categories_[0]))]
result_df = TSDataset.to_flatten(df)
x = result_df[self.in_column].values.reshape(-1, 1)
result_df[out_columns] = self.ohe.transform(X=x)
result_df[out_columns] = result_df[out_columns].astype("category")
result_df = TSDataset.to_dataset(result_df)
return result_df
def _get_column_name(self) -> str:
"""Get the ``out_column`` depending on the transform's parameters."""
if self.out_column:
return self.out_column
return self.__repr__()
|
417447
|
import os
from mattermostdriver import Driver
from requests.exceptions import ConnectionError, HTTPError
from patter.exceptions import (
FileUploadException,
MissingChannel,
MissingEnvVars,
MissingUser,
)
config_vars = {
"MATTERMOST_TEAM_NAME": os.getenv("MATTERMOST_TEAM_NAME", None),
"MATTERMOST_URL": os.getenv("MATTERMOST_URL", None),
"MATTERMOST_USERNAME": os.getenv("MATTERMOST_USERNAME", None),
"MATTERMOST_PASSWORD": os.getenv("MATTERMOST_PASSWORD", None),
"MATTERMOST_PORT": os.getenv("MATTERMOST_PORT", None),
}
class Patter(object):
def __init__(self, message, filename, format_as_code, user, channel, syntax,
verbose):
self.message = message
self.format_as_code = format_as_code
self.filename = filename
self.user = user
self.channel = channel
self.verbose = verbose
self.syntax = syntax
self.mm_client = Driver({
"url": config_vars["MATTERMOST_URL"],
"login_id": config_vars["MATTERMOST_USERNAME"],
"password": config_vars["<PASSWORD>"],
"scheme": "https",
"port": int(config_vars["MATTERMOST_PORT"]),
"basepath": "/api/v4",
"verify": True,
"timeout": 30,
"debug": False,
})
self.team_name = config_vars["MATTERMOST_TEAM_NAME"]
try:
self.mm_client.login()
except ConnectionError:
print("Unable to connect to the configured Mattermost server.")
raise
def check_env_vars(self):
"""Check that all of the required environment variables are set.
If not, raise exception noting the ones that are missing.
:raises: MissingEnvVars.
"""
missing_vars = list(k for k, v in config_vars.items() if v is None)
if len(missing_vars) > 0:
error_string = "\n\t".join(missing_vars)
raise MissingEnvVars(
"The following environment variables are required but not set:\n\t{}".format(
error_string
)
)
def send_message(self):
"""Send the outgoing message. If there is a file to send, attach it."""
channel_id = self._get_message_channel_id()
message = self._format_message(self.message, self.format_as_code, self.syntax)
options = {
"channel_id": channel_id,
"message": message,
}
if self.filename:
attached_file_id = self._attach_file(self.filename, channel_id)
options["file_ids"] = [attached_file_id]
self.mm_client.posts.create_post(options)
def _format_message(self, message, format_as_code, syntax):
"""Adds formatting to the given message.
:param message: String to be formatted.
        :param format_as_code: Boolean if message should be formatted as code.
:returns: Formatted message.
"""
if not message:
return ""
formatted_message = message
if format_as_code:
formatted_message = "```{}\n{}```".format(syntax, message)
formatted_message += "\n⁽ᵐᵉˢˢᵃᵍᵉ ᵇʳᵒᵘᵍʰᵗ ᵗᵒ ʸᵒᵘ ᵇʸ ᵖᵃᵗᵗᵉʳ⁾"
return formatted_message
def _get_message_channel_id(self):
"""Get the channel to send the message to.
Raise if the channel can't be found.
:returns: Channel id string.
:raises: MissingChannel.
"""
if self.channel:
try:
return self._get_channel_id_by_name(self.channel)
except HTTPError:
raise MissingChannel(
"The channel \'{}\' does not exist.".format(
self.channel,
))
if self.user:
return self._get_channel_id_for_user(self.user)
def _get_channel_id_by_name(self, channel_name):
"""Use this function to get an ID from a channel name.
:param channel_name: Name of channel
:returns: Channel id string.
"""
channel = self.mm_client.channels.get_channel_by_name_and_team_name(
team_name=self.team_name,
channel_name=channel_name,
)
return channel["id"]
def _get_channel_id_for_user(self, user_name):
"""Get the channel id for a direct message with the target user.
Raise if the user can not be found.
:returns: Channel id string.
:raises: MissingUser.
"""
try:
recipient_user_id = self._get_user_id_by_name(user_name)
except HTTPError:
raise MissingUser("The user \'{}\' does not exist.".format(
user_name,
))
my_id = self.mm_client.users.get_user("me")["id"]
# The Mattermost API treats direct messages the same as regular
# channels so we need to first get a channel ID to send the direct
# message.
user_channel = self.mm_client.channels.create_direct_message_channel(
[recipient_user_id, my_id]
)
return user_channel["id"]
def _get_user_id_by_name(self, user_name):
"""The Mattermost API expects a user ID, not a username.
Use this function to get an ID from a username.
:param user_name: Name of user to get user id for.
:returns: User id string.
"""
user = self.mm_client.users.get_user_by_username(
username=user_name,
)
return user["id"]
def _attach_file(self, filename, channel_id):
"""Attach the given filename into the given channel.
:param filename: Name of file to attach.
:param channel_id: Id string of channel to attach file to.
        :returns: Id string of file attachment.
"""
try:
file_upload = self.mm_client.files.upload_file(
channel_id=channel_id,
                files={"files": (filename, open(filename, "rb"))}
)
return file_upload["file_infos"][0]["id"]
except (KeyError, IndexError):
raise FileUploadException("Unable to upload file '{}'.".format(filename))
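# Usage sketch (illustrative, not part of patter): assumes the MATTERMOST_* environment
# variables above are set; the channel name is a made-up example.
#
#     patter = Patter(message="hello", filename=None, format_as_code=False,
#                     user=None, channel="town-square", syntax="", verbose=False)
#     patter.check_env_vars()
#     patter.send_message()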
|
417465
|
import time
import os
import glob
import inspect
import gym
import pybullet_envs
from gym.envs.registration import load
from stable_baselines.deepq.policies import FeedForwardPolicy
from stable_baselines.common.policies import register_policy
from stable_baselines.bench import Monitor
from stable_baselines import logger
from stable_baselines import PPO2, A2C, ACER, ACKTR, DQN, DDPG
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize, \
VecFrameStack, SubprocVecEnv
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common import set_global_seeds
ALGOS = {
'a2c': A2C,
'acer': ACER,
'acktr': ACKTR,
'dqn': DQN,
'ddpg': DDPG,
'ppo2': PPO2
}
# ================== Custom Policies =================
class CustomDQNPolicy(FeedForwardPolicy):
def __init__(self, *args, **kwargs):
super(CustomDQNPolicy, self).__init__(*args, **kwargs,
layers=[64],
layer_norm=True,
feature_extraction="mlp")
register_policy('CustomDQNPolicy', CustomDQNPolicy)
def make_env(env_id, rank=0, seed=0, log_dir=None):
"""
Helper function to multiprocess training
and log the progress.
:param env_id: (str)
:param rank: (int)
:param seed: (int)
:param log_dir: (str)
"""
    if log_dir is None:
log_dir = "/tmp/gym/{}/".format(int(time.time()))
os.makedirs(log_dir, exist_ok=True)
def _init():
set_global_seeds(seed + rank)
env = gym.make(env_id)
env.seed(seed + rank)
env = Monitor(env, os.path.join(log_dir, str(rank)), allow_early_resets=True)
return env
return _init
def create_test_env(env_id, n_envs=1, is_atari=False,
stats_path=None, norm_reward=False, seed=0,
log_dir='', should_render=True):
"""
Create environment for testing a trained agent
:param env_id: (str)
:param n_envs: (int) number of processes
:param is_atari: (bool)
    :param stats_path: (str) path to folder containing saved running averages
    :param norm_reward: (bool) Whether to normalize rewards or not when using VecNormalize
:param seed: (int) Seed for random number generator
:param log_dir: (str) Where to log rewards
:param should_render: (bool) For Pybullet env, display the GUI
:return: (gym.Env)
"""
# HACK to save logs
if log_dir is not None:
os.environ["OPENAI_LOG_FORMAT"] = 'csv'
os.environ["OPENAI_LOGDIR"] = os.path.abspath(log_dir)
os.makedirs(log_dir, exist_ok=True)
logger.configure()
# Create the environment and wrap it if necessary
if is_atari:
print("Using Atari wrapper")
env = make_atari_env(env_id, num_env=n_envs, seed=seed)
# Frame-stacking with 4 frames
env = VecFrameStack(env, n_stack=4)
elif n_envs > 1:
env = SubprocVecEnv([make_env(env_id, i, seed, log_dir) for i in range(n_envs)])
    # Pybullet envs do not follow the gym.render() interface
elif "Bullet" in env_id:
spec = gym.envs.registry.env_specs[env_id]
class_ = load(spec._entry_point)
# HACK: force SubprocVecEnv for Bullet env that does not
# have a render argument
use_subproc = 'renders' not in inspect.getfullargspec(class_.__init__).args
# Create the env, with the original kwargs, and the new ones overriding them if needed
def _init():
# TODO: fix for pybullet locomotion envs
env = class_(**{**spec._kwargs}, renders=should_render)
env.seed(0)
if log_dir is not None:
env = Monitor(env, os.path.join(log_dir, "0"), allow_early_resets=True)
return env
if use_subproc:
env = SubprocVecEnv([make_env(env_id, 0, seed, log_dir)])
else:
env = DummyVecEnv([_init])
else:
env = DummyVecEnv([make_env(env_id, 0, seed, log_dir)])
# Load saved stats for normalizing input and rewards
# And optionally stack frames
if stats_path is not None:
        if os.path.exists(os.path.join(stats_path, 'obs_rms.pkl')):
print("Loading running average")
env = VecNormalize(env, training=False, norm_reward=norm_reward)
env.load_running_average(stats_path)
n_stack_file = os.path.join(stats_path, 'n_stack')
if os.path.isfile(n_stack_file):
with open(n_stack_file, 'r') as f:
n_stack = int(f.read())
print("Stacking {} frames".format(n_stack))
env = VecFrameStack(env, n_stack)
return env
def linear_schedule(initial_value):
"""
Linear learning rate schedule.
:param initial_value: (float or str)
:return: (function)
"""
if isinstance(initial_value, str):
initial_value = float(initial_value)
def func(progress):
"""
Progress will decrease from 1 (beginning) to 0
:param progress: (float)
:return: (float)
"""
return progress * initial_value
return func
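# Example (illustrative): the returned schedule maps the remaining progress in [1, 0]
# to a learning rate, e.g.
#
#     lr_schedule = linear_schedule(2.5e-4)
#     lr_schedule(1.0)  # 2.5e-4 at the start of training
#     lr_schedule(0.5)  # 1.25e-4 halfway through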
def get_trained_models(log_folder):
"""
:param log_folder: (str) Root log folder
:return: (dict) Dict representing the trained agent
"""
algos = os.listdir(log_folder)
trained_models = {}
for algo in algos:
for env_id in glob.glob('{}/{}/*.pkl'.format(log_folder, algo)):
# Retrieve env name
env_id = env_id.split('/')[-1].split('.pkl')[0]
trained_models['{}-{}'.format(algo, env_id)] = (algo, env_id)
return trained_models
|
417510
|
import enum
class MoveDirection(enum.Enum):
UP = "up"
DOWN = "down"
LEFT = "left"
RIGHT = "right"
class CountDirection(enum.IntEnum):
HORIZONTAL = 0
VERTICAL = 1
class ResettableCount:
resetLoop = False
def __init__(self, resets_to_nonzero):
self.totalCount = 0
self.resets_to_nonzero = resets_to_nonzero
def get_horiz_vert(in_direction):
if in_direction == MoveDirection.UP or in_direction == MoveDirection.DOWN:
return CountDirection.VERTICAL
elif in_direction == MoveDirection.LEFT or in_direction == MoveDirection.RIGHT:
return CountDirection.HORIZONTAL
raise ValueError("Unknown direction")
def get_dir_dimension(in_direction, width, height):
if get_horiz_vert(in_direction) == CountDirection.VERTICAL:
return height
else:
return width
def get_cur_direction_names(in_direction):
if get_horiz_vert(in_direction) == CountDirection.VERTICAL:
return ("Up", "Down")
else:
return ("Left", "Right")
def get_trigger_count(prevPos, curPos, crossPos):
'''
prevPos - where we saw it before
curPos - where it is now
crossPos - value that triggers counting
Returns:
-1 (or 1) move in the direction of diminishing (increasing) coordinates AND
having crossed the line (crossPos)
0 otherwise
'''
if prevPos <= crossPos < curPos:
return 1
elif curPos < crossPos <= prevPos:
return -1
else:
return 0
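# Worked example (illustrative, not part of the original module):
if __name__ == "__main__":
    # crossing the line at crossPos=100 while moving up counts +1
    assert get_trigger_count(prevPos=95, curPos=105, crossPos=100) == 1
    # crossing it while moving down counts -1
    assert get_trigger_count(prevPos=105, curPos=95, crossPos=100) == -1
    # staying on one side of the line counts 0
    assert get_trigger_count(prevPos=90, curPos=95, crossPos=100) == 0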
|
417530
|
from django.conf.urls import url
from apps.mock_server.views import http_server
from apps.mock_server.views import http_interface
urlpatterns = [
url(r'^mock/([\w\-\.]+)/([\w\-\.]+)/([\w\-\.]+)([\w\-\.\/]+)$', http_server.mock),
url(r'^mockserver/readme$', http_server.readme, name="MOCK_HTTP_readme"),
#add page
url(r'^mockserver/HTTP_InterfaceCheck$', http_interface.http_interfaceCheck, name="MOCK_HTTP_InterfaceCheck"),
url(r'^mockserver/HTTP_InterfaceAddPage$', http_interface.interfaceAddPage, name="MOCK_HTTP_InterfaceAddPage"),
url(r'^mockserver/HTTP_InterfaceAdd$', http_interface.interfaceAdd, name="MOCK_HTTP_InterfaceAdd"),
    #view page
url(r'^mockserver/HTTP_InterfaceListCheck$', http_interface.http_interfaceListCheck, name="MOCK_HTTP_InterfaceListCheck"),
url(r'^mockserver/HTTP_InterfaceDel$', http_interface.interfaceDel, name="MOCK_HTTP_InterfaceDel"),
url(r'^mockserver/HTTP_operationInterface$', http_interface.operationInterface, name="MOCK_HTTP_operationInterface"),
url(r'^mockserver/HTTP_getInterfaceDataForId$', http_interface.getInterfaceDataForId,name="MOCK_getInterfaceDataForId"),
url(r'^mockserver/HTTP_InterfaceSaveEdit$', http_interface.interfaceSaveEdit, name="MOCK_HTTP_InterfaceSaveEdit"),
url(r'^mockserver/RunContrackTask$', http_interface.runContrackTask, name="MOCK_RunContrackTask"),
url(r'^mockserver/getContrackTaskRecentExecInfos$', http_interface.getContrackTaskRecentExecInfos, name="MOCK_getContrackTaskRecentExecInfos"),
url(r'^mockserver/follow$', http_interface.follow, name="MOCK_follow"),
]
|
417546
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.SimulatedConv2d(4,4,4,'../simulation_files/tpu_16_16_pes.cfg', 'dogsandcats_tpu_tile.txt', sparsity_ratio=0.0, groups=4) # Number of input feature maps (input channels), number of output feature maps (output channels), filter dimension size
#print(self.conv1.weight)
self.real_conv1 = nn.Conv2d(4,4,4, groups=4)
self.real_conv1.weight=self.conv1.weight
self.real_conv1.bias = self.conv1.bias
print(self.conv1.bias.shape)
print("Weight size is ")
print(self.real_conv1.weight.shape)
def forward(self, x):
sim_x = self.conv1(x)
real_x = self.real_conv1(x)
return sim_x, real_x
net = Net()
print(net)
input_test = torch.randn(4,32,32).view(-1,4,32,32)
output_sim, output_real = net(input_test)
#print('Output simulated shape is ', output_sim.shape)
#print('Real tensor shape is ', output_real.shape)
print('Test value')
#print(output_sim)
#print(output_real)
#print(torch.eq(output_sim, output_real))
print(torch.all(torch.lt(torch.abs(torch.add(output_sim, -output_real)), 1e-4)))
print(output_sim[0][0][1][0])
print(output_real[0][0][1][0])
|
417571
|
import uuid
import walrus
from spyne.application import Application
from spyne.service import ServiceBase
from spyne.model.primitive import Integer, Unicode
from spyne.model.complex import Iterable, ComplexModel, Array
from spyne.decorator import srpc
from spyne.protocol.soap import Soap11
import time
from lxml import etree
from configobj import ConfigObj
from spyne.server.wsgi import WsgiApplication
from waitress import serve
import os
import logging
import redis
configfile = os.environ['QWC_CONFIG_FILE']
config = ConfigObj(configfile)
DEBUG2 = 8
LEVELS = {'DEBUG2': DEBUG2,
'DEBUG':logging.DEBUG,
'INFO':logging.INFO,
'WARNING':logging.WARNING,
'ERROR':logging.ERROR,
'CRITICAL':logging.CRITICAL,
}
logging.addLevelName(DEBUG2,"DEBUG2")
logging.basicConfig(level=LEVELS[config['qwc']['loglevel'].upper()])
#rdb = walrus.Database(
# host=config['redis']['host'],
# port=config['redis']['port'],
# password=config['redis']['password'],
# db=config['redis']['db'])
#? need to clear the hashes pointed to by waiting work first
#rdb.Hash('qwc:currentWork').clear()
#rdb.List('qwc:waitingWork').clear()
#rdb.set('qwc:sessionTicket','')
class QBWCService(ServiceBase):
@srpc(Unicode,Unicode,_returns=Array(Unicode))
def authenticate(strUserName,strPassword):
"""Authenticate the web connector to access this service.
@param strUserName user name to use for authentication
@param strPassword password to use for authentication
@return the completed array
"""
returnArray = []
if strUserName == config['qwc']['username'] and strPassword == config['qwc']['password']:
if session_manager.inSession():
returnArray.append("none")
returnArray.append("busy")
logging.debug('trying to authenticate during an open session')
elif session_manager.newJobs():
sessionticket = session_manager.setTicket()
returnArray.append(sessionticket)
returnArray.append(config['qwc']['qbwfilename']) # returning the filename indicates there is a request in the queue
else:
returnArray.append("none") # don't return a ticket if there are no requests
returnArray.append("none") #returning "none" indicates there are no requests at the moment
else:
returnArray.append("none") # don't return a ticket if username password does not authenticate
returnArray.append('nvu')
logging.debug('authenticate %s',returnArray)
return returnArray
@srpc(Unicode, _returns=Unicode)
def clientVersion( strVersion ):
""" sends Web connector version to this service
        @param strVersion version of QB web connector
@return what to do in case of Web connector updates itself
"""
logging.debug('clientVersion %s',strVersion)
return ""
@srpc(Unicode, _returns=Unicode)
def closeConnection( ticket ):
""" used by web connector to indicate it is finished with update session
@param ticket session token sent from this service to web connector
@return string displayed to user indicating status of web service
"""
session_manager.closeSession()
logging.debug('closeConnection %s',ticket)
return "OK"
@srpc(Unicode,Unicode,Unicode, _returns=Unicode)
def connectionError( ticket, hresult, message ):
""" used by web connector to report errors connecting to Quickbooks
@param ticket session token sent from this service to web connector
@param hresult The HRESULT (in HEX) from the exception
@param message error message
@return string done indicating web service is finished.
"""
# need to push this error to the client. Should there be a message channel on Redis for this?
logging.debug('connectionError %s %s %s', ticket, hresult, message)
return "done"
@srpc(Unicode, _returns=Unicode)
def getLastError( ticket ):
""" Web connector error message
@param ticket session token sent from this service to web connector
@return string displayed to user indicating status of web service
"""
logging.debug('lasterror %s',ticket)
#return "Error message here!"
return "NoOp"
@srpc(Unicode,Unicode,Unicode,Unicode,Integer,Integer, _returns=Unicode)
def sendRequestXML( ticket, strHCPResponse, strCompanyFileName, qbXMLCountry, qbXMLMajorVers, qbXMLMinorVers ):
#?maybe we could hang here, wait for some xml and send it to qwc, that way shortening the wait
""" send request via web connector to Quickbooks
@param ticket session token sent from this service to web connector
@param strHCPResponse qbXML response from QuickBooks
@param strCompanyFileName The Quickbooks file to get the data from
@param qbXMLCountry the country version of QuickBooks
@param qbXMLMajorVers Major version number of the request processor qbXML
@param qbXMLMinorVers Minor version number of the request processor qbXML
@return string containing the request if there is one or a NoOp
"""
reqXML = session_manager.get_reqXML(ticket)
logging.debug('sendRequestXML')
logging.log(DEBUG2,'sendRequestXML reqXML %s ',reqXML)
logging.log(DEBUG2,'sendRequestXML strHCPResponse %s ',strHCPResponse)
return reqXML
@srpc(Unicode,Unicode,Unicode,Unicode, _returns=Integer)
def receiveResponseXML( ticket, response, hresult, message ):
""" contains data requested from Quickbooks
@param ticket session token sent from this service to web connector
@param response qbXML response from QuickBooks
@param hresult The HRESULT (in HEX) from any exception
@param message error message
        @return integer; 100 means done, anything less means there is more to come
        (where can we best get that information?)
"""
logging.debug('receiveResponseXML %s %s %s',ticket,hresult,message)
logging.log(DEBUG2,'receiveResponseXML %s',response)
percent_done = session_manager.process_response(ticket,response)
return percent_done
class qbwcSessionManager():
def __init__(self):
#requests are read from redis and responses are returned in redis
self.redisdb= walrus.Database(
host=config['redis']['host'],
port=config['redis']['port'],
password=config['redis']['password'],
db=config['redis']['db'])
self.currentWork = self.redisdb.Hash('qwc:currentWork')
self.waitingWork = self.redisdb.List('qwc:waitingWork')
self.sessionKey = 'qwc:sessionTicket'
def setTicket(self):
sessionTicket = str(uuid.uuid1())
self.redisdb.set(self.sessionKey,sessionTicket)
return sessionTicket
def clearTicket(self):
sessionTicket = ""
self.redisdb.set(self.sessionKey,sessionTicket)
return sessionTicket
def getTicket(self):
return self.redisdb.get(self.sessionKey)
def inSession(self):
return self.getTicket()
def closeSession(self):
self.clearTicket()
def is_iterative(self,reqXML):
root = etree.fromstring(str(reqXML))
isIterator = root.xpath('boolean(//@iterator)')
return isIterator
def process_response(self,ticket,response):
#?look for error responses here if you get an error, clear the redis keys and abort
#?you don't know what is happening so better to bail out than try and fix things
# first store it
reqID = self.currentWork['reqID']
responsekey = 'qwc:response:'+reqID
self.responseStore = self.redisdb.List(responsekey)
self.responseStore.append(response)
self.redisdb.publish(responsekey,"data")
logging.debug("storing response %s",responsekey)
#check if it is iterative
root = etree.fromstring(str(response))
isIterator = root.xpath('boolean(//@iteratorID)')
if isIterator:
reqXML = self.currentWork['reqXML']
reqroot = etree.fromstring(str(reqXML))
iteratorRemainingCount = int(root.xpath('string(//@iteratorRemainingCount)'))
iteratorID = root.xpath('string(//@iteratorID)')
logging.info("iteratorRemainingCount %s",iteratorRemainingCount)
if iteratorRemainingCount:
# update the iterativeWork hash reqXML
iteratornode = reqroot.xpath('//*[@iterator]')
iteratornode[0].set('iterator', 'Continue')
requestID = int(reqroot.xpath('//@requestID')[0])
iteratornode[0].set('requestID', str(requestID+1))
iteratornode[0].set('iteratorID', iteratorID)
ntree = etree.ElementTree(reqroot)
nextreqXML = etree.tostring(ntree, xml_declaration=True, encoding='UTF-8')
self.currentWork['reqXML'] = nextreqXML
return 50 # is there any reason to return an accurate number here? something less than 100 is all that is needed.
else:
# clear the currentWork hash
self.currentWork.clear()
# create a finish response
self.redisdb.publish(responsekey,"end")
#self.responseStore.append("TheEnd")
if self.newJobs():
return 50
else:
return 100 #100 percent done
else:
self.redisdb.publish(responsekey,"end")
#self.responseStore.append("TheEnd")
return 100 #100 percent done
def newJobs(self):
if len(self.waitingWork):
reqID = self.waitingWork.pop()
wwh = self.redisdb.Hash(reqID)
reqXML = wwh['reqXML']
self.currentWork['reqXML'] = reqXML
self.currentWork['reqID'] = reqID
wwh.clear()
return True
else:
return False
def get_reqXML(self,ticket):
return self.currentWork['reqXML']
def get_reqID(self,ticket):
return self.currentWork['reqID']
app = Application([QBWCService],
tns='http://developer.intuit.com/',
in_protocol=Soap11(validator='soft'),
out_protocol=Soap11()
)
session_manager = qbwcSessionManager()
application = WsgiApplication(app)
def start_server():
serve(application, host=config['qwc']['host'], port=int(config['qwc']['port']))
if __name__ == '__main__':
start_server()
|
417647
|
r"""
Contains helper functions to extract skeleton data from the NTU RGB+D dataset.
Three functions are provided.
- *read_skeleton*: Parses entire skeleton file and outputs skeleton data in a dictionary
- *read_xyz*: Only keeps 3D coordinates from dictionary and returns numpy version.
- *read_xy_ir*: Only keeps 2D IR coordinates from dictionary and returns numpy version.
"""
import numpy as np
def read_skeleton(file):
r"""Reads a skeleton file provided by the NTU RGB+D dataset and outputs a dictionary with the data.
This code is not original and is courtesy of the awesome ST-GCN repository by yysijie
(https://github.com/yysijie/st-gcn/)
Inputs:
**file** (str): Complete path to the skeleton file.
Outputs:
**skeleton_sequence (dict)**: The treated skeleton file mapped in a dictionary.
"""
with open(file, 'r') as f:
skeleton_sequence = {'numFrame': int(f.readline()), 'frameInfo': []}
for t in range(skeleton_sequence['numFrame']):
frame_info = {'numBody': int(f.readline()), 'bodyInfo': []}
for m in range(frame_info['numBody']):
body_info_key = [
'bodyID', 'clipedEdges', 'handLeftConfidence',
'handLeftState', 'handRightConfidence', 'handRightState',
'isResticted', 'leanX', 'leanY', 'trackingState'
]
body_info = {
k: float(v)
for k, v in zip(body_info_key, f.readline().split())
}
body_info['numJoint'] = int(f.readline())
body_info['jointInfo'] = []
for v in range(body_info['numJoint']):
                    joint_info_key = [
                        'x', 'y', 'z', 'depthX', 'depthY', 'colorX', 'colorY',
'orientationW', 'orientationX', 'orientationY',
'orientationZ', 'trackingState'
]
joint_info = {
k: float(v)
for k, v in zip(joint_info_key, f.readline().split())
}
body_info['jointInfo'].append(joint_info)
frame_info['bodyInfo'].append(body_info)
skeleton_sequence['frameInfo'].append(frame_info)
return skeleton_sequence
def read_xyz(file, max_body=2, num_joint=25):
r"""Creates a numpy array containing the 3D skeleton data for a given skeleton file of the NTU RGB+D dataset.
This code is slightly modified and is courtesy of the awesome ST-GCN repository by yysijie
(https://github.com/yysijie/st-gcn/)
Inputs:
- **file** (str): Complete path to the skeleton file.
- **max_body** (int): Maximum number of subjects (2 for NTU RGB+D)
        - **num_joint** (int): Maximum number of joints (25 for Kinect v2)
Outputs:
**data (np array)**: Numpy array containing skeleton
of shape `(3 {x, y, z}, max_frame, num_joint, 2 {n_subjects})`
"""
seq_info = read_skeleton(file)
data = np.zeros((3, seq_info['numFrame'], num_joint, max_body), dtype=np.float32)
for n, f in enumerate(seq_info['frameInfo']):
for m, b in enumerate(f['bodyInfo']):
for j, v in enumerate(b['jointInfo']):
if m < max_body and j < num_joint:
data[:, n, j, m] = [v['x'], v['y'], v['z']]
else:
pass
data = np.around(data, decimals=3)
return data
def read_xy_ir(file, max_body=2, num_joint=25):
r"""Creates a numpy array containing the 2D skeleton data projected on the IR frames
for a given skeleton file of the NTU RGB+D dataset.
This code is slightly modified and is courtesy of the awesome ST-GCN repository by yysijie
(https://github.com/yysijie/st-gcn/)
Inputs:
- **file** (str): Complete path to the skeleton file.
- **max_body** (int): Maximum number of subjects (2 for NTU RGB+D)
        - **num_joint** (int): Maximum number of joints (25 for Kinect v2)
Outputs:
**data (np array)**: Numpy array containing skeleton
of shape `(2 {x, y}, max_frame, num_joint, 2 {n_subjects})`
"""
seq_info = read_skeleton(file)
data = np.zeros((2, seq_info['numFrame'], num_joint, max_body), dtype=np.float32)
for n, f in enumerate(seq_info['frameInfo']):
for m, b in enumerate(f['bodyInfo']):
for j, v in enumerate(b['jointInfo']):
if m < max_body and j < num_joint:
data[:, n, j, m] = [v['depthX'], v['depthY']]
else:
pass
return data
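# Usage sketch (illustrative): the file name below is a made-up example following the
# NTU RGB+D naming scheme.
#
#     xyz = read_xyz("S001C001P001R001A001.skeleton")      # shape (3, num_frames, 25, 2)
#     xy_ir = read_xy_ir("S001C001P001R001A001.skeleton")  # shape (2, num_frames, 25, 2)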
|
417716
|
from utils import *
data_dir = '/Users/kylemathewson/Desktop/data/'
exp = 'P3'
subs = [ '001']
sessions = ['ActiveWet']
nsesh = len(sessions)
event_id = {'Target': 1, 'Standard': 2}
sub = subs[0]
session = sessions[0]
raw = LoadBVData(sub,session,data_dir,exp)
epochs = PreProcess(raw,event_id,
emcp_epochs=False, rereference=True,
plot_erp=True, rej_thresh_uV=250,
epoch_time=(-.2,1), baseline=(-.2,0) )
epochs_new = PreProcess(raw,event_id,
emcp_epochs=True, rereference=True,
plot_erp=True, rej_thresh_uV=250,
epoch_time=(-.2,1), baseline=(-.2,0) )
#plot results
epochs['Target'].plot()
epochs_new['Target'].plot()
|
417724
|
from .Config import Config
class SearchHelper:
@staticmethod
def getChannelIdExamples():
return Config.CHANNEL_ID_EXAMPLES
@staticmethod
def getVideoClipIdExamples():
return Config.VIDEO_CLIP_ID_EXAMPLES
@staticmethod
def getUrlExamples():
return Config.URL_EXAMPLES
|
417734
|
import torch
import torch.nn.functional as F
from tqdm import tqdm
from dice_loss import dice_coeff
def eval_net(net, loader, device, n_val):
"""Evaluation without the densecrf with the dice coefficient"""
net.eval()
tot = 0
with tqdm(total=n_val, desc='Validation round', unit='img', leave=False) as pbar:
for batch in loader:
imgs = batch['image']
true_masks = batch['mask']
imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type)
mask_pred = net(imgs)
for true_mask, pred in zip(true_masks, mask_pred):
pred = (pred > 0.5).float()
if net.n_classes > 1:
tot += F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0)).item()
else:
tot += dice_coeff(pred, true_mask.squeeze(dim=1)).item()
pbar.update(imgs.shape[0])
return tot / n_val
|
417746
|
import logging
from typing import BinaryIO, List
import numpy as np
from .known import vlr_factory, IKnownVLR
from .vlr import VLR
from ..utils import encode_to_len
logger = logging.getLogger(__name__)
RESERVED_LEN = 2
USER_ID_LEN = 16
DESCRIPTION_LEN = 32
class VLRList(list):
"""Class responsible for managing the vlrs"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def index(self, value, start: int = 0, stop: int = None) -> int:
if stop is None:
stop = len(self)
if isinstance(value, str):
for i, vlr in enumerate(self[start:stop]):
if vlr.__class__.__name__ == value:
return i + start
else:
return super().index(value, start, stop)
def get_by_id(self, user_id="", record_ids=(None,)):
"""Function to get vlrs by user_id and/or record_ids.
Always returns a list even if only one vlr matches the user_id and record_id
>>> import pylas
>>> from pylas.vlrs.known import ExtraBytesVlr, WktCoordinateSystemVlr
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())
[]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
user_id: str, optional
the user id
record_ids: iterable of int, optional
            The record ids of the vlr(s) you wish to get
Returns
-------
:py:class:`list`
a list of vlrs matching the user_id and records_ids
"""
if user_id != "" and record_ids != (None,):
return [
vlr
for vlr in self
if vlr.user_id == user_id and vlr.record_id in record_ids
]
else:
return [
vlr
for vlr in self
if vlr.user_id == user_id or vlr.record_id in record_ids
]
def get(self, vlr_type: str) -> List[IKnownVLR]:
"""Returns the list of vlrs of the requested type
Always returns a list even if there is only one VLR of type vlr_type.
>>> import pylas
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get("WktCoordinateSystemVlr")
[]
>>> las.vlrs.get("WktCoordinateSystemVlr")[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get('ExtraBytesVlr')
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get('ExtraBytesVlr')[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
:py:class:`list`
            a list of vlrs of the requested type
"""
return [v for v in self if v.__class__.__name__ == vlr_type]
def extract(self, vlr_type: str) -> List[IKnownVLR]:
"""Returns the list of vlrs of the requested type
The difference with get is that the returned vlrs will be removed from the list
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
list
            a list of vlrs of the requested type
"""
kept_vlrs, extracted_vlrs = [], []
for vlr in self:
if vlr.__class__.__name__ == vlr_type:
extracted_vlrs.append(vlr)
else:
kept_vlrs.append(vlr)
self.clear()
self.extend(kept_vlrs)
return extracted_vlrs
def __repr__(self):
return "[{}]".format(", ".join(repr(vlr) for vlr in self))
@classmethod
def read_from(
cls, data_stream: BinaryIO, num_to_read: int, extended: bool = False
) -> "VLRList":
"""Reads vlrs and parse them if possible from the stream
Parameters
----------
data_stream : io.BytesIO
stream to read from
num_to_read : int
number of vlrs to be read
extended : bool
whether the vlrs are regular vlr or extended vlr
Returns
-------
pylas.vlrs.vlrlist.VLRList
List of vlrs
"""
vlrlist = cls()
for _ in range(num_to_read):
data_stream.read(RESERVED_LEN)
user_id = data_stream.read(USER_ID_LEN).decode().rstrip("\0")
record_id = int.from_bytes(
data_stream.read(2), byteorder="little", signed=False
)
if extended:
record_data_len = int.from_bytes(
data_stream.read(8), byteorder="little", signed=False
)
else:
record_data_len = int.from_bytes(
data_stream.read(2), byteorder="little", signed=False
)
description = data_stream.read(DESCRIPTION_LEN).decode().rstrip("\0")
record_data_bytes = data_stream.read(record_data_len)
vlr = VLR(user_id, record_id, description, record_data_bytes)
vlrlist.append(vlr_factory(vlr))
return vlrlist
def write_to(self, stream: BinaryIO, as_extended: bool = False) -> int:
bytes_written = 0
for vlr in self:
record_data = vlr.record_data_bytes()
stream.write(b"\0\0")
stream.write(encode_to_len(vlr.user_id, USER_ID_LEN))
stream.write(vlr.record_id.to_bytes(2, byteorder="little", signed=False))
if as_extended:
if len(record_data) > np.iinfo("uint16").max:
raise ValueError("vlr record_data is too long")
stream.write(
len(record_data).to_bytes(8, byteorder="little", signed=False)
)
else:
stream.write(
len(record_data).to_bytes(2, byteorder="little", signed=False)
)
stream.write(encode_to_len(vlr.description, DESCRIPTION_LEN))
stream.write(record_data)
bytes_written += 54 if not as_extended else 60
bytes_written += len(record_data)
return bytes_written
|
417751
|
from .. import *
def get_dataset_from_code(code, batch_size):
""" interface to get function object
Args:
code(str): specific data type
Returns:
(torch.utils.data.DataLoader): train loader
(torch.utils.data.DataLoader): test loader
"""
dataset_root = "./assets/data"
if code == 'mnist':
train_loader, test_loader = get_mnist_data(batch_size=batch_size,
data_folder_path=os.path.join(dataset_root, 'mnist-data'))
elif code == 'cifar10':
train_loader, test_loader = get_cifar10_data(batch_size=batch_size,
data_folder_path=os.path.join(dataset_root, 'cifar10-data'))
elif code == 'fmnist':
train_loader, test_loader = get_fasionmnist_data(batch_size=batch_size,
data_folder_path=os.path.join(dataset_root, 'fasionmnist-data'))
else:
        raise ValueError("Unknown data type : [{}] Impulse Exists".format(code))
return train_loader, test_loader
def get_fasionmnist_data(data_folder_path, batch_size=64):
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
#transforms.Normalize((0.2860,), (0.3530,)),
])
# Download and load the training data
trainset = datasets.FashionMNIST(data_folder_path, download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False)
# Download and load the test data
testset = datasets.FashionMNIST(data_folder_path, download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
return trainloader, testloader
def get_mnist_data(data_folder_path, batch_size=64):
""" mnist data
Args:
        batch_size(int): batch size
Returns:
(torch.utils.data.DataLoader): train loader
(torch.utils.data.DataLoader): test loader
"""
train_data = datasets.MNIST(data_folder_path, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize((0.1307,), (0.3081,))
])
)
test_data = datasets.MNIST(data_folder_path, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize((0.1307,), (0.3081,))
])
)
kwargs = {'num_workers': 4, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size, shuffle=False, **kwargs)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=batch_size, shuffle=False, **kwargs)
return train_loader, test_loader
def get_cifar10_data(data_folder_path, batch_size=64):
""" cifar10 data
Args:
        batch_size(int): batch size
Returns:
(torch.utils.data.DataLoader): train loader
(torch.utils.data.DataLoader): test loader
"""
transform_train = transforms.Compose([
#transforms.RandomCrop(32, padding=4),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615)),
])
train_data = datasets.CIFAR10(data_folder_path, train=True,
download=True, transform=transform_train)
test_data = datasets.CIFAR10(data_folder_path, train=False,
download=True, transform=transform_test)
kwargs = {'num_workers': 4, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size, shuffle=False, **kwargs)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=batch_size, shuffle=False, **kwargs)
return train_loader, test_loader
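# Usage sketch (illustrative): downloads the chosen dataset into ./assets/data on first
# use; 'mnist' is one of the codes handled by get_dataset_from_code above.
#
#     train_loader, test_loader = get_dataset_from_code('mnist', batch_size=64)
#     images, labels = next(iter(train_loader))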
|
417760
|
import traceback
import cv2
import os
from pluto.common.utils import SrtUtil, TimeUtil
class Snapshot(object):
def __init__(self):
self.video_file = ""
self.video_capture = None
self.srt_file = ""
self.srt_subs = []
self.width = 0
self.height = 0
self.duration = 0
self.fps = 0
def __exit__(self, exc_type, exc_val, exc_tb):
self.__initialise()
def __initialise(self):
if self.video_capture:
self.video_capture.release()
self.video_capture = None
self.srt_file = ""
self.srt_subs = []
self.video_file = ""
def load_video(self, video_file):
self.__initialise()
self.video_capture = cv2.VideoCapture(filename=video_file)
self.video_file = video_file
srt_file = self.detect_srt(video_file)
if srt_file:
self.load_srt(srt_file)
if self.video_capture.isOpened():
self.width = self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.fps = self.video_capture.get(cv2.CAP_PROP_FPS)
self.duration = self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT) * 1000.0 / self.fps
return self.video_capture.isOpened()
@staticmethod
def detect_srt(video_file):
srt_files = [video_file + '.srt', os.path.splitext(video_file)[0] + '.srt']
for str_file in srt_files:
if os.path.isfile(str_file):
return str_file
def load_srt(self, srt_file):
self.srt_file = srt_file
self.srt_subs = SrtUtil.parse_srt(srt_file)
def estimate(self, start=0, end=None):
count = 0
if len(self.srt_subs) == 0:
return count
if end is None:
end = self.__default_end()
for sub in self.srt_subs:
if sub.start >= start and sub.end <= end:
count += 1
return count
def __default_end(self):
return self.srt_subs[len(self.srt_subs) - 1].end
def snapshot(self, position, output_file):
"""
        Take a snapshot at a certain position of the video
:param position: integer
:param output_file: image name
:return: True for success
:raise Exception for cv snapshot failure.
"""
if position < 0 or position > self.duration:
return False
self.video_capture.set(cv2.CAP_PROP_POS_MSEC, position)
success, image = self.video_capture.read()
if success:
try:
dir_path = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(dir_path):
os.mkdir(dir_path)
result = cv2.imwrite(output_file, image)
return result
except:
traceback.print_exc()
raise Exception('Snapshot position %s failed %s' % (position, output_file))
else:
raise Exception('Snapshot position %s failed %s' % (position, output_file))
def snapshot_range(self, output_folder, start=0, end=None, callback_progress=None, callback_complete=None):
"""
Take snapshots for a given range
:param output_folder: str output folder
:param start:
:param end:
:param callback_progress: call_back_progress(total, current, position, output_file, output_result)
:param callback_complete: callback_complete(total, success)
:return:
"""
total = self.estimate(start, end)
if total == 0:
if callback_complete:
callback_complete(0, 0)
return
if end is None:
end = self.__default_end()
current = 0
success = 0
for sub in self.srt_subs:
if sub.start >= start and sub.end <= end:
current += 1
position = int((sub.start + sub.end) / 2)
print("current %s start %s end %s mid %s" % (current, sub.start, sub.end, position))
output_file = os.path.join(output_folder,
"%s_auto_%s.jpg" % (os.path.basename(self.video_file),
TimeUtil.format_ms(position).replace(":", "_")))
output_result = self.snapshot(position, output_file)
success += 1 if output_result else 0
if callback_progress:
try:
callback_progress(total, current, position, output_file, output_result)
except:
traceback.print_exc()
if callback_complete:
callback_complete(total, success)
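# Usage sketch (illustrative): the file paths below are made up; an .srt subtitle file
# is expected next to the video (see detect_srt).
#
#     snap = Snapshot()
#     if snap.load_video("/path/to/lecture.mp4"):
#         print("subtitled segments:", snap.estimate())
#         snap.snapshot_range("/path/to/output",
#                             callback_complete=lambda total, ok: print(total, ok))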
|
417772
|
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torchvision.datasets.folder import default_loader
from tqdm import tqdm
logger = logging.getLogger(__file__)
def _remove_all_not_found_image(df: pd.DataFrame, path_to_images: Path) -> pd.DataFrame:
clean_rows = []
for _, row in df.iterrows():
image_id = row["image_id"]
try:
file_name = path_to_images / f"{image_id}.jpg"
_ = default_loader(file_name)
except (FileNotFoundError, OSError, UnboundLocalError) as ex:
logger.info(f"broken image {file_name} : {ex}")
else:
clean_rows.append(row)
df_clean = pd.DataFrame(clean_rows)
return df_clean
def remove_all_not_found_image(df: pd.DataFrame, path_to_images: Path, num_workers: int) -> pd.DataFrame:
futures = []
results = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
for df_batch in np.array_split(df, num_workers):
future = executor.submit(_remove_all_not_found_image, df=df_batch, path_to_images=path_to_images)
futures.append(future)
for future in tqdm(as_completed(futures), total=len(futures)):
results.append(future.result())
new_df = pd.concat(results)
return new_df
def read_ava_txt(path_to_ava: Path) -> pd.DataFrame:
# NIMA origin file format and indexes
df = pd.read_csv(path_to_ava, header=None, sep=" ")
del df[0]
score_first_column = 2
score_last_column = 12
tag_first_column = 1
tag_last_column = 4
score_names = [f"score{i}" for i in range(score_first_column, score_last_column)]
tag_names = [f"tag{i}" for i in range(tag_first_column, tag_last_column)]
df.columns = ["image_id"] + score_names + tag_names
# leave only score columns
df = df[["image_id"] + score_names]
return df
def clean_and_split(
path_to_ava_txt: Path, path_to_save_csv: Path, path_to_images: Path, train_size: float, num_workers: int
):
logger.info("read ava txt")
df = read_ava_txt(path_to_ava_txt)
logger.info("removing broken images")
df = remove_all_not_found_image(df, path_to_images, num_workers=num_workers)
logger.info("train val test split")
df_train, df_val_test = train_test_split(df, train_size=train_size)
df_val, df_test = train_test_split(df_val_test, train_size=0.5)
train_path = path_to_save_csv / "train.csv"
val_path = path_to_save_csv / "val.csv"
test_path = path_to_save_csv / "test.csv"
logger.info(f"saving to {train_path} {val_path} and {test_path}")
df_train.to_csv(train_path, index=False)
df_val.to_csv(val_path, index=False)
df_test.to_csv(test_path, index=False)
|
417807
|
from sqlalchemy import (
Column,
ForeignKey,
Integer,
String,
Float,
Index,
Table,
Enum
)
from ..app import db
class SourceRole(db.Model):
"""
A role linked to a document source.
"""
__tablename__ = "source_roles"
id = Column(Integer, primary_key=True)
name = Column(String(100), index=True, nullable=False, unique=True)
indication = Column(Enum('positive', 'negative', 'neutral', name='indication_enum'), server_default='neutral', nullable=False)
def __repr__(self):
return "<SourceRole name='%s'>" % (self.name.encode('utf-8'),)
@classmethod
def create_defaults(self):
text = """
Learner, student|2
Baby/infant|2
Toddler|2
Child |2
Teenager|2
Youth - be sure it is not a child!|2
Survivor|2
Victim|2
Missing child|2
Caregiver: domestic or nanny|2
Child Soldier|2
Child Slave/ Trafficked Child|2
Refugee|2
Sick child|2
Child with disability|2
Child in need|2
Head of household|2
Orphan|2
Perpetrator|2
Criminal|2
Child Offender|2
Gang member|2
Suspect|2
Ward of court|2
Award winners|2
Entertainer|2
Hero|2
Activist/protestor|2
Fan/supporter|2
Friend|2
Member of a group of children|2
Leader|2
Spokesperson|2
Sportsperson|2
Artist|2
Sex Worker|2
Sex Object|2
Beauty contestant/model|2
Labourer|2
Street Child|2
Child as member of family unit, e.g., son daughter, nephew etc.|2
Dependents|2
Other|2
Unknown|2
Child Witness|2
Teenage Mother|2
"""
roles = []
for x in text.strip().split("\n"):
r = SourceRole()
parts = x.strip().split("|", 1)
r.name = parts[0]
r.analysis_nature_id = int(parts[1])
roles.append(r)
return roles
class SourceAge(db.Model):
"""
An age linked to a document source.
"""
__tablename__ = "source_ages"
id = Column(Integer, primary_key=True)
name = Column(String(100), index=True, nullable=False, unique=True)
def __repr__(self):
return "<SourceAge name='%s'>" % (self.name.encode('utf-8'),)
@classmethod
def all(cls):
return cls.query.order_by(cls.id).all()
@classmethod
def create_defaults(self):
text = """
0 to 1
1 to 2
3 to 9
10 to 12
13 to 18
Adult
Unknown
Many
Not identified
"""
ages = []
for x in text.strip().split("\n"):
a = SourceAge()
a.name = x
ages.append(a)
return ages
|
417815
|
import logging
import os
import tkinter as tk
import tkinter.ttk as ttk
import typing
from tkinter.filedialog import askopenfilename
from fishy.engine.common.event_handler import IEngineHandler
from fishy.engine.fullautofisher.mode.imode import FullAutoMode
from fishy.helper import helper
from fishy import web
from fishy.helper.config import config
from fishy.helper.popup import PopUp
if typing.TYPE_CHECKING:
from fishy.gui import GUI
def start_fullfisher_config(gui: 'GUI'):
top = PopUp(helper.empty_function, gui._root, background=gui._root["background"])
controls_frame = ttk.Frame(top)
top.title("Config")
def file_name():
file = config.get("full_auto_rec_file", None)
if file is None:
return "Not Selected"
return os.path.basename(file)
def select_file():
file = askopenfilename(filetypes=[('Python Files', '*.fishy')])
if not file:
logging.error("file not selected")
else:
config.set("full_auto_rec_file", file)
logging.info(f"loaded {file}")
file_name_label.set(file_name())
def start_calibrate():
top.quit_top()
config.set("calibrate", True)
gui.engine.toggle_fullfisher()
def mode_command():
config.set("full_auto_mode", mode_var.get())
        edit_cb['state'] = "normal" if config.get("full_auto_mode", 0) == FullAutoMode.Recorder.value else "disabled"
file_name_label = tk.StringVar(value=file_name())
mode_var = tk.IntVar(value=config.get("full_auto_mode", 0))
edit_var = tk.IntVar(value=config.get("edit_recorder_mode", 0))
tabout_var = tk.IntVar(value=config.get("tabout_stop", 1))
row = 0
ttk.Label(controls_frame, text="Calibration: ").grid(row=row, column=0, pady=(5, 0))
ttk.Button(controls_frame, text="RUN", command=start_calibrate).grid(row=row, column=1)
row += 1
ttk.Label(controls_frame, text="Mode: ").grid(row=row, column=0, rowspan=2)
ttk.Radiobutton(controls_frame, text="Player", variable=mode_var, value=FullAutoMode.Player.value, command=mode_command).grid(row=row, column=1, sticky="w", pady=(5, 0))
row += 1
ttk.Radiobutton(controls_frame, text="Recorder", variable=mode_var, value=FullAutoMode.Recorder.value, command=mode_command).grid(row=2, column=1, sticky="w")
row += 1
ttk.Label(controls_frame, text="Edit Mode: ").grid(row=row, column=0)
edit_state = tk.NORMAL if config.get("full_auto_mode", 0) == FullAutoMode.Recorder.value else tk.DISABLED
edit_cb = ttk.Checkbutton(controls_frame, variable=edit_var, state=edit_state, command=lambda: config.set("edit_recorder_mode", edit_var.get()))
edit_cb.grid(row=row, column=1, pady=(5, 0))
row += 1
ttk.Label(controls_frame, text="Tabout Stop: ").grid(row=row, column=0)
ttk.Checkbutton(controls_frame, variable=tabout_var, command=lambda: config.set("tabout_stop", tabout_var.get())).grid(row=row, column=1, pady=(5, 0))
row += 1
ttk.Label(controls_frame, text="Fishy file: ").grid(row=row, column=0, rowspan=2)
ttk.Button(controls_frame, text="Select", command=select_file).grid(row=row, column=1, pady=(5, 0))
row += 1
ttk.Label(controls_frame, textvariable=file_name_label).grid(row=row, column=1, columnspan=2)
row += 1
ttk.Label(controls_frame, text="Use semi-fisher config for rest").grid(row=row, column=0, columnspan=2, pady=(20, 0))
controls_frame.pack(padx=(5, 5), pady=(5, 10))
top.start()
def start_semifisher_config(gui: 'GUI'):
def save():
gui.config.set("action_key", action_key_entry.get(), False)
gui.config.set("collect_key", collect_key_entry.get(), False)
gui.config.set("jitter", jitter.instate(['selected']), False)
gui.config.set("sound_notification", sound.instate(['selected']), False)
gui.config.save_config()
def toggle_sub():
if web.is_subbed()[0]:
if web.unsub():
gui._notify.set(0)
else:
if web.sub():
gui._notify.set(1)
def del_entry_key(event):
event.widget.delete(0, "end")
event.widget.insert(0, str(event.char))
top = PopUp(save, gui._root, background=gui._root["background"])
controls_frame = ttk.Frame(top)
top.title("Config")
ttk.Label(controls_frame, text="Notification:").grid(row=0, column=0)
    gui._notify = tk.IntVar(value=0)
gui._notify_check = ttk.Checkbutton(controls_frame, command=toggle_sub, variable=gui._notify)
gui._notify_check.grid(row=0, column=1)
gui._notify_check['state'] = tk.DISABLED
is_subbed = web.is_subbed()
if is_subbed[1]:
gui._notify_check['state'] = tk.NORMAL
gui._notify.set(is_subbed[0])
ttk.Label(controls_frame, text="Action Key:").grid(row=1, column=0)
action_key_entry = ttk.Entry(controls_frame, justify=tk.CENTER)
action_key_entry.grid(row=1, column=1)
action_key_entry.insert(0, config.get("action_key", "e"))
action_key_entry.bind("<KeyRelease>", del_entry_key)
ttk.Label(controls_frame, text="Looting Key:").grid(row=3, column=0, pady=(5, 5))
collect_key_entry = ttk.Entry(controls_frame, justify=tk.CENTER)
collect_key_entry.grid(row=3, column=1, pady=(5, 5))
collect_key_entry.insert(0, config.get("collect_key", "r"))
collect_key_entry.bind("<KeyRelease>", del_entry_key)
ttk.Label(controls_frame, text="Sound Notification: ").grid(row=4, column=0, pady=(5, 5))
    sound = ttk.Checkbutton(controls_frame, variable=tk.BooleanVar(value=config.get("sound_notification")))
sound.grid(row=4, column=1)
ttk.Label(controls_frame, text="Human-Like Delay: ").grid(row=5, column=0, pady=(5, 5))
    jitter = ttk.Checkbutton(controls_frame, variable=tk.BooleanVar(value=config.get("jitter")))
jitter.grid(row=5, column=1)
controls_frame.pack(padx=(5, 5), pady=(5, 5))
top.start()
if __name__ == '__main__':
from fishy.gui import GUI
gui = GUI(lambda: IEngineHandler())
gui.call_in_thread(lambda: start_semifisher_config(gui))
gui.create()
|
417861
|
import os
import sys
import shlex
import logging
import datetime
import subprocess
def git_hash():
cmd = 'git log -n 1 --pretty="%h"'
ret = subprocess.check_output(shlex.split(cmd)).strip()
if isinstance(ret, bytes):
ret = ret.decode()
return ret
def git_diff_config(name):
cmd = f'git diff --unified=0 {name}'
ret = subprocess.check_output(shlex.split(cmd)).strip()
if isinstance(ret, bytes):
ret = ret.decode()
return ret
def setup_logger(name, save_dir, template=None):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
date = str(datetime.datetime.now().strftime('%m%d%H'))
fh = logging.FileHandler(os.path.join(save_dir, f'log-{date}_{template}.txt'))
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
os.system(f"git diff HEAD > {save_dir}/gitdiff.patch")
return logger
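# Usage sketch (illustrative): names below are made up; with save_dir=None only the
# stdout handler is attached, otherwise a dated log file and a gitdiff.patch are
# written into save_dir.
#
#     log = setup_logger('experiment', save_dir=None)
#     log.info('running revision %s', git_hash())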
|
417862
|
import ast
import glob
import os
import sys
from libpy.build import LibpyExtension
from setuptools import find_packages, setup
if ast.literal_eval(os.environ.get("LIBPY_SIMDJSON_DEBUG_BUILD", "0")):
optlevel = 0
debug_symbols = True
max_errors = 5
else:
optlevel = 3
debug_symbols = False
max_errors = None
def extension(*args, **kwargs):
extra_compile_args = ["-DLIBPY_AUTOCLASS_UNSAFE_API"]
if sys.platform == "darwin":
extra_compile_args.append("-mmacosx-version-min=10.15")
return LibpyExtension(
*args,
optlevel=optlevel,
debug_symbols=debug_symbols,
werror=True,
max_errors=max_errors,
include_dirs=(
[".", "submodules/range-v3/include/"] + kwargs.pop("include_dirs", [])
),
extra_compile_args=extra_compile_args,
depends=glob.glob("**/*.h", recursive=True),
**kwargs
)
install_requires = [
"setuptools",
"libpy",
]
setup(
name="libpy_simdjson",
version="0.4.0",
    description="Python bindings for simdjson, using libpy",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/gerrymanoim/libpy_simdjson",
author="<NAME>, <NAME>",
author_email=(
"<NAME> <<EMAIL>>, <NAME> <<EMAIL>>"
),
packages=find_packages(),
license="Apache 2.0",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Topic :: Software Development",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: C++",
"Operating System :: POSIX",
"Intended Audience :: Developers",
],
# we need the headers to be available to the C compiler as regular files;
# we cannot be imported from a ziparchive.
zip_safe=False,
install_requires=install_requires,
extras_require={
"test": ["pytest"],
"benchmark": [
"pytest-benchmark",
"orjson",
"python-rapidjson",
"pysimdjson",
"ujson",
],
},
ext_modules=[
extension(
"libpy_simdjson.parser",
["libpy_simdjson/parser.cc", "libpy_simdjson/simdjson.cpp"],
),
],
)
|
417901
|
from triggerflow.service.storage import TriggerStorage
def add_event_source(trigger_storage: TriggerStorage, workspace: str, event_source: dict, overwrite: bool):
# Check eventsource schema
if {'name', 'class', 'parameters'} != set(event_source):
return {"error": "Invalid eventsource object"}
exists = trigger_storage.key_exists(workspace=workspace, document_id='event_sources', key=event_source['name'])
if not exists or (exists and overwrite):
trigger_storage.set_key(workspace=workspace, document_id='event_sources',
key=event_source['name'], value=event_source.copy())
res = {"message": "Created/updated {}".format(event_source['name'])}, 201
else:
res = {"error": "Event source {} already exists".format(event_source['name'])}, 409
return res
def get_event_source(trigger_storage: TriggerStorage, workspace: str, event_source_name: str):
event_source = trigger_storage.get_key(workspace=workspace, document_id='event_sources', key=event_source_name)
if event_source is not None:
return {event_source_name: event_source}, 200
else:
return {"error": "Event source {} not found".format(event_source_name)}, 404
def list_event_sources(trigger_storage: TriggerStorage, workspace: str):
event_sources = trigger_storage.keys(workspace=workspace, document_id='event_sources')
return event_sources, 200
def delete_event_source(trigger_storage: TriggerStorage, workspace: str, event_source_name: str):
if trigger_storage.key_exists(workspace=workspace, document_id='event_sources', key=event_source_name):
trigger_storage.delete_key(workspace=workspace, document_id='event_sources', key=event_source_name)
return {"message": "Event source {} deleted".format(event_source_name)}, 200
else:
return {"error": "Event source {} not found".format(event_source_name)}, 404
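# Usage sketch (illustrative): the event source dict below is a made-up example and
# `storage` stands for any concrete TriggerStorage implementation.
#
#     source = {'name': 'my_source', 'class': 'KafkaEventSource', 'parameters': {}}
#     add_event_source(storage, workspace='default', event_source=source, overwrite=True)
#     get_event_source(storage, workspace='default', event_source_name='my_source')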
|
417918
|
import requests
import json
# Extracts a region from each frame of a given GIF file
# https://pixlab.io/#/cmd?id=cropgif for more info.
req = requests.get('https://api.pixlab.io/cropgif',params={
'img': 'http://cloud.addictivetips.com/wp-content/uploads/2009/testing.gif',
'key':'My_PixLab_Key',
"x":150,
"y":70,
"width":256
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("GIF location: "+ reply['link'])
|
417919
|
from typing import Any, Dict, Sequence, Tuple, Union
import higher
import hydra
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from torch.optim import Optimizer
import torch.nn.functional as F
class BaseModel(pl.LightningModule):
def __init__(self, cfg: DictConfig, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.cfg = cfg
self.save_hyperparameters(cfg)
def forward(self, **kwargs) -> Dict[str, torch.Tensor]:
"""
Method for the forward pass.
'training_step', 'validation_step' and 'test_step' should call
this method in order to compute the output predictions and the loss.
Returns:
            output_dict: forward output containing the predictions (output logits etc...) and the loss if any.
"""
raise NotImplementedError
def step(self, train: bool, batch: Any):
raise NotImplementedError
def training_step(self, batch: Any, batch_idx: int):
outer_loss, inner_loss, outer_acc, inner_acc = self.step(True, batch)
self.log_dict(
{"metatrain/inner_loss": inner_loss.item(),
"metatrain/inner_accuracy": inner_acc.compute()},
on_epoch=False,
on_step=True,
prog_bar=False
)
self.log_dict(
{"metatrain/outer_loss": outer_loss.item(),
"metatrain/outer_accuracy": outer_acc.compute()},
on_epoch=False,
on_step=True,
prog_bar=True
)
def validation_step(self, batch: Any, batch_idx: int):
torch.set_grad_enabled(True)
self.cnn.train()
outer_loss, inner_loss, outer_acc, inner_acc = self.step(False, batch)
self.log_dict(
{"metaval/inner_loss": inner_loss.item(),
"metaval/inner_accuracy": inner_acc.compute()},
prog_bar=False
)
self.log_dict(
{"metaval/outer_loss": outer_loss.item(),
"metaval/outer_accuracy": outer_acc.compute()},
prog_bar=True
)
def test_step(self, batch: Any, batch_idx: int):
torch.set_grad_enabled(True)
self.cnn.train()
outer_loss, inner_loss, outer_acc, inner_acc = self.step(False, batch)
self.log_dict(
{"metatest/outer_loss": outer_loss.item(),
"metatest/inner_loss": inner_loss.item(),
"metatest/inner_accuracy": inner_acc.compute(),
"metatest/outer_accuracy": outer_acc.compute()},
)
class MAMLModel(BaseModel):
def __init__(self, cfg: DictConfig, *args, **kwargs) -> None:
super().__init__(cfg=cfg, *args, **kwargs)
self.cnn = hydra.utils.instantiate(cfg.model.torch_module,
num_classes=cfg.data.datamodule.nway)
self.cnn = self.cnn.to(device=self.device)
self.inner_optimizer = hydra.utils.instantiate(
cfg.optim.inner_optimizer, params=self.cnn.parameters())
self.cfg = cfg
self.save_hyperparameters(cfg)
metric = pl.metrics.Accuracy()
self.train_inner_accuracy = metric.clone()
self.train_outer_accuracy = metric.clone()
self.val_inner_accuracy = metric.clone()
self.val_outer_accuracy = metric.clone()
self.test_inner_accuracy = metric.clone()
self.test_outer_accuracy = metric.clone()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.cnn(x)
def step(self, train: bool, batch: Any):
self.cnn.zero_grad()
outer_optimizer = self.optimizers()
train_inputs, train_targets = batch['support']
test_inputs, test_targets = batch['query']
train_inputs = train_inputs.to(device=self.device)
train_targets = train_targets.to(device=self.device)
test_inputs = test_inputs.to(device=self.device)
test_targets = test_targets.to(device=self.device)
metric = pl.metrics.Accuracy()
outer_loss = torch.tensor(0., device=self.device)
inner_loss = torch.tensor(0., device='cpu')
outer_accuracy = metric.clone()
inner_accuracy = metric.clone()
for task_idx, (train_input, train_target, test_input,
test_target) in enumerate(
zip(train_inputs, train_targets,
test_inputs, test_targets)):
            track_higher_grads = train
with higher.innerloop_ctx(self.cnn, self.inner_optimizer,
copy_initial_weights=False,
track_higher_grads=track_higher_grads) as (
fmodel, diffopt):
for k in range(self.cfg.data.datamodule.num_inner_steps):
train_logit = fmodel(train_input)
loss = F.cross_entropy(train_logit, train_target)
diffopt.step(loss)
with torch.no_grad():
train_logit = fmodel(train_input)
train_preds = torch.softmax(train_logit, dim=-1)
inner_loss += F.cross_entropy(train_logit,
train_target).cpu()
inner_accuracy.update(train_preds.cpu(), train_target.cpu())
test_logit = fmodel(test_input)
outer_loss += F.cross_entropy(test_logit, test_target)
with torch.no_grad():
                    test_preds = torch.softmax(test_logit, dim=-1)
outer_accuracy.update(test_preds.cpu(), test_target.cpu())
if train:
self.manual_backward(outer_loss, outer_optimizer)
outer_optimizer.step()
outer_loss.div_(task_idx + 1)
inner_loss.div_(task_idx + 1)
return outer_loss, inner_loss, outer_accuracy, inner_accuracy
def configure_optimizers(
self,
) -> Union[Optimizer, Tuple[Sequence[Optimizer], Sequence[Any]]]:
outer_optimizer = hydra.utils.instantiate(
self.cfg.optim.outer_optimizer, params=self.parameters()
)
if self.cfg.optim.use_lr_scheduler:
scheduler = hydra.utils.instantiate(
self.cfg.optim.lr_scheduler, optimizer=outer_optimizer
)
return [outer_optimizer], [scheduler]
return outer_optimizer
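# ---------------------------------------------------------------------------
# Usage sketch added by the editor (not part of the original file). It assumes
# a Hydra/OmegaConf config exposing the fields referenced above
# (model.torch_module, optim.inner_optimizer, optim.outer_optimizer,
# optim.use_lr_scheduler, data.datamodule.nway, data.datamodule.num_inner_steps)
# and a meta-learning DataModule yielding batches of the form
# {'support': (inputs, targets), 'query': (inputs, targets)}.
#
#     cfg = OmegaConf.load("conf/maml.yaml")            # hypothetical config path
#     model = MAMLModel(cfg)
#     trainer = pl.Trainer(max_epochs=10)
#     trainer.fit(model, datamodule=meta_datamodule)    # hypothetical datamodule
#
# Note: `self.manual_backward(outer_loss, outer_optimizer)` follows the older
# Lightning manual-optimization API; recent versions expect
# `self.automatic_optimization = False` and `manual_backward(loss)` without
# the optimizer argument.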
|
417940
|
from unittest import mock
import unittest
import os
import shutil
import io
import re
import zipfile
import time
import functools
from webscrapbook import WSB_DIR, Config
from webscrapbook import util
from webscrapbook.scrapbook.host import Host
from webscrapbook.scrapbook import book as wsb_book
from webscrapbook.scrapbook.book import Book
root_dir = os.path.abspath(os.path.dirname(__file__))
test_root = os.path.join(root_dir, 'test_scrapbook_book')
def setUpModule():
# mock out user config
global mockings
mockings = [
mock.patch('webscrapbook.scrapbook.host.WSB_USER_DIR', os.path.join(test_root, 'wsb')),
mock.patch('webscrapbook.WSB_USER_DIR', os.path.join(test_root, 'wsb')),
mock.patch('webscrapbook.WSB_USER_CONFIG', test_root),
]
for mocking in mockings:
mocking.start()
def tearDownModule():
# stop mock
for mocking in mockings:
mocking.stop()
class TestBook(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = 8192
cls.test_root = os.path.join(test_root, 'general')
cls.test_wsbdir = os.path.join(cls.test_root, WSB_DIR)
cls.test_config = os.path.join(cls.test_root, WSB_DIR, 'config.ini')
def setUp(self):
"""Set up a general temp test folder
"""
try:
shutil.rmtree(self.test_root)
except NotADirectoryError:
os.remove(self.test_root)
except FileNotFoundError:
pass
os.makedirs(self.test_wsbdir)
def tearDown(self):
"""Remove general temp test folder
"""
try:
shutil.rmtree(self.test_root)
except NotADirectoryError:
os.remove(self.test_root)
except FileNotFoundError:
pass
def create_general_config(self):
with open(self.test_config, 'w', encoding='UTF-8') as f:
f.write("""[book ""]
name = scrapbook
top_dir =
data_dir = data
tree_dir = tree
index = tree/map.html
no_tree = false
""")
def test_init01(self):
"""Check basic"""
with open(self.test_config, 'w', encoding='UTF-8') as f:
f.write("""[book ""]
name = scrapbook
top_dir =
data_dir = data
tree_dir = tree
index = tree/map.html
no_tree = false
""")
host = Host(self.test_root)
book = Book(host)
self.assertEqual(book.host, host)
self.assertEqual(book.id, '')
self.assertEqual(book.name, 'scrapbook')
self.assertEqual(book.root, self.test_root)
self.assertEqual(book.top_dir, self.test_root)
self.assertEqual(book.data_dir, os.path.join(self.test_root, 'data'))
self.assertEqual(book.tree_dir, os.path.join(self.test_root, 'tree'))
self.assertFalse(book.no_tree)
def test_init02(self):
"""Check book_id param"""
with open(self.test_config, 'w', encoding='UTF-8') as f:
f.write("""[book "book1"]
name = scrapbook1
top_dir =
data_dir =
tree_dir = .wsb/tree
index = .wsb/tree/map.html
no_tree = false
""")
host = Host(self.test_root)
book = Book(host, 'book1')
self.assertEqual(book.host, host)
self.assertEqual(book.id, 'book1')
self.assertEqual(book.name, 'scrapbook1')
self.assertEqual(book.root, self.test_root)
self.assertEqual(book.top_dir, self.test_root)
self.assertEqual(book.data_dir, self.test_root)
self.assertEqual(book.tree_dir, os.path.join(self.test_root, '.wsb', 'tree'))
self.assertFalse(book.no_tree)
def test_init03(self):
"""Check modified path"""
with open(self.test_config, 'w', encoding='UTF-8') as f:
f.write("""[app]
root = public
[book ""]
name = scrapbook
top_dir = sb
data_dir = data
tree_dir = tree
index = tree/map.html
no_tree = false
""")
host = Host(self.test_root)
book = Book(host)
self.assertEqual(book.host, host)
self.assertEqual(book.id, '')
self.assertEqual(book.name, 'scrapbook')
self.assertEqual(book.root, self.test_root)
self.assertEqual(book.top_dir, os.path.join(self.test_root, 'public', 'sb'))
self.assertEqual(book.data_dir, os.path.join(self.test_root, 'public', 'sb', 'data'))
self.assertEqual(book.tree_dir, os.path.join(self.test_root, 'public', 'sb', 'tree'))
self.assertFalse(book.no_tree)
def test_get_subpath(self):
self.create_general_config()
book = Book(Host(self.test_root))
self.assertEqual(book.get_subpath(os.path.join(self.test_root, 'tree', 'meta.js')), 'tree/meta.js')
def test_get_tree_file(self):
self.create_general_config()
book = Book(Host(self.test_root))
self.assertEqual(book.get_tree_file('meta'), os.path.join(self.test_root, 'tree', 'meta.js'))
self.assertEqual(book.get_tree_file('toc', 1), os.path.join(self.test_root, 'tree', 'toc1.js'))
def test_iter_tree_files01(self):
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'tree'))
with open(os.path.join(self.test_root, 'tree', 'meta.js'), 'w', encoding='UTF-8') as f:
pass
with open(os.path.join(self.test_root, 'tree', 'meta1.js'), 'w', encoding='UTF-8') as f:
pass
with open(os.path.join(self.test_root, 'tree', 'meta2.js'), 'w', encoding='UTF-8') as f:
pass
book = Book(Host(self.test_root))
self.assertEqual(list(book.iter_tree_files('meta')), [
os.path.join(self.test_root, 'tree', 'meta.js'),
os.path.join(self.test_root, 'tree', 'meta1.js'),
os.path.join(self.test_root, 'tree', 'meta2.js'),
])
def test_iter_tree_files02(self):
"""Break since nonexisting index"""
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'tree'))
with open(os.path.join(self.test_root, 'tree', 'meta.js'), 'w', encoding='UTF-8') as f:
pass
with open(os.path.join(self.test_root, 'tree', 'meta1.js'), 'w', encoding='UTF-8') as f:
pass
with open(os.path.join(self.test_root, 'tree', 'meta3.js'), 'w', encoding='UTF-8') as f:
pass
book = Book(Host(self.test_root))
self.assertEqual(list(book.iter_tree_files('meta')), [
os.path.join(self.test_root, 'tree', 'meta.js'),
os.path.join(self.test_root, 'tree', 'meta1.js'),
])
def test_iter_tree_files03(self):
"""Works when directory not exist"""
book = Book(Host(self.test_root))
self.assertEqual(list(book.iter_tree_files('meta')), [])
@mock.patch('webscrapbook.scrapbook.book.Book.iter_tree_files')
def test_iter_meta_files(self, mock_func):
book = Book(Host(self.test_root))
for i in book.iter_meta_files():
pass
mock_func.assert_called_once_with('meta')
@mock.patch('webscrapbook.scrapbook.book.Book.iter_tree_files')
def test_iter_toc_files(self, mock_func):
book = Book(Host(self.test_root))
for i in book.iter_toc_files():
pass
mock_func.assert_called_once_with('toc')
@mock.patch('webscrapbook.scrapbook.book.Book.iter_tree_files')
def test_iter_fulltext_files(self, mock_func):
book = Book(Host(self.test_root))
for i in book.iter_fulltext_files():
pass
mock_func.assert_called_once_with('fulltext')
def test_load_tree_file01(self):
"""Test normal loading"""
self.create_general_config()
with open(os.path.join(self.test_root, 'meta.js'), 'w', encoding='UTF-8') as f:
f.write("""/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.meta({
"20200101000000000": {
"index": "20200101000000000/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000000",
"modify": "20200101000000000"
}
})""")
book = Book(Host(self.test_root))
self.assertEqual(
book.load_tree_file(os.path.join(self.test_root, 'meta.js')), {
'20200101000000000': {
'index': '20200101000000000/index.html',
'title': 'Dummy',
'type': '',
'create': '20200101000000000',
'modify': '20200101000000000',
},
})
def test_load_tree_file02(self):
"""Test malformed wrapping"""
self.create_general_config()
with open(os.path.join(self.test_root, 'meta.js'), 'w', encoding='UTF-8') as f:
f.write("""
scrapbook.meta({
"20200101000000000": {
"index": "20200101000000000/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000000",
"modify": "20200101000000000"
}
}""")
book = Book(Host(self.test_root))
with self.assertRaises(wsb_book.TreeFileMalformedWrappingError):
book.load_tree_file(os.path.join(self.test_root, 'meta.js'))
def test_load_tree_file03(self):
"""Test malformed wrapping"""
self.create_general_config()
with open(os.path.join(self.test_root, 'meta.js'), 'w', encoding='UTF-8') as f:
f.write("""
scrapbook.meta{
"20200101000000000": {
"index": "20200101000000000/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000000",
"modify": "20200101000000000"
}
})""")
book = Book(Host(self.test_root))
with self.assertRaises(wsb_book.TreeFileMalformedWrappingError):
book.load_tree_file(os.path.join(self.test_root, 'meta.js'))
def test_load_tree_file04(self):
"""Test malformed wrapping"""
self.create_general_config()
with open(os.path.join(self.test_root, 'meta.js'), 'w', encoding='UTF-8') as f:
f.write("""({
"20200101000000000": {
"index": "20200101000000000/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000000",
"modify": "20200101000000000"
}
})""")
book = Book(Host(self.test_root))
with self.assertRaises(wsb_book.TreeFileMalformedWrappingError):
book.load_tree_file(os.path.join(self.test_root, 'meta.js'))
def test_load_tree_file05(self):
"""Test malformed JSON"""
self.create_general_config()
with open(os.path.join(self.test_root, 'meta.js'), 'w', encoding='UTF-8') as f:
f.write("""
scrapbook.meta({
'20200101000000000': {
index: '20200101000000000/index.html',
title: 'Dummy',
type: '',
create: '20200101000000000',
modify: '20200101000000000'
}
})""")
book = Book(Host(self.test_root))
with self.assertRaises(wsb_book.TreeFileMalformedJsonError):
book.load_tree_file(os.path.join(self.test_root, 'meta.js'))
def test_load_tree_file06(self):
"""Test empty file should not error out."""
self.create_general_config()
with open(os.path.join(self.test_root, 'meta.js'), 'w', encoding='UTF-8') as f:
f.write('')
book = Book(Host(self.test_root))
self.assertEqual(book.load_tree_file(os.path.join(self.test_root, 'meta.js')), {})
def test_load_tree_files01(self):
"""Test normal loading
        - An item with the same ID in a later file overwrites the one from an earlier file.
        - An item with a None value should be removed.
"""
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'tree'))
with open(os.path.join(self.test_root, 'tree', 'meta.js'), 'w', encoding='UTF-8') as f:
f.write("""/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.meta({
"20200101000000000": {
"index": "20200101000000000/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000000",
"modify": "20200101000000000",
"comment": "comment"
},
"20200101000000001": {
"index": "20200101000000001/index.html",
"title": "Dummy1",
"type": "",
"create": "20200101000000001",
"modify": "20200101000000001",
"comment": "comment1"
},
"20200101000000002": {
"index": "20200101000000002/index.html",
"title": "Dummy2",
"type": "",
"create": "20200101000000002",
"modify": "20200101000000002",
"comment": "comment2"
}
})""")
with open(os.path.join(self.test_root, 'tree', 'meta1.js'), 'w', encoding='UTF-8') as f:
f.write("""/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.meta({
"20200101000000001": {
"index": "20200101000000001/index.html",
"title": "Dummy1rev",
"type": "",
"create": "20200101000000001",
"modify": "20200101000000011"
},
"20200101000000002": null,
"20200101000000003": {
"index": "20200101000000003/index.html",
"title": "Dummy3",
"type": "",
"create": "20200101000000003",
"modify": "20200101000000003",
"comment": "comment3"
}
})""")
book = Book(Host(self.test_root))
self.assertEqual(book.load_tree_files('meta'), {
'20200101000000000': {
'index': '20200101000000000/index.html',
'title': 'Dummy',
'type': '',
'create': '20200101000000000',
'modify': '20200101000000000',
'comment': 'comment',
},
'20200101000000001': {
'index': '20200101000000001/index.html',
'title': 'Dummy1rev',
'type': '',
'create': '20200101000000001',
'modify': '20200101000000011',
},
'20200101000000003': {
'index': '20200101000000003/index.html',
'title': 'Dummy3',
'type': '',
'create': '20200101000000003',
'modify': '20200101000000003',
'comment': 'comment3',
},
})
def test_load_tree_files02(self):
"""Works when directory not exist"""
book = Book(Host(self.test_root))
self.assertEqual(book.load_tree_files('meta'), {})
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_meta_files01(self, mock_func):
book = Book(Host(self.test_root))
book.load_meta_files()
mock_func.assert_called_once_with('meta')
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_meta_files02(self, mock_func):
book = Book(Host(self.test_root))
book.meta = {}
book.load_meta_files()
mock_func.assert_not_called()
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_meta_files03(self, mock_func):
book = Book(Host(self.test_root))
book.meta = {}
book.load_meta_files(refresh=True)
mock_func.assert_called_once_with('meta')
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_toc_files01(self, mock_func):
book = Book(Host(self.test_root))
book.load_toc_files()
mock_func.assert_called_once_with('toc')
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_toc_files02(self, mock_func):
book = Book(Host(self.test_root))
book.toc = {}
book.load_toc_files()
mock_func.assert_not_called()
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_toc_files03(self, mock_func):
book = Book(Host(self.test_root))
book.toc = {}
book.load_toc_files(refresh=True)
mock_func.assert_called_once_with('toc')
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_fulltext_files01(self, mock_func):
book = Book(Host(self.test_root))
book.load_fulltext_files()
mock_func.assert_called_once_with('fulltext')
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_fulltext_files02(self, mock_func):
book = Book(Host(self.test_root))
book.fulltext = {}
book.load_fulltext_files()
mock_func.assert_not_called()
@mock.patch('webscrapbook.scrapbook.book.Book.load_tree_files')
def test_load_fulltext_files03(self, mock_func):
book = Book(Host(self.test_root))
book.fulltext = {}
book.load_fulltext_files(refresh=True)
mock_func.assert_called_once_with('fulltext')
def test_save_meta_files01(self):
self.create_general_config()
book = Book(Host(self.test_root))
book.meta = {
'20200101000000000': {'title': 'Dummy 1 中文'},
'20200101000000001': {'title': 'Dummy 2 中文'},
}
book.save_meta_files()
with open(os.path.join(self.test_root, 'tree', 'meta.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.meta({
"20200101000000000": {
"title": "Dummy 1 中文"
},
"20200101000000001": {
"title": "Dummy 2 中文"
}
})""")
@mock.patch('webscrapbook.scrapbook.book.Book.SAVE_META_THRESHOLD', 3)
def test_save_meta_files02(self):
self.create_general_config()
book = Book(Host(self.test_root))
book.meta = {
'20200101000000000': {'title': 'Dummy 1 中文'},
'20200101000000001': {'title': 'Dummy 2 中文'},
'20200101000000002': {'title': 'Dummy 3 中文'},
'20200101000000003': {'title': 'Dummy 4 中文'},
}
book.save_meta_files()
with open(os.path.join(self.test_root, 'tree', 'meta.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.meta({
"20200101000000000": {
"title": "Dummy 1 中文"
},
"20200101000000001": {
"title": "Dummy 2 中文"
}
})""")
with open(os.path.join(self.test_root, 'tree', 'meta1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.meta({
"20200101000000002": {
"title": "Dummy 3 中文"
},
"20200101000000003": {
"title": "Dummy 4 中文"
}
})""")
def test_save_meta_files03(self):
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'tree'))
with open(os.path.join(self.test_root, 'tree', 'meta.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy')
with open(os.path.join(self.test_root, 'tree', 'meta1.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy1')
with open(os.path.join(self.test_root, 'tree', 'meta2.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy2')
with open(os.path.join(self.test_root, 'tree', 'meta3.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy3')
book = Book(Host(self.test_root))
book.meta = {
'20200101000000000': {'title': 'Dummy 1 中文'},
'20200101000000001': {'title': 'Dummy 2 中文'},
}
book.save_meta_files()
with open(os.path.join(self.test_root, 'tree', 'meta.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.meta({
"20200101000000000": {
"title": "Dummy 1 中文"
},
"20200101000000001": {
"title": "Dummy 2 中文"
}
})""")
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'meta1.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'meta2.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'meta3.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'meta4.js')))
def test_save_meta_files04(self):
"""Check if U+2028 and U+2029 are escaped in the embedded JSON."""
self.create_general_config()
book = Book(Host(self.test_root))
book.meta = {
'20200101\u2028000000000': {'title\u20281': 'Dummy 1\u2028中文'},
'20200101\u2029000000001': {'title\u20292': 'Dummy 2\u2029中文'},
}
book.save_meta_files()
with open(os.path.join(self.test_root, 'tree', 'meta.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), r"""/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.meta({
"20200101\u2028000000000": {
"title\u20281": "Dummy 1\u2028中文"
},
"20200101\u2029000000001": {
"title\u20292": "Dummy 2\u2029中文"
}
})""")
def test_save_toc_files01(self):
self.create_general_config()
book = Book(Host(self.test_root))
book.toc = {
'root': [
'20200101000000000',
'20200101000000001',
'20200101000000002',
],
'20200101000000000': [
'20200101000000003'
]
}
book.save_toc_files()
with open(os.path.join(self.test_root, 'tree', 'toc.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.toc({
"root": [
"20200101000000000",
"20200101000000001",
"20200101000000002"
],
"20200101000000000": [
"20200101000000003"
]
})""")
@mock.patch('webscrapbook.scrapbook.book.Book.SAVE_TOC_THRESHOLD', 3)
def test_save_toc_files02(self):
self.create_general_config()
book = Book(Host(self.test_root))
book.toc = {
'root': [
'20200101000000000',
'20200101000000001',
'20200101000000002',
'20200101000000003',
'20200101000000004',
],
'20200101000000001': [
'20200101000000011'
],
'20200101000000002': [
'20200101000000021'
],
'20200101000000003': [
'20200101000000031',
'20200101000000032'
],
}
book.save_toc_files()
with open(os.path.join(self.test_root, 'tree', 'toc.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.toc({
"root": [
"20200101000000000",
"20200101000000001",
"20200101000000002",
"20200101000000003",
"20200101000000004"
]
})""")
with open(os.path.join(self.test_root, 'tree', 'toc1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.toc({
"20200101000000001": [
"20200101000000011"
],
"20200101000000002": [
"20200101000000021"
]
})""")
with open(os.path.join(self.test_root, 'tree', 'toc2.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.toc({
"20200101000000003": [
"20200101000000031",
"20200101000000032"
]
})""")
def test_save_toc_files03(self):
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'tree'))
with open(os.path.join(self.test_root, 'tree', 'toc.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy')
with open(os.path.join(self.test_root, 'tree', 'toc1.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy1')
with open(os.path.join(self.test_root, 'tree', 'toc2.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy2')
with open(os.path.join(self.test_root, 'tree', 'toc4.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy4')
book = Book(Host(self.test_root))
book.toc = {
'root': [
'20200101000000000',
'20200101000000001',
'20200101000000002',
],
'20200101000000000': [
'20200101000000003'
]
}
book.save_toc_files()
with open(os.path.join(self.test_root, 'tree', 'toc.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.toc({
"root": [
"20200101000000000",
"20200101000000001",
"20200101000000002"
],
"20200101000000000": [
"20200101000000003"
]
})""")
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'toc1.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'toc2.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'toc3.js')))
self.assertTrue(os.path.exists(os.path.join(self.test_root, 'tree', 'toc4.js')))
def test_save_toc_files04(self):
"""Check if U+2028 and U+2029 are escaped in the embedded JSON."""
self.create_general_config()
book = Book(Host(self.test_root))
book.toc = {
'root': [
'20200101\u2028000000000',
'20200101\u2029000000001',
],
'20200101\u2028000000000': [
'20200101\u2029000000003'
]
}
book.save_toc_files()
with open(os.path.join(self.test_root, 'tree', 'toc.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), r"""/**
* Feel free to edit this file, but keep data code valid JSON format.
*/
scrapbook.toc({
"root": [
"20200101\u2028000000000",
"20200101\u2029000000001"
],
"20200101\u2028000000000": [
"20200101\u2029000000003"
]
})""")
def test_save_fulltext_files01(self):
self.create_general_config()
book = Book(Host(self.test_root))
book.fulltext = {
"20200101000000000": {
'index.html': {
'content': 'dummy text 1 中文',
}
},
"20200101000000001": {
'index.html': {
'content': 'dummy text 2 中文',
}
},
}
book.save_fulltext_files()
with open(os.path.join(self.test_root, 'tree', 'fulltext.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101000000000": {
"index.html": {
"content": "dummy text 1 中文"
}
},
"20200101000000001": {
"index.html": {
"content": "dummy text 2 中文"
}
}
})""")
@mock.patch('webscrapbook.scrapbook.book.Book.SAVE_FULLTEXT_THRESHOLD', 10)
def test_save_fulltext_files02(self):
self.create_general_config()
book = Book(Host(self.test_root))
book.fulltext = {
"20200101000000000": {
'index.html': {
'content': 'dummy text 1 中文',
},
'frame.html': {
'content': 'frame page content',
},
},
"20200101000000001": {
'index.html': {
'content': 'dummy text 2 中文',
},
},
"20200101000000002": {
'index.html': {
'content': 'dummy text 3 中文',
},
},
}
book.save_fulltext_files()
with open(os.path.join(self.test_root, 'tree', 'fulltext.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101000000000": {
"index.html": {
"content": "dummy text 1 中文"
},
"frame.html": {
"content": "frame page content"
}
}
})""")
with open(os.path.join(self.test_root, 'tree', 'fulltext1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101000000001": {
"index.html": {
"content": "dummy text 2 中文"
}
}
})""")
with open(os.path.join(self.test_root, 'tree', 'fulltext2.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101000000002": {
"index.html": {
"content": "dummy text 3 中文"
}
}
})""")
@mock.patch('webscrapbook.scrapbook.book.Book.SAVE_FULLTEXT_THRESHOLD', 10)
def test_save_fulltext_files03(self):
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'tree'))
with open(os.path.join(self.test_root, 'tree', 'fulltext.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy')
with open(os.path.join(self.test_root, 'tree', 'fulltext1.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy1')
with open(os.path.join(self.test_root, 'tree', 'fulltext2.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy2')
with open(os.path.join(self.test_root, 'tree', 'fulltext3.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy3')
with open(os.path.join(self.test_root, 'tree', 'fulltext4.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy4')
with open(os.path.join(self.test_root, 'tree', 'fulltext6.js'), 'w', encoding='UTF-8') as fh:
fh.write('dummy6')
book = Book(Host(self.test_root))
book.fulltext = {
"20200101000000000": {
'index.html': {
'content': 'dummy text 1 中文',
},
'frame.html': {
'content': 'frame page content',
},
},
"20200101000000001": {
'index.html': {
'content': 'dummy text 2 中文',
},
},
}
book.save_fulltext_files()
with open(os.path.join(self.test_root, 'tree', 'fulltext.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101000000000": {
"index.html": {
"content": "dummy text 1 中文"
},
"frame.html": {
"content": "frame page content"
}
}
})""")
with open(os.path.join(self.test_root, 'tree', 'fulltext1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), """/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101000000001": {
"index.html": {
"content": "dummy text 2 中文"
}
}
})""")
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'fulltext2.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'fulltext3.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'fulltext4.js')))
self.assertFalse(os.path.exists(os.path.join(self.test_root, 'tree', 'fulltext5.js')))
self.assertTrue(os.path.exists(os.path.join(self.test_root, 'tree', 'fulltext6.js')))
def test_save_fulltext_files04(self):
"""Check if U+2028 and U+2029 are escaped in the embedded JSON."""
self.create_general_config()
book = Book(Host(self.test_root))
book.fulltext = {
"20200101\u2028000000000": {
'index.html': {
'content': 'dummy text 1 中文',
}
},
"20200101\u2029000000001": {
'index.html': {
'content': 'dummy text 2 中文',
}
},
}
book.save_fulltext_files()
with open(os.path.join(self.test_root, 'tree', 'fulltext.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), r"""/**
* This file is generated by WebScrapBook and is not intended to be edited.
*/
scrapbook.fulltext({
"20200101\u2028000000000": {
"index.html": {
"content": "dummy text 1 中文"
}
},
"20200101\u2029000000001": {
"index.html": {
"content": "dummy text 2 中文"
}
}
})""")
@mock.patch('webscrapbook.scrapbook.host.Host.auto_backup')
def test_backup(self, mock_func):
test_file = os.path.join(self.test_root, 'tree', 'meta.js')
host = Host(self.test_root)
book = Book(host)
book.backup(test_file)
mock_func.assert_called_with(test_file)
book.backup(test_file, base=self.test_wsbdir, move=False)
mock_func.assert_called_with(test_file, base=self.test_wsbdir, move=False)
@mock.patch('webscrapbook.scrapbook.host.FileLock')
def test_get_lock01(self, mock_filelock):
self.create_general_config()
host = Host(self.test_root)
book = Book(host)
book.get_lock('test')
mock_filelock.assert_called_once_with(host, 'book--test')
@mock.patch('webscrapbook.scrapbook.host.FileLock')
def test_get_lock02(self, mock_filelock):
"""With parameters"""
self.create_general_config()
host = Host(self.test_root)
book = Book(host)
book.get_lock('test',
timeout=10, stale=120, poll_interval=0.3, assume_acquired=True)
mock_filelock.assert_called_once_with(host, 'book--test',
timeout=10, stale=120, poll_interval=0.3, assume_acquired=True)
@mock.patch('webscrapbook.scrapbook.book.Book.get_lock')
def test_get_tree_lock01(self, mock_get_lock):
self.create_general_config()
host = Host(self.test_root)
book = Book(host)
book.get_tree_lock()
mock_get_lock.assert_called_once_with('tree')
@mock.patch('webscrapbook.scrapbook.book.Book.get_lock')
def test_get_tree_lock02(self, mock_get_lock):
"""With parameters"""
self.create_general_config()
host = Host(self.test_root)
book = Book(host)
book.get_tree_lock(timeout=10, stale=120, poll_interval=0.3, assume_acquired=True)
mock_get_lock.assert_called_once_with('tree',
timeout=10, stale=120, poll_interval=0.3, assume_acquired=True)
def test_get_index_paths01(self):
self.create_general_config()
book = Book(Host(self.test_root))
self.assertEqual(book.get_index_paths('20200101000000000/index.html'), ['index.html'])
self.assertEqual(book.get_index_paths('20200101000000000.html'), ['20200101000000000.html'])
self.assertEqual(book.get_index_paths('20200101000000000.htz'), ['index.html'])
def test_get_index_paths02(self):
"""MAFF with single page"""
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'data'))
archive_file = os.path.join(self.test_root, 'data', '20200101000000000.maff')
with zipfile.ZipFile(archive_file, 'w') as zh:
zh.writestr('20200101000000000/index.html', """dummy""")
book = Book(Host(self.test_root))
self.assertEqual(book.get_index_paths('20200101000000000.maff'), ['20200101000000000/index.html'])
def test_get_index_paths03(self):
"""MAFF with multiple pages"""
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'data'))
archive_file = os.path.join(self.test_root, 'data', '20200101000000000.maff')
with zipfile.ZipFile(archive_file, 'w') as zh:
zh.writestr('20200101000000000/index.html', """dummy""")
zh.writestr('20200101000000001/index.html', """dummy""")
book = Book(Host(self.test_root))
self.assertEqual(book.get_index_paths('20200101000000000.maff'), ['20200101000000000/index.html', '20200101000000001/index.html'])
def test_get_index_paths04(self):
"""MAFF with no page"""
self.create_general_config()
os.makedirs(os.path.join(self.test_root, 'data'))
archive_file = os.path.join(self.test_root, 'data', '20200101000000000.maff')
with zipfile.ZipFile(archive_file, 'w') as zh:
pass
book = Book(Host(self.test_root))
self.assertEqual(book.get_index_paths('20200101000000000.maff'), [])
def test_get_icon_file01(self):
"""Pass if file not exist."""
book = Book(Host(self.test_root))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': 'http://example.com',
}))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': 'data:image/bmp;base64,Qk08AAAAAAAAADYAAAAoAAAAAQAAAAEAAAABACAAAAAAAAYAAAASCwAAEgsAAAAAAAAAAAAAAP8AAAAA',
}))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': '//example.com',
}))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': '/favicon.ico',
}))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': '',
}))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': '?id=123',
}))
self.assertIsNone(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': '#test',
}))
self.assertEqual(book.get_icon_file({
'icon': 'favicon.ico?id=123#test',
}),
os.path.join(book.data_dir, 'favicon.ico'),
)
self.assertEqual(book.get_icon_file({
'icon': '%E4%B8%AD%E6%96%87%231.ico?id=123#test',
}),
os.path.join(book.data_dir, '中文#1.ico'),
)
self.assertEqual(book.get_icon_file({
'index': '20200101000000000/index.html',
'icon': 'favicon.ico?id=123#test',
}),
os.path.join(book.data_dir, '20200101000000000', 'favicon.ico'),
)
self.assertEqual(book.get_icon_file({
'index': '20200101000000000.html',
'icon': 'favicon.ico?id=123#test',
}),
os.path.join(book.data_dir, 'favicon.ico'),
)
self.assertEqual(book.get_icon_file({
'index': '20200101000000000.maff',
'icon': 'favicon.ico?id=123#test',
}),
os.path.join(book.data_dir, 'favicon.ico'),
)
self.assertEqual(book.get_icon_file({
'index': '20200101000000000.maff',
'icon': '.wsb/tree/favicon/dbc82be549e49d6db9a5719086722a4f1c5079cd.bmp?id=123#test',
}),
os.path.join(book.tree_dir, 'favicon', 'dbc82be549e49d6db9a5719086722a4f1c5079cd.bmp'),
)
def test_load_note_file01(self):
"""Test for common note file wrapper."""
test_file = os.path.join(self.test_root, 'index.html')
with open(test_file, 'w', encoding='UTF-8') as f:
f.write("""\
<!DOCTYPE html><html><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width"><style>pre{white-space: pre-wrap; overflow-wrap: break-word;}</style></head><body><pre>
Note content
2nd line
3rd line
</pre></body></html>""")
book = Book(Host(self.test_root))
content = book.load_note_file(test_file)
self.assertEqual(content, """\
Note content
2nd line
3rd line""")
def test_load_note_file02(self):
"""Test for common legacy note file wrapper."""
test_file = os.path.join(self.test_root, 'index.html')
with open(test_file, 'w', encoding='UTF-8') as f:
f.write("""\
<html><head><meta http-equiv="Content-Type" content="text/html;Charset=UTF-8"></head><body><pre>
Note content
2nd line
3rd line
</pre></body></html>""")
book = Book(Host(self.test_root))
content = book.load_note_file(test_file)
self.assertEqual(content, """\
Note content
2nd line
3rd line""")
def test_load_note_file03(self):
"""Return original text if malformatted."""
test_file = os.path.join(self.test_root, 'index.html')
html = """\
<html><head><meta http-equiv="Content-Type" content="text/html;Charset=UTF-8"></head><body>
Note content
2nd line
3rd line
</body></html>"""
with open(test_file, 'w', encoding='UTF-8') as f:
f.write(html)
book = Book(Host(self.test_root))
content = book.load_note_file(test_file)
self.assertEqual(content, html)
def test_save_note_file01(self):
"""Test saving. Enforce LF linefeeds."""
test_file = os.path.join(self.test_root, 'index.html')
book = Book(Host(self.test_root))
book.save_note_file(test_file, """\
Note content
2nd line
3rd line""")
with open(test_file, encoding='UTF-8', newline='') as fh:
self.assertEqual(fh.read(), """\
<!DOCTYPE html><html><head>\
<meta charset="UTF-8">\
<meta name="viewport" content="width=device-width">\
<style>pre { white-space: pre-wrap; overflow-wrap: break-word; }</style>\
</head><body><pre>
Note content
2nd line
3rd line
</pre></body></html>""")
def test_auto_backup(self):
"""Auto backup tree files if backup_dir is set."""
test_dir = os.path.join(self.test_root, WSB_DIR, 'tree')
os.makedirs(test_dir)
meta0 = """
scrapbook.meta({
"20200101000000000": {
"index": "20200101000000000/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000000",
"modify": "20200101000000000"
}
})"""
meta1 = """
scrapbook.meta({
"20200101000000001": {
"index": "20200101000000001/index.html",
"title": "Dummy",
"type": "",
"create": "20200101000000001",
"modify": "20200101000000001"
}
})"""
toc0 = """
scrapbook.toc({
"root": [
"20200101000000000",
"20200101000000001",
"20200101000000002"
],
"20200101000000000": [
"20200101000000003"
]
})"""
toc1 = """
scrapbook.toc({
"20200101000000001": [
"20200101000000004"
]
})"""
fulltext0 = """
scrapbook.fulltext({
"20200101000000000": {
"index.html": {
"content": "dummy text 1 中文"
}
},
"20200101000000001": {
"index.html": {
"content": "dummy text 2 中文"
}
}
})"""
fulltext1 = """
scrapbook.fulltext({
"20200101000000002": {
"index.html": {
"content": "dummy text 2 中文"
}
}
})"""
with open(os.path.join(test_dir, 'meta.js'), 'w', encoding='UTF-8') as fh:
fh.write(meta0)
with open(os.path.join(test_dir, 'meta1.js'), 'w', encoding='UTF-8') as fh:
fh.write(meta1)
with open(os.path.join(test_dir, 'toc.js'), 'w', encoding='UTF-8') as fh:
fh.write(toc0)
with open(os.path.join(test_dir, 'toc1.js'), 'w', encoding='UTF-8') as fh:
fh.write(toc1)
with open(os.path.join(test_dir, 'fulltext.js'), 'w', encoding='UTF-8') as fh:
fh.write(fulltext0)
with open(os.path.join(test_dir, 'fulltext1.js'), 'w', encoding='UTF-8') as fh:
fh.write(fulltext1)
host = Host(self.test_root)
book = Book(host)
host.init_backup()
book.load_meta_files()
book.load_toc_files()
book.load_fulltext_files()
book.save_meta_files()
book.save_toc_files()
book.save_fulltext_files()
with open(os.path.join(host._backup_dir, WSB_DIR, 'tree', 'meta.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), meta0)
with open(os.path.join(host._backup_dir, WSB_DIR, 'tree', 'meta1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), meta1)
with open(os.path.join(host._backup_dir, WSB_DIR, 'tree', 'toc.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), toc0)
with open(os.path.join(host._backup_dir, WSB_DIR, 'tree', 'toc1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), toc1)
with open(os.path.join(host._backup_dir, WSB_DIR, 'tree', 'fulltext.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), fulltext0)
with open(os.path.join(host._backup_dir, WSB_DIR, 'tree', 'fulltext1.js'), encoding='UTF-8') as fh:
self.assertEqual(fh.read(), fulltext1)
if __name__ == '__main__':
unittest.main()
|
418074
|
import pytest
grblas = pytest.importorskip("grblas")
from metagraph.plugins.python.types import PythonNodeMapType
from metagraph.plugins.numpy.types import NumpyNodeMap, NumpyNodeSet
from metagraph.plugins.graphblas.types import GrblasNodeMap, GrblasNodeSet
from metagraph import NodeLabels
import numpy as np
from grblas import Vector
def test_python():
PythonNodeMapType.assert_equal(
{"A": 1, "B": 2, "C": 3},
{"A": 1, "B": 2, "C": 3},
{"dtype": "int"},
{"dtype": "int"},
{},
{},
)
PythonNodeMapType.assert_equal(
{"A": 1, "C": 3.333333333333333333333333, "B": 2},
{"A": 1, "C": 3.333333333333333333333334, "B": 2 + 1e-9},
{"dtype": "float"},
{"dtype": "float"},
{},
{},
)
with pytest.raises(AssertionError):
PythonNodeMapType.assert_equal(
{"A": 1}, {"A": 1, "B": 2}, {"dtype": "int"}, {"dtype": "int"}, {}, {},
)
with pytest.raises(AssertionError):
PythonNodeMapType.assert_equal(
{"A": 1, "B": 22},
{"A": 1, "B": 2},
{"dtype": "int"},
{"dtype": "int"},
{},
{},
)
with pytest.raises(AssertionError):
PythonNodeMapType.assert_equal(
{"A": 1.1}, {"A": 1}, {"dtype": "float"}, {"dtype": "int"}, {}, {},
)
with pytest.raises(TypeError, match="Unable to compute dtype"):
PythonNodeMapType.compute_abstract_properties({0: 3 + 4j, 1: 5 - 2j}, {"dtype"})
def test_numpy():
NumpyNodeMap.Type.assert_equal(
NumpyNodeMap(np.array([1, 3, 5, 7, 9])),
NumpyNodeMap(np.array([1, 3, 5, 7, 9])),
{},
{},
{},
{},
)
NumpyNodeMap.Type.assert_equal(
NumpyNodeMap(np.array([1, 3, 5.5555555555555555555, 7, 9])),
NumpyNodeMap(np.array([1, 3, 5.5555555555555555556, 7, 9 + 1e-9])),
{},
{},
{},
{},
)
NumpyNodeMap.Type.assert_equal(
NumpyNodeMap(np.array([1, 3, 5, 7, 9]), [14, 2, 7, 8, 20]),
NumpyNodeMap(np.array([1, 3, 5, 7, 9]), [14, 2, 7, 8, 20]),
{},
{},
{},
{},
)
NumpyNodeMap.Type.assert_equal(
NumpyNodeMap(np.array([1, 3, 5, 7, 9]), [0, 2, 4, 6, 8]),
NumpyNodeMap.from_mask(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([True, False, True, False, True, False, True, False, True, False]),
),
{},
{},
{},
{},
)
with pytest.raises(AssertionError):
NumpyNodeMap.Type.assert_equal(
NumpyNodeMap(np.array([1, 3, 5, 7, 9])),
NumpyNodeMap(np.array([1, 3, 5, 7, 9, 11])),
{},
{},
{},
{},
)
with pytest.raises(AssertionError):
NumpyNodeMap.Type.assert_equal(
NumpyNodeMap(np.array([1, 3, 5, 7, 9]), np.array([14, 2, 7, 8, 20])),
NumpyNodeMap(np.array([1, 3, 5, 7, 9]), np.array([2, 7, 8, 14, 20])),
{},
{},
{},
{},
)
# Exercise NumpyNodeSet
with pytest.raises(TypeError, match="Invalid number of dimensions: 2"):
NumpyNodeSet(np.array([[1, 2, 3], [4, 5, 6]]))
with pytest.raises(TypeError, match="Invalid dtype for NodeSet"):
NumpyNodeSet(np.array([1.1, 2.2, 3.3]))
# Handle duplicates
x = NumpyNodeSet([1, 1, 3, 4, 1, 2, 1])
assert len(x) == 4
assert 1 in x
assert [2, 3, 4] in x
# Exercise NumpyNodeMap
with pytest.raises(TypeError, match="Invalid number of dimensions: 2"):
NumpyNodeMap(np.array([[1, 2, 3], [4, 5, 6]]))
with pytest.raises(TypeError, match="Nodes must be same shape and size as data"):
NumpyNodeMap([1, 2, 3, 4], nodes=[1, 2, 3])
with pytest.raises(TypeError, match="Invalid dtype for nodes"):
NumpyNodeMap([1, 2, 3, 4], nodes=[1.1, 2.2, 3.3, 4.4])
with pytest.raises(TypeError, match="Duplicate node ids found"):
NumpyNodeMap([1, 2, 3, 4], nodes=[1, 1, 1, 2])
y = NumpyNodeMap([1.1, 2.2, 3.3, 4.4], nodes=[5, 6, 7, 22])
assert len(y) == 4
assert 22 in y
assert [5, 6, 7] in y
assert y[5] == 1.1
with pytest.raises(KeyError, match="is not in the NodeMap"):
y[17]
with pytest.raises(KeyError, match="are not all in the NodeMap"):
y[[7, 8, 9]]
def test_graphblas():
GrblasNodeMap.Type.assert_equal(
GrblasNodeMap(Vector.from_values([0, 1, 3, 4], [1, 2, 3, 4])),
GrblasNodeMap(Vector.from_values([0, 1, 3, 4], [1, 2, 3, 4])),
{},
{},
{},
{},
)
GrblasNodeMap.Type.assert_equal(
GrblasNodeMap(
Vector.from_values([0, 1, 3, 4], [1.0, 2.0, 3.333333333333333333, 4.0])
),
GrblasNodeMap(
Vector.from_values([0, 1, 3, 4], [1.0, 2.0, 3.333333333333333334, 4 + 1e-9])
),
{},
{},
{},
{},
)
with pytest.raises(AssertionError):
GrblasNodeMap.Type.assert_equal(
GrblasNodeMap(Vector.from_values([0, 1, 3, 4], [1, 2, 3, 4])),
GrblasNodeMap(Vector.from_values([0, 1, 2, 4], [1, 2, 3, 4])),
{},
{},
{},
{},
)
with pytest.raises(AssertionError):
GrblasNodeMap.Type.assert_equal(
GrblasNodeMap(Vector.from_values([0, 1, 2], [1, 2, 3])),
GrblasNodeMap(Vector.from_values([0, 1, 2, 3], [1, 2, 3, 4])),
{},
{},
{},
{},
)
# Exercise GrblasNodeSet
x = GrblasNodeSet(Vector.from_values([0, 1, 3], [1, 1, 1]))
assert len(x) == 3
assert 3 in x
assert 2 not in x
# Exercise GrblasNodeMap
y = GrblasNodeMap(Vector.from_values([0, 1, 3], [1.1, 2.2, 3.3]))
assert len(y) == 3
assert 3 in y
assert 2 not in y
assert y[3] == 3.3
|
418080
|
import euclid
from euclid import *
from solid.utils import * # Only needed for EPSILON. Tacky.
# NOTE: The PyEuclid on PyPi doesn't include several elements added to
# the module as of 13 Feb 2013. Add them here until euclid supports them
def as_arr_local( self):
return [ self.x, self.y, self.z]
def set_length_local( self, length):
d = self.magnitude()
if d:
factor = length/d
self.x *= factor
        self.y *= factor
        self.z *= factor  # also scale z so the full 3D length is set correctly
    return self
def _intersect_line3_line3( A, B):
# Connect A & B
# If the length of the connecting segment is 0, they intersect
# at the endpoint(s) of the connecting segment
sol = euclid._connect_line3_line3( A, B)
# TODO: Ray3 and LineSegment3 would like to be able to know
# if their intersection points fall within the segment.
if sol.magnitude_squared() < EPSILON:
return sol.p
else:
return None
def run_patch():
if 'as_arr' not in dir( Vector3):
Vector3.as_arr = as_arr_local
if 'set_length' not in dir( Vector3):
Vector3.set_length = set_length_local
if '_intersect_line3' not in dir(Line3):
Line3._intersect_line3 = _intersect_line3_line3
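# Usage sketch added by the editor (not part of the original module). After
# run_patch() the helpers become available on the pyeuclid types, e.g.:
#
#     run_patch()
#     v = Vector3(3, 4, 0)
#     v.set_length(10)               # scales the vector in place to length 10
#     print(v.as_arr())              # -> [6.0, 8.0, 0.0]
#     a = Line3(Point3(0, 0, 0), Vector3(1, 0, 0))
#     b = Line3(Point3(0, 0, 0), Vector3(0, 1, 0))
#     print(a._intersect_line3(b))   # intersection Point3, or None if the lines miss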
|
418086
|
from DyCommon.Ui.DyTableWidget import *
class DyStockTradeStrategyMarketMonitorDataWidget(DyTableWidget):
""" 股票实盘策略数据窗口 """
def __init__(self, strategyCls):
super().__init__(None, True, False)
self._strategyCls = strategyCls
self.setColNames(strategyCls.dataHeader)
self.setAutoForegroundCol('涨幅(%)')
def update(self, data, newData=False):
""" @data: [[col0, col1, ...]] """
        if newData:  # brand-new data: append rows without matching existing keys
self.fastAppendRows(data, autoForegroundColName='涨幅(%)', new=True)
else: # updating by keys
rowKeys = []
for row in data:
                code = row[0]  # column 0 holds the code, date, or other value that serves as the row key
self[code] = row
rowKeys.append(code)
self.setItemsForeground(rowKeys, (('买入', Qt.red), ('卖出', Qt.darkGreen)))
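# Usage sketch added by the editor (not part of the original file). It assumes a
# strategy class (here called MyStrategy, hypothetical) that defines `dataHeader`
# including a '涨幅(%)' column:
#
#     widget = DyStockTradeStrategyMarketMonitorDataWidget(MyStrategy)
#     widget.update(rows, newData=True)   # append brand-new rows
#     widget.update(updatedRows)          # update existing rows keyed by column 0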
|
418087
|
import KratosMultiphysics as Kratos
import KratosMultiphysics.StatisticsApplication as KratosStats
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.StatisticsApplication.spatial_utilities import GetItemContainer
from KratosMultiphysics.StatisticsApplication.method_utilities import GetNormTypeContainer
from KratosMultiphysics.StatisticsApplication.method_utilities import GetMethod
from KratosMultiphysics.StatisticsApplication.test_utilities import CheckValues
from KratosMultiphysics.StatisticsApplication.test_utilities import CreateModelPart
from KratosMultiphysics.StatisticsApplication.test_utilities import InitializeModelPartVariables
from KratosMultiphysics.StatisticsApplication.test_utilities import GetInitialVariableValue
class SpatialMethodTests(KratosUnittest.TestCase):
def setUp(self):
self.model = Kratos.Model()
self.model_part = self.model.CreateModelPart("test_model_part")
self.containers_to_test = [
"nodal_historical", "nodal_non_historical",
"element_non_historical", "condition_non_historical"
]
self.test_cases = {}
self.test_cases[Kratos.PRESSURE] = ["none", "magnitude", "value"]
self.test_cases[Kratos.VELOCITY] = [
"none", "magnitude", "component_x", "component_y", "component_z"
]
self.test_cases[Kratos.LOAD_MESHES] = [
"magnitude", "index_0", "index_1", "index_2", "index_4"
]
self.test_cases[Kratos.GREEN_LAGRANGE_STRAIN_TENSOR] = [
"frobenius", "index_(0,0)", "index_(0,1)", "index_(4,1)",
"index_(1,1)"
]
self.norm_only_methods = ["min", "max", "median", "distribution"]
SpatialMethodTests.__AddNodalSolutionStepVariables(self.model_part)
CreateModelPart(self.model_part)
InitializeModelPartVariables(self.model_part)
def tearDown(self):
# Code here will be placed AFTER every test in this TestCase.
pass
def testSumMethod(self):
def analytical_method(container, container_type, norm_type, variable):
analytical_value = GetInitialVariableValue(variable, norm_type)
for item in container:
analytical_value += SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
return analytical_value
self.__TestMethod("sum", analytical_method)
def testRootMeanSquareMethod(self):
def analytical_method(container, container_type, norm_type, variable):
analytical_value = GetInitialVariableValue(variable, norm_type)
for item in container:
analytical_value += KratosStats.MethodUtilities.RaiseToPower(
SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type), 2)
return KratosStats.MethodUtilities.RaiseToPower(
analytical_value * (1.0 / len(container)), 0.5)
self.__TestMethod("rootmeansquare", analytical_method)
def testMeanMethod(self):
def analytical_method(container, container_type, norm_type, variable):
analytical_value = GetInitialVariableValue(variable, norm_type)
for item in container:
analytical_value += SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
return analytical_value / len(container)
self.__TestMethod("mean", analytical_method)
def testVarianceMethod(self):
def analytical_method(container, container_type, norm_type, variable):
mean_value = GetInitialVariableValue(variable, norm_type)
variance_value = GetInitialVariableValue(variable, norm_type)
for item in container:
current_value = SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
mean_value += current_value
variance_value += KratosStats.MethodUtilities.RaiseToPower(
current_value, 2)
n = len(container)
mean_value /= n
variance_value = variance_value / n - KratosStats.MethodUtilities.RaiseToPower(
mean_value, 2)
return mean_value, variance_value
self.__TestMethod("variance", analytical_method)
def testMinMethod(self):
def analytical_method(container, container_type, norm_type, variable):
analytical_value = 1e+12
for item in container:
current_value = SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
if (current_value < analytical_value):
analytical_value = current_value
analytical_id = item.Id
return analytical_value, analytical_id
self.__TestMethod("min", analytical_method)
def testMaxMethod(self):
def analytical_method(container, container_type, norm_type, variable):
analytical_value = -1e+12
for item in container:
current_value = SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
if (current_value > analytical_value):
analytical_value = current_value
analytical_id = item.Id
return analytical_value, analytical_id
self.__TestMethod("max", analytical_method)
def testMedianMethod(self):
def analytical_method(container, container_type, norm_type, variable):
item_values = []
for item in container:
current_value = SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
item_values.append(current_value)
item_values = sorted(item_values)
n = len(item_values)
if (n % 2 != 0):
return item_values[n // 2]
else:
return (item_values[(n - 1) // 2] + item_values[n // 2]) * 0.5
self.__TestMethod("median", analytical_method)
def testDistributionMethod(self):
default_parameters = Kratos.Parameters("""
{
"number_of_value_groups" : 10,
"min_value" : "min",
"max_value" : "max"
}""")
def analytical_method(container, container_type, norm_type, variable):
item_values = []
for item in container:
current_value = SpatialMethodTests.__GetNormValue(
variable,
SpatialMethodTests.__GetValue(item, container_type,
variable), norm_type)
item_values.append(current_value)
min_value = min(item_values)
max_value = max(item_values)
group_limits = [
min_value + (max_value - min_value) * i / 10 for i in range(11)
]
group_limits[-1] += 1e-16
group_limits.append(1e+100)
data_distribution = [0 for i in range(len(group_limits))]
mean_distribution = [0.0 for i in range(len(group_limits))]
variance_distribution = [0.0 for i in range(len(group_limits))]
for value in item_values:
for i, v in enumerate(group_limits):
if (value < v):
data_distribution[i] += 1
mean_distribution[i] += value
variance_distribution[i] += value**2.0
break
percentage_data_distribution = []
for i, _ in enumerate(group_limits):
percentage_data_distribution.append(data_distribution[i] /
len(item_values))
if (data_distribution[i] > 0):
mean_distribution[i] /= data_distribution[i]
variance_distribution[i] /= data_distribution[i]
variance_distribution[i] -= mean_distribution[i]**2.0
group_limits[-2] -= 1e-16
group_limits[-1] = max_value
return min_value, max_value, group_limits, data_distribution, percentage_data_distribution, mean_distribution, variance_distribution
self.__TestMethod("distribution", analytical_method,
default_parameters)
def __TestMethod(self,
test_method_name,
analytical_method,
method_params=Kratos.Parameters("""{}""")):
for container_type in self.containers_to_test:
container = SpatialMethodTests.__GetContainer(
self.model_part, container_type)
item_method_container = GetItemContainer(container_type)
for variable, norm_types in self.test_cases.items():
for norm_type in norm_types:
item_method_norm_container = GetNormTypeContainer(
item_method_container, norm_type)
if (norm_type == "none"
and test_method_name in self.norm_only_methods):
continue
test_method = GetMethod(item_method_norm_container,
test_method_name)
if (norm_type == "none"):
method_value = test_method(self.model_part, variable)
else:
method_value = test_method(self.model_part, variable,
norm_type, method_params)
analytical_value = analytical_method(
container, container_type, norm_type, variable)
CheckValues(self, analytical_value, method_value, 10)
@staticmethod
def __AddNodalSolutionStepVariables(model_part):
model_part.AddNodalSolutionStepVariable(Kratos.PRESSURE)
model_part.AddNodalSolutionStepVariable(Kratos.VELOCITY)
model_part.AddNodalSolutionStepVariable(Kratos.LOAD_MESHES)
model_part.AddNodalSolutionStepVariable(
Kratos.GREEN_LAGRANGE_STRAIN_TENSOR)
@staticmethod
def __GetValue(item, container_type, variable):
if (container_type.endswith("non_historical")):
return item.GetValue(variable)
else:
return item.GetSolutionStepValue(variable)
@staticmethod
def __GetNormValue(variable, value, norm_type):
if (norm_type == "none"):
return value
norm_method = KratosStats.MethodUtilities.GetNormMethod(
variable, norm_type)
return norm_method(value)
@staticmethod
def __GetContainer(model_part, container_type):
if (container_type.startswith("nodal")):
return model_part.Nodes
elif (container_type.startswith("element")):
return model_part.Elements
elif (container_type.startswith("condition")):
return model_part.Conditions
if __name__ == '__main__':
KratosUnittest.main()
|
418095
|
from __future__ import print_function
import errno
import matplotlib
matplotlib.use('Agg')
import shutil
from subprocess import Popen, PIPE, check_output
import os
import pwd
import shlex
import sys
import time
import glob
import math
print("*****************************")
print("OpenWorm Master Script v0.9.1")
print("*****************************")
print("")
print("This script attempts to run a full pass through the OpenWorm scientific libraries.")
print("This depends on several other repositories being loaded to work and presumes it is running in a preloaded Docker instance.")
print("It will report out where steps are missing.")
print("Eventually all the steps will be filled in.")
print("")
print("****************************")
print("Step 1: Rebuild c302 from the latest PyOpenWorm")
print("****************************")
print("Not yet implemented. See https://github.com/openworm/c302/issues/10")
print("****************************")
print("Step 2: Execute unit tests via the c302 simulation framework")
print("****************************")
"""
from runAndPlot import run_c302
orig_display_var = None
if 'DISPLAY' in os.environ:
orig_display_var = os.environ['DISPLAY']
del os.environ['DISPLAY'] # https://www.neuron.yale.edu/phpBB/viewtopic.php?f=6&t=1603
run_c302(DEFAULTS['reference'],
DEFAULTS['c302params'],
'',
DEFAULTS['duration'],
DEFAULTS['dt'],
'jNeuroML_NEURON',
data_reader=DEFAULTS['datareader'],
save=True,
show_plot_already=False,
target_directory=os.path.join(os.environ['C302_HOME'], 'examples'),
save_fig_to='tmp_images')
prev_dir = os.getcwd()
os.chdir(DEFAULTS['outDir'])
try:
os.mkdir('c302_out')
except OSError as e:
if e.errno != errno.EEXIST:
raise
src_files = os.listdir(os.path.join(os.environ['C302_HOME'], 'examples', 'tmp_images'))
for file_name in src_files:
full_file_name = os.path.join(os.environ['C302_HOME'], 'examples', 'tmp_images', file_name)
print("COPY %s" % full_file_name)
if (os.path.isfile(full_file_name)):
shutil.copy2(full_file_name, 'c302_out')
shutil.rmtree(os.path.join(os.environ['C302_HOME'], 'examples', 'tmp_images'))
os.chdir(prev_dir)
if orig_display_var:
os.environ['DISPLAY'] = orig_display_var
"""
print("****************************")
print("Step 3: Run c302 + Sibernetic in the same loop.")
print("****************************")
OW_OUT_DIR = os.environ['OW_OUT_DIR']
def execute_with_realtime_output(command, directory, env=None):
p = None
try:
p = Popen(shlex.split(command), stdout=PIPE, bufsize=1, cwd=directory, env=env)
with p.stdout:
for line in iter(p.stdout.readline, b''):
print(line, end="")
p.wait() # wait for the subprocess to exit
except KeyboardInterrupt as e:
print("Caught CTRL+C")
if p:
p.kill()
raise e
sys.path.append(os.environ['C302_HOME'])
try:
os.system('xhost +')
except:
print("Unexpected error: %s" % sys.exc_info()[0])
OW_OUT_DIR = os.environ['OW_OUT_DIR']
try:
if os.access(OW_OUT_DIR, os.W_OK) is not True:
os.system('sudo chown -R %s:%s %s' % (os.environ['USER'], os.environ['USER'], OW_OUT_DIR))
except:
print("Unexpected error: %s" % sys.exc_info()[0])
raise
#Default is 15 ms of simulation time.
sim_duration = 15.0
if 'DURATION' in os.environ:
sim_duration = float(os.environ['DURATION'])
DEFAULTS = {'duration': sim_duration,
'dt': 0.005,
'dtNrn': 0.05,
'logstep': 100,
'reference': 'FW',
'c302params': 'C2',
'verbose': False,
'device': 'GPU',
'configuration': 'worm_crawl_half_resolution',
'noc302': False,
'datareader': 'UpdatedSpreadsheetDataReader2',
'outDir': OW_OUT_DIR}
my_env = os.environ.copy()
my_env["DISPLAY"] = ":44"
os.system('Xvfb :44 -listen tcp -ac -screen 0 1920x1080x24+32 &') # TODO: terminate xvfb after recording
try:
command = """python sibernetic_c302.py
-duration %s
-dt %s
-dtNrn %s
-logstep %s
-device=%s
-configuration %s
-reference %s
-c302params %s
-datareader %s
-outDir %s""" % \
(DEFAULTS['duration'],
DEFAULTS['dt'],
DEFAULTS['dtNrn'],
DEFAULTS['logstep'],
DEFAULTS['device'],
DEFAULTS['configuration'],
DEFAULTS['reference'],
DEFAULTS['c302params'],
DEFAULTS['datareader'],
'simulations')
#DEFAULTS['outDir'])
execute_with_realtime_output(command, os.environ['SIBERNETIC_HOME'], env=my_env)
except KeyboardInterrupt:
    pass
sibernetic_sim_dir = '%s/simulations' % os.environ['SIBERNETIC_HOME']
all_subdirs = []
for dirpath, dirnames, filenames in os.walk(sibernetic_sim_dir):
for directory in dirnames:
if directory.startswith('%s_%s' % (DEFAULTS['c302params'], DEFAULTS['reference'])):
all_subdirs.append(os.path.join(dirpath, directory))
latest_subdir = max(all_subdirs, key=os.path.getmtime)
try:
os.mkdir('%s/output' % OW_OUT_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
new_sim_out = '%s/output/%s' % (OW_OUT_DIR, os.path.split(latest_subdir)[-1])
try:
os.mkdir(new_sim_out)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Copy PNGs, created during the Sibernetic simulation, in a separate child-directory to find them more easily
figures = glob.glob('%s/*.png' % latest_subdir)
for figure in figures:
print("Moving %s to %s"%(figure, new_sim_out))
shutil.move(figure, new_sim_out)
# Copy reports etc.
reports = glob.glob('%s/report*' % latest_subdir)
for report in reports:
print("Moving %s to %s"%(report, new_sim_out))
shutil.move(report, new_sim_out)
# Copy WCON file(s)
wcons = glob.glob('%s/*.wcon' % latest_subdir)
for wcon in wcons:
print("Moving %s to %s"%(wcon, new_sim_out))
shutil.move(wcon, new_sim_out)
# Rerun and record simulation
os.environ['DISPLAY'] = ':44'  # os.system('export ...') would only affect a throwaway child shell
sibernetic_movie_name = '%s.mp4' % os.path.split(latest_subdir)[-1]
os.system('tmux new-session -d -s SiberneticRecording "DISPLAY=:44 ffmpeg -r 30 -f x11grab -draw_mouse 0 -s 1920x1080 -i :44 -filter:v crop=1200:800:100:100 -cpu-used 0 -b:v 384k -qmin 10 -qmax 42 -maxrate 384k -bufsize 1000k -an %s/%s"' % (new_sim_out, sibernetic_movie_name))  # crop filter left unquoted: nested double quotes would end the tmux argument early
command = './Release/Sibernetic -f %s -l_from lpath=%s' % (DEFAULTS['configuration'], latest_subdir)
execute_with_realtime_output(command, os.environ['SIBERNETIC_HOME'], env=my_env)
os.system('tmux send-keys -t SiberneticRecording q')
os.system('tmux send-keys -t SiberneticRecording "exit" C-m')
time.sleep(3)
# Remove black frames at the beginning of the recorded video
command = "ffmpeg -i %s/%s -vf blackdetect=d=0:pic_th=0.70:pix_th=0.10 -an -f null - 2>&1 | grep blackdetect" % (new_sim_out, sibernetic_movie_name)
outstr = str(check_output(command, shell=True).decode('utf-8'))
outstr = outstr.split('\n')
black_start = 0.0
black_dur = None
out = outstr[0]
black_start_pos = out.find('black_start:')
black_end_pos = out.find('black_end:')
black_dur_pos = out.find('black_duration:')
if black_start_pos != -1:
black_start = float(out[black_start_pos + len('black_start:') : black_end_pos])
black_dur = float(out[black_dur_pos + len('black_duration:'):])
if black_start == 0.0 and black_dur:
    black_dur = int(math.ceil(black_dur))
    # zero-pad the seconds field so ffmpeg always receives a valid HH:MM:SS timestamp
    command = 'ffmpeg -ss 00:00:%02d -i %s/%s -c copy -avoid_negative_ts 1 %s/cut_%s' % (black_dur, new_sim_out, sibernetic_movie_name, new_sim_out, sibernetic_movie_name)
    os.system(command)
# SPEED-UP
try:
os.mkdir('tmp')
except OSError as e:
if e.errno != errno.EEXIST:
raise
# NOTE: ffmpeg honours only the last -vf per stream, so the select filter below is
# effectively ignored and only the 1 fps sampling applies (kept as in the original).
os.system('ffmpeg -ss 1 -i %s/cut_%s -vf "select=gt(scene\,0.1)" -vsync vfr -vf fps=fps=1/1 %s' % (new_sim_out, sibernetic_movie_name, 'tmp/out%06d.jpg'))
os.system('ffmpeg -r 100 -i %s -r 100 -vb 60M %s/speeded_%s' % ('tmp/out%06d.jpg', new_sim_out, sibernetic_movie_name))
os.system('sudo rm -r tmp/*')
print("****************************")
print("Step 4: Run movement analysis")
print("****************************")
print("Not yet implemented.")
print("Note however the following WCON files have been generated into %s during the simulation: %s"%(new_sim_out, [w.split('/')[-1] for w in wcons]))
print("****************************")
print("Step 5: Report on movement analysis fit to real worm videos")
print("****************************")
print("Not yet implemented.")
|
418099
|
from marshmallow import Schema, fields
from werkzeug.exceptions import Forbidden
from opendc.models.topology import ObjectSchema
from opendc.models.model import Model
class PrefabSchema(Schema):
"""
Schema for a Prefab.
"""
_id = fields.String(dump_only=True)
authorId = fields.String(dump_only=True)
name = fields.String(required=True)
datetimeCreated = fields.DateTime()
datetimeLastEdited = fields.DateTime()
rack = fields.Nested(ObjectSchema)
class Prefab(Model):
"""Model representing a Prefab."""
collection_name = 'prefabs'
def check_user_access(self, user_id):
"""Raises an error if the user with given [user_id] has insufficient access to view this prefab.
:param user_id: The user ID of the user.
"""
if self.obj['authorId'] != user_id and self.obj['visibility'] == "private":
raise Forbidden("Forbidden from retrieving prefab.")
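# Usage sketch (hypothetical loader and IDs; note the schema above does not declare the
# 'visibility' field that check_user_access reads, so it is assumed to exist on the
# stored document):
#   prefab = Prefab.from_id('5f2a...')       # however the surrounding app loads models
#   prefab.check_user_access(user_id='abc')  # raises Forbidden for private prefabs of others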
|
418115
|
import torch
import torch.nn as nn
import helper_functions.config as cfg
from helper_functions.losses import custom_loss
from helper_functions.Blocks import upScale, normalBlock, Residual
class G1(nn.Module):
def __init__(self, ngf, zDim = 100):
super(G1, self).__init__()
self.gf_dim = ngf
self.in_dim = zDim + cfg.embeddingsDim
self.define_module()
def define_module(self):
in_dim = self.in_dim
ngf = self.gf_dim
self.fc = nn.Sequential(
nn.Linear(in_dim, ngf * 4 * 4 * 2, bias=False),
nn.BatchNorm1d(ngf * 4 * 4 * 2),
custom_loss())
self.upsample1 = upScale(ngf, ngf // 2)
self.upsample2 = upScale(ngf // 2, ngf // 4)
self.upsample3 = upScale(ngf // 4, ngf // 8)
self.upsample4 = upScale(ngf // 8, ngf // 16)
def forward(self, z_code, c_code):
in_code = torch.cat((c_code, z_code), 1)
out_code = self.fc(in_code)
out_code = out_code.view(-1, self.gf_dim, 4, 4)
out_code = self.upsample1(out_code)
out_code = self.upsample2(out_code)
out_code = self.upsample3(out_code)
out_code = self.upsample4(out_code)
return out_code
class G2(nn.Module):
def __init__(self, ngf, num_residual = 2):
super(G2, self).__init__()
self.gf_dim = ngf
self.ef_dim = cfg.embeddingsDim
self.num_residual = num_residual
self.define_module()
def _make_layer(self, block, channel_num):
layers = []
for i in range(self.num_residual):
layers.append(block(channel_num))
return nn.Sequential(*layers)
def define_module(self):
self.jointConv = normalBlock(self.gf_dim + self.ef_dim, self.gf_dim)
self.residual = self._make_layer(Residual, self.gf_dim)
self.upsample = upScale(self.gf_dim, self.gf_dim // 2)
def forward(self, h_code, c_code):
s_size = h_code.size(2)
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, s_size, s_size)
h_c_code = torch.cat((c_code, h_code), 1)
out_code = self.jointConv(h_c_code)
out_code = self.residual(out_code)
out_code = self.upsample(out_code)
return out_code
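# Tensor-shape sketch for G1.forward, assuming custom_loss acts like a GLU (halving the
# feature dimension; otherwise the view after G1.fc would not match ngf*16 elements)
# and that upScale doubles the spatial size while halving the channel count:
#   cat(z, c)            -> (N, zDim + embeddingsDim)
#   fc (+GLU)            -> (N, ngf*16) -> view -> (N, ngf, 4, 4)
#   upsample1..upsample4 -> (N, ngf//2, 8, 8) ... (N, ngf//16, 64, 64)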
|
418141
|
import unittest
import os
import sys
if os.environ.get('USELIB') != '1':
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyleri import (
KeywordError,
create_grammar,
Sequence,
Choice,
Keyword,
Token,
List
) # nopep8
class TestList(unittest.TestCase):
def test_list(self):
k_hi = Keyword('hi')
list_ = List(k_hi)
grammar = create_grammar(list_)
self.assertEqual(list_.min, 0)
self.assertEqual(list_.max, None)
self.assertFalse(list_.opt_closing)
self.assertTrue(grammar.parse('hi, hi, hi').is_valid)
self.assertTrue(grammar.parse('hi').is_valid)
self.assertTrue(grammar.parse('').is_valid)
self.assertFalse(grammar.parse('hi,').is_valid)
self.assertEqual(
str(grammar.parse('hi.')),
'error at position 2, expecting: , or end_of_statement'
)
def test_list_all_options(self):
k_hi = Keyword('hi')
list_ = List(k_hi, delimiter='-', mi=1, ma=3, opt=True)
grammar = create_grammar(list_)
self.assertEqual(list_.min, 1)
self.assertEqual(list_.max, 3)
self.assertTrue(list_.opt_closing)
self.assertTrue(grammar.parse('hi - hi - hi').is_valid)
self.assertTrue(grammar.parse('hi-hi-hi-').is_valid)
self.assertTrue(grammar.parse('hi-').is_valid)
self.assertTrue(grammar.parse('hi').is_valid)
self.assertFalse(grammar.parse('').is_valid)
self.assertFalse(grammar.parse('-').is_valid)
self.assertFalse(grammar.parse('hi-hi-hi-hi').is_valid)
self.assertEqual(
str(grammar.parse('hi-hi-hi-hi-hi')),
'error at position 9, expecting: end_of_statement'
)
self.assertEqual(
str(grammar.parse('hi.')),
'error at position 2, expecting: - or end_of_statement'
)
self.assertEqual(
str(grammar.parse('')),
'error at position 0, expecting: hi'
)
if __name__ == '__main__':
unittest.main()
|
418147
|
from django.db import connections
from pdns.models import Domain
query = "INSERT INTO domainmetadata (domain_id, kind, content) VALUES (%s, 'ALLOW-AXFR-FROM', 'AUTO-NS')"
with connections["pdns"].cursor() as cursor:
cursor.execute('SELECT DISTINCT domain_id FROM domainmetadata')
ok_domains = [row[0] for row in cursor.fetchall()]
affected_domains = Domain.objects.exclude(id__in=ok_domains).values_list('id', flat=True)
for domain_id in affected_domains:
cursor.execute(query, [domain_id])
print('Inserted domainmetadata for domain ID: %s' % domain_id)
|
418149
|
import sc2
class ExampleBot(sc2.BotAI):
async def on_step(self, iteration):
# On first step, send all workers to attack enemy start location
if iteration == 0:
print("Game started")
for worker in self.workers:
await self.do(worker.attack(self.enemy_start_locations[0]))
def on_end(self, result):
print("OnGameEnd() was called.")
|
418174
|
import mmcv
from mmcv.utils import Registry
def _build_func(name: str, option: mmcv.ConfigDict, registry: Registry):
return registry.get(name)(option)
MODELS = Registry('models', build_func=_build_func)
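# Usage sketch (class name and option are hypothetical). Registry.build forwards its
# arguments to the custom build_func above together with registry=self, so a model is
# looked up by name and constructed with a single ConfigDict:
#
#   @MODELS.register_module()
#   class ToyModel:
#       def __init__(self, option):
#           self.hidden = option.hidden
#
#   model = MODELS.build('ToyModel', mmcv.ConfigDict(hidden=64))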
|
418176
|
from fontTools.ttLib.tables._v_m_t_x import table__v_m_t_x
import _h_m_t_x_test
import unittest
class VmtxTableTest(_h_m_t_x_test.HmtxTableTest):
@classmethod
def setUpClass(cls):
cls.tableClass = table__v_m_t_x
cls.tag = "vmtx"
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
418211
|
from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import (
WebDriverWait
)
class EsperarElementoNotClick:
def __init__(self, locator):
self.locator = locator
def __call__(self, webdriver):
elementos = webdriver.find_elements(*self.locator)
if elementos:
return 'unclick' in elementos[0].get_attribute('class')
return False
def esperar_elemento(by, elemento, webdriver):
if webdriver.find_elements(by, elemento):
return True
return False
locator = (By.CSS_SELECTOR, 'button')
esperar_botao = EsperarElementoNotClick(locator)
url = 'https://selenium.dunossauro.live/aula_09.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until_not(esperar_botao, 'Button never became clickable')  # waits until the 'unclick' class disappears
driver.find_element_by_css_selector('button').click()
wdw.until(
    partial(esperar_elemento, 'id', 'finished'),
    'The success message did not appear'
)
sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'  # must match the page's own (Portuguese) text
|
418230
|
import numpy as np
from utility import *
# Not used right now, but maybe this is useful?
class SignalData:
I = None
II = None
III = None
aVR = None
aVL = None
aVF = None
V1 = None
V2 = None
V4 = None
V3 = None
V5 = None
V6 = None
def leadValues(text: str, conversion) -> list:  # returns the parsed values, or None on failure
if '\t' in text:
words = text.split('\t')
elif ',' in text:
words = text.split(',')
else:
words = text.split(' ')
areFloats = list(map(isFloat, words))
if not allTrue(areFloats):
print("Not all floats!:", words)
return None
values = list(map(conversion, words))
return values
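# e.g. leadValues("0.12\t0.30\t-0.05", float) -> [0.12, 0.3, -0.05]
# (assumes utility.isFloat accepts such tokens, as its name suggests)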
def load(fileName: str) -> np.ndarray:  # returns a (leads, samples) array, not a SignalData
values = []
with open(fileName, 'r') as file:
for line in file.readlines():
text = line.strip()
valuesAtTime = leadValues(text, float) # ⚠️ Should this ever be float?
if valuesAtTime is not None:
values.append(valuesAtTime)
leads = np.swapaxes(np.array(values),0,1)
print("Loaded leads:", leads.shape)
return leads
|
418292
|
from threading import Lock, currentThread
class _SingletonPerThreadMetaClass(type):
""" A metaclass that creates a SingletonPerThread base class when called. """
_instances = {}
_lock = Lock()
def __call__(cls, *args, **kwargs):
with cls._lock:
obj_name = "%s__%s" % (cls.__name__, currentThread().getName()) # Object Name = className__threadName
if obj_name not in cls._instances:
cls._instances[obj_name] = super(_SingletonPerThreadMetaClass, cls).__call__(*args, **kwargs)
return cls._instances[obj_name]
class SingletonPerThread(_SingletonPerThreadMetaClass('SingleObjectPerThreadMetaClass', (object,), {})):
# This base class calls the metaclass above to create the singleton per thread object. This class provides an
# abstraction over how to invoke the Metaclass so just inheriting this class makes the
# child class a singleton per thread (As opposed to invoking the Metaclass separately for each derived classes)
# More info here - https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
#
# Usage:
# Inheriting this class will create a Singleton per thread for that class
# To delete the cached object of a class, call DerivedClassName.clear() to delete the object per thread
# Note: If the thread dies and is recreated with the same thread name, the existing object would be reused
# and no new object for the derived class would be created unless DerivedClassName.clear() is called explicitly to
# delete the cache
pass
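# Usage sketch (example class is illustrative, not part of this module):
#   class PerThreadCache(SingletonPerThread):
#       def __init__(self):
#           self.items = {}
#   # within any single thread:
#   a = PerThreadCache(); b = PerThreadCache(); assert a is b
# Note: the comment block above mentions DerivedClassName.clear(), which is not defined
# here; it is presumably provided elsewhere or left for the caller to implement.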
|
418392
|
import FWCore.ParameterSet.Config as cms
idealMagneticFieldRecordSource = cms.ESSource("EmptyESSource",
recordName = cms.string('IdealMagneticFieldRecord'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
ParametrizedMagneticFieldProducer = cms.ESProducer("ParametrizedMagneticFieldProducer",
version = cms.string('OAE_1103l_071212'),
parameters = cms.PSet(
BValue = cms.string('3_8T')
),
label = cms.untracked.string('')
)
|
418403
|
from dexp.datasets.base_dataset import BaseDataset
from dexp.datasets.clearcontrol_dataset import CCDataset
from dexp.datasets.joined_dataset import JoinedDataset
from dexp.datasets.zarr_dataset import ZDataset
|
418425
|
import numpy as np
import vggnet, resnet, wide_resnet, inception_light
def one_hot(dense, ndim=10):
N = dense.shape[0]
ret = np.zeros([N, ndim])
ret[np.arange(N), dense] = 1
return ret
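# e.g. one_hot(np.array([0, 2]), ndim=3) -> [[1., 0., 0.], [0., 0., 1.]]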
def get_model(name, learning_rate=0.001, SEED=777, resnet_layer_n=3):
# right position..?
if name == "vggnet":
model = vggnet.VGGNet(name="vggnet", lr=learning_rate, SEED=SEED)
elif name == "vggnet2":
model = vggnet.VGGNet(name="vggnet2", lr=learning_rate, SEED=SEED)
elif name == "resnet":
model = resnet.ResNet(name="resnet", lr=learning_rate, layer_n=resnet_layer_n, SEED=SEED)
elif name == "wide_resnet":
model = wide_resnet.WideResNet(name="wide_resnet", lr=learning_rate, layer_n=resnet_layer_n, SEED=SEED)
elif name == "inception":
model = inception_light.InceptionLight(name="inception_light", lr=learning_rate, SEED=SEED)
else:
assert False, "wrong model name"
return model
|
418428
|
from FordStuff import *
import time
# initialize
mydll = CDLL('Debug\\ecomcat_api')
# HS CAN
handle = mydll.open_device(1,0)
# get current
read_by_wid = mydll.read_message_by_wid_with_timeout
read_by_wid.restype = POINTER(SFFMessage)
z = mydll.read_message_by_wid_with_timeout(handle, 0x80, 1000)
current = (z.contents.data[0]<<8) + z.contents.data[1]
print "Current wheel at %x" % current
change = 0
y = pointer(SFFMessage())
while(True):
current += change
change += 12
mydll.DbgLineToSFF("IDH: 00, IDL: 81, Len: 08, Data: %02X %02X 12 00 00 00 00 00" % ((current & 0xff00) >> 8, current & 0xff), y)
mydll.write_message_cont(handle, y)
time.sleep(.0019) # should be 312 ticks
mydll.close_device(handle)  # NOTE: unreachable while the loop above runs forever
|
418458
|
import os
import subprocess
from datetime import datetime
from jinja2 import Environment
from moban.externals.file_system import exists, is_dir, read_unicode
import fs
import yehua.utils as utils
from yehua.utils import get_user_inputs
from jinja2_fsloader import FSLoader
class Project:
def __init__(self, yehua_file):
if not exists(yehua_file):
raise Exception("%s does not exist" % yehua_file)
if is_dir(yehua_file):
raise Exception("A yehua file is expected. Not a directory")
self.project_file = yehua_file
self.project_name = None
self.answers = None
self.name = None
self.directives = None
self._ask_questions()
self._append_magic_variables()
self._template_yehua_file()
def create_all_directories(self):
folder_tree = {
self.answers["project_name"]: self.directives.get("layout", None)
}
utils.make_directories(None, folder_tree)
def templating(self):
for template in self.directives["templates"]:
for output, template_file in template.items():
template = self.jj2_environment.get_template(template_file)
rendered_content = template.render(**self.answers)
target = os.path.join(self.project_name, output)
utils.save_file(target, rendered_content)
def copy_static_files(self):
if "static" not in self.directives:
return
for static in self.directives["static"]:
for output, source in static.items():
source = os.path.abspath(os.path.join(self.static_dir, source))
dest = os.path.join(self.project_name, output)
utils.copy_file(source, dest)
def inflate_all_by_moban(self):
current = os.getcwd()
project_name = self.answers["project_name"]
os.chdir(project_name)
cmd = "moban"
_run_command(cmd)
os.chdir(current)
utils.color_print(
f"\u2713 Files are generated under [info]{project_name}[/info]"
)
def post_moban(self):
if "post-moban" not in self.directives:
return
for key, value in self.directives["post-moban"].items():
if key == "git-repo-files":
self.initialize_git_and_add_all(value)
def initialize_git_and_add_all(self, project_files):
project_name = self.answers["project_name"]
current = os.getcwd()
os.chdir(project_name)
cmd = "git init"
_run_command(cmd)
for file_name in project_files:
_run_command(f"git add {file_name}")
os.chdir(current)
utils.color_print(
f"\u2713 Git repo initialized under [info]{project_name}[/info]"
+ " and is ready to commit"
)
def end(self):
utils.color_print(
"All done!! project [info]%s[/info] is created."
% self.project_name
)
utils.color_print(
"In the future, "
+ "run [info]moban[/info] to synchronize with the project template"
)
def _ask_questions(self):
content = read_unicode(self.project_file)
first_stage = utils.load_yaml(content)
utils.color_print(first_stage["introduction"])
base_path = fs.path.dirname(self.project_file)
with fs.open_fs(base_path) as the_fs:
self.template_dir = os.path.join(
the_fs._root_path,
first_stage["configuration"]["template_path"],
)
self.static_dir = os.path.join(
the_fs._root_path, first_stage["configuration"]["static_path"]
)
self.answers = get_user_inputs(first_stage["questions"])
def _append_magic_variables(self):
self.project_name = self.answers["project_name"]
self.answers["now"] = datetime.utcnow()
self.jj2_environment = self._create_jj2_environment(self.template_dir)
def _template_yehua_file(self):
base_path = fs.path.dirname(self.project_file)
with fs.open_fs(base_path) as the_fs:
base_path = the_fs._root_path
tmp_env = self._create_jj2_environment(base_path)
template = tmp_env.get_template(
os.path.basename(self.project_file)
)
        rendered_content = template.render(**self.answers)
        self.directives = utils.load_yaml(rendered_content)
def _create_jj2_environment(self, path):
template_loader = FSLoader(path)
environment = Environment(
loader=template_loader,
keep_trailing_newline=True,
trim_blocks=True,
lstrip_blocks=True,
)
return environment
def _run_command(command):
subprocess.check_call(
command.split(" "),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
|
418461
|
from django.http import JsonResponse
from myapp import common
from myapp.common import ch_login
from myapp.models import MicroBlog, Collect
from myapp.const import *
# Collect an item (0 = blog, 1 = news [itemUrl required], 2 = music)
@ch_login
def collect(request):
r_userid = request.META.get("HTTP_USERID")
type = ""
itemId = ""
itemTitle = ""
itemUrl = ""
coverImg = ""
if request.method == 'POST':
type = request.POST.get("colType", '0')
itemId = request.POST.get("itemId", '')
itemTitle = request.POST.get("itemTitle", '')
itemUrl = request.POST.get("itemUrl", '')
coverImg = request.POST.get("coverImg", '')
    if request.method == 'GET':
        type = request.GET.get("colType", '0')
        itemId = request.GET.get("itemId", '')
        itemTitle = request.GET.get("itemTitle", '')
        itemUrl = request.GET.get("itemUrl", '')
        coverImg = request.GET.get("coverImg", '')
    if len(itemId) < 2:
        return JsonResponse(common.build_result(CLIENT_ERROR, "invalid id"), safe=False)
    # colType arrives as a string, so compare against "1"; news items need a usable URL
    if type == "1":
        if len(itemUrl) < 8 or (not itemUrl.startswith("http")):
            return JsonResponse(common.build_result(CLIENT_ERROR, "missing parameter"), safe=False)
    col_qr = Collect.objects.filter(authorId=r_userid).filter(itemId=itemId)
    if len(col_qr) > 0:
        return JsonResponse(common.build_result(CLIENT_ERROR, "already collected"), safe=False)
    # Blog collection
    if type == "0":
        b_qr = MicroBlog.objects.filter(blogId=itemId)
        if len(b_qr) == 0:
            return JsonResponse(common.build_result(CLIENT_ERROR, "blog does not exist"), safe=False)
        Collect(itemId=itemId, authorId=r_userid, itemTitle=b_qr[0].title
                , linkUrl="", coverImg=b_qr[0].icon, colType=Collect.BLOG, isLargeIcon=b_qr[0].isLargeIcon).save()
        return JsonResponse(common.build_result(SUCCESS, "success"), safe=False)
    # Other collections (news / music)
    else:
        Collect(itemId=itemId, authorId=r_userid, itemTitle=itemTitle
                , linkUrl=itemUrl, coverImg=coverImg,
                colType=Collect.NEWS if (type == "1") else Collect.MUSIC).save()
        return JsonResponse(common.build_result(SUCCESS, "success"), safe=False)
    return JsonResponse(common.build_result(CLIENT_ERROR, "no more data"), safe=False)
# Remove an item from the collection
@ch_login
def un_collect(request):
if request.method == 'POST':
r_userid = request.META.get("HTTP_USERID")
r_itemId = request.POST.get("itemId", '')
qr2 = Collect.objects.filter(itemId=r_itemId).filter(authorId=r_userid)
if not qr2.exists():
return JsonResponse(common.build_result(FATAL_WORK, "没有收藏过"), safe=False)
else:
qr2[0].delete()
return JsonResponse(common.build_result(SUCCESS, "success"), safe=False)
return JsonResponse(common.build_result(CLIENT_ERROR, ERROR_REQ_METHOD), safe=False)
# Check whether an item has been collected
@ch_login
def is_collected(request):
if request.method == 'POST':
r_userid = request.META.get("HTTP_USERID")
r_itemId = request.POST.get("itemId", '')
qr2 = Collect.objects.filter(itemId=r_itemId).filter(authorId=r_userid)
if not qr2.exists():
            return JsonResponse(common.build_result(FATAL_WORK, "not collected"), safe=False)
        else:
            return JsonResponse(common.build_result(SUCCESS, "already collected"), safe=False)
return JsonResponse(common.build_result(CLIENT_ERROR, ERROR_REQ_METHOD), safe=False)
# Get the user's collection list
@ch_login
def get_collections(request):
if request.method == 'POST':
r_userid = request.META.get("HTTP_USERID")
r_type = request.POST.get("colType", '0')
qr2 = Collect.objects.filter(authorId=r_userid).filter(colType=r_type)
return JsonResponse(common.build_model_list(qr2), safe=False)
if request.method == 'GET':
r_userid = request.META.get("HTTP_USERID")
r_type = request.GET.get("colType", '0')
qr2 = Collect.objects.filter(authorId=r_userid).filter(colType=r_type)
return JsonResponse(common.build_model_list(qr2), safe=False)
|
418485
|
import graphAttack as ga
import numpy as np
import scipy.optimize
"""Control script"""
def run():
"""Run the model"""
N, T, D, H1, H2 = 2, 3, 4, 5, 4
trainData = np.linspace(- 0.1, 0.3, num=N * T * D).reshape(N, T, D)
trainLabels = np.random.random((N, T, D))
mainGraph = ga.Graph(False)
xop = mainGraph.addOperation(ga.Variable(trainData), feederOperation=True)
hactivations0, cStates0 = ga.addInitialLSTMLayer(mainGraph,
inputOperation=xop,
nHidden=H1)
hactivations1, cStates1 = ga.appendLSTMLayer(mainGraph,
previousActivations=hactivations0,
nHidden=H2)
# hactivations0 = ga.addInitialRNNLayer(mainGraph,
# inputOperation=xop,
# activation=ga.TanhActivation,
# nHidden=H1)
# hactivations1 = ga.appendRNNLayer(mainGraph,
# previousActivations=hactivations0,
# activation=ga.TanhActivation,
# nHidden=H2)
finalCost, costOperationsList = ga.addRNNCost(mainGraph,
hactivations1,
costActivation=ga.SoftmaxActivation,
costOperation=ga.CrossEntropyCostSoftmax,
nHidden=H2,
labelsShape=xop.shape,
labels=None)
def f(p, costOperationsList=costOperationsList, mainGraph=mainGraph):
data = trainData
labels = trainLabels
mainGraph.feederOperation.assignData(data)
mainGraph.resetAll()
for index, cop in enumerate(costOperationsList):
cop.assignLabels(labels[:, index, :])
mainGraph.attachParameters(p)
c = mainGraph.feedForward()
return c
hactivations = [hactivations0, hactivations1]
cStates = [cStates0, cStates1]
def fprime(p, data, labels, costOperationsList=costOperationsList, mainGraph=mainGraph):
mainGraph.feederOperation.assignData(data)
mainGraph.resetAll()
for index, cop in enumerate(costOperationsList):
cop.assignLabels(labels[:, index, :])
mainGraph.attachParameters(p)
c = mainGraph.feedForward()
mainGraph.feedBackward()
g = mainGraph.unrollGradients()
nLayers = len(hactivations)
for i in range(nLayers):
hactivations[i][0].assignData(hactivations[i][-1].getValue())
cStates[i][0].assignData(cStates[i][-1].getValue())
return c, g
params = mainGraph.unrollGradientParameters()
numGrad = scipy.optimize.approx_fprime(params, f, 1e-8)
analCostGraph, analGradientGraph = fprime(params, trainData, trainLabels)
return numGrad, analGradientGraph, analCostGraph, mainGraph
if (__name__ == "__main__"):
nGrad, aGrad, aCost, mainGraph = run()
params = mainGraph.unrollGradientParameters()
print(mainGraph)
print("\n%-16.16s %-16.16s" % ("Grad difference", "Total Gradient"))
print("%-16.8e %-16.8e" % (np.sum(np.abs(aGrad - nGrad)), np.sum(np.abs(aGrad))))
|
418486
|
import json
import os
from setuptools.command.install import install
class InstallEntry(install):
def run(self):
default_site = 'codeforces'
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'coolkit')
from main import supported_sites
for site in supported_sites:
if not os.path.isdir(os.path.join(cache_dir, site)):
os.makedirs(os.path.join(cache_dir, site))
data = {'default_site': default_site.strip(), 'default_contest': None, 'cachedir': cache_dir}
with open(os.path.join(cache_dir, 'constants.json'), 'w') as f:
f.write(json.dumps(data, indent=2))
install.run(self)
|
418496
|
from scan_worker import scan
import time
import json
from storage.storage_manager import StorageManager
from push import RestApiPusher
from config import *
from storage.tables.vuln_task import VulnTask
storageManager = StorageManager()
storageManager.init_db()
def get_all_targets():
targets = ["http://testhtml5.vulnweb.com",
"http://testphp.vulnweb.com",
"http://testasp.vulnweb.com/",
"http://testaspnet.vulnweb.com/"
]
return targets
RestApiPusher.PUSH_URL = VULN_PUSH_API_URL
while True:
targets = get_all_targets()
results = []
for target in targets:
result = scan.delay(target)
results.append(result)
while True:
for i in range(len(results) - 1, -1, -1):
if results[i].ready():
r = results[i].get()
                r = json.loads(r)
print(r)
try:
vulnTask = VulnTask(r['target'],r['target_id'], r)
storageManager.Session.add(vulnTask)
storageManager.save()
if ENABLE_VULN_PUSH:
res = RestApiPusher.push(r)
results.pop(i)
except Exception as e:
print("[!]处理任务结果异常:{0}".format(e))
if len(results) < 1:
break
time.sleep(5)
|
418512
|
from django import forms
from django.db import models
from django.test import TestCase
from django.test.utils import override_settings
from sanitizer.templatetags.sanitizer import (sanitize, sanitize_allow,
escape_html, strip_filter, strip_html)
from .forms import SanitizedCharField as SanitizedFormField
from .models import SanitizedCharField, SanitizedTextField
ALLOWED_TAGS = ['a']
ALLOWED_ATTRIBUTES = ['href', 'style']
ALLOWED_STYLES = ['width']
class TestingModel(models.Model):
test_field = SanitizedCharField(max_length=255, allowed_tags=ALLOWED_TAGS,
allowed_attributes=ALLOWED_ATTRIBUTES, allowed_styles=ALLOWED_STYLES)
class TestingTextModel(models.Model):
test_field = SanitizedTextField(allowed_tags=ALLOWED_TAGS,
allowed_attributes=ALLOWED_ATTRIBUTES, allowed_styles=ALLOWED_STYLES)
class TestForm(forms.Form):
test_field = SanitizedFormField(allowed_tags=['a'],
allowed_attributes=['href', 'style'], allowed_styles=['width'])
class SanitizerTest(TestCase):
@override_settings(SANITIZER_ALLOWED_TAGS=['a'])
def test_sanitize(self):
""" Test sanitize function in templatetags """
        self.assertEqual(sanitize('test<script></script>'),
                         'test&lt;script&gt;&lt;/script&gt;')
def test_strip_filter(self):
""" Test strip_html filter """
self.assertEqual(strip_filter('test<script></script>'), 'test')
def test_sanitize_allow(self):
""" Test sanitize_allow function in templatetags """
self.assertEqual(sanitize_allow('test<script></script><br>', 'br'), 'test<br>')
self.assertEqual(sanitize_allow('test<script></script><br/>', 'br'), 'test<br>')
self.assertEqual(sanitize_allow('<a href="">test</a>', 'a'), '<a>test</a>')
self.assertEqual(sanitize_allow('<a href="">test</a>', 'a; href'), '<a href="">test</a>')
def test_SanitizedCharField(self):
TestingModel.objects.create(test_field='<a href="" style="width: 200px; height: 400px">foo</a><em>bar</em>')
test = TestingModel.objects.latest('id')
self.assertEqual(test.test_field, '<a href="" style="width: 200px;">foo</a><em>bar</em>')
def test_SanitizedTextField(self):
TestingTextModel.objects.create(test_field='<a href="" style="width: 200px; height: 400px">foo</a><em>bar</em>')
test = TestingTextModel.objects.latest('id')
self.assertEqual(test.test_field, '<a href="" style="width: 200px;">foo</a><em>bar</em>')
def test_SanitizedFormField(self):
html = '<a href="" style="width: 200px; height: 400px">foo</a><em class=""></em>'
form = TestForm({ 'test_field': html })
form.is_valid()
self.assertEqual(form.cleaned_data['test_field'],
'<a href="" style="width: 200px;">foo</a><em class=""></em>')
def test_escape_html(self):
html = '<a href="" class="" style="width: 200px; height: 400px">foo</a><em></em>'
self.assertEqual(escape_html(html, allowed_tags='a',
allowed_attributes='href,style', allowed_styles='width'),
'<a href="" style="width: 200px;">foo</a><em></em>')
self.assertEqual(escape_html(html, allowed_tags=['a'],
allowed_attributes=['href', 'style'], allowed_styles=['width']),
'<a href="" style="width: 200px;">foo</a><em></em>')
def test_strip_html(self):
html = '<a href="" class="" style="width: 200px; height: 400px">foo</a><em></em>'
self.assertEqual(strip_html(html, allowed_tags='a',
allowed_attributes='href,style', allowed_styles='width'),
'<a href="" style="width: 200px;">foo</a>')
self.assertEqual(strip_html(html, allowed_tags=['a'],
allowed_attributes=['href', 'style'], allowed_styles=['width']),
'<a href="" style="width: 200px;">foo</a>')
|
418529
|
import asyncio
import logging
import json
import re
import datetime
from pyenvisalink import EnvisalinkClient
from pyenvisalink.dsc_envisalinkdefs import *
_LOGGER = logging.getLogger(__name__)
from asyncio import ensure_future
class DSCClient(EnvisalinkClient):
"""Represents a dsc alarm client."""
def __init__(self, panel, loop):
        self._refreshZoneBypassStatus = False  # same flag the bypass handlers below read and write
self._zoneBypassRefreshTask = None
super().__init__(panel, loop)
def to_chars(self, string):
chars = []
for char in string:
chars.append(ord(char))
return chars
def get_checksum(self, code, data):
"""part of each command includes a checksum. Calculate."""
return ("%02X" % sum(self.to_chars(code)+self.to_chars(data)))[-2:]
def send_command(self, code, data):
"""Send a command in the proper honeywell format."""
to_send = code + data + self.get_checksum(code, data)
self.send_data(to_send)
def dump_zone_timers(self):
"""Send a command to dump out the zone timers."""
self.send_command(evl_Commands['DumpZoneTimers'], '')
def keypresses_to_partition(self, partitionNumber, keypresses):
"""Send keypresses (max of 6) to a particular partition."""
self.send_command(evl_Commands['PartitionKeypress'], str.format("{0}{1}", partitionNumber, keypresses[:6]))
async def keep_alive(self):
"""Send a keepalive command to reset it's watchdog timer."""
while not self._shutdown:
if self._loggedin:
self.send_command(evl_Commands['KeepAlive'], '')
await asyncio.sleep(self._alarmPanel.keepalive_interval, loop=self._eventLoop)
async def periodic_zone_timer_dump(self):
"""Used to periodically get the zone timers to make sure our zones are updated."""
while not self._shutdown:
if self._loggedin:
self.dump_zone_timers()
await asyncio.sleep(self._alarmPanel.zone_timer_interval, loop=self._eventLoop)
def arm_stay_partition(self, code, partitionNumber):
"""Public method to arm/stay a partition."""
self._cachedCode = code
self.send_command(evl_Commands['ArmStay'], str(partitionNumber))
def arm_away_partition(self, code, partitionNumber):
"""Public method to arm/away a partition."""
self._cachedCode = code
self.send_command(evl_Commands['ArmAway'], str(partitionNumber))
def arm_max_partition(self, code, partitionNumber):
"""Public method to arm/max a partition."""
self._cachedCode = code
self.send_command(evl_Commands['ArmMax'], str(partitionNumber))
def arm_night_partition(self, code, partitionNumber):
"""Public method to arm/max a partition."""
self.arm_max_partition(code, partitionNumber)
def disarm_partition(self, code, partitionNumber):
"""Public method to disarm a partition."""
self._cachedCode = code
self.send_command(evl_Commands['Disarm'], str(partitionNumber) + str(code))
def panic_alarm(self, panicType):
"""Public method to raise a panic alarm."""
self.send_command(evl_Commands['Panic'], evl_PanicTypes[panicType])
def command_output(self, code, partitionNumber, outputNumber):
"""Used to activate the selected command output"""
self._cachedCode = code
self.send_command(evl_Commands['CommandOutput'], str.format("{0}{1}", partitionNumber, outputNumber))
def parseHandler(self, rawInput):
"""When the envisalink contacts us- parse out which command and data."""
cmd = {}
dataoffset = 0
if rawInput != '':
            if re.match(r'\d\d:\d\d:\d\d\s', rawInput):
dataoffset = dataoffset + 9
code = rawInput[dataoffset:dataoffset+3]
cmd['code'] = code
cmd['data'] = rawInput[dataoffset+3:][:-2]
try:
#Interpret the login command further to see what our handler is.
if evl_ResponseTypes[code]['handler'] == 'login':
if cmd['data'] == '3':
handler = 'login'
elif cmd['data'] == '2':
handler = 'login_timeout'
elif cmd['data'] == '1':
handler = 'login_success'
elif cmd['data'] == '0':
handler = 'login_failure'
cmd['handler'] = "handle_%s" % handler
cmd['callback'] = "callback_%s" % handler
else:
cmd['handler'] = "handle_%s" % evl_ResponseTypes[code]['handler']
cmd['callback'] = "callback_%s" % evl_ResponseTypes[code]['handler']
except KeyError:
_LOGGER.debug(str.format('No handler defined in config for {0}, skipping...', code))
return cmd
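    # Example inputs parseHandler accepts (checksum = last two hex chars of the summed
    # ASCII values of code+data; '5053CD' is a worked example, not captured output):
    #   '5053CD'           -> code '505', data '3' -> handle_login (password request)
    #   '16:55:12 5053CD'  -> same, with the optional timestamp prefix skipped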
def handle_login(self, code, data):
"""When the envisalink asks us for our password- send it."""
self.send_command(evl_Commands['Login'], self._alarmPanel.password)
def handle_login_success(self, code, data):
"""Handler for when the envisalink accepts our credentials."""
super().handle_login_success(code, data)
dt = datetime.datetime.now().strftime('%H%M%m%d%y')
self.send_command(evl_Commands['SetTime'], dt)
self.send_command(evl_Commands['StatusReport'], '')
""" Initiate request for zone bypass information """
self._refreshZoneBypassStatus = True
        if self._zoneBypassRefreshTask is None:
self._zoneBypassRefreshTask = ensure_future(self.dump_zone_bypass_status(), loop=self._eventLoop)
def handle_command_response(self, code, data):
"""Handle the envisalink's initial response to our commands."""
_LOGGER.debug("DSC ack recieved.")
def handle_command_response_error(self, code, data):
"""Handle the case where the DSC passes back a checksum failure."""
_LOGGER.error("The previous command resulted in a checksum failure.")
def handle_poll_response(self, code, data):
"""Handle the response to our keepalive messages."""
self.handle_command_response(code, data)
def handle_zone_state_change(self, code, data):
"""Handle when the envisalink sends us a zone change."""
"""Event 601-610."""
parse = re.match('^[0-9]{3,4}$', data)
if parse:
zoneNumber = int(data[-3:])
self._alarmPanel.alarm_state['zone'][zoneNumber]['status'].update(evl_ResponseTypes[code]['status'])
_LOGGER.debug(str.format("(zone {0}) state has updated: {1}", zoneNumber, json.dumps(evl_ResponseTypes[code]['status'])))
return zoneNumber
else:
_LOGGER.error("Invalid data has been passed in the zone update.")
def handle_partition_state_change(self, code, data):
"""Handle when the envisalink sends us a partition change."""
"""Event 650-674, 652 is an exception, because 2 bytes are passed for partition and zone type."""
if code == '652':
parse = re.match('^[0-9]{2}$', data)
if parse:
partitionNumber = int(data[0])
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update(evl_ArmModes[data[1]]['status'])
_LOGGER.debug(str.format("(partition {0}) state has updated: {1}", partitionNumber, json.dumps(evl_ArmModes[data[1]]['status'])))
return partitionNumber
else:
_LOGGER.error("Invalid data has been passed when arming the alarm.")
else:
parse = re.match('^[0-9]+$', data)
if parse:
partitionNumber = int(data[0])
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update(evl_ResponseTypes[code]['status'])
_LOGGER.debug(str.format("(partition {0}) state has updated: {1}", partitionNumber, json.dumps(evl_ResponseTypes[code]['status'])))
'''Log the user who last armed or disarmed the alarm'''
if code == '700':
lastArmedBy = {'last_armed_by_user': int(data[1:5])}
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update(lastArmedBy)
elif code == '750':
lastDisarmedBy = {'last_disarmed_by_user': int(data[1:5])}
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update(lastDisarmedBy)
if code == '655':
"""Partition was disarmed which means the bypassed zones have likley been reset so force a zone bypass refresh"""
self._refreshZoneBypassStatus = True
return partitionNumber
else:
_LOGGER.error("Invalid data has been passed in the parition update.")
def handle_send_code(self, code, data):
"""The DSC will, depending upon settings, challenge us with the code. If the user passed it in, we'll send it."""
if self._cachedCode is None:
_LOGGER.error("The envisalink asked for a code, but we have no code in our cache.")
else:
self.send_command(evl_Commands['SendCode'], self._cachedCode)
self._cachedCode = None
def handle_keypad_update(self, code, data):
"""Handle general- non partition based info"""
if code == '849':
bits = "{0:016b}".format(int(data,16))
trouble_description = ""
ac_present = True
for i in range(0, 7):
if bits[15-i] == '1':
trouble_description += evl_verboseTrouble[i] + ', '
if i == 1:
ac_present = False
new_status = {'alpha':trouble_description.strip(', '), 'ac_present': ac_present}
else:
new_status = evl_ResponseTypes[code]['status']
for part in self._alarmPanel.alarm_state['partition']:
self._alarmPanel.alarm_state['partition'][part]['status'].update(new_status)
_LOGGER.debug(str.format("(All partitions) state has updated: {0}", json.dumps(new_status)))
def handle_zone_bypass_update(self, code, data):
""" Handle zone bypass update triggered when *1 is used on the keypad """
self._refreshZoneBypassStatus = False
if len(data) == 16:
for byte in range(8):
bypassBitfield = int('0x' + data[byte * 2] + data[(byte * 2) + 1], 0)
for bit in range(8):
zoneNumber = (byte * 8) + bit + 1
bypassed = (bypassBitfield & (1 << bit) != 0)
self._alarmPanel.alarm_state['zone'][zoneNumber]['bypassed'] = bypassed
_LOGGER.debug(str.format("(zone {0}) bypass state has updated: {1}", zoneNumber, bypassed))
else:
_LOGGER.error(str.format("Invalid data length ({0}) has been received in the bypass update.", len(data)))
async def dump_zone_bypass_status(self):
""" Loop requesting zone bypass status until the command succeeds.
This is necessary to deal with TPI/DSC buffer overrun errors.
Ideally all commands would be queued with a retry mechanism when BUSY or BUFFER_OVERRUN is received back"""
while not self._shutdown:
if self._loggedin and self._refreshZoneBypassStatus:
""" Trigger a 616 'Bypassed Zones Bitfield Dump' to initialize the bypass state """
self.keypresses_to_partition(1, "*1#")
await asyncio.sleep(5, loop=self._eventLoop)
|
418536
|
from .Auth import Auth
from .Csrf import Csrf
from .Sign import Sign
from .MustVerifyEmail import MustVerifyEmail
|
418544
|
import random as rnd
import statistics as stat
import matplotlib.pyplot as plt
import numpy as np
import math
Avg_IAT = 1.0 # Average Inter-Arrival Time
Avg_ST = 0.5 # Average Service Time
Num_Sim_Pkts = 10000 # Number of Simulated Packets
Infinity = math.inf             # Sentinel meaning "no departure event scheduled"
N = 0.0 # Number of customers in the system
clock = 0.0 # Current Simulation Time
count = 0 # Count Packets
R = 5 # Number of simulation runs (i.e., replications)
Arr_Time = 0.0 # Time of the next arrival event
Dep_Time = Infinity # Time of the next departure event
Arr_Time_Out_Var = [] # Output variable for collecting arrival times
Dep_Time_Out_Var = [] # Output variable for collecting departure times
Delay = np.zeros( (R, Num_Sim_Pkts) )
for r in range(R):
while count < Num_Sim_Pkts:
if Arr_Time < Dep_Time: # Arrival Event
clock = Arr_Time
Arr_Time_Out_Var.append(clock)
N = N + 1.0
Arr_Time = clock + rnd.expovariate(1.0/Avg_IAT)
if N == 1:
Dep_Time = clock + rnd.expovariate(1.0/Avg_ST)
else: # Departure Event
clock = Dep_Time
Dep_Time_Out_Var.append(clock)
N = N - 1.0
count = count + 1 # Packet Simulated
if N > 0:
Dep_Time = clock + rnd.expovariate(1.0/Avg_ST)
else:
Dep_Time = Infinity
for i in range(Num_Sim_Pkts):
d = Dep_Time_Out_Var[i] - Arr_Time_Out_Var[i]
Delay[r, i] = d
# Initialize for next simulation run
Arr_Time = 0.0
Dep_Time = Infinity
N = 0.0
clock = 0.0
count = 0
Arr_Time_Out_Var = []
Dep_Time_Out_Var = []
#------------------------------------------------------------
# Average
Z = []
for i in range(Num_Sim_Pkts):
Z.append( sum(Delay[:,i]) / R )
#-----------------------------------------------------------
# Running (cumulative) average of the replication-averaged delays
H = []
H.append(Z[0])
for i in range(Num_Sim_Pkts):
j = i + 1
H.append( sum(Z[0:j]) / j )
#-----------------------------------------------------------
# Statistics
print('Mean of the Untruncated Sequence: ', stat.mean(H))
print('Mean of the Truncated Sequence: ', stat.mean(H[4000:6000]))
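# Truncation drops the warm-up (initialization-bias) portion of the run, in the spirit
# of Welch's procedure; the window 4000:6000 is a hand-picked steady-state region.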
#-----------------------------------------------------------
x1 = [i for i in range(len(H))]
plt.plot(x1, H, label="H")
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.show()
|
418565
|
import zipfile
import sys
import paradoxparser
import datetime
TECH_SCORE_MULTIPLIER = 10
ACCUMULATED_ENERGY_MULTIPLIER = 0.1
ACCUMULATED_MINERALS_MULTIPLIER = 0.05
ACCUMULATED_INFLUENCE_MULTIPLIER = 0.05
ENERGY_PRODUCTION_MULTIPLIER = 2
MINERAL_PRODUCTION_MULTIPLIER = 1.5
INFLUENCE_PRODUCTION_MULTIPLIER = 1
NUM_SUBJECTS_MULTIPLIER = 30
MILITARYPOWER_MULTIPLIER = 0.03
NUM_COLONIES_MULTIPLIER = 15
NUM_PLANETS_MULTIPLIER = 0.01
class Country:
def __init__(self):
self.name = ''
self.score = 0
self.techscore = 0
self.currentenergy = 0
self.currentminerals = 0
self.currentinfluence = 0
self.energyproduction = 0
self.mineralproduction = 0
self.influenceproduction = 0
self.physicsResearch = 0
self.societyResearch = 0
self.engineeringResearch = 0
self.population = 0
self.numsubjects = 0
self.militarypower = 0
self.numcolonies = 0
self.numplanets = 0
self.numarmies = 0
self.type = ''
self.id = '0'
def calcscore(self):
self.score += TECH_SCORE_MULTIPLIER * self.techscore
self.score += ACCUMULATED_ENERGY_MULTIPLIER * self.currentenergy
self.score += ACCUMULATED_MINERALS_MULTIPLIER * self.currentminerals
self.score += ACCUMULATED_INFLUENCE_MULTIPLIER * self.currentinfluence
self.score += ENERGY_PRODUCTION_MULTIPLIER * self.energyproduction
self.score += MINERAL_PRODUCTION_MULTIPLIER * self.mineralproduction
self.score += INFLUENCE_PRODUCTION_MULTIPLIER * self.influenceproduction
self.score += NUM_SUBJECTS_MULTIPLIER * self.numsubjects
self.score += MILITARYPOWER_MULTIPLIER * self.militarypower
self.score += NUM_COLONIES_MULTIPLIER * self.numcolonies
self.score += NUM_PLANETS_MULTIPLIER * self.numplanets
def _getResearchPenalty(self):
return 0.1 * max(0, self.numcolonies -1) + 0.01 * max(0, self.population-10)
def getPhysicsResearchWithPenalty(self):
return self.physicsResearch / (1 + self._getResearchPenalty())
def getSocietyResearchWithPenalty(self):
return self.societyResearch / (1 + self._getResearchPenalty())
def getEngineeringResearchWithPenalty(self):
return self.engineeringResearch / (1 + self._getResearchPenalty())
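# Worked example of the research penalty: with 3 colonies and 20 pops,
# penalty = 0.1*max(0, 3-1) + 0.01*max(0, 20-10) = 0.3, so effective
# research = raw_research / 1.3.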
def getMatchedScope(text, scopeName):
countries = text[text.find(scopeName+'={'):]
t = 1
instring = False
for country_key_value_pair in range(len(scopeName+'={') + 1, len(countries)):
if countries[country_key_value_pair] == '{' and not instring:
if (t == 1):
k = countries[country_key_value_pair-1]
j = country_key_value_pair-1
while(k != '\t'):
j -= 1
k = countries[j]
t += 1
elif countries[country_key_value_pair] == '}' and not instring:
t -= 1
elif countries[country_key_value_pair] == '"':
instring = not instring
if (t == 0):
countries = countries[:country_key_value_pair+1]
break
result = paradoxparser.psr.parse(countries)
return result
def makeLedgerForSave(path, basePath):
save = zipfile.ZipFile(path)
f = save.open('gamestate')
s = str(f.read(), 'utf-8')
f.close()
playertaglocation = s.find('player={')
playertag = s[playertaglocation:s.find('}', playertaglocation)]
playercountry = playertag[playertag.find('country=')+len('country='):playertag.find('}')].strip()
country_raw_data = getMatchedScope(s,"country")[0][1]
planets = getMatchedScope(s,"planet")[0][1]
ret = ''
retlist = []
contactlist = []
num = 1
for i in country_raw_data:
if (i[1] != 'none'):
ret2 = ''
isUs = False
if (i[0] == playercountry):
isUs = True
contactlist.append(i[0])
relman_part = paradoxparser.paradox_dict_get_child_by_name(i[1], 'relations_manager')
if (relman_part is not None):
for j in relman_part:
countryid = paradoxparser.paradox_dict_get_child_by_name(j[1], 'country')
commun = paradoxparser.paradox_dict_get_child_by_name(j[1], 'communications')
if (commun != None):
contactlist.append(countryid)
country = Country()
country.id = i[0]
namepart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'name')
if (namepart is not None):
country.name = namepart.replace('"', '')
techpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'tech_status')
if (techpart is not None):
country.techscore = sum(int(j[1]) for j in techpart if j[0] == 'level')
militarypowerpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'military_power')
if (militarypowerpart is not None):
country.militarypower = float(militarypowerpart)
empiretype = paradoxparser.paradox_dict_get_child_by_name(i[1], 'type')
if (empiretype is not None):
country.type = empiretype.replace('"', '')
if (country.type not in ('fallen_empire', 'default')):
continue
subjectpart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'subjects')
if (subjectpart is not None):
country.numsubjects = len(subjectpart)
armiespart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'owned_armies')
if (armiespart is not None):
country.numarmies = len(armiespart)
planetsspart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'controlled_planets')
if (planetsspart is not None):
country.numplanets = len(planetsspart)
controlledplanetsspart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'owned_planets')
if (controlledplanetsspart is not None):
country.numcolonies = len(controlledplanetsspart)
country.population = 0
for planetId in controlledplanetsspart:
planetObject=planets[int(planetId)][1]
popObject= next((x[1] for x in planetObject if x[0]=='pop'),None)
# if the planet is under colonization, it doesn't have pop key.
if(popObject is not None):
country.population+=len(popObject)
modulespart = paradoxparser.paradox_dict_get_child_by_name(i[1], 'modules')
if (modulespart is not None):
economymodule = paradoxparser.paradox_dict_get_child_by_name(modulespart, 'standard_economy_module')
if (economymodule is not None):
resourcesmodule = paradoxparser.paradox_dict_get_child_by_name(economymodule, 'resources')
if (resourcesmodule is not None):
energy = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, 'energy')
if (energy is not None):
if (type(energy) == str):
country.currentenergy = float(energy)
else:
country.currentenergy = float(energy[0])
minerals = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, 'minerals')
if (minerals is not None):
if (type(minerals) == str):
country.currentminerals = float(minerals)
else:
country.currentminerals = float(minerals[0])
influence = paradoxparser.paradox_dict_get_child_by_name(resourcesmodule, 'influence')
if (influence is not None):
if (type(influence) == str):
country.currentinfluence = float(influence)
else:
country.currentinfluence = float(influence[0])
lastmonthmodule = paradoxparser.paradox_dict_get_child_by_name(economymodule, 'last_month')
if (lastmonthmodule is not None):
energy = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'energy')
if (energy is not None):
if (type(energy) == str):
country.energyproduction = float(energy)
else:
country.energyproduction = float(energy[0])
minerals = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'minerals')
if (minerals is not None):
if (type(minerals) == str):
country.mineralproduction = float(minerals)
else:
country.mineralproduction = float(minerals[0])
influence = paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'influence')
if (influence is not None):
if (type(influence) == str):
country.influenceproduction = float(influence)
else:
country.influenceproduction = float(influence[0])
physicsResearch=paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'physics_research')
if(physicsResearch is not None):
if (type(physicsResearch) == str):
country.physicsResearch = float(physicsResearch)
else:
country.physicsResearch = float(physicsResearch[0])
societyResearch=paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'society_research')
if(societyResearch is not None):
if (type(societyResearch) == str):
country.societyResearch = float(societyResearch)
else:
country.societyResearch = float(societyResearch[0])
engineeringResearch=paradoxparser.paradox_dict_get_child_by_name(lastmonthmodule, 'engineering_research')
if(engineeringResearch is not None):
if (type(engineeringResearch) == str):
country.engineeringResearch = float(engineeringResearch)
else:
country.engineeringResearch = float(engineeringResearch[0])
country.calcscore()
ret2 += '<tr>'
ret2 += '<td>%s</td>' % num
if (isUs):
ret2 += '<td hiddenvalue=%s>★</td>' % num
else:
ret2 += '<td hiddenvalue=%s> </td>' % num
ret2 += '<td class="name">%s</td>' % country.name
ret2 += '<td>{:10.0f}</td>'.format(country.score).strip()
ret2 += '<td>{:10.0f}</td>'.format(country.militarypower)
ret2 += '<td>%d</td>' % country.techscore
ret2 += '<td>%d</td>' % country.numcolonies
ret2 += '<td>%d</td>' % country.numplanets
ret2 += '<td>%d</td>' % country.numsubjects
production = ('{:10.0f}'.format(country.energyproduction)).strip()
if (country.energyproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentenergy) + netincome
production = ('{:10.0f}'.format(country.mineralproduction)).strip()
if (country.mineralproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentminerals) + netincome
production = ('{:10.1f}'.format(country.influenceproduction)).strip()
if (country.influenceproduction >= 0):
netincome = '<td class="positive">+%s</td>' % production
else:
netincome = '<td class="negative">%s</td>' % production
ret2 += '<td>{:10.0f}</td>'.format(country.currentinfluence) + netincome
ret2 += '<td>%.1f</td>' % country.getPhysicsResearchWithPenalty()
ret2 += '<td>%.1f</td>' % country.getSocietyResearchWithPenalty()
ret2 += '<td>%.1f</td>' % country.getEngineeringResearchWithPenalty()
ret2 += '<td>%d</td>' % country.population
ret2 += '</tr>'
retlist.append((country.id, ret2))
num += 1
## print(country.name)
## print(country.techscore)
## print(country.militarypower)
## print(country.type)
## print(country.numsubjects)
## print(country.numarmies)
## print(country.numplanets)
## print(country.numcolonies)
## print(country.currentenergy)
## print(country.currentminerals)
## print(country.currentinfluence)
## print(country.energyproduction)
## print(country.mineralproduction)
## print(country.influenceproduction)
retlist2 = []
for i in retlist:
if (i[0] in contactlist):
retlist2.append(i[1])
ret = "\n".join(retlist2)
return ret
|
418610
|
def get_inverse_ub_matrix_from_xparm(handle):
"""Get the inverse_ub_matrix from an xparm file handle
Params:
handle The file handle
Returns:
The inverse_ub_matrix
"""
from scitbx import matrix
return matrix.sqr(
handle.unit_cell_a_axis + handle.unit_cell_b_axis + handle.unit_cell_c_axis
)
def get_space_group_type_from_xparm(handle):
"""Get the space group tyoe object from an xparm file handle
Params:
handle The file handle
Returns:
The space group type object
"""
from cctbx import sgtbx
return sgtbx.space_group_type(
sgtbx.space_group(sgtbx.space_group_symbols(handle.space_group).hall())
)
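# Usage sketch (handle is whatever xparm reader the caller already has; any object
# exposing the attribute names used above would do):
#   ub_inv = get_inverse_ub_matrix_from_xparm(handle)   # 3x3 scitbx matrix
#   sg_type = get_space_group_type_from_xparm(handle)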
|
418622
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from graphgallery.utils import tqdm
from graphgallery import functional as gf
from graphgallery.attack.targeted import Common
from ..targeted_attacker import TargetedAttacker
from .nettack import compute_alpha, update_Sx, compute_log_likelihood, filter_chisquare
@Common.register()
class GFA(TargetedAttacker):
"""
T=128 for citeseer and pubmed,
T=_N//2 for cora to reproduce results in paper.
"""
def process(self, K=2, T=128, reset=True):
adj, x = self.graph.adj_matrix, self.graph.node_attr
adj_with_I = adj + sp.eye(adj.shape[0])
rowsum = adj_with_I.sum(1).A1
degree_mat = np.diag(rowsum)
eig_vals, eig_vec = linalg.eigh(adj_with_I.A, degree_mat)
X_mean = np.sum(x, axis=1)
# The order of graph filter K
self.K = K
# Top-T largest eigen-values/vectors selected
self.T = T
self.eig_vals, self.eig_vec = eig_vals, eig_vec
self.X_mean = X_mean
if reset:
self.reset()
return self
def reset(self):
super().reset()
self.modified_adj = self.graph.adj_matrix.tolil(copy=True)
return self
def attack(self,
target,
num_budgets=None,
direct_attack=True,
structure_attack=True,
feature_attack=False,
ll_constraint=False,
ll_cutoff=0.004,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
# Setup starting values of the likelihood ratio test.
degree_sequence_start = self.degree
current_degree_sequence = self.degree.astype('float64')
d_min = 2 # denotes the minimum degree a node needs to have to be considered in the power-law test
S_d_start = np.sum(
np.log(degree_sequence_start[degree_sequence_start >= d_min]))
current_S_d = np.sum(
np.log(current_degree_sequence[current_degree_sequence >= d_min]))
n_start = np.sum(degree_sequence_start >= d_min)
current_n = np.sum(current_degree_sequence >= d_min)
alpha_start = compute_alpha(n_start, S_d_start, d_min)
log_likelihood_orig = compute_log_likelihood(n_start, alpha_start,
S_d_start, d_min)
N = self.num_nodes
if not direct_attack:
# Choose influencer nodes
# influence_nodes = self.graph.adj_matrix[target].nonzero()[1]
influence_nodes = self.graph.adj_matrix[target].indices
# Potential edges are all edges from any attacker to any other node, except the respective
# attacker itself or the node being attacked.
potential_edges = np.row_stack([
np.column_stack((np.tile(infl, N - 2),
np.setdiff1d(np.arange(N),
np.array([target, infl]))))
for infl in influence_nodes
])
else:
# direct attack
potential_edges = np.column_stack(
(np.tile(target, N - 1), np.setdiff1d(np.arange(N), target)))
influence_nodes = np.asarray([target])
for it in tqdm(range(self.num_budgets),
                       desc='Perturbing Graph',
disable=disable):
if not self.allow_singleton:
filtered_edges = gf.singleton_filter(potential_edges,
self.modified_adj)
else:
filtered_edges = potential_edges
if ll_constraint:
# Update the values for the power law likelihood ratio test.
deltas = 2 * (1 - self.modified_adj[tuple(
filtered_edges.T)].toarray()[0]) - 1
d_edges_old = current_degree_sequence[filtered_edges]
d_edges_new = current_degree_sequence[
filtered_edges] + deltas[:, None]
new_S_d, new_n = update_Sx(current_S_d, current_n, d_edges_old,
d_edges_new, d_min)
new_alphas = compute_alpha(new_n, new_S_d, d_min)
new_ll = compute_log_likelihood(new_n, new_alphas, new_S_d,
d_min)
alphas_combined = compute_alpha(new_n + n_start,
new_S_d + S_d_start, d_min)
new_ll_combined = compute_log_likelihood(
new_n + n_start, alphas_combined, new_S_d + S_d_start,
d_min)
new_ratios = -2 * new_ll_combined + 2 * (new_ll +
log_likelihood_orig)
# Do not consider edges that, if added/removed, would lead to a violation of the
                # likelihood ratio Chi-square cutoff value.
powerlaw_filter = filter_chisquare(new_ratios, ll_cutoff)
filtered_edges = filtered_edges[powerlaw_filter]
struct_scores = self.struct_score(self.modified_adj,
self.X_mean,
self.eig_vals,
self.eig_vec,
filtered_edges,
K=self.K,
T=self.T,
lambda_method="nosum")
best_edge_ix = struct_scores.argmax()
u, v = filtered_edges[best_edge_ix] # best edge
while (u, v) in self.adj_flips:
struct_scores[best_edge_ix] = 0
best_edge_ix = struct_scores.argmax()
u, v = filtered_edges[best_edge_ix]
self.modified_adj[(u, v)] = self.modified_adj[(
v, u)] = 1. - self.modified_adj[(u, v)]
self.adj_flips[(u, v)] = 1.0
if ll_constraint:
# Update likelihood ratio test values
current_S_d = new_S_d[powerlaw_filter][best_edge_ix]
current_n = new_n[powerlaw_filter][best_edge_ix]
current_degree_sequence[[
u, v
]] += deltas[powerlaw_filter][best_edge_ix]
return self
@staticmethod
def struct_score(A,
X_mean,
eig_vals,
eig_vec,
filtered_edges,
K,
T,
lambda_method="nosum"):
'''
        Calculate the scores as formulated in the paper.
Parameters
----------
K: int, default: 2
The order of graph filter K.
T: int, default: 128
Selecting the Top-T largest eigen-values/vectors.
lambda_method: "sum"/"nosum", default: "nosum"
            Indicates which loss the scores are calculated from: Equation (8) or Equation (12).
"nosum" denotes Equation (8), where the loss is derived from Graph Convolutional Networks,
"sum" denotes Equation (12), where the loss is derived from Sampling-based Graph Embedding Methods.
Returns
-------
Scores for candidate edges.
'''
results = []
A = A + sp.eye(A.shape[0])
# A[A > 1] = 1
rowsum = A.sum(1).A1
D_min = rowsum.min()
abs_V = len(eig_vals)
return_values = []
for j, (u, v) in enumerate(filtered_edges):
# eig_vals_res = np.zeros(len(eig_vals))
eig_vals_res = (1 - 2 * A[(u, v)]) * (
2 * eig_vec[u, :] * eig_vec[v, :] - eig_vals *
(np.square(eig_vec[u, :]) + np.square(eig_vec[v, :])))
eig_vals_res = eig_vals + eig_vals_res
if lambda_method == "sum":
if K == 1:
eig_vals_res = np.abs(eig_vals_res / K) * (1 / D_min)
else:
for itr in range(1, K):
eig_vals_res = eig_vals_res + np.power(
eig_vals_res, itr + 1)
eig_vals_res = np.abs(eig_vals_res / K) * (1 / D_min)
else:
eig_vals_res = np.square(
(eig_vals_res + np.ones(len(eig_vals_res))))
eig_vals_res = np.power(eig_vals_res, K)
eig_vals_idx = np.argsort(eig_vals_res) # from small to large
eig_vals_k_sum = eig_vals_res[eig_vals_idx[:T]].sum()
u_k = eig_vec[:, eig_vals_idx[:T]]
u_x_mean = u_k.T.dot(X_mean)
return_values.append(eig_vals_k_sum *
np.square(np.linalg.norm(u_x_mean)))
return np.asarray(return_values)
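
# A small illustrative sketch (synthetic graph, not taken from the original source): score a
# few candidate edge flips of a toy 4-node graph with the static `struct_score`, mirroring the
# preprocessing done in `process()`.
if __name__ == "__main__":
    toy_adj = sp.csr_matrix(np.array([[0, 1, 1, 0],
                                      [1, 0, 1, 0],
                                      [1, 1, 0, 1],
                                      [0, 0, 1, 0]], dtype=float))
    toy_x = np.random.rand(4, 8)
    adj_with_I = toy_adj + sp.eye(4)
    rowsum = adj_with_I.sum(1).A1
    eig_vals, eig_vec = linalg.eigh(adj_with_I.A, np.diag(rowsum))
    X_mean = np.sum(toy_x, axis=1)
    candidates = np.array([[0, 3], [1, 3]])  # edges considered for flipping
    scores = GFA.struct_score(toy_adj, X_mean, eig_vals, eig_vec, candidates, K=2, T=4)
    print(scores)  # larger score = more damaging flip according to Equation (8)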
|
418630
|
from discoverlib import geom, graph
import numpy
import math
from multiprocessing import Pool
import os.path
from PIL import Image
import random
import scipy.ndimage
import sys
import time
def graph_filter_edges(g, bad_edges):
print 'filtering {} edges'.format(len(bad_edges))
ng = graph.Graph()
vertex_map = {}
for vertex in g.vertices:
vertex_map[vertex] = ng.add_vertex(vertex.point)
for edge in g.edges:
if edge not in bad_edges:
nedge = ng.add_edge(vertex_map[edge.src], vertex_map[edge.dst])
if hasattr(edge, 'prob'):
nedge.prob = edge.prob
return ng
def get_reachable_points(im, point, value_threshold, distance_threshold):
points = set()
search = set()
r = geom.Rectangle(geom.Point(0, 0), geom.Point(im.shape[0]-1, im.shape[1]-1))
search.add(point)
for _ in xrange(distance_threshold):
next_search = set()
for point in search:
for offset in [(-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]:
adj_point = point.add(geom.Point(offset[0], offset[1]))
if r.contains(adj_point) and adj_point not in points and im[adj_point.x, adj_point.y] >= value_threshold:
points.add(adj_point)
next_search.add(adj_point)
search = next_search
return points
def count_adjacent(skeleton, point):
r = geom.Rectangle(geom.Point(0, 0), geom.Point(skeleton.shape[0], skeleton.shape[1]))
count = 0
for offset in [(-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]:
adj_point = point.add(geom.Point(offset[0], offset[1]))
if skeleton[adj_point.x, adj_point.y] > 0:
count += 1
return count
def distance_from_value(value):
return 1.1**max(30-value, 0)
def get_shortest_path(im, src, max_distance):
r = geom.Rectangle(geom.Point(0, 0), geom.Point(im.shape[0], im.shape[1]))
in_r = r.add_tol(-1)
seen_points = set()
distances = {}
prev = {}
dst = None
distances[src] = 0
while len(distances) > 0:
closest_point = None
closest_distance = None
for point, distance in distances.items():
if closest_point is None or distance < closest_distance:
closest_point = point
closest_distance = distance
del distances[closest_point]
seen_points.add(closest_point)
if closest_distance > max_distance:
break
elif not in_r.contains(closest_point):
dst = closest_point
break
for offset in [(-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]:
adj_point = closest_point.add(geom.Point(offset[0], offset[1]))
if r.contains(adj_point) and adj_point not in seen_points:
distance = closest_distance + distance_from_value(im[adj_point.x, adj_point.y])
if adj_point not in distances or distance < distances[adj_point]:
distances[adj_point] = distance
prev[adj_point] = closest_point
if dst is None:
return
return dst
def get_segment_confidence(segment, im):
def get_value(p):
p = p.scale(0.5)
sx = max(0, p.x-1)
sy = max(0, p.y-1)
ex = min(im.shape[0], p.x+2)
ey = min(im.shape[1], p.y+2)
return im[sx:ex, sy:ey].max()
values = []
for i in xrange(0, int(segment.length()), 2):
p = segment.point_at_factor(i)
values.append(get_value(p))
return numpy.mean(values)
def get_rs_confidence(rs, im):
def get_value(p):
p = p.scale(0.5)
sx = max(0, p.x-1)
sy = max(0, p.y-1)
ex = min(im.shape[0], p.x+2)
ey = min(im.shape[1], p.y+2)
return im[sx:ex, sy:ey].max()
values = []
for i in xrange(0, int(rs.length()), 2):
p = rs.point_at_factor(i)
values.append(get_value(p))
return numpy.mean(values)
def connect_up(g, im, threshold=40.0):
# connect road segments to projection
bad_edges = set()
updated_vertices = set()
road_segments, edge_to_rs = graph.get_graph_road_segments(g)
edgeIdx = g.edgeIndex()
add_points = []
for rs in road_segments:
for vertex in [rs.src(), rs.dst()]:
if len(vertex.out_edges) > 1 or vertex in updated_vertices:
continue
vector = vertex.in_edges[0].segment().vector()
vector = vector.scale(threshold / vector.magnitude())
best_edge = None
best_point = None
best_distance = None
for edge in edgeIdx.search(vertex.point.bounds().add_tol(threshold)):
if edge in rs.edges or edge in rs.get_opposite_rs(edge_to_rs).edges:
continue
s1 = edge.segment()
s2 = geom.Segment(vertex.point, vertex.point.add(vector))
p = s1.intersection(s2)
if p is None:
# maybe still connect if both edges are roughly the same angle, and vector connecting them would also be similar angle
p = edge.src.point
if vertex.point.distance(p) >= threshold:
continue
v1 = s1.vector()
v2 = p.sub(vertex.point)
if abs(v1.signed_angle(vector)) > math.pi / 4 or abs(v2.signed_angle(vector)) > math.pi / 4:
continue
elif get_segment_confidence(geom.Segment(vertex.point, p), im) < 55:
continue
if p is not None and (best_edge is None or vertex.point.distance(p) < best_distance):
best_edge = edge
best_point = p
best_distance = vertex.point.distance(p)
if best_edge is not None:
#print '*** insert new vertex at {} from {} with {}'.format(best_point, vertex.point, best_edge.segment())
bad_edges.add(best_edge)
add_points.append((best_point, [best_edge.src, best_edge.dst, vertex]))
updated_vertices.add(vertex)
for t in add_points:
nv = g.add_vertex(t[0])
for v in t[1]:
g.add_bidirectional_edge(nv, v)
return graph_filter_edges(g, bad_edges)
def cleanup_all(graph_fname, im_fname, cleaned_fname):
g = graph.read_graph(graph_fname)
im = numpy.swapaxes(scipy.ndimage.imread(im_fname), 0, 1)
r = geom.Rectangle(geom.Point(0, 0), geom.Point(1300, 1300))
small_r = r.add_tol(-20)
# filter lousy road segments
road_segments, _ = graph.get_graph_road_segments(g)
bad_edges = set()
for rs in road_segments:
if rs.length() < 80 and (len(rs.src().out_edges) < 2 or len(rs.dst().out_edges) < 2) and small_r.contains(rs.src().point) and small_r.contains(rs.dst().point):
bad_edges.update(rs.edges)
elif rs.length() < 400 and len(rs.src().out_edges) < 2 and len(rs.dst().out_edges) < 2 and small_r.contains(rs.src().point) and small_r.contains(rs.dst().point):
bad_edges.update(rs.edges)
ng = graph_filter_edges(g, bad_edges)
# connect road segments to the image edge
road_segments, _ = graph.get_graph_road_segments(ng)
segments = [
geom.Segment(geom.Point(0, 0), geom.Point(1300, 0)),
geom.Segment(geom.Point(0, 0), geom.Point(0, 1300)),
geom.Segment(geom.Point(1300, 1300), geom.Point(1300, 0)),
geom.Segment(geom.Point(1300, 1300), geom.Point(0, 1300)),
]
big_r = r.add_tol(-2)
small_r = r.add_tol(-40)
for rs in road_segments:
for vertex in [rs.src(), rs.dst()]:
if len(vertex.out_edges) == 1 and big_r.contains(vertex.point) and not small_r.contains(vertex.point):
'''d = min([segment.distance(vertex.point) for segment in segments])
dst = get_shortest_path(im, vertex.point.scale(0.5), max_distance=d*9)
if dst is None:
break
if dst is not None:
nv = ng.add_vertex(dst.scale(2))
ng.add_bidirectional_edge(vertex, nv)
print '*** add edge {} to {}'.format(vertex.point, nv.point)'''
'''closest_segment = None
closest_distance = None
for segment in segments:
d = segment.distance(vertex.point)
if closest_segment is None or d < closest_distance:
closest_segment = segment
closest_distance = d'''
for closest_segment in segments:
vector = vertex.in_edges[0].segment().vector()
vector = vector.scale(40.0 / vector.magnitude())
s = geom.Segment(vertex.point, vertex.point.add(vector))
p = s.intersection(closest_segment)
if p is not None:
nv = ng.add_vertex(p)
ng.add_bidirectional_edge(vertex, nv)
break
ng = connect_up(ng, im)
ng.save(cleaned_fname)
if __name__ == '__main__':
in_dir = sys.argv[1]
tile_dir = sys.argv[2]
out_dir = sys.argv[3]
fnames = [fname.split('.pix.graph')[0] for fname in os.listdir(in_dir) if '.pix.graph' in fname]
for fname in fnames:
cleanup_all('{}/{}.pix.graph'.format(in_dir, fname), '{}/{}.png'.format(tile_dir, fname), '{}/{}.graph'.format(out_dir, fname))
|
418643
|
import os
from weasyprint import urls
from bs4 import BeautifulSoup
# check if href is relative --
# if it is relative it *should* be an html that generates a PDF doc
def is_doc(href: str):
tail = os.path.basename(href)
_, ext = os.path.splitext(tail)
absurl = urls.url_is_absolute(href)
abspath = os.path.isabs(href)
htmlfile = ext.startswith('.html')
if absurl or abspath or not htmlfile:
return False
return True
def rel_pdf_href(href: str):
head, tail = os.path.split(href)
filename, _ = os.path.splitext(tail)
internal = href.startswith('#')
if not is_doc(href) or internal:
return href
return urls.iri_to_uri(os.path.join(head, filename + '.pdf'))
def abs_asset_href(href: str, base_url: str):
if urls.url_is_absolute(href) or os.path.isabs(href):
return href
return urls.iri_to_uri(urls.urljoin(base_url, href))
# makes all relative asset links absolute
def replace_asset_hrefs(soup: BeautifulSoup, base_url: str):
for link in soup.find_all('link', href=True):
link['href'] = abs_asset_href(link['href'], base_url)
for asset in soup.find_all(src=True):
asset['src'] = abs_asset_href(asset['src'], base_url)
return soup
# normalize href to site root
def normalize_href(href: str, rel_url: str):
# foo/bar/baz/../../index.html -> foo/index.html
def reduce_rel(x):
try:
i = x.index('..')
            if i == 0:
return x
del x[i]
del x[i - 1]
return reduce_rel(x)
except ValueError:
return x
rel_dir = os.path.dirname(rel_url)
href = str.split(os.path.join(rel_dir, href), '/')
href = reduce_rel(href)
href[-1], _ = os.path.splitext(href[-1])
return os.path.join(*href)
def get_body_id(url: str):
section, _ = os.path.splitext(url)
return '{}:'.format(section)
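
# Illustrative checks (the example paths below are made up, not taken from any real docs):
if __name__ == '__main__':
    print(rel_pdf_href('guide/install.html'))                        # guide/install.pdf
    print(normalize_href('../img/logo.png', 'guide/install.html'))  # img/logo
    print(get_body_id('guide/install.html'))                        # guide/install: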
|
418648
|
import astor
import z3
from .to_z3 import Z3Converter, get_minimum_dt_of_several_anonymous
from crestdsl import sourcehelper as SH
from .epsilon import Epsilon
import logging
logger = logging.getLogger(__name__)
def get_behaviour_change_dt_from_constraintset(solver, constraints, dt, ctx=z3.main_ctx()):
times = {cs: cs.check_behaviour_change(solver, dt, ctx) for cs in constraints}
times = {cs: time for cs, time in times.items() if time is not None}
if len(times) > 0:
minimum = min(times, key=times.get)
return times[minimum], minimum.label
else:
return None, None
class ConstraintSet(object):
def __init__(self, constraints_until_condition, condition):
self.constraints_until_condition = constraints_until_condition
self.condition = condition
self.label = ""
def translate_to_context(self, ctx):
condition = self.condition.translate(ctx)
constraints = [c.translate(ctx) for c in self.constraints_until_condition]
translated = ConstraintSet(constraints, condition)
translated.label = self.label
return translated
def set_label(self, label):
self.label = label
def check_behaviour_change(self, solver, dt, ctx):
"""
Returns either a numeric (Epsilon) value, or None.
Epsilon states the time until the constraint is solvable.
"""
condition = self.condition
constraints = self.constraints_until_condition
if ctx != condition.ctx: # the wrong context, translate to the correct one
condition = self.condition.translate(ctx)
constraints = [c.translate(ctx) for c in self.constraints_until_condition]
solver.push() # initial solver point (#1)
solver.add(constraints)
solver.push() # (#2)
solver.add(condition)
solver.add(dt == 0)
check = solver.check() == z3.sat
logger.debug(f"The {self.label} is currently {check}")
solver.pop() # (#2)
""" Let's see if we can change something by just passing time """
solver.push() # new backtracking point (#3)
solver.add(dt > 0) # time needs to pass
# flip it
if check:
solver.add(z3.Not(condition)) # currently sat, check if time can make it unsat
else: # currently not sat
solver.add(condition) # check if time can make it sat
objective = solver.minimize(dt) # get the minimum
returnvalue = None
if solver.check() == z3.sat:
logger.debug(f"The condition evaluation can change though with a dt of: {objective.value()}")
logger.debug(solver.model())
# epsilonify
inf_coeff, numeric_coeff, eps_coeff = objective.lower_values()
returnvalue = Epsilon(numeric_coeff, eps_coeff)
else:
logger.debug(f"The condition evaluation cannot change by passing of time")
solver.pop() # pop the second backtracking point (#3)
solver.pop() # final pop initial solver point (#1)
return returnvalue
class Z3ConditionChangeCalculator(Z3Converter):
def __init__(self, z3_vars, entity, container, use_integer_and_real=True):
super().__init__(z3_vars, entity, container, use_integer_and_real)
# self.to_z3 = copy.deepcopy(self.__class__.to_z3)
# self.to_z3.register(list, self.to_z3_list)
# self.to_z3.register(ast.If, self.to_z3_astIf)
# self.dts = [] # remove me
# self.dts_eps = []
self.all_constraints = []
self.constraint_sets_to_check = []
def calculate_constraints(self, function):
self.to_z3(function)
return self.constraint_sets_to_check
def to_z3_list(self, obj):
""" in a list, convert every one of the parts individually"""
constraints = []
for stmt in obj:
new_constraint = self.to_z3(stmt)
if isinstance(new_constraint, str):
continue
if new_constraint is None:
continue # skip if nothing happened (e.g. for print expressions or just a comment string)
# logger.info(f"adding {new_constraint}")
if isinstance(new_constraint, list):
constraints.extend(new_constraint)
self.all_constraints.extend(new_constraint)
else:
constraints.append(new_constraint)
self.all_constraints.append(new_constraint)
return constraints
# TODO: to be fully tested
def to_z3_astIfExp(self, obj):
""" a if b else c"""
condition = self.to_z3(obj.test)
condition_type = self.resolve_type(obj.test)
condition_cast = self.cast(condition, condition_type, BOOL)
cs = ConstraintSet(self.all_constraints.copy(), condition_cast)
cs.set_label(f"If-Expression at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
all_constraints_backup = self.all_constraints.copy() # save the state before exploring
        self.all_constraints.append(condition_cast)  # condition to get into the then-branch
        body = self.to_z3(obj.body)  # explores the then-branch and creates the constraints
        self.all_constraints = all_constraints_backup.copy()  # reset
        self.all_constraints.append(z3.Not(condition_cast))  # condition to get into the else-branch
orelse = self.to_z3(obj.orelse)
self.all_constraints = all_constraints_backup.copy() # reset again
then_type = self.resolve_type(obj.body)
else_type = self.resolve_type(obj.orelse)
target_type = self.resolve_two_types(then_type, else_type)
ret_val = z3.If(condition_cast,
self.cast(body, then_type, target_type),
self.cast(orelse, else_type, target_type)
)
return ret_val
# TODO: to be fully tested
def to_z3_astIf(self, obj):
test = self.to_z3(obj.test)
cs = ConstraintSet(self.all_constraints.copy(), z3.And(test))
cs.set_label(f"If-Condition at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
all_constraints_backup = self.all_constraints.copy() # save the state before exploring
body_ins, else_ins = self.get_astIf_ins(obj)
self.all_constraints.append(test)
self.all_constraints.extend(body_ins)
body = self.to_z3(obj.body) # explores the then-branch and creates the constraints
orelse = []
if obj.orelse:
self.all_constraints = all_constraints_backup.copy() # reset
self.all_constraints.append(z3.Not(test)) # condition to get in here
self.all_constraints.extend(else_ins)
orelse = self.to_z3(obj.orelse)
self.all_constraints = all_constraints_backup.copy() # reset again
# standard behaviour (unfortunately we have to copy)
body_outs = []
else_outs = []
ifstmt = z3.If(test,
z3.And(body_ins + body + body_outs),
z3.And(else_ins + orelse + else_outs))
return ifstmt
def to_z3_astCall(self, obj):
func_name = SH.get_attribute_string(obj.func)
if func_name == "min":
val1 = self.to_z3(obj.args[0])
val2 = self.to_z3(obj.args[1])
test = val1 <= val2
cs = ConstraintSet(self.all_constraints.copy(), z3.And(test))
cs.set_label(f"min function at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
return super().to_z3_astCall(obj)
if func_name == "max":
val1 = self.to_z3(obj.args[0])
val2 = self.to_z3(obj.args[1])
test = val1 >= val2
cs = ConstraintSet(self.all_constraints.copy(), z3.And(test))
cs.set_label(f"Max function at Line #{obj.lineno}")
self.constraint_sets_to_check.append(cs)
return super().to_z3_astCall(obj)
logger.error("You will probably see wrong results, because the analysis does not work for function calls yet.")
return super().to_z3_astCall(obj)
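
# A hypothetical, self-contained sketch of the ConstraintSet behaviour-change check
# (the variables x and dt are illustrative only; a real run would use constraints collected
# by Z3ConditionChangeCalculator from a crestdsl entity). Note that the module's relative
# imports mean this only executes when used inside the crestdsl package.
if __name__ == "__main__":
    opt = z3.Optimize()
    dt = z3.Real("dt")
    x = z3.Real("x")
    cs = ConstraintSet([x == 3], x + dt >= 5)  # condition becomes satisfiable once dt >= 2
    cs.set_label("demo condition")
    min_dt, label = get_behaviour_change_dt_from_constraintset(opt, [cs], dt)
    print(label, min_dt)  # expected: "demo condition" with a dt of 2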
|
418652
|
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
import pickle
data = load_boston()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = pd.Series(data.target, name='target')
print('Column order:', list(X.columns))
X.sample(1, random_state=0).iloc[0].to_json('example.json')
model = LinearRegression()
model.fit(X, y)
with open('ml-model.pkl', 'wb') as f:
pickle.dump(model, f)
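
# A follow-up sketch (assumed to live in the same script, reusing the file names above):
# reload the pickled model and score the saved example row, preserving the training
# column order before calling predict.
with open('ml-model.pkl', 'rb') as f:
    loaded_model = pickle.load(f)
example = pd.read_json('example.json', typ='series')
print('Prediction for example row:', loaded_model.predict(example[list(X.columns)].to_frame().T))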
|
418670
|
from simple_NER.annotators.remote.dbpedia import SpotlightNER
# you can also self host
host='http://api.dbpedia-spotlight.org/en/annotate'
ner = SpotlightNER(host)
for r in ner.extract_entities("London was founded by the Romans"):
print(r.value, r.entity_type, r.uri)
score = r.similarityScore
"""
London Wikidata:Q515 http://dbpedia.org/resource/London
London Wikidata:Q486972 http://dbpedia.org/resource/London
London Schema:Place http://dbpedia.org/resource/London
London Schema:City http://dbpedia.org/resource/London
London DBpedia:Settlement http://dbpedia.org/resource/London
London DBpedia:PopulatedPlace http://dbpedia.org/resource/London
London DBpedia:Place http://dbpedia.org/resource/London
London DBpedia:Location http://dbpedia.org/resource/London
London DBpedia:City http://dbpedia.org/resource/London
Romans Wikidata:Q6256 http://dbpedia.org/resource/Ancient_Rome
Romans Schema:Place http://dbpedia.org/resource/Ancient_Rome
Romans Schema:Country http://dbpedia.org/resource/Ancient_Rome
Romans DBpedia:PopulatedPlace http://dbpedia.org/resource/Ancient_Rome
Romans DBpedia:Place http://dbpedia.org/resource/Ancient_Rome
Romans DBpedia:Location http://dbpedia.org/resource/Ancient_Rome
Romans DBpedia:Country http://dbpedia.org/resource/Ancient_Rome
"""
|